author      stevel@tonic-gate <none@none>    2005-06-14 00:00:00 -0700
committer   stevel@tonic-gate <none@none>    2005-06-14 00:00:00 -0700
commit      7c478bd95313f5f23a4c958a745db2134aa03244 (patch)
tree        c871e58545497667cbb4b0a4f2daf204743e1fe7 /usr/src/cmd/svc
download    illumos-gate-7c478bd95313f5f23a4c958a745db2134aa03244.tar.gz
OpenSolaris Launch
Diffstat (limited to 'usr/src/cmd/svc')
-rw-r--r--usr/src/cmd/svc/Makefile80
-rw-r--r--usr/src/cmd/svc/Makefile.ctf35
-rw-r--r--usr/src/cmd/svc/common/configd_exit.h52
-rw-r--r--usr/src/cmd/svc/common/manifest_hash.c492
-rw-r--r--usr/src/cmd/svc/common/manifest_hash.h58
-rw-r--r--usr/src/cmd/svc/configd/Makefile140
-rw-r--r--usr/src/cmd/svc/configd/backend.c1950
-rw-r--r--usr/src/cmd/svc/configd/client.c2212
-rw-r--r--usr/src/cmd/svc/configd/configd.c701
-rw-r--r--usr/src/cmd/svc/configd/configd.h743
-rw-r--r--usr/src/cmd/svc/configd/file_object.c2174
-rw-r--r--usr/src/cmd/svc/configd/maindoor.c190
-rw-r--r--usr/src/cmd/svc/configd/object.c559
-rw-r--r--usr/src/cmd/svc/configd/rc_node.c5345
-rw-r--r--usr/src/cmd/svc/configd/restore_repository.sh334
-rw-r--r--usr/src/cmd/svc/configd/snapshot.c271
-rw-r--r--usr/src/cmd/svc/configd/sqlite/Makefile305
-rw-r--r--usr/src/cmd/svc/configd/sqlite/inc.flg9
-rw-r--r--usr/src/cmd/svc/configd/sqlite/llib-lsqlite12
-rw-r--r--usr/src/cmd/svc/configd/sqlite/main.mk448
-rw-r--r--usr/src/cmd/svc/configd/sqlite/mapfile-sqlite64
-rw-r--r--usr/src/cmd/svc/configd/sqlite/sqlite-misc.h24
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/attach.c314
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/auth.c222
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/btree.c3593
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/btree.h159
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/btree_rb.c1491
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/build.c2159
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/config.h27
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/copy.c113
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/date.c878
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/delete.c396
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/encode.c257
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/expr.c1665
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/func.c661
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/hash.c359
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/hash.h112
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/insert.c922
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/main.c1146
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/md5.c388
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/os.c1848
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/os.h194
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/pager.c2229
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/pager.h110
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/parse.y900
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/pragma.c715
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/printf.c861
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/random.c100
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/select.c2437
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/shell.c1364
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/sqlite.h.in871
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/sqliteInt.h1273
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/table.c206
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/tclsqlite.c1296
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/test1.c1030
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/test2.c570
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/test3.c995
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/test4.c637
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/tokenize.c682
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/trigger.c767
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/update.c462
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/util.c1138
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/vacuum.c330
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/vdbe.c4928
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/vdbe.h115
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/vdbeInt.h306
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/vdbeaux.c1064
-rw-r--r--usr/src/cmd/svc/configd/sqlite/src/where.c1238
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/all.test112
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/attach.test589
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/attach2.test149
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/auth.test1895
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/bigfile.test180
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/bigrow.test218
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/bind.test75
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/btree.test1023
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/btree2.test449
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/btree3.test89
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/btree3rb.test87
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/btree4.test101
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/btree4rb.test98
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/capi2.test478
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/conflict.test697
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/copy.test268
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/crashme2.off52
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/crashtest1.c99
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/date.test260
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/delete.test294
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/expr.test522
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/fkey1.test56
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/format3.test741
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/func.test348
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/hook.test86
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/in.test306
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/index.test536
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/insert.test289
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/insert2.test197
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/interrupt.test170
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/intpkey.test490
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/ioerr.test123
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/join.test396
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/join2.test76
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/join3_28.test37
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/join4_28.test80
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/lastinsert.test322
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/laststmtchanges.test247
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/limit.test320
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/lock.test352
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/main.test300
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/malloc.test228
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/memdb.test399
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/memleak.test94
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/minmax.test362
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/misc1.test543
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/misc2.test238
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/misc3.test307
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/misuse.test169
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/notnull.test503
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/null.test240
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/pager.test426
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/pragma.test420
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/printf.test129
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/progress.test121
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/quick.test56
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/quote.test91
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/rowid.test636
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/select1.test744
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/select2.test165
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/select3.test228
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/select4.test498
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/select5.test122
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/select6.test438
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/sort.test364
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/subselect.test158
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/table.test506
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/tableapi.test204
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/tclsqlite.test122
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/temptable.test402
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/tester.tcl267
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/thread1.test161
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/threadtest1.c285
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/threadtest2.c127
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/trans.test905
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/trigger1.test522
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/trigger2.test721
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/trigger3.test169
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/trigger4.test130
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/unique.test235
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/update.test565
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/vacuum.test176
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/version.test201
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/view.test410
-rw-r--r--usr/src/cmd/svc/configd/sqlite/test/where.test745
-rw-r--r--usr/src/cmd/svc/configd/sqlite/tool/diffdb.c47
-rw-r--r--usr/src/cmd/svc/configd/sqlite/tool/lemon.c4386
-rw-r--r--usr/src/cmd/svc/configd/sqlite/tool/lempar.c690
-rw-r--r--usr/src/cmd/svc/configd/sqlite/tool/memleak.awk32
-rw-r--r--usr/src/cmd/svc/configd/sqlite/tool/memleak2.awk32
-rw-r--r--usr/src/cmd/svc/configd/sqlite/tool/mkopts.tcl54
-rw-r--r--usr/src/cmd/svc/configd/sqlite/tool/opcodeDoc.awk26
-rw-r--r--usr/src/cmd/svc/configd/sqlite/tool/report1.txt69
-rw-r--r--usr/src/cmd/svc/configd/sqlite/tool/showdb.c88
-rw-r--r--usr/src/cmd/svc/configd/sqlite/tool/showjournal.c79
-rw-r--r--usr/src/cmd/svc/configd/sqlite/tool/space_used.tcl114
-rw-r--r--usr/src/cmd/svc/configd/sqlite/tool/spaceanal.tcl439
-rw-r--r--usr/src/cmd/svc/configd/sqlite/tool/speedtest.tcl278
-rw-r--r--usr/src/cmd/svc/configd/sqlite/tool/speedtest2.tcl210
-rw-r--r--usr/src/cmd/svc/dtd/service_bundle.dtd.1777
-rw-r--r--usr/src/cmd/svc/inc.flg30
-rw-r--r--usr/src/cmd/svc/lsvcrun/Makefile59
-rw-r--r--usr/src/cmd/svc/lsvcrun/lsvcrun.c951
-rw-r--r--usr/src/cmd/svc/mfstscan/Makefile74
-rw-r--r--usr/src/cmd/svc/mfstscan/mfstscan.c146
-rw-r--r--usr/src/cmd/svc/milestone/Makefile153
-rw-r--r--usr/src/cmd/svc/milestone/README36
-rw-r--r--usr/src/cmd/svc/milestone/README.share148
-rw-r--r--usr/src/cmd/svc/milestone/aggregation40
-rw-r--r--usr/src/cmd/svc/milestone/aggregation.xml73
-rw-r--r--usr/src/cmd/svc/milestone/boot-archive79
-rw-r--r--usr/src/cmd/svc/milestone/boot-archive.xml91
-rw-r--r--usr/src/cmd/svc/milestone/console-login68
-rw-r--r--usr/src/cmd/svc/milestone/datalink37
-rw-r--r--usr/src/cmd/svc/milestone/datalink-init44
-rw-r--r--usr/src/cmd/svc/milestone/datalink-init.xml83
-rw-r--r--usr/src/cmd/svc/milestone/datalink.xml88
-rw-r--r--usr/src/cmd/svc/milestone/devices-local77
-rw-r--r--usr/src/cmd/svc/milestone/devices-local.xml132
-rw-r--r--usr/src/cmd/svc/milestone/fs-local85
-rw-r--r--usr/src/cmd/svc/milestone/fs-minimal55
-rw-r--r--usr/src/cmd/svc/milestone/fs-root181
-rw-r--r--usr/src/cmd/svc/milestone/fs-usr124
-rw-r--r--usr/src/cmd/svc/milestone/identity-domain47
-rw-r--r--usr/src/cmd/svc/milestone/identity-node96
-rw-r--r--usr/src/cmd/svc/milestone/identity.xml126
-rw-r--r--usr/src/cmd/svc/milestone/local-fs.xml93
-rw-r--r--usr/src/cmd/svc/milestone/make-console-login-xml154
-rw-r--r--usr/src/cmd/svc/milestone/manifest-import530
-rw-r--r--usr/src/cmd/svc/milestone/manifest-import.xml90
-rw-r--r--usr/src/cmd/svc/milestone/minimal-fs.xml93
-rw-r--r--usr/src/cmd/svc/milestone/multi-user-server.xml112
-rw-r--r--usr/src/cmd/svc/milestone/multi-user.xml144
-rw-r--r--usr/src/cmd/svc/milestone/name-services.xml106
-rw-r--r--usr/src/cmd/svc/milestone/net-init362
-rw-r--r--usr/src/cmd/svc/milestone/net-loopback87
-rw-r--r--usr/src/cmd/svc/milestone/net-physical347
-rw-r--r--usr/src/cmd/svc/milestone/net-svc340
-rw-r--r--usr/src/cmd/svc/milestone/network-initial.xml119
-rw-r--r--usr/src/cmd/svc/milestone/network-loopback.xml77
-rw-r--r--usr/src/cmd/svc/milestone/network-physical.xml77
-rw-r--r--usr/src/cmd/svc/milestone/network-service.xml125
-rw-r--r--usr/src/cmd/svc/milestone/network.xml92
-rw-r--r--usr/src/cmd/svc/milestone/restarter.xml67
-rw-r--r--usr/src/cmd/svc/milestone/rmtmpfiles77
-rw-r--r--usr/src/cmd/svc/milestone/rmtmpfiles.xml90
-rw-r--r--usr/src/cmd/svc/milestone/root-fs.xml73
-rw-r--r--usr/src/cmd/svc/milestone/single-user.xml164
-rw-r--r--usr/src/cmd/svc/milestone/sysconfig.xml82
-rw-r--r--usr/src/cmd/svc/milestone/usr-fs.xml91
-rw-r--r--usr/src/cmd/svc/profile/Makefile76
-rw-r--r--usr/src/cmd/svc/profile/README45
-rw-r--r--usr/src/cmd/svc/profile/generic_limited_net.xml269
-rw-r--r--usr/src/cmd/svc/profile/generic_open.xml120
-rw-r--r--usr/src/cmd/svc/profile/inetd_generic.xml89
-rw-r--r--usr/src/cmd/svc/profile/inetd_upgrade.xml40
-rw-r--r--usr/src/cmd/svc/profile/ns_dns.xml42
-rw-r--r--usr/src/cmd/svc/profile/ns_files.xml36
-rw-r--r--usr/src/cmd/svc/profile/ns_ldap.xml39
-rw-r--r--usr/src/cmd/svc/profile/ns_nis.xml42
-rw-r--r--usr/src/cmd/svc/profile/ns_nisplus.xml42
-rw-r--r--usr/src/cmd/svc/profile/ns_none.xml68
-rw-r--r--usr/src/cmd/svc/profile/platform_SUNW,Sun-Fire-15000.xml46
-rw-r--r--usr/src/cmd/svc/profile/platform_SUNW,Sun-Fire-880.xml44
-rw-r--r--usr/src/cmd/svc/profile/platform_SUNW,Sun-Fire.xml40
-rw-r--r--usr/src/cmd/svc/profile/platform_SUNW,Ultra-Enterprise-10000.xml42
-rw-r--r--usr/src/cmd/svc/profile/platform_SUNW,UltraSPARC-IIi-Netract.xml40
-rw-r--r--usr/src/cmd/svc/profile/platform_i86pc.xml39
-rw-r--r--usr/src/cmd/svc/profile/platform_none.xml36
-rw-r--r--usr/src/cmd/svc/prophist/Makefile80
-rw-r--r--usr/src/cmd/svc/prophist/prophist.SUNWcsr700
-rw-r--r--usr/src/cmd/svc/prophist/prophist.c539
-rw-r--r--usr/src/cmd/svc/req.flg41
-rw-r--r--usr/src/cmd/svc/seed/Makefile153
-rw-r--r--usr/src/cmd/svc/seed/inc.flg36
-rw-r--r--usr/src/cmd/svc/shell/Makefile47
-rw-r--r--usr/src/cmd/svc/shell/fs_include.sh309
-rw-r--r--usr/src/cmd/svc/shell/krb_include.sh105
-rw-r--r--usr/src/cmd/svc/shell/net_include.sh583
-rw-r--r--usr/src/cmd/svc/shell/smf_include.sh159
-rw-r--r--usr/src/cmd/svc/startd/Makefile109
-rw-r--r--usr/src/cmd/svc/startd/contract.c373
-rw-r--r--usr/src/cmd/svc/startd/dict.c145
-rw-r--r--usr/src/cmd/svc/startd/env.c324
-rw-r--r--usr/src/cmd/svc/startd/expand.c646
-rw-r--r--usr/src/cmd/svc/startd/file.c86
-rw-r--r--usr/src/cmd/svc/startd/fork.c647
-rw-r--r--usr/src/cmd/svc/startd/graph.c5925
-rw-r--r--usr/src/cmd/svc/startd/libscf.c3844
-rw-r--r--usr/src/cmd/svc/startd/log.c656
-rw-r--r--usr/src/cmd/svc/startd/method.c1137
-rw-r--r--usr/src/cmd/svc/startd/misc.c170
-rw-r--r--usr/src/cmd/svc/startd/proc.c60
-rw-r--r--usr/src/cmd/svc/startd/protocol.c420
-rw-r--r--usr/src/cmd/svc/startd/protocol.h106
-rw-r--r--usr/src/cmd/svc/startd/restarter.c2308
-rw-r--r--usr/src/cmd/svc/startd/specials.c250
-rw-r--r--usr/src/cmd/svc/startd/startd.c925
-rw-r--r--usr/src/cmd/svc/startd/startd.h731
-rw-r--r--usr/src/cmd/svc/startd/utmpx.c420
-rw-r--r--usr/src/cmd/svc/startd/wait.c343
-rw-r--r--usr/src/cmd/svc/svcadm/Makefile58
-rw-r--r--usr/src/cmd/svc/svcadm/svcadm.c2370
-rw-r--r--usr/src/cmd/svc/svcadm/synch.c588
-rw-r--r--usr/src/cmd/svc/svccfg/Makefile159
-rw-r--r--usr/src/cmd/svc/svccfg/svccfg.h362
-rw-r--r--usr/src/cmd/svc/svccfg/svccfg.l229
-rw-r--r--usr/src/cmd/svc/svccfg/svccfg.y452
-rw-r--r--usr/src/cmd/svc/svccfg/svccfg_engine.c725
-rw-r--r--usr/src/cmd/svc/svccfg/svccfg_help.c121
-rw-r--r--usr/src/cmd/svc/svccfg/svccfg_internal.c1231
-rw-r--r--usr/src/cmd/svc/svccfg/svccfg_libscf.c12018
-rw-r--r--usr/src/cmd/svc/svccfg/svccfg_main.c235
-rw-r--r--usr/src/cmd/svc/svccfg/svccfg_xml.c1937
-rw-r--r--usr/src/cmd/svc/svcprop/Makefile55
-rw-r--r--usr/src/cmd/svc/svcprop/svcprop.c1117
-rw-r--r--usr/src/cmd/svc/svcs/Makefile59
-rw-r--r--usr/src/cmd/svc/svcs/explain.c2122
-rw-r--r--usr/src/cmd/svc/svcs/svcs.c2919
-rw-r--r--usr/src/cmd/svc/svcs/svcs.h66
288 files changed, 151896 insertions, 0 deletions
diff --git a/usr/src/cmd/svc/Makefile b/usr/src/cmd/svc/Makefile
new file mode 100644
index 0000000000..22f3ce003e
--- /dev/null
+++ b/usr/src/cmd/svc/Makefile
@@ -0,0 +1,80 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+
+include ../Makefile.cmd
+
+SUBDIR_CMD= lsvcrun mfstscan prophist svcadm svccfg svcprop svcs
+SUBDIR_DAEMON= configd startd
+SUBDIR_REPO= milestone profile seed
+SUBDIR_MISC= shell
+SUBDIRS= $(SUBDIR_CMD) $(SUBDIR_DAEMON) $(SUBDIR_REPO) $(SUBDIR_MISC)
+
+all := TARGET = all
+install := TARGET = install
+clean := TARGET = clean
+clobber := TARGET = clobber
+lint := TARGET = lint
+_msg := TARGET = _msg
+
+.KEEP_STATE:
+
+#
+# Definitions for DTDs
+#
+DTDS = dtd/service_bundle.dtd.1
+XMLDIR = $(ROOT)/usr/share/lib/xml
+ROOTDTDS = $(DTDS:%=$(XMLDIR)/%)
+
+$(ROOTDTDS) := FILEMODE = 444
+$(ROOTDTDS) := OWNER = root
+$(ROOTDTDS) := GROUP = bin
+
+#
+# Definitions for class action scripts
+#
+
+CLASSACTIONS = i.manifest r.manifest
+ROOTCLASSACTIONS = $(CLASSACTIONS:%=$(ROOT)/usr/sadm/install/scripts/%)
+
+install: $(ROOTDTDS) $(ROOTCLASSACTIONS)
+
+all install lint clean clobber: $(SUBDIRS)
+
+_msg: $(SUBDIR_CMD) startd
+
+seed: svccfg configd
+
+$(SUBDIRS): FRC
+ @cd $@; pwd; $(MAKE) $(MFLAGS) $(TARGET)
+
+$(XMLDIR)/%: %
+ $(INS.file)
+
+$(ROOT)/usr/sadm/install/scripts/%: $(SRC)/pkgdefs/common_files/%
+ $(INS.file)
+
+FRC:
diff --git a/usr/src/cmd/svc/Makefile.ctf b/usr/src/cmd/svc/Makefile.ctf
new file mode 100644
index 0000000000..f44a38389a
--- /dev/null
+++ b/usr/src/cmd/svc/Makefile.ctf
@@ -0,0 +1,35 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+
+CTFMERGE_HOOK = && $(CTFMERGE) -L VERSION -o $@ $(OBJS)
+CTFCONVERT_HOOK = && $(CTFCONVERT_O)
+CFLAGS += $(CTF_FLAGS)
+CFLAGS64 += $(CTF_FLAGS)
+NATIVE_CFLAGS += $(CTF_FLAGS)
+STRIPFLAG = # CTF requires the symbol table
+
diff --git a/usr/src/cmd/svc/common/configd_exit.h b/usr/src/cmd/svc/common/configd_exit.h
new file mode 100644
index 0000000000..bb3980d967
--- /dev/null
+++ b/usr/src/cmd/svc/common/configd_exit.h
@@ -0,0 +1,52 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _CONFIGD_EXIT_H
+#define _CONFIGD_EXIT_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum configd_exit_codes {
+ CONFIGD_EXIT_OKAY = 0,
+ CONFIGD_EXIT_BAD_ARGS = 2,
+ CONFIGD_EXIT_INIT_FAILED = 100,
+ CONFIGD_EXIT_DOOR_INIT_FAILED,
+ CONFIGD_EXIT_DATABASE_INIT_FAILED,
+ CONFIGD_EXIT_DATABASE_LOCKED,
+ CONFIGD_EXIT_DATABASE_BAD,
+ CONFIGD_EXIT_NO_THREADS,
+ CONFIGD_EXIT_LOST_MAIN_DOOR
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _CONFIGD_EXIT_H */
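
As an illustration only (this is not part of the commit above), the sketch below shows how a supervising process might act on svc.configd's exit status after spawning it. The wait_for_configd() helper and its retry policy are assumptions made for the example; only the CONFIGD_EXIT_* values come from configd_exit.h.

/*
 * Illustrative sketch: interpret svc.configd's exit status.  The policy
 * shown here is hypothetical; the exit codes are the CONFIGD_EXIT_*
 * values defined in configd_exit.h above.
 */
#include <sys/types.h>
#include <sys/wait.h>

#include "configd_exit.h"

static int
wait_for_configd(pid_t pid)
{
	int stat;

	if (waitpid(pid, &stat, 0) < 0 || !WIFEXITED(stat))
		return (-1);

	switch (WEXITSTATUS(stat)) {
	case CONFIGD_EXIT_OKAY:
		return (0);
	case CONFIGD_EXIT_DATABASE_LOCKED:
		/* another copy holds the repository; a retry may succeed */
		return (1);
	default:
		/* BAD_ARGS, *_INIT_FAILED, DATABASE_BAD, ... are fatal */
		return (-1);
	}
}
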
diff --git a/usr/src/cmd/svc/common/manifest_hash.c b/usr/src/cmd/svc/common/manifest_hash.c
new file mode 100644
index 0000000000..1f33c47175
--- /dev/null
+++ b/usr/src/cmd/svc/common/manifest_hash.c
@@ -0,0 +1,492 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <libintl.h>
+#include <libscf.h>
+#include <libuutil.h>
+#include <limits.h>
+#include <md5.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+
+#include <manifest_hash.h>
+
+/*
+ * Translate a file name to property name. Return an allocated string or NULL
+ * if realpath() fails.
+ */
+char *
+mhash_filename_to_propname(const char *in)
+{
+ char *out, *cp, *base;
+ size_t len, piece_len;
+
+ out = uu_zalloc(PATH_MAX + 1);
+ if (realpath(in, out) == NULL) {
+ uu_free(out);
+ return (NULL);
+ }
+
+ base = getenv("PKG_INSTALL_ROOT");
+
+ /*
+ * We copy-shift over the basedir and the leading slash, since it's
+ * not relevant to when we boot with this repository.
+ */
+
+ cp = out + ((base != NULL)? strlen(base) : 0);
+ if (*cp == '/')
+ cp++;
+ (void) memmove(out, cp, strlen(cp) + 1);
+
+ len = strlen(out);
+ if (len > scf_limit(SCF_LIMIT_MAX_NAME_LENGTH)) {
+ /* Use the first half and the second half. */
+ piece_len = (scf_limit(SCF_LIMIT_MAX_NAME_LENGTH) - 3) / 2;
+
+ (void) strncpy(out + piece_len, "__", 2);
+
+ (void) memmove(out + piece_len + 2, out + (len - piece_len),
+ piece_len + 1);
+ }
+
+ /*
+ * Translate non-property characters to '_', first making sure that
+ * we don't begin with '_'.
+ */
+
+ if (!isalpha(*out))
+ *out = 'A';
+
+ for (cp = out + 1; *cp != '\0'; ++cp) {
+ if (!(isalnum(*cp) || *cp == '_' || *cp == '-'))
+ *cp = '_';
+ }
+
+ return (out);
+}
+
+int
+mhash_retrieve_entry(scf_handle_t *hndl, const char *name, uchar_t *hash)
+{
+ scf_scope_t *scope;
+ scf_service_t *svc;
+ scf_propertygroup_t *pg;
+ scf_property_t *prop;
+ scf_value_t *val;
+ ssize_t szret;
+ int result = 0;
+
+ /*
+ * In this implementation the hash for name is the opaque value of
+ * svc:/MHASH_SVC/:properties/name/MHASH_PROP
+ */
+
+ if ((scope = scf_scope_create(hndl)) == NULL ||
+ (svc = scf_service_create(hndl)) == NULL ||
+ (pg = scf_pg_create(hndl)) == NULL ||
+ (prop = scf_property_create(hndl)) == NULL ||
+ (val = scf_value_create(hndl)) == NULL) {
+ result = -1;
+ goto out;
+ }
+
+ if (scf_handle_get_local_scope(hndl, scope) < 0) {
+ result = -1;
+ goto out;
+ }
+
+ if (scf_scope_get_service(scope, MHASH_SVC, svc) < 0) {
+ result = -1;
+ goto out;
+ }
+
+ if (scf_service_get_pg(svc, name, pg) != SCF_SUCCESS) {
+ result = -1;
+ goto out;
+ }
+
+ if (scf_pg_get_property(pg, MHASH_PROP, prop) != SCF_SUCCESS) {
+ result = -1;
+ goto out;
+ }
+
+ if (scf_property_get_value(prop, val) != SCF_SUCCESS) {
+ result = -1;
+ goto out;
+ }
+
+ szret = scf_value_get_opaque(val, hash, MHASH_SIZE);
+ if (szret < 0) {
+ result = -1;
+ goto out;
+ }
+
+ if (szret != MHASH_SIZE) {
+ result = -1;
+ goto out;
+ }
+
+out:
+ (void) scf_value_destroy(val);
+ scf_property_destroy(prop);
+ scf_pg_destroy(pg);
+ scf_service_destroy(svc);
+ scf_scope_destroy(scope);
+
+ return (result);
+}
+
+int
+mhash_store_entry(scf_handle_t *hndl, const char *name, uchar_t *hash,
+ char **errstr)
+{
+ scf_scope_t *scope = NULL;
+ scf_service_t *svc = NULL;
+ scf_propertygroup_t *pg = NULL;
+ scf_property_t *prop = NULL;
+ scf_value_t *val = NULL;
+ scf_transaction_t *tx = NULL;
+ scf_transaction_entry_t *e = NULL;
+ int ret, result = 0;
+
+ int i;
+
+ if ((scope = scf_scope_create(hndl)) == NULL ||
+ (svc = scf_service_create(hndl)) == NULL ||
+ (pg = scf_pg_create(hndl)) == NULL ||
+ (prop = scf_property_create(hndl)) == NULL) {
+ if (errstr != NULL)
+ *errstr = gettext("Could not create scf objects");
+ result = -1;
+ goto out;
+ }
+
+ if (scf_handle_get_local_scope(hndl, scope) != SCF_SUCCESS) {
+ if (errstr != NULL)
+ *errstr = gettext("Could not get local scope");
+ result = -1;
+ goto out;
+ }
+
+ for (i = 0; i < 5; ++i) {
+ scf_error_t err;
+
+ if (scf_scope_get_service(scope, MHASH_SVC, svc) ==
+ SCF_SUCCESS)
+ break;
+
+ if (scf_error() != SCF_ERROR_NOT_FOUND) {
+ if (errstr != NULL)
+ *errstr = gettext("Could not get manifest hash "
+ "service");
+ result = -1;
+ goto out;
+ }
+
+ if (scf_scope_add_service(scope, MHASH_SVC, svc) ==
+ SCF_SUCCESS)
+ break;
+
+ err = scf_error();
+
+ if (err == SCF_ERROR_EXISTS)
+ /* Try again. */
+ continue;
+ else if (err == SCF_ERROR_PERMISSION_DENIED) {
+ if (errstr != NULL)
+ *errstr = gettext("Could not store file hash: "
+ "permission denied.\n");
+ result = -1;
+ goto out;
+ }
+
+ if (errstr != NULL)
+ *errstr = gettext("Could not add manifest hash "
+ "service");
+ result = -1;
+ goto out;
+ }
+
+ if (i == 5) {
+ if (errstr != NULL)
+ *errstr = gettext("Could not store file hash: "
+ "service addition contention.\n");
+ result = -1;
+ goto out;
+ }
+
+ for (i = 0; i < 5; ++i) {
+ scf_error_t err;
+
+ if (scf_service_get_pg(svc, name, pg) == SCF_SUCCESS)
+ break;
+
+ if (scf_error() != SCF_ERROR_NOT_FOUND) {
+ if (errstr != NULL)
+ *errstr = gettext("Could not get service's "
+ "hash record)");
+ result = -1;
+ goto out;
+ }
+
+ if (scf_service_add_pg(svc, name, MHASH_PG_TYPE,
+ MHASH_PG_FLAGS, pg) == SCF_SUCCESS)
+ break;
+
+ err = scf_error();
+
+ if (err == SCF_ERROR_EXISTS)
+ /* Try again. */
+ continue;
+ else if (err == SCF_ERROR_PERMISSION_DENIED) {
+ if (errstr != NULL)
+ *errstr = gettext("Could not store file hash: "
+ "permission denied.\n");
+ result = -1;
+ goto out;
+ }
+
+ if (errstr != NULL)
+ *errstr = gettext("Could not store file hash");
+ result = -1;
+ goto out;
+ }
+ if (i == 5) {
+ if (errstr != NULL)
+ *errstr = gettext("Could not store file hash: "
+ "property group addition contention.\n");
+ result = -1;
+ goto out;
+ }
+
+ if ((e = scf_entry_create(hndl)) == NULL ||
+ (val = scf_value_create(hndl)) == NULL) {
+ if (errstr != NULL)
+ *errstr = gettext("Could not store file hash: "
+ "permission denied.\n");
+ result = -1;
+ goto out;
+ }
+
+ ret = scf_value_set_opaque(val, hash, MHASH_SIZE);
+ assert(ret == SCF_SUCCESS);
+
+ tx = scf_transaction_create(hndl);
+ if (tx == NULL) {
+ if (errstr != NULL)
+ *errstr = gettext("Could not create transaction");
+ result = -1;
+ goto out;
+ }
+
+ do {
+ if (scf_pg_update(pg) == -1) {
+ if (errstr != NULL)
+ *errstr = gettext("Could not update hash "
+ "entry");
+ result = -1;
+ goto out;
+ }
+ if (scf_transaction_start(tx, pg) != SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_PERMISSION_DENIED) {
+ if (errstr != NULL)
+ *errstr = gettext("Could not start "
+ "hash transaction.\n");
+ result = -1;
+ goto out;
+ }
+
+ if (errstr != NULL)
+ *errstr = gettext("Could not store file hash: "
+ "permission denied.\n");
+ result = -1;
+
+ scf_transaction_destroy(tx);
+ (void) scf_entry_destroy(e);
+ goto out;
+ }
+
+ if (scf_transaction_property_new(tx, e, MHASH_PROP,
+ SCF_TYPE_OPAQUE) != SCF_SUCCESS &&
+ scf_transaction_property_change_type(tx, e, MHASH_PROP,
+ SCF_TYPE_OPAQUE) != SCF_SUCCESS) {
+ if (errstr != NULL)
+ *errstr = gettext("Could not modify hash "
+ "entry");
+ result = -1;
+ goto out;
+ }
+
+ ret = scf_entry_add_value(e, val);
+ assert(ret == SCF_SUCCESS);
+
+ ret = scf_transaction_commit(tx);
+
+ if (ret == 0)
+ scf_transaction_reset(tx);
+ } while (ret == 0);
+
+ if (ret < 0) {
+ if (scf_error() != SCF_ERROR_PERMISSION_DENIED) {
+ if (errstr != NULL)
+ *errstr = gettext("Could not store file hash: "
+ "permission denied.\n");
+ result = -1;
+ goto out;
+ }
+
+ if (errstr != NULL)
+ *errstr = gettext("Could not commit transaction");
+ result = -1;
+ }
+
+ scf_transaction_destroy(tx);
+ (void) scf_entry_destroy(e);
+
+out:
+ (void) scf_value_destroy(val);
+ scf_property_destroy(prop);
+ scf_pg_destroy(pg);
+ scf_service_destroy(svc);
+ scf_scope_destroy(scope);
+
+ return (result);
+}
+
+/*
+ * int mhash_test_file(scf_handle_t *, const char *, uint_t, char **, uchar_t *)
+ * Test the given filename against the hashed metadata in the repository.
+ * The behaviours for import and apply are slightly different. For imports,
+ * if the hash value is absent or different, then the import operation
+ * continues. For profile application, the operation continues only if the
+ * hash value for the file is absent.
+ *
+ * Return non-zero if we should skip the file because it is unchanged or
+ * nonexistent.
+ */
+int
+mhash_test_file(scf_handle_t *hndl, const char *file, uint_t is_profile,
+ char **pnamep, uchar_t *hash)
+{
+ boolean_t do_hash;
+ struct stat64 st;
+ char *cp;
+ char *data;
+ uchar_t stored_hash[MHASH_SIZE];
+ char *pname;
+ int ret;
+
+ /*
+ * In the case where we are doing automated imports, we reduce the UID,
+ * the GID, the size, and the mtime into a string (to eliminate
+ * endianness) which we then make opaque as a single MD5 digest.
+ *
+ * The previous hash was composed of the inode number, the UID, the file
+ * size, and the mtime. This formulation was found to be insufficiently
+ * portable for use in highly replicated deployments. The current
+ * algorithm will allow matches of this "v1" hash, but always returns
+ * the effective "v2" hash, such that updates result in the more
+ * portable hash being used.
+ *
+ * If neither calculated digest matches the stored value, we consider
+ * the test to have failed, implying that some aspect of the manifest
+ * has changed.
+ */
+
+ cp = getenv("SVCCFG_CHECKHASH");
+ do_hash = (cp != NULL && *cp != '\0');
+ if (!do_hash) {
+ *pnamep = NULL;
+ return (0);
+ }
+
+ do
+ ret = stat64(file, &st);
+ while (ret < 0 && errno == EINTR);
+ if (ret < 0) {
+ return (-1);
+ }
+
+ data = uu_msprintf(MHASH_FORMAT_V2, st.st_uid, st.st_gid,
+ st.st_size, st.st_mtime);
+ if (data == NULL) {
+ return (-1);
+ }
+
+ md5_calc(hash, (uchar_t *)data, strlen(data));
+
+ uu_free(data);
+
+ pname = mhash_filename_to_propname(file);
+ if (pname == NULL)
+ return (-1);
+
+ if (mhash_retrieve_entry(hndl, pname, stored_hash) == 0) {
+ uchar_t hash_v1[MHASH_SIZE];
+
+ if (is_profile) {
+ return (1);
+ }
+
+ /*
+ * Manifest import.
+ */
+ if (memcmp(hash, stored_hash, MHASH_SIZE) == 0)
+ return (1);
+
+ /*
+ * No match on V2 hash; compare V1 hash.
+ */
+ data = uu_msprintf(MHASH_FORMAT_V1, st.st_ino, st.st_uid,
+ st.st_size, st.st_mtime);
+ if (data == NULL)
+ return (-1);
+
+ md5_calc(hash_v1, (uchar_t *)data, strlen(data));
+
+ uu_free(data);
+
+ if (memcmp(hash_v1, stored_hash, MHASH_SIZE) == 0)
+ return (1);
+ }
+
+ *pnamep = pname;
+
+ return (0);
+}
diff --git a/usr/src/cmd/svc/common/manifest_hash.h b/usr/src/cmd/svc/common/manifest_hash.h
new file mode 100644
index 0000000000..eb49ed4371
--- /dev/null
+++ b/usr/src/cmd/svc/common/manifest_hash.h
@@ -0,0 +1,58 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _MANIFEST_HASH_H
+#define _MANIFEST_HASH_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <libscf.h>
+#include <md5.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MHASH_SIZE MD5_DIGEST_LENGTH
+#define MHASH_SVC "smf/manifest"
+#define MHASH_PG_TYPE "framework"
+#define MHASH_PG_FLAGS 0
+#define MHASH_PROP "md5sum"
+
+#define MHASH_FORMAT_V1 "%llx%lx%llx%lx"
+#define MHASH_FORMAT_V2 "%lx%lx%llx%lx"
+
+char *mhash_filename_to_propname(const char *);
+int mhash_retrieve_entry(scf_handle_t *, const char *, uchar_t *);
+int mhash_store_entry(scf_handle_t *, const char *, uchar_t *, char **);
+int mhash_test_file(scf_handle_t *, const char *, uint_t, char **, uchar_t *);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _MANIFEST_HASH_H */
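
As a usage illustration only (it is not part of the commit above), here is a minimal sketch of how a manifest importer might drive the helpers declared in this header: test the file's stored hash, skip an unchanged manifest, and record the new hash after a successful import. The import_if_changed() wrapper, its error handling, and the elided import step are assumptions for the example; only the mhash_*() signatures and MHASH_SIZE come from manifest_hash.h.

/*
 * Illustrative sketch (not part of this commit): a hypothetical importer
 * that skips manifests whose stored hash has not changed.
 */
#include <stdio.h>
#include <libscf.h>

#include "manifest_hash.h"

static int
import_if_changed(scf_handle_t *h, const char *manifest)
{
	uchar_t hash[MHASH_SIZE];
	char *pname = NULL;
	char *errstr = NULL;

	/* Non-zero means the stored hash matches (or the file is gone). */
	if (mhash_test_file(h, manifest, 0, &pname, hash) != 0)
		return (0);

	/* ... import the manifest into the repository here ... */

	/* Remember the new hash so the next run can skip this file. */
	if (pname != NULL &&
	    mhash_store_entry(h, pname, hash, &errstr) != 0) {
		(void) fprintf(stderr, "%s\n",
		    errstr != NULL ? errstr : "hash store failed");
		return (-1);
	}

	return (0);
}
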
diff --git a/usr/src/cmd/svc/configd/Makefile b/usr/src/cmd/svc/configd/Makefile
new file mode 100644
index 0000000000..72a098711a
--- /dev/null
+++ b/usr/src/cmd/svc/configd/Makefile
@@ -0,0 +1,140 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+
+MYPROG = svc.configd
+MYOBJS = \
+ backend.o \
+ configd.o \
+ client.o \
+ file_object.o \
+ maindoor.o \
+ object.o \
+ rc_node.o \
+ snapshot.o
+
+PROG = $(MYPROG)
+OBJS = $(MYOBJS)
+
+SRCS = $(MYOBJS:%.o=%.c)
+
+include ../../Makefile.cmd
+include ../Makefile.ctf
+
+NATIVE_BUILD=$(POUND_SIGN)
+$(NATIVE_BUILD)PROG = $(MYPROG:%=%-native)
+$(NATIVE_BUILD)OBJS = $(MYOBJS:%.o=%-native.o)
+
+ROOTCMDDIR= $(ROOT)/lib/svc/bin
+
+MYCPPFLAGS = -I. -I../common -I../../../common/svc -D_REENTRANT
+CPPFLAGS += $(MYCPPFLAGS)
+CFLAGS += -v
+MYLDLIBS = -lrt -lumem -luutil
+LDLIBS += -lsecdb $(MYLDLIBS)
+LINTFLAGS += -errtags -erroff=E_BAD_FORMAT_ARG_TYPE2
+
+CLOBBERFILES += $(MYPROG:%=%-native)
+
+LIBUUTIL = $(SRC)/lib/libuutil
+LIBSCF = $(SRC)/lib/libscf
+
+SCRIPTFILE = restore_repository
+ROOTSCRIPTFILE = $(ROOTCMDDIR)/$(SCRIPTFILE)
+
+$(NATIVE_BUILD)CC = $(NATIVECC)
+$(NATIVE_BUILD)LD = $(NATIVELD)
+$(NATIVE_BUILD)CFLAGS = $(NATIVE_CFLAGS)
+$(NATIVE_BUILD)CPPFLAGS = $(MYCPPFLAGS) -I$(LIBUUTIL)/common -I$(LIBSCF)/inc
+$(NATIVE_BUILD)CPPFLAGS += -DNATIVE_BUILD
+$(NATIVE_BUILD)LDFLAGS =
+$(NATIVE_BUILD)LDLIBS = -L$(LIBUUTIL)/native -R $(LIBUUTIL)/native \
+ $(MYLDLIBS) -ldoor
+
+DIRMODE = 0755
+FILEMODE = 0555
+OWNER = root
+GROUP = sys
+
+SQLITEDIR = sqlite
+LIBSQLITE = $(SQLITEDIR)/libsqlite.o
+$(NATIVE_BUILD)LIBSQLITE = $(SQLITEDIR)/libsqlite-native.o
+SQLITELINT = $(SQLITEDIR)/llib-lsqlite.ln
+
+OBJS += $(LIBSQLITE)
+
+install := TARGET = install
+clobber := TARGET = clobber
+
+lint := LDLIBS += $(SQLITELINT)
+
+.KEEP_STATE:
+.PARALLEL: $(MYOBJS) $(MYOBJS:%.o=%-native.o)
+
+all: $(PROG)
+
+native: FRC
+ @cd $(LIBUUTIL)/native; pwd; $(MAKE) $(MFLAGS) install
+ @NATIVE_BUILD= $(MAKE) $(MFLAGS) all
+
+$(SQLITEDIR): FRC
+ @cd $(SQLITEDIR); pwd; $(MAKE) $(TARGET)
+
+$(SQLITEDIR)/libsqlite.o: FRC
+ @cd $(SQLITEDIR); pwd; $(MAKE) all
+
+$(SQLITEDIR)/libsqlite-native.o: FRC
+ @cd $(SQLITEDIR); pwd; $(MAKE) native
+
+$(SQLITEDIR)/llib-lsqlite.ln: FRC
+ @cd $(SQLITEDIR); pwd; $(MAKE) llib-lsqlite.ln
+
+$(PROG): $(LIBSQLITE) $(OBJS)
+ $(LINK.c) -o $@ $(OBJS) $(LDLIBS) $(CTFMERGE_HOOK)
+ $(POST_PROCESS)
+
+%-native.o: %.c
+ $(COMPILE.c) -o $@ $< $(CTFCONVERT_HOOK)
+ $(POST_PROCESS_O)
+
+$(ROOTCMDDIR)/%: %.sh
+ $(INS.rename)
+
+install: all $(SQLITEDIR) $(ROOTCMD) $(ROOTVARSADMFILE) $(ROOTSCRIPTFILE)
+
+clean: FRC
+ $(RM) $(MYOBJS) $(MYOBJS:%.o=%-native.o)
+
+clobber: $(SQLITEDIR)
+
+lint: lint_SRCS
+
+lint_SRCS: $(SQLITELINT)
+
+include ../../Makefile.targ
+
+FRC:
diff --git a/usr/src/cmd/svc/configd/backend.c b/usr/src/cmd/svc/configd/backend.c
new file mode 100644
index 0000000000..141df0c168
--- /dev/null
+++ b/usr/src/cmd/svc/configd/backend.c
@@ -0,0 +1,1950 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <assert.h>
+#include <door.h>
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <pthread.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <zone.h>
+
+#include "configd.h"
+#include "repcache_protocol.h"
+
+#include "sqlite/sqlite.h"
+#include "sqlite/sqlite-misc.h"
+
+/*
+ * This file has two purposes:
+ *
+ * 1. It contains the database schema, and the code for setting up our backend
+ * databases, including installing said schema.
+ *
+ * 2. It provides a simplified interface to the SQL database library, and
+ * synchronizes MT access to the database.
+ */
+
+typedef struct backend_spent {
+ uint64_t bs_count;
+ hrtime_t bs_time;
+ hrtime_t bs_vtime;
+} backend_spent_t;
+
+typedef struct backend_totals {
+ backend_spent_t bt_lock; /* waiting for lock */
+ backend_spent_t bt_exec; /* time spent executing SQL */
+} backend_totals_t;
+
+typedef struct sqlite_backend {
+ pthread_mutex_t be_lock;
+ pthread_t be_thread; /* thread holding lock */
+ struct sqlite *be_db;
+ const char *be_path; /* path to db */
+ int be_readonly; /* backend is read-only */
+ int be_writing; /* held for writing */
+ backend_type_t be_type; /* type of db */
+ backend_totals_t be_totals[2]; /* one for reading, one for writing */
+} sqlite_backend_t;
+
+struct backend_tx {
+ sqlite_backend_t *bt_be;
+ int bt_readonly;
+ int bt_type;
+ int bt_full; /* SQLITE_FULL during tx */
+};
+
+#define UPDATE_TOTALS_WR(sb, writing, field, ts, vts) { \
+ backend_spent_t *__bsp = &(sb)->be_totals[!!(writing)].field; \
+ __bsp->bs_count++; \
+ __bsp->bs_time += (gethrtime() - ts); \
+ __bsp->bs_vtime += (gethrvtime() - vts); \
+}
+
+#define UPDATE_TOTALS(sb, field, ts, vts) \
+ UPDATE_TOTALS_WR(sb, (sb)->be_writing, field, ts, vts)
+
+struct backend_query {
+ char *bq_buf;
+ size_t bq_size;
+};
+
+struct backend_tbl_info {
+ const char *bti_name;
+ const char *bti_cols;
+};
+
+struct backend_idx_info {
+ const char *bxi_tbl;
+ const char *bxi_idx;
+ const char *bxi_cols;
+};
+
+static pthread_mutex_t backend_panic_lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t backend_panic_cv = PTHREAD_COND_INITIALIZER;
+pthread_t backend_panic_thread = 0;
+
+int backend_do_trace = 0; /* invoke tracing callback */
+int backend_print_trace = 0; /* tracing callback prints SQL */
+int backend_panic_abort = 0; /* abort when panicking */
+
+/*
+ * Any change to the below schema should bump the version number
+ */
+#define BACKEND_SCHEMA_VERSION 5
+
+static struct backend_tbl_info tbls_normal[] = { /* BACKEND_TYPE_NORMAL */
+ /*
+ * service_tbl holds all services. svc_id is the identifier of the
+ * service.
+ */
+ {
+ "service_tbl",
+ "svc_id INTEGER PRIMARY KEY,"
+ "svc_name CHAR(256) NOT NULL"
+ },
+
+ /*
+ * instance_tbl holds all of the instances. The parent service id
+ * is instance_svc.
+ */
+ {
+ "instance_tbl",
+ "instance_id INTEGER PRIMARY KEY,"
+ "instance_name CHAR(256) NOT NULL,"
+ "instance_svc INTEGER NOT NULL"
+ },
+
+ /*
+ * snapshot_lnk_tbl links (instance, snapshot name) with snapshots.
+ */
+ {
+ "snapshot_lnk_tbl",
+ "lnk_id INTEGER PRIMARY KEY,"
+ "lnk_inst_id INTEGER NOT NULL,"
+ "lnk_snap_name CHAR(256) NOT NULL,"
+ "lnk_snap_id INTEGER NOT NULL"
+ },
+
+ /*
+ * snaplevel_tbl maps a snapshot id to a set of named, ordered
+ * snaplevels.
+ */
+ {
+ "snaplevel_tbl",
+ "snap_id INTEGER NOT NULL,"
+ "snap_level_num INTEGER NOT NULL,"
+ "snap_level_id INTEGER NOT NULL,"
+ "snap_level_service_id INTEGER NOT NULL,"
+ "snap_level_service CHAR(256) NOT NULL,"
+ "snap_level_instance_id INTEGER NULL,"
+ "snap_level_instance CHAR(256) NULL"
+ },
+
+ /*
+ * snaplevel_lnk_tbl links snaplevels to property groups.
+ * snaplvl_pg_* is identical to the original property group,
+ * and snaplvl_gen_id overrides the generation number.
+ * The service/instance ids are as in the snaplevel.
+ */
+ {
+ "snaplevel_lnk_tbl",
+ "snaplvl_level_id INTEGER NOT NULL,"
+ "snaplvl_pg_id INTEGER NOT NULL,"
+ "snaplvl_pg_name CHAR(256) NOT NULL,"
+ "snaplvl_pg_type CHAR(256) NOT NULL,"
+ "snaplvl_pg_flags INTEGER NOT NULL,"
+ "snaplvl_gen_id INTEGER NOT NULL"
+ },
+
+ { NULL, NULL }
+};
+
+static struct backend_idx_info idxs_normal[] = { /* BACKEND_TYPE_NORMAL */
+ { "service_tbl", "name", "svc_name" },
+ { "instance_tbl", "name", "instance_svc, instance_name" },
+ { "snapshot_lnk_tbl", "name", "lnk_inst_id, lnk_snap_name" },
+ { "snapshot_lnk_tbl", "snapid", "lnk_snap_id" },
+ { "snaplevel_tbl", "id", "snap_id" },
+ { "snaplevel_lnk_tbl", "id", "snaplvl_pg_id" },
+ { "snaplevel_lnk_tbl", "level", "snaplvl_level_id" },
+ { NULL, NULL, NULL }
+};
+
+static struct backend_tbl_info tbls_np[] = { /* BACKEND_TYPE_NONPERSIST */
+ { NULL, NULL }
+};
+
+static struct backend_idx_info idxs_np[] = { /* BACKEND_TYPE_NONPERSIST */
+ { NULL, NULL, NULL }
+};
+
+static struct backend_tbl_info tbls_common[] = { /* all backend types */
+ /*
+ * pg_tbl defines property groups. They are associated with a single
+ * service or instance. The pg_gen_id links them with the latest
+ * "edited" version of its properties.
+ */
+ {
+ "pg_tbl",
+ "pg_id INTEGER PRIMARY KEY,"
+ "pg_parent_id INTEGER NOT NULL,"
+ "pg_name CHAR(256) NOT NULL,"
+ "pg_type CHAR(256) NOT NULL,"
+ "pg_flags INTEGER NOT NULL,"
+ "pg_gen_id INTEGER NOT NULL"
+ },
+
+ /*
+ * prop_lnk_tbl links a particular pg_id and gen_id to a set of
+ * (prop_name, prop_type, val_id) trios.
+ */
+ {
+ "prop_lnk_tbl",
+ "lnk_prop_id INTEGER PRIMARY KEY,"
+ "lnk_pg_id INTEGER NOT NULL,"
+ "lnk_gen_id INTEGER NOT NULL,"
+ "lnk_prop_name CHAR(256) NOT NULL,"
+ "lnk_prop_type CHAR(2) NOT NULL,"
+ "lnk_val_id INTEGER"
+ },
+
+ /*
+ * value_tbl maps a value_id to a set of values. For any given
+ * value_id, value_type is constant.
+ */
+ {
+ "value_tbl",
+ "value_id INTEGER NOT NULL,"
+ "value_type CHAR(1) NOT NULL,"
+ "value_value VARCHAR NOT NULL"
+ },
+
+ /*
+ * id_tbl has one row per id space
+ */
+ {
+ "id_tbl",
+ "id_name STRING NOT NULL,"
+ "id_next INTEGER NOT NULL"
+ },
+
+ /*
+ * schema_version has a single row, which contains
+ * BACKEND_SCHEMA_VERSION at the time of creation.
+ */
+ {
+ "schema_version",
+ "schema_version INTEGER"
+ },
+ { NULL, NULL }
+};
+
+static struct backend_idx_info idxs_common[] = { /* all backend types */
+ { "pg_tbl", "parent", "pg_parent_id" },
+ { "pg_tbl", "name", "pg_parent_id, pg_name" },
+ { "pg_tbl", "type", "pg_parent_id, pg_type" },
+ { "prop_lnk_tbl", "base", "lnk_pg_id, lnk_gen_id" },
+ { "prop_lnk_tbl", "val", "lnk_val_id" },
+ { "value_tbl", "id", "value_id" },
+ { "id_tbl", "id", "id_name" },
+ { NULL, NULL, NULL }
+};
+
+struct run_single_int_info {
+ uint32_t *rs_out;
+ int rs_result;
+};
+
+/*ARGSUSED*/
+static int
+run_single_int_callback(void *arg, int columns, char **vals, char **names)
+{
+ struct run_single_int_info *info = arg;
+ uint32_t val;
+
+ char *endptr = vals[0];
+
+ assert(info->rs_result != REP_PROTOCOL_SUCCESS);
+ assert(columns == 1);
+
+ if (vals[0] == NULL)
+ return (BACKEND_CALLBACK_CONTINUE);
+
+ errno = 0;
+ val = strtoul(vals[0], &endptr, 10);
+ if ((val == 0 && endptr == vals[0]) || *endptr != 0 || errno != 0)
+ backend_panic("malformed integer \"%20s\"", vals[0]);
+
+ *info->rs_out = val;
+ info->rs_result = REP_PROTOCOL_SUCCESS;
+ return (BACKEND_CALLBACK_CONTINUE);
+}
+
+/*ARGSUSED*/
+int
+backend_fail_if_seen(void *arg, int columns, char **vals, char **names)
+{
+ return (BACKEND_CALLBACK_ABORT);
+}
+
+static int
+backend_is_readonly(struct sqlite *db, char **errp)
+{
+ int r = sqlite_exec(db,
+ "BEGIN TRANSACTION; "
+ "UPDATE schema_version SET schema_version = schema_version; ",
+ NULL, NULL, errp);
+
+ (void) sqlite_exec(db, "ROLLBACK TRANSACTION", NULL, NULL, NULL);
+ return (r);
+}
+
+static void
+backend_trace_sql(void *arg, const char *sql)
+{
+ sqlite_backend_t *be = arg;
+
+ if (backend_print_trace) {
+ (void) fprintf(stderr, "%d: %s\n", be->be_type, sql);
+ }
+}
+
+static sqlite_backend_t be_info[BACKEND_TYPE_TOTAL];
+static sqlite_backend_t *bes[BACKEND_TYPE_TOTAL];
+
+#define BACKEND_PANIC_TIMEOUT (50 * MILLISEC)
+/*
+ * backend_panic() -- some kind of database problem or corruption has been hit.
+ * We attempt to quiesce the other database users -- all of the backend sql
+ * entry points will call backend_panic(NULL) if a panic is in progress, as
+ * will any attempt to start a transaction.
+ *
+ * We give threads holding a backend lock 50ms (BACKEND_PANIC_TIMEOUT) to
+ * either drop the lock or call backend_panic(). If they don't respond in
+ * time, we'll just exit anyway.
+ */
+void
+backend_panic(const char *format, ...)
+{
+ int i;
+ va_list args;
+ int failed = 0;
+
+ (void) pthread_mutex_lock(&backend_panic_lock);
+ if (backend_panic_thread != 0) {
+ (void) pthread_mutex_unlock(&backend_panic_lock);
+ /*
+ * first, drop any backend locks we're holding, then
+ * sleep forever on the panic_cv.
+ */
+ for (i = 0; i < BACKEND_TYPE_TOTAL; i++) {
+ if (bes[i] != NULL &&
+ bes[i]->be_thread == pthread_self())
+ (void) pthread_mutex_unlock(&bes[i]->be_lock);
+ }
+ (void) pthread_mutex_lock(&backend_panic_lock);
+ for (;;)
+ (void) pthread_cond_wait(&backend_panic_cv,
+ &backend_panic_lock);
+ }
+ backend_panic_thread = pthread_self();
+ (void) pthread_mutex_unlock(&backend_panic_lock);
+
+ for (i = 0; i < BACKEND_TYPE_TOTAL; i++) {
+ if (bes[i] != NULL && bes[i]->be_thread == pthread_self())
+ (void) pthread_mutex_unlock(&bes[i]->be_lock);
+ }
+
+ va_start(args, format);
+ configd_vcritical(format, args);
+ va_end(args);
+
+ for (i = 0; i < BACKEND_TYPE_TOTAL; i++) {
+ timespec_t rel;
+
+ rel.tv_sec = 0;
+ rel.tv_nsec = BACKEND_PANIC_TIMEOUT;
+
+ if (bes[i] != NULL && bes[i]->be_thread != pthread_self()) {
+ if (pthread_mutex_reltimedlock_np(&bes[i]->be_lock,
+ &rel) != 0)
+ failed++;
+ }
+ }
+ if (failed) {
+ configd_critical("unable to quiesce database\n");
+ }
+
+ if (backend_panic_abort)
+ abort();
+
+ exit(CONFIGD_EXIT_DATABASE_BAD);
+}
+
+/*
+ * Returns
+ * _SUCCESS
+ * _DONE - callback aborted query
+ * _NO_RESOURCES - out of memory (_FULL & _TOOBIG?)
+ */
+static int
+backend_error(sqlite_backend_t *be, int error, char *errmsg)
+{
+ if (error == SQLITE_OK)
+ return (REP_PROTOCOL_SUCCESS);
+
+ switch (error) {
+ case SQLITE_ABORT:
+ free(errmsg);
+ return (REP_PROTOCOL_DONE);
+
+ case SQLITE_NOMEM:
+ case SQLITE_FULL:
+ case SQLITE_TOOBIG:
+ free(errmsg);
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+
+ default:
+ backend_panic("%s: db error: %s", be->be_path, errmsg);
+ /*NOTREACHED*/
+ }
+}
+
+static void
+backend_backup_cleanup(const char **out_arg, ssize_t out_sz)
+{
+ char **out = (char **)out_arg;
+
+ while (out_sz-- > 0)
+ free(*out++);
+ free(out_arg);
+}
+
+/*
+ * builds an inverse-time-sorted array of backup files. The path is
+ * a single buffer, and the pointers look like:
+ *
+ * /this/is/a/full/path/to/repository-name-YYYYMMDDHHMMSS
+ * ^pathname ^ ^(pathname+pathlen)
+ * basename
+ *
+ * dirname will either be pathname, or ".".
+ *
+ * Returns the number of elements in the array, 0 if there are no previous
+ * backups, or -1 on error.
+ */
+static ssize_t
+backend_backup_get_prev(char *pathname, size_t pathlen, const char ***out_arg)
+{
+ char b_start, b_end;
+ DIR *dir;
+ char **out = NULL;
+ char *name, *p;
+ char *dirname, *basename;
+ char *pathend;
+ struct dirent *ent;
+
+ size_t count = 0;
+ size_t baselen;
+
+ /*
+ * year, month, day, hour, min, sec, plus an '_'.
+ */
+ const size_t ndigits = 4 + 5*2 + 1;
+ const size_t baroffset = 4 + 2*2;
+
+ size_t idx;
+
+ pathend = pathname + pathlen;
+ b_end = *pathend;
+ *pathend = '\0';
+
+ basename = strrchr(pathname, '/');
+
+ if (basename != NULL) {
+ assert(pathend > pathname && basename < pathend);
+ basename++;
+ dirname = pathname;
+ } else {
+ basename = pathname;
+ dirname = ".";
+ }
+
+ baselen = strlen(basename);
+
+ /*
+ * munge the string temporarily for the opendir(), then restore it.
+ */
+ b_start = basename[0];
+
+ basename[0] = '\0';
+ dir = opendir(dirname);
+ basename[0] = b_start; /* restore path */
+
+ if (dir == NULL)
+ goto fail;
+
+
+ while ((ent = readdir(dir)) != NULL) {
+ /*
+ * Must match:
+ * basename-YYYYMMDD_HHMMSS
+ * or we ignore it.
+ */
+ if (strncmp(ent->d_name, basename, baselen) != 0)
+ continue;
+
+ name = ent->d_name;
+ if (name[baselen] != '-')
+ continue;
+
+ p = name + baselen + 1;
+
+ for (idx = 0; idx < ndigits; idx++) {
+ char c = p[idx];
+ if (idx == baroffset && c != '_')
+ break;
+ if (idx != baroffset && (c < '0' || c > '9'))
+ break;
+ }
+ if (idx != ndigits || p[idx] != '\0')
+ continue;
+
+ /*
+ * We have a match. insertion-sort it into our list.
+ */
+ name = strdup(name);
+ if (name == NULL)
+ goto fail_closedir;
+ p = strrchr(name, '-');
+
+ for (idx = 0; idx < count; idx++) {
+ char *tmp = out[idx];
+ char *tp = strrchr(tmp, '-');
+
+ int cmp = strcmp(p, tp);
+ if (cmp == 0)
+ cmp = strcmp(name, tmp);
+
+ if (cmp == 0) {
+ free(name);
+ name = NULL;
+ break;
+ } else if (cmp > 0) {
+ out[idx] = name;
+ name = tmp;
+ p = tp;
+ }
+ }
+
+ if (idx == count) {
+ char **new_out = realloc(out,
+ (count + 1) * sizeof (*out));
+
+ if (new_out == NULL) {
+ free(name);
+ goto fail_closedir;
+ }
+
+ out = new_out;
+ out[count++] = name;
+ } else {
+ assert(name == NULL);
+ }
+ }
+ (void) closedir(dir);
+
+ basename[baselen] = b_end;
+
+ *out_arg = (const char **)out;
+ return (count);
+
+fail_closedir:
+ (void) closedir(dir);
+fail:
+ basename[0] = b_start;
+ *pathend = b_end;
+
+ backend_backup_cleanup((const char **)out, count);
+
+ *out_arg = NULL;
+ return (-1);
+}
+
+/*
+ * Copies the repository path into out, a buffer of out_len bytes,
+ * removes the ".db" (or whatever) extension, and, if name is non-NULL,
+ * appends "-name" to it. If name is non-NULL, it can fail with:
+ *
+ * _TRUNCATED will not fit in buffer.
+ * _BAD_REQUEST name is not a valid identifier
+ */
+static rep_protocol_responseid_t
+backend_backup_base(sqlite_backend_t *be, const char *name,
+ char *out, size_t out_len)
+{
+ char *p, *q;
+ size_t len;
+
+ /*
+ * for paths of the form /path/to/foo.db, we truncate at the final
+ * '.'.
+ */
+ (void) strlcpy(out, be->be_path, out_len);
+
+ p = strrchr(out, '/');
+ q = strrchr(out, '.');
+
+ if (p != NULL && q != NULL && q > p)
+ *q = 0;
+
+ if (name != NULL) {
+ len = strlen(out);
+ assert(len < out_len);
+
+ out += len;
+ out_len -= len;
+
+ len = strlen(name);
+
+ /*
+ * verify that the name tag is entirely alphabetic,
+ * non-empty, and not too long.
+ */
+ if (len == 0 || len >= REP_PROTOCOL_NAME_LEN ||
+ uu_check_name(name, UU_NAME_DOMAIN) < 0)
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ if (snprintf(out, out_len, "-%s", name) >= out_len)
+ return (REP_PROTOCOL_FAIL_TRUNCATED);
+ }
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+/*
+ * Can return:
+ * _BAD_REQUEST name is not valid
+ * _TRUNCATED name is too long for current repository path
+ * _UNKNOWN failed for unknown reason (details written to
+ * console)
+ * _BACKEND_READONLY backend is not writable
+ *
+ * _SUCCESS Backup completed successfully.
+ */
+static rep_protocol_responseid_t
+backend_create_backup_locked(sqlite_backend_t *be, const char *name)
+{
+ const char **old_list;
+ ssize_t old_sz;
+ ssize_t old_max = max_repository_backups;
+ ssize_t cur;
+
+ char *finalname;
+
+ char finalpath[PATH_MAX];
+ char tmppath[PATH_MAX];
+ char buf[8192];
+ int infd, outfd;
+ size_t len;
+ off_t inlen, outlen, offset;
+
+ time_t now;
+ struct tm now_tm;
+
+ rep_protocol_responseid_t result;
+
+ if (be->be_readonly)
+ return (REP_PROTOCOL_FAIL_BACKEND_READONLY);
+
+ result = backend_backup_base(be, name, finalpath, sizeof (finalpath));
+ if (result != REP_PROTOCOL_SUCCESS)
+ return (result);
+
+ /*
+ * remember the original length, and the basename location
+ */
+ len = strlen(finalpath);
+ finalname = strrchr(finalpath, '/');
+ if (finalname != NULL)
+ finalname++;
+ else
+ finalname = finalpath;
+
+ (void) strlcpy(tmppath, finalpath, sizeof (tmppath));
+ if (strlcat(tmppath, "-tmpXXXXXX", sizeof (tmppath)) >=
+ sizeof (tmppath))
+ return (REP_PROTOCOL_FAIL_TRUNCATED);
+
+ now = time(NULL);
+ if (localtime_r(&now, &now_tm) == NULL) {
+ configd_critical(
+ "\"%s\" backup failed: localtime(3C) failed: %s\n", name,
+ be->be_path, strerror(errno));
+ return (REP_PROTOCOL_FAIL_UNKNOWN);
+ }
+
+ if (strftime(finalpath + len, sizeof (finalpath) - len,
+ "-%Y""%m""%d""_""%H""%M""%S", &now_tm) >=
+ sizeof (finalpath) - len) {
+ return (REP_PROTOCOL_FAIL_TRUNCATED);
+ }
+
+ infd = open(be->be_path, O_RDONLY);
+ if (infd < 0) {
+ configd_critical("\"%s\" backup failed: opening %s: %s\n", name,
+ be->be_path, strerror(errno));
+ return (REP_PROTOCOL_FAIL_UNKNOWN);
+ }
+
+ outfd = mkstemp(tmppath);
+ if (outfd < 0) {
+ configd_critical("\"%s\" backup failed: mkstemp(%s): %s\n",
+ name, tmppath, strerror(errno));
+ (void) close(infd);
+ return (REP_PROTOCOL_FAIL_UNKNOWN);
+ }
+
+ for (;;) {
+ do {
+ inlen = read(infd, buf, sizeof (buf));
+ } while (inlen < 0 && errno == EINTR);
+
+ if (inlen <= 0)
+ break;
+
+ for (offset = 0; offset < inlen; offset += outlen) {
+ do {
+ outlen = write(outfd, buf + offset,
+ inlen - offset);
+ } while (outlen < 0 && errno == EINTR);
+
+ if (outlen >= 0)
+ continue;
+
+ configd_critical(
+ "\"%s\" backup failed: write to %s: %s\n",
+ name, tmppath, strerror(errno));
+ result = REP_PROTOCOL_FAIL_UNKNOWN;
+ goto fail;
+ }
+ }
+
+ if (inlen < 0) {
+ configd_critical(
+ "\"%s\" backup failed: read from %s: %s\n",
+ name, be->be_path, strerror(errno));
+		result = REP_PROTOCOL_FAIL_UNKNOWN;
+		goto fail;
+ }
+
+ /*
+	 * grab the old list before doing our rename.
+ */
+ if (old_max > 0)
+ old_sz = backend_backup_get_prev(finalpath, len, &old_list);
+
+ if (rename(tmppath, finalpath) < 0) {
+ configd_critical(
+ "\"%s\" backup failed: rename(%s, %s): %s\n",
+ name, tmppath, finalpath, strerror(errno));
+ result = REP_PROTOCOL_FAIL_UNKNOWN;
+ goto fail;
+ }
+
+	tmppath[len] = 0;	/* strip "-tmpXXXXXX", for reference symlink */
+
+ (void) unlink(tmppath);
+ if (symlink(finalname, tmppath) < 0) {
+ configd_critical(
+ "\"%s\" backup completed, but updating "
+ "\"%s\" symlink to \"%s\" failed: %s\n",
+ name, tmppath, finalname, strerror(errno));
+ }
+
+ if (old_max > 0 && old_sz > 0) {
+ /* unlink all but the first (old_max - 1) files */
+ for (cur = old_max - 1; cur < old_sz; cur++) {
+ (void) strlcpy(finalname, old_list[cur],
+ sizeof (finalpath) - (finalname - finalpath));
+ if (unlink(finalpath) < 0)
+ configd_critical(
+ "\"%s\" backup completed, but removing old "
+ "file \"%s\" failed: %s\n",
+ name, finalpath, strerror(errno));
+ }
+
+ backend_backup_cleanup(old_list, old_sz);
+ }
+
+ result = REP_PROTOCOL_SUCCESS;
+
+fail:
+ (void) close(infd);
+ (void) close(outfd);
+ if (result != REP_PROTOCOL_SUCCESS)
+ (void) unlink(tmppath);
+
+ return (result);
+}
+
+
+/*
+ * If t is not BACKEND_TYPE_NORMAL, can fail with
+ * _BACKEND_ACCESS - backend does not exist
+ *
+ * If writing is nonzero, can also fail with
+ * _BACKEND_READONLY - backend is read-only
+ */
+static int
+backend_lock(backend_type_t t, int writing, sqlite_backend_t **bep)
+{
+ sqlite_backend_t *be = NULL;
+ hrtime_t ts, vts;
+
+ *bep = NULL;
+
+ assert(t == BACKEND_TYPE_NORMAL ||
+ t == BACKEND_TYPE_NONPERSIST);
+
+ be = bes[t];
+ if (t == BACKEND_TYPE_NORMAL)
+ assert(be != NULL); /* should always be there */
+
+ if (be == NULL)
+ return (REP_PROTOCOL_FAIL_BACKEND_ACCESS);
+
+ if (backend_panic_thread != 0)
+ backend_panic(NULL); /* don't proceed */
+
+ ts = gethrtime();
+ vts = gethrvtime();
+ (void) pthread_mutex_lock(&be->be_lock);
+ UPDATE_TOTALS_WR(be, writing, bt_lock, ts, vts);
+
+ if (backend_panic_thread != 0) {
+ (void) pthread_mutex_unlock(&be->be_lock);
+ backend_panic(NULL); /* don't proceed */
+ }
+ be->be_thread = pthread_self();
+
+ if (writing && be->be_readonly) {
+ char *errp;
+ struct sqlite *new;
+ int r;
+
+ assert(t == BACKEND_TYPE_NORMAL);
+
+ new = sqlite_open(be->be_path, 0600, &errp);
+ if (new == NULL) {
+ backend_panic("reopening %s: %s\n", be->be_path, errp);
+ /*NOTREACHED*/
+ }
+ r = backend_is_readonly(new, &errp);
+ if (r != SQLITE_OK) {
+ free(errp);
+ sqlite_close(new);
+ be->be_thread = 0;
+ (void) pthread_mutex_unlock(&be->be_lock);
+ return (REP_PROTOCOL_FAIL_BACKEND_READONLY);
+ }
+
+ /*
+ * We can write! Swap our db handles, mark ourself writable,
+ * and make a backup.
+ */
+ sqlite_close(be->be_db);
+ be->be_db = new;
+ be->be_readonly = 0;
+
+ if (backend_create_backup_locked(be, REPOSITORY_BOOT_BACKUP) !=
+ REP_PROTOCOL_SUCCESS) {
+ configd_critical(
+ "unable to create \"%s\" backup of \"%s\"\n",
+ REPOSITORY_BOOT_BACKUP, be->be_path);
+ }
+ }
+
+ if (backend_do_trace)
+ (void) sqlite_trace(be->be_db, backend_trace_sql, be);
+ else
+ (void) sqlite_trace(be->be_db, NULL, NULL);
+
+ be->be_writing = writing;
+ *bep = be;
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+static void
+backend_unlock(sqlite_backend_t *be)
+{
+ be->be_writing = 0;
+ be->be_thread = 0;
+ (void) pthread_mutex_unlock(&be->be_lock);
+}
+
+static void
+backend_destroy(sqlite_backend_t *be)
+{
+ if (be->be_db != NULL) {
+ sqlite_close(be->be_db);
+ be->be_db = NULL;
+ }
+ be->be_thread = 0;
+ (void) pthread_mutex_unlock(&be->be_lock);
+ (void) pthread_mutex_destroy(&be->be_lock);
+}
+
+static void
+backend_create_finish(backend_type_t backend_id, sqlite_backend_t *be)
+{
+ assert(MUTEX_HELD(&be->be_lock));
+ assert(be == &be_info[backend_id]);
+
+ bes[backend_id] = be;
+ (void) pthread_mutex_unlock(&be->be_lock);
+}
+
+static int
+backend_fd_write(int fd, const char *mess)
+{
+ int len = strlen(mess);
+ int written;
+
+ while (len > 0) {
+ if ((written = write(fd, mess, len)) < 0)
+ return (-1);
+ mess += written;
+ len -= written;
+ }
+ return (0);
+}
+
+/*
+ * Can return:
+ * _BAD_REQUEST name is not valid
+ * _TRUNCATED name is too long for current repository path
+ * _UNKNOWN failed for unknown reason (details written to
+ * console)
+ * _BACKEND_READONLY backend is not writable
+ *
+ * _SUCCESS Backup completed successfully.
+ */
+rep_protocol_responseid_t
+backend_create_backup(const char *name)
+{
+ rep_protocol_responseid_t result;
+ sqlite_backend_t *be;
+
+ result = backend_lock(BACKEND_TYPE_NORMAL, 0, &be);
+ if (result != REP_PROTOCOL_SUCCESS)
+ return (result);
+
+ result = backend_create_backup_locked(be, name);
+ backend_unlock(be);
+
+ return (result);
+}
+
+/*ARGSUSED*/
+static int
+backend_integrity_callback(void *private, int narg, char **vals, char **cols)
+{
+ char **out = private;
+ char *old = *out;
+ char *new;
+ const char *info;
+ size_t len;
+ int x;
+
+ for (x = 0; x < narg; x++) {
+ if ((info = vals[x]) != NULL &&
+ strcmp(info, "ok") != 0) {
+ len = (old == NULL)? 0 : strlen(old);
+ len += strlen(info) + 2; /* '\n' + '\0' */
+
+ new = realloc(old, len);
+ if (new == NULL)
+ return (BACKEND_CALLBACK_ABORT);
+ if (old == NULL)
+ new[0] = 0;
+ old = *out = new;
+ (void) strlcat(new, info, len);
+ (void) strlcat(new, "\n", len);
+ }
+ }
+ return (BACKEND_CALLBACK_CONTINUE);
+}
+
+#define BACKEND_CREATE_LOCKED -2
+#define BACKEND_CREATE_FAIL -1
+#define BACKEND_CREATE_SUCCESS 0
+#define BACKEND_CREATE_READONLY 1
+#define BACKEND_CREATE_NEED_INIT 2
+static int
+backend_create(backend_type_t backend_id, const char *db_file,
+ sqlite_backend_t **bep)
+{
+	char *errp = NULL;	/* may be passed to free() on error paths */
+ char *integrity_results = NULL;
+ sqlite_backend_t *be;
+ int r;
+ uint32_t val = -1UL;
+ struct run_single_int_info info;
+ int fd;
+
+ assert(backend_id >= 0 && backend_id < BACKEND_TYPE_TOTAL);
+
+ be = &be_info[backend_id];
+ assert(be->be_db == NULL);
+
+ (void) pthread_mutex_init(&be->be_lock, NULL);
+ (void) pthread_mutex_lock(&be->be_lock);
+
+ be->be_type = backend_id;
+ be->be_path = strdup(db_file);
+ if (be->be_path == NULL) {
+ perror("malloc");
+ goto fail;
+ }
+
+ be->be_db = sqlite_open(be->be_path, 0600, &errp);
+
+ if (be->be_db == NULL) {
+ if (strstr(errp, "out of memory") != NULL) {
+ configd_critical("%s: %s\n", db_file, errp);
+ free(errp);
+
+ goto fail;
+ }
+
+ /* report it as an integrity failure */
+ integrity_results = errp;
+ errp = NULL;
+ goto integrity_fail;
+ }
+
+ /*
+ * check if we are inited and of the correct schema version
+ *
+ * Eventually, we'll support schema upgrade here.
+ */
+ info.rs_out = &val;
+ info.rs_result = REP_PROTOCOL_FAIL_NOT_FOUND;
+
+ r = sqlite_exec(be->be_db, "SELECT schema_version FROM schema_version;",
+ run_single_int_callback, &info, &errp);
+ if (r == SQLITE_ERROR &&
+ strcmp("no such table: schema_version", errp) == 0) {
+ free(errp);
+ /*
+ * Could be an empty repository, could be pre-schema_version
+ * schema. Check for id_tbl, which has always been there.
+ */
+ r = sqlite_exec(be->be_db, "SELECT count() FROM id_tbl;",
+ NULL, NULL, &errp);
+ if (r == SQLITE_ERROR &&
+ strcmp("no such table: id_tbl", errp) == 0) {
+ free(errp);
+ *bep = be;
+ return (BACKEND_CREATE_NEED_INIT);
+ }
+
+ configd_critical("%s: schema version mismatch\n", db_file);
+ goto fail;
+ }
+ if (r == SQLITE_BUSY || r == SQLITE_LOCKED) {
+ free(errp);
+ *bep = NULL;
+ backend_destroy(be);
+ return (BACKEND_CREATE_LOCKED);
+ }
+ if (r == SQLITE_OK) {
+ if (info.rs_result == REP_PROTOCOL_FAIL_NOT_FOUND ||
+ val != BACKEND_SCHEMA_VERSION) {
+ configd_critical("%s: schema version mismatch\n",
+ db_file);
+ goto fail;
+ }
+ }
+
+ /*
+ * pull in the whole database sequentially.
+ */
+ if ((fd = open(db_file, O_RDONLY)) >= 0) {
+ size_t sz = 64 * 1024;
+ char *buffer = malloc(sz);
+ if (buffer != NULL) {
+ while (read(fd, buffer, sz) > 0)
+ ;
+ free(buffer);
+ }
+ (void) close(fd);
+ }
+
+ /*
+ * run an integrity check
+ */
+ r = sqlite_exec(be->be_db, "PRAGMA integrity_check;",
+ backend_integrity_callback, &integrity_results, &errp);
+
+ if (r == SQLITE_BUSY || r == SQLITE_LOCKED) {
+ free(errp);
+ *bep = NULL;
+ backend_destroy(be);
+ return (BACKEND_CREATE_LOCKED);
+ }
+ if (r == SQLITE_ABORT) {
+ free(errp);
+ errp = NULL;
+ integrity_results = "out of memory running integrity check\n";
+ } else if (r != SQLITE_OK && integrity_results == NULL) {
+ integrity_results = errp;
+ errp = NULL;
+ }
+
+integrity_fail:
+ if (integrity_results != NULL) {
+ const char *fname = "/etc/svc/volatile/db_errors";
+ if ((fd = open(fname, O_CREAT|O_WRONLY|O_APPEND, 0600)) < 0) {
+ fname = NULL;
+ } else {
+ if (backend_fd_write(fd, "\n\n") < 0 ||
+ backend_fd_write(fd, db_file) < 0 ||
+ backend_fd_write(fd,
+ ": PRAGMA integrity_check; failed. Results:\n") <
+ 0 || backend_fd_write(fd, integrity_results) < 0 ||
+ backend_fd_write(fd, "\n\n") < 0) {
+ fname = NULL;
+ }
+ (void) close(fd);
+ }
+
+ if (!is_main_repository ||
+ backend_id == BACKEND_TYPE_NONPERSIST) {
+ if (fname != NULL)
+ configd_critical(
+ "%s: integrity check failed. Details in "
+ "%s\n", db_file, fname);
+ else
+ configd_critical(
+ "%s: integrity check failed: %s\n",
+				    db_file, integrity_results);
+ } else {
+ (void) fprintf(stderr,
+"\n"
+"svc.configd: smf(5) database integrity check of:\n"
+"\n"
+" %s\n"
+"\n"
+" failed. The database might be damaged or a media error might have\n"
+" prevented it from being verified. Additional information useful to\n"
+" your service provider%s%s\n"
+"\n"
+" The system will not be able to boot until you have restored a working\n"
+" database. svc.startd(1M) will provide a sulogin(1M) prompt for recovery\n"
+" purposes. The command:\n"
+"\n"
+" /lib/svc/bin/restore_repository\n"
+"\n"
+" can be run to restore a backup version of your repository. See\n"
+" http://sun.com/msg/SMF-8000-MY for more information.\n"
+"\n",
+ db_file,
+ (fname == NULL)? ":\n\n" : " is in:\n\n ",
+ (fname == NULL)? integrity_results : fname);
+ }
+ free(errp);
+ goto fail;
+ }
+
+ /*
+ * check if we are writable
+ */
+ r = backend_is_readonly(be->be_db, &errp);
+
+ if (r == SQLITE_BUSY || r == SQLITE_LOCKED) {
+ free(errp);
+ *bep = NULL;
+ backend_destroy(be);
+ return (BACKEND_CREATE_LOCKED);
+ }
+ if (r != SQLITE_OK && r != SQLITE_FULL) {
+ free(errp);
+ be->be_readonly = 1;
+ *bep = be;
+ return (BACKEND_CREATE_READONLY);
+ }
+ *bep = be;
+ return (BACKEND_CREATE_SUCCESS);
+
+fail:
+ *bep = NULL;
+ backend_destroy(be);
+ return (BACKEND_CREATE_FAIL);
+}
+
+/*
+ * (arg & -arg) is, through the magic of twos-complement arithmetic, the
+ * lowest set bit in arg.
+ */
+static size_t
+round_up_to_p2(size_t arg)
+{
+ /*
+ * Don't allow a zero result.
+ */
+ assert(arg > 0 && ((ssize_t)arg > 0));
+
+ while ((arg & (arg - 1)) != 0)
+ arg += (arg & -arg);
+
+ return (arg);
+}
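+
+/*
+ * Worked example: round_up_to_p2(0x29) iterates
+ *	0x29 + 0x01 = 0x2a,	0x2a + 0x02 = 0x2c,
+ *	0x2c + 0x04 = 0x30,	0x30 + 0x10 = 0x40,
+ * and stops at 0x40, since (0x40 & 0x3f) == 0.  Arguments which are
+ * already powers of two are returned unchanged.
+ */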
+
+/*
+ * Returns
+ * _NO_RESOURCES - out of memory
+ * _BACKEND_ACCESS - backend type t (other than _NORMAL) doesn't exist
+ * _DONE - callback aborted query
+ * _SUCCESS
+ */
+int
+backend_run(backend_type_t t, backend_query_t *q,
+ backend_run_callback_f *cb, void *data)
+{
+ char *errmsg = NULL;
+ int ret;
+ sqlite_backend_t *be;
+ hrtime_t ts, vts;
+
+ if (q == NULL || q->bq_buf == NULL)
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+
+ if ((ret = backend_lock(t, 0, &be)) != REP_PROTOCOL_SUCCESS)
+ return (ret);
+
+ ts = gethrtime();
+ vts = gethrvtime();
+ ret = sqlite_exec(be->be_db, q->bq_buf, cb, data, &errmsg);
+ UPDATE_TOTALS(be, bt_exec, ts, vts);
+ ret = backend_error(be, ret, errmsg);
+ backend_unlock(be);
+
+ return (ret);
+}
+
+/*
+ * Starts a "read-only" transaction -- i.e., locks out writers as long
+ * as it is active.
+ *
+ * Fails with
+ * _NO_RESOURCES - out of memory
+ *
+ * If t is not _NORMAL, can also fail with
+ * _BACKEND_ACCESS - backend does not exist
+ *
+ * If writable is true, can also fail with
+ * _BACKEND_READONLY
+ */
+static int
+backend_tx_begin_common(backend_type_t t, backend_tx_t **txp, int writable)
+{
+ backend_tx_t *ret;
+ sqlite_backend_t *be;
+ int r;
+
+ *txp = NULL;
+
+ ret = uu_zalloc(sizeof (*ret));
+ if (ret == NULL)
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+
+ if ((r = backend_lock(t, writable, &be)) != REP_PROTOCOL_SUCCESS) {
+ uu_free(ret);
+ return (r);
+ }
+
+ ret->bt_be = be;
+ ret->bt_readonly = !writable;
+ ret->bt_type = t;
+ ret->bt_full = 0;
+
+ *txp = ret;
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+int
+backend_tx_begin_ro(backend_type_t t, backend_tx_t **txp)
+{
+ return (backend_tx_begin_common(t, txp, 0));
+}
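+
+/*
+ * A minimal sketch of the read-only pattern (illustrative only; the query
+ * text is just an example, and error handling is abbreviated):
+ *
+ *	backend_tx_t *tx;
+ *	backend_query_t *q;
+ *	int r;
+ *
+ *	r = backend_tx_begin_ro(BACKEND_TYPE_NORMAL, &tx);
+ *	if (r != REP_PROTOCOL_SUCCESS)
+ *		return (r);
+ *	q = backend_query_alloc();
+ *	backend_query_add(q, "SELECT pg_name FROM pg_tbl WHERE (pg_id = %d);",
+ *	    pg_id);
+ *	r = backend_tx_run(tx, q, callback, data);
+ *	backend_query_free(q);
+ *	backend_tx_end_ro(tx);
+ *
+ * Writers use backend_tx_begin() with backend_tx_commit() or
+ * backend_tx_rollback() instead; in every case the backend lock is held
+ * from the begin call until the corresponding end, commit, or rollback.
+ */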
+
+static void
+backend_tx_end(backend_tx_t *tx)
+{
+ sqlite_backend_t *be;
+
+ be = tx->bt_be;
+
+ if (tx->bt_full) {
+ struct sqlite *new;
+
+ /*
+ * sqlite tends to be sticky with SQLITE_FULL, so we try
+ * to get a fresh database handle if we got a FULL warning
+ * along the way. If that fails, no harm done.
+ */
+ new = sqlite_open(be->be_path, 0600, NULL);
+ if (new != NULL) {
+ sqlite_close(be->be_db);
+ be->be_db = new;
+ }
+ }
+ backend_unlock(be);
+ tx->bt_be = NULL;
+ uu_free(tx);
+}
+
+void
+backend_tx_end_ro(backend_tx_t *tx)
+{
+ assert(tx->bt_readonly);
+ backend_tx_end(tx);
+}
+
+/*
+ * Fails with
+ * _NO_RESOURCES - out of memory
+ * _BACKEND_ACCESS
+ * _BACKEND_READONLY
+ */
+int
+backend_tx_begin(backend_type_t t, backend_tx_t **txp)
+{
+ int r;
+ char *errmsg;
+ hrtime_t ts, vts;
+
+ r = backend_tx_begin_common(t, txp, 1);
+ if (r != REP_PROTOCOL_SUCCESS)
+ return (r);
+
+ ts = gethrtime();
+ vts = gethrvtime();
+ r = sqlite_exec((*txp)->bt_be->be_db, "BEGIN TRANSACTION", NULL, NULL,
+ &errmsg);
+ UPDATE_TOTALS((*txp)->bt_be, bt_exec, ts, vts);
+ if (r == SQLITE_FULL)
+ (*txp)->bt_full = 1;
+ r = backend_error((*txp)->bt_be, r, errmsg);
+
+ if (r != REP_PROTOCOL_SUCCESS) {
+ assert(r != REP_PROTOCOL_DONE);
+ (void) sqlite_exec((*txp)->bt_be->be_db,
+ "ROLLBACK TRANSACTION", NULL, NULL, NULL);
+ backend_tx_end(*txp);
+ *txp = NULL;
+ return (r);
+ }
+
+ (*txp)->bt_readonly = 0;
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+void
+backend_tx_rollback(backend_tx_t *tx)
+{
+ int r;
+ char *errmsg;
+ sqlite_backend_t *be;
+ hrtime_t ts, vts;
+
+ assert(tx != NULL && tx->bt_be != NULL && !tx->bt_readonly);
+ be = tx->bt_be;
+
+ ts = gethrtime();
+ vts = gethrvtime();
+ r = sqlite_exec(be->be_db, "ROLLBACK TRANSACTION", NULL, NULL,
+ &errmsg);
+ UPDATE_TOTALS(be, bt_exec, ts, vts);
+ if (r == SQLITE_FULL)
+ tx->bt_full = 1;
+ (void) backend_error(be, r, errmsg);
+
+ backend_tx_end(tx);
+}
+
+/*
+ * Fails with
+ * _NO_RESOURCES - out of memory
+ */
+int
+backend_tx_commit(backend_tx_t *tx)
+{
+ int r, r2;
+ char *errmsg;
+ sqlite_backend_t *be;
+ hrtime_t ts, vts;
+
+ assert(tx != NULL && tx->bt_be != NULL && !tx->bt_readonly);
+ be = tx->bt_be;
+ ts = gethrtime();
+ vts = gethrvtime();
+ r = sqlite_exec(be->be_db, "COMMIT TRANSACTION", NULL, NULL,
+ &errmsg);
+ UPDATE_TOTALS(be, bt_exec, ts, vts);
+ if (r == SQLITE_FULL)
+ tx->bt_full = 1;
+
+ r = backend_error(be, r, errmsg);
+ assert(r != REP_PROTOCOL_DONE);
+
+ if (r != REP_PROTOCOL_SUCCESS) {
+ r2 = sqlite_exec(be->be_db, "ROLLBACK TRANSACTION", NULL, NULL,
+ &errmsg);
+ r2 = backend_error(be, r2, errmsg);
+ if (r2 != REP_PROTOCOL_SUCCESS)
+ backend_panic("cannot rollback failed commit");
+
+ backend_tx_end(tx);
+ return (r);
+ }
+ backend_tx_end(tx);
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+static const char *
+id_space_to_name(enum id_space id)
+{
+ switch (id) {
+ case BACKEND_ID_SERVICE_INSTANCE:
+ return ("SI");
+ case BACKEND_ID_PROPERTYGRP:
+ return ("PG");
+ case BACKEND_ID_GENERATION:
+ return ("GEN");
+ case BACKEND_ID_PROPERTY:
+ return ("PROP");
+ case BACKEND_ID_VALUE:
+ return ("VAL");
+ case BACKEND_ID_SNAPNAME:
+ return ("SNAME");
+ case BACKEND_ID_SNAPSHOT:
+ return ("SHOT");
+ case BACKEND_ID_SNAPLEVEL:
+ return ("SLVL");
+ default:
+ abort();
+ /*NOTREACHED*/
+ }
+}
+
+/*
+ * Returns a new id or 0 if the id argument is invalid or the query fails.
+ */
+uint32_t
+backend_new_id(backend_tx_t *tx, enum id_space id)
+{
+ struct run_single_int_info info;
+ uint32_t new_id = 0;
+ const char *name = id_space_to_name(id);
+ char *errmsg;
+ int ret;
+ sqlite_backend_t *be;
+ hrtime_t ts, vts;
+
+ assert(tx != NULL && tx->bt_be != NULL && !tx->bt_readonly);
+ be = tx->bt_be;
+
+ info.rs_out = &new_id;
+ info.rs_result = REP_PROTOCOL_FAIL_NOT_FOUND;
+
+ ts = gethrtime();
+ vts = gethrvtime();
+ ret = sqlite_exec_printf(be->be_db,
+ "SELECT id_next FROM id_tbl WHERE (id_name = '%q');"
+ "UPDATE id_tbl SET id_next = id_next + 1 WHERE (id_name = '%q');",
+ run_single_int_callback, &info, &errmsg, name, name);
+ UPDATE_TOTALS(be, bt_exec, ts, vts);
+ if (ret == SQLITE_FULL)
+ tx->bt_full = 1;
+
+ ret = backend_error(be, ret, errmsg);
+
+ if (ret != REP_PROTOCOL_SUCCESS) {
+ return (0);
+ }
+
+ return (new_id);
+}
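+
+/*
+ * Illustrative use (the surrounding caller code is hypothetical):
+ *
+ *	uint32_t pg_id = backend_new_id(tx, BACKEND_ID_PROPERTYGRP);
+ *	if (pg_id == 0)
+ *		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+ *
+ * Since the caller's transaction holds the backend lock from
+ * backend_tx_begin() until commit or rollback, two threads can never be
+ * handed the same id_next value.
+ */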
+
+/*
+ * Returns
+ * _NO_RESOURCES - out of memory
+ * _DONE - callback aborted query
+ * _SUCCESS
+ */
+int
+backend_tx_run(backend_tx_t *tx, backend_query_t *q,
+ backend_run_callback_f *cb, void *data)
+{
+ char *errmsg = NULL;
+ int ret;
+ sqlite_backend_t *be;
+ hrtime_t ts, vts;
+
+ assert(tx != NULL && tx->bt_be != NULL);
+ be = tx->bt_be;
+
+ if (q == NULL || q->bq_buf == NULL)
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+
+ ts = gethrtime();
+ vts = gethrvtime();
+ ret = sqlite_exec(be->be_db, q->bq_buf, cb, data, &errmsg);
+ UPDATE_TOTALS(be, bt_exec, ts, vts);
+ if (ret == SQLITE_FULL)
+ tx->bt_full = 1;
+ ret = backend_error(be, ret, errmsg);
+
+ return (ret);
+}
+
+/*
+ * Returns
+ * _NO_RESOURCES - out of memory
+ * _NOT_FOUND - the query returned no results
+ * _SUCCESS - the query returned a single integer
+ */
+int
+backend_tx_run_single_int(backend_tx_t *tx, backend_query_t *q, uint32_t *buf)
+{
+ struct run_single_int_info info;
+ int ret;
+
+ info.rs_out = buf;
+ info.rs_result = REP_PROTOCOL_FAIL_NOT_FOUND;
+
+ ret = backend_tx_run(tx, q, run_single_int_callback, &info);
+ assert(ret != REP_PROTOCOL_DONE);
+
+ if (ret != REP_PROTOCOL_SUCCESS)
+ return (ret);
+
+ return (info.rs_result);
+}
+
+/*
+ * Fails with
+ * _NO_RESOURCES - out of memory
+ */
+int
+backend_tx_run_update(backend_tx_t *tx, const char *format, ...)
+{
+ va_list a;
+ char *errmsg;
+ int ret;
+ sqlite_backend_t *be;
+ hrtime_t ts, vts;
+
+ assert(tx != NULL && tx->bt_be != NULL && !tx->bt_readonly);
+ be = tx->bt_be;
+
+ va_start(a, format);
+ ts = gethrtime();
+ vts = gethrvtime();
+ ret = sqlite_exec_vprintf(be->be_db, format, NULL, NULL, &errmsg, a);
+ UPDATE_TOTALS(be, bt_exec, ts, vts);
+ if (ret == SQLITE_FULL)
+ tx->bt_full = 1;
+ va_end(a);
+ ret = backend_error(be, ret, errmsg);
+ assert(ret != REP_PROTOCOL_DONE);
+
+ return (ret);
+}
+
+/*
+ * returns REP_PROTOCOL_FAIL_NOT_FOUND if no changes occurred
+ */
+int
+backend_tx_run_update_changed(backend_tx_t *tx, const char *format, ...)
+{
+ va_list a;
+ char *errmsg;
+ int ret;
+ sqlite_backend_t *be;
+ hrtime_t ts, vts;
+
+ assert(tx != NULL && tx->bt_be != NULL && !tx->bt_readonly);
+ be = tx->bt_be;
+
+ va_start(a, format);
+ ts = gethrtime();
+ vts = gethrvtime();
+ ret = sqlite_exec_vprintf(be->be_db, format, NULL, NULL, &errmsg, a);
+ UPDATE_TOTALS(be, bt_exec, ts, vts);
+ if (ret == SQLITE_FULL)
+ tx->bt_full = 1;
+ va_end(a);
+
+ ret = backend_error(be, ret, errmsg);
+
+ return (ret);
+}
+
+#define BACKEND_ADD_SCHEMA(be, file, tbls, idxs) \
+ (backend_add_schema((be), (file), \
+ (tbls), sizeof (tbls) / sizeof (*(tbls)), \
+ (idxs), sizeof (idxs) / sizeof (*(idxs))))
+
+static int
+backend_add_schema(sqlite_backend_t *be, const char *file,
+ struct backend_tbl_info *tbls, int tbl_count,
+ struct backend_idx_info *idxs, int idx_count)
+{
+ int i;
+ char *errmsg;
+ int ret;
+
+ /*
+ * Create the tables.
+ */
+ for (i = 0; i < tbl_count; i++) {
+ if (tbls[i].bti_name == NULL) {
+ assert(i + 1 == tbl_count);
+ break;
+ }
+ ret = sqlite_exec_printf(be->be_db,
+ "CREATE TABLE %s (%s);\n",
+ NULL, NULL, &errmsg, tbls[i].bti_name, tbls[i].bti_cols);
+
+ if (ret != SQLITE_OK) {
+ configd_critical(
+ "%s: %s table creation fails: %s\n", file,
+ tbls[i].bti_name, errmsg);
+ free(errmsg);
+ return (-1);
+ }
+ }
+
+ /*
+ * Make indices on key tables and columns.
+ */
+ for (i = 0; i < idx_count; i++) {
+ if (idxs[i].bxi_tbl == NULL) {
+ assert(i + 1 == idx_count);
+ break;
+ }
+
+ ret = sqlite_exec_printf(be->be_db,
+ "CREATE INDEX %s_%s ON %s (%s);\n",
+ NULL, NULL, &errmsg, idxs[i].bxi_tbl, idxs[i].bxi_idx,
+ idxs[i].bxi_tbl, idxs[i].bxi_cols);
+
+ if (ret != SQLITE_OK) {
+ configd_critical(
+ "%s: %s_%s index creation fails: %s\n", file,
+ idxs[i].bxi_tbl, idxs[i].bxi_idx, errmsg);
+ free(errmsg);
+ return (-1);
+ }
+ }
+ return (0);
+}
+
+static int
+backend_init_schema(sqlite_backend_t *be, const char *db_file, backend_type_t t)
+{
+ int i;
+ char *errmsg;
+ int ret;
+
+ assert(t == BACKEND_TYPE_NORMAL || t == BACKEND_TYPE_NONPERSIST);
+
+ if (t == BACKEND_TYPE_NORMAL) {
+ ret = BACKEND_ADD_SCHEMA(be, db_file, tbls_normal, idxs_normal);
+ } else if (t == BACKEND_TYPE_NONPERSIST) {
+ ret = BACKEND_ADD_SCHEMA(be, db_file, tbls_np, idxs_np);
+ } else {
+ abort(); /* can't happen */
+ }
+
+ if (ret < 0) {
+ return (ret);
+ }
+
+ ret = BACKEND_ADD_SCHEMA(be, db_file, tbls_common, idxs_common);
+ if (ret < 0) {
+ return (ret);
+ }
+
+ /*
+ * Add the schema version to the table
+ */
+ ret = sqlite_exec_printf(be->be_db,
+ "INSERT INTO schema_version (schema_version) VALUES (%d)",
+ NULL, NULL, &errmsg, BACKEND_SCHEMA_VERSION);
+ if (ret != SQLITE_OK) {
+ configd_critical(
+ "setting schema version fails: %s\n", errmsg);
+		free(errmsg);
+		return (-1);
+	}
+
+ /*
+ * Populate id_tbl with initial IDs.
+ */
+ for (i = 0; i < BACKEND_ID_INVALID; i++) {
+ const char *name = id_space_to_name(i);
+
+ ret = sqlite_exec_printf(be->be_db,
+ "INSERT INTO id_tbl (id_name, id_next) "
+ "VALUES ('%q', %d);", NULL, NULL, &errmsg, name, 1);
+ if (ret != SQLITE_OK) {
+ configd_critical(
+ "id insertion for %s fails: %s\n", name, errmsg);
+ free(errmsg);
+ return (-1);
+ }
+ }
+ /*
+	 * Set the persistence of the database. The normal database is marked
+ * "synchronous", so that all writes are synchronized to stable storage
+ * before proceeding.
+ */
+ ret = sqlite_exec_printf(be->be_db,
+ "PRAGMA default_synchronous = %s; PRAGMA synchronous = %s;",
+ NULL, NULL, &errmsg,
+ (t == BACKEND_TYPE_NORMAL)? "ON" : "OFF",
+ (t == BACKEND_TYPE_NORMAL)? "ON" : "OFF");
+ if (ret != SQLITE_OK) {
+ configd_critical("pragma setting fails: %s\n", errmsg);
+ free(errmsg);
+ return (-1);
+ }
+
+ return (0);
+}
+
+int
+backend_init(const char *db_file, const char *npdb_file, int have_np)
+{
+ sqlite_backend_t *be;
+ int r;
+ int writable_persist = 1;
+
+ /* set up our temporary directory */
+ sqlite_temp_directory = "/etc/svc/volatile";
+
+ if (strcmp(SQLITE_VERSION, sqlite_version) != 0) {
+ configd_critical("Mismatched link! (%s should be %s)\n",
+ sqlite_version, SQLITE_VERSION);
+ return (CONFIGD_EXIT_DATABASE_INIT_FAILED);
+ }
+ if (db_file == NULL)
+ db_file = REPOSITORY_DB;
+
+ r = backend_create(BACKEND_TYPE_NORMAL, db_file, &be);
+ switch (r) {
+ case BACKEND_CREATE_FAIL:
+ return (CONFIGD_EXIT_DATABASE_INIT_FAILED);
+ case BACKEND_CREATE_LOCKED:
+ return (CONFIGD_EXIT_DATABASE_LOCKED);
+ case BACKEND_CREATE_SUCCESS:
+ break; /* success */
+ case BACKEND_CREATE_READONLY:
+ writable_persist = 0;
+ break;
+ case BACKEND_CREATE_NEED_INIT:
+ if (backend_init_schema(be, db_file, BACKEND_TYPE_NORMAL)) {
+ backend_destroy(be);
+ return (CONFIGD_EXIT_DATABASE_INIT_FAILED);
+ }
+ break;
+ default:
+ abort();
+ /*NOTREACHED*/
+ }
+ backend_create_finish(BACKEND_TYPE_NORMAL, be);
+
+ if (have_np) {
+ if (npdb_file == NULL)
+ npdb_file = NONPERSIST_DB;
+
+ r = backend_create(BACKEND_TYPE_NONPERSIST, npdb_file, &be);
+ switch (r) {
+ case BACKEND_CREATE_SUCCESS:
+ break; /* success */
+ case BACKEND_CREATE_FAIL:
+ return (CONFIGD_EXIT_DATABASE_INIT_FAILED);
+ case BACKEND_CREATE_LOCKED:
+ return (CONFIGD_EXIT_DATABASE_LOCKED);
+ case BACKEND_CREATE_READONLY:
+ configd_critical("%s: unable to write\n", npdb_file);
+ return (CONFIGD_EXIT_DATABASE_INIT_FAILED);
+ case BACKEND_CREATE_NEED_INIT:
+			if (backend_init_schema(be, npdb_file,
+ BACKEND_TYPE_NONPERSIST)) {
+ backend_destroy(be);
+ return (CONFIGD_EXIT_DATABASE_INIT_FAILED);
+ }
+ break;
+ default:
+ abort();
+ /*NOTREACHED*/
+ }
+ backend_create_finish(BACKEND_TYPE_NONPERSIST, be);
+
+ /*
+ * If we started up with a writable filesystem, but the
+ * non-persistent database needed initialization, we
+ * are booting a non-global zone, so do a backup.
+ */
+ if (r == BACKEND_CREATE_NEED_INIT && writable_persist &&
+ backend_lock(BACKEND_TYPE_NORMAL, 0, &be) ==
+ REP_PROTOCOL_SUCCESS) {
+ if (backend_create_backup_locked(be,
+ REPOSITORY_BOOT_BACKUP) != REP_PROTOCOL_SUCCESS) {
+ configd_critical(
+ "unable to create \"%s\" backup of "
+ "\"%s\"\n", REPOSITORY_BOOT_BACKUP,
+ be->be_path);
+ }
+ backend_unlock(be);
+ }
+ }
+ return (CONFIGD_EXIT_OKAY);
+}
+
+/*
+ * quiesce all database activity prior to exiting
+ */
+void
+backend_fini(void)
+{
+ sqlite_backend_t *be_normal, *be_np;
+
+ (void) backend_lock(BACKEND_TYPE_NORMAL, 1, &be_normal);
+ (void) backend_lock(BACKEND_TYPE_NONPERSIST, 1, &be_np);
+}
+
+#define QUERY_BASE 128
+backend_query_t *
+backend_query_alloc(void)
+{
+ backend_query_t *q;
+ q = calloc(1, sizeof (backend_query_t));
+ if (q != NULL) {
+ q->bq_size = QUERY_BASE;
+ q->bq_buf = calloc(1, q->bq_size);
+ if (q->bq_buf == NULL) {
+ q->bq_size = 0;
+ }
+
+ }
+ return (q);
+}
+
+void
+backend_query_append(backend_query_t *q, const char *value)
+{
+ char *alloc;
+ int count;
+ size_t size, old_len;
+
+ if (q == NULL) {
+ /* We'll discover the error when we try to run the query. */
+ return;
+ }
+
+ while (q->bq_buf != NULL) {
+ old_len = strlen(q->bq_buf);
+ size = q->bq_size;
+ count = strlcat(q->bq_buf, value, size);
+
+ if (count < size)
+ break; /* success */
+
+ q->bq_buf[old_len] = 0;
+ size = round_up_to_p2(count + 1);
+
+ assert(size > q->bq_size);
+ alloc = realloc(q->bq_buf, size);
+ if (alloc == NULL) {
+ free(q->bq_buf);
+ q->bq_buf = NULL;
+ break; /* can't grow */
+ }
+
+ q->bq_buf = alloc;
+ q->bq_size = size;
+ }
+}
+
+void
+backend_query_add(backend_query_t *q, const char *format, ...)
+{
+ va_list args;
+ char *new;
+
+ if (q == NULL || q->bq_buf == NULL)
+ return;
+
+ va_start(args, format);
+ new = sqlite_vmprintf(format, args);
+ va_end(args);
+
+ if (new == NULL) {
+ free(q->bq_buf);
+ q->bq_buf = NULL;
+ return;
+ }
+
+ backend_query_append(q, new);
+
+ free(new);
+}
+
+void
+backend_query_free(backend_query_t *q)
+{
+ if (q != NULL) {
+ if (q->bq_buf != NULL) {
+ free(q->bq_buf);
+ }
+ free(q);
+ }
+}
diff --git a/usr/src/cmd/svc/configd/client.c b/usr/src/cmd/svc/configd/client.c
new file mode 100644
index 0000000000..49aed805d1
--- /dev/null
+++ b/usr/src/cmd/svc/configd/client.c
@@ -0,0 +1,2212 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * This is the client layer for svc.configd. All direct protocol interactions
+ * are handled here.
+ *
+ * Essentially, the job of this layer is to turn the idempotent protocol
+ * into a series of non-idempotent calls into the object layer, while
+ * also handling the necessary locking.
+ */
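+
+/*
+ * As an illustration of that idempotency (commentary only, not part of
+ * the protocol definition): entity_update() below records the request's
+ * rpr_changeid on the entity once rc_node_update() reports _DONE, so a
+ * retransmitted request carrying the same changeid returns _DONE again
+ * without calling back into the object layer.  Iterators get the same
+ * treatment via sequence numbers (see iter_read()).
+ */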
+
+#include <alloca.h>
+#include <assert.h>
+#include <door.h>
+#include <errno.h>
+#include <limits.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <libuutil.h>
+
+#include "configd.h"
+#include "repcache_protocol.h"
+
+#define INVALID_CHANGEID (0)
+#define INVALID_DOORID ((door_id_t)-1)
+#define INVALID_RESULT ((rep_protocol_responseid_t)INT_MIN)
+
+/*
+ * lint doesn't like constant assertions
+ */
+#ifdef lint
+#define assert_nolint(x) (void)0
+#else
+#define assert_nolint(x) assert(x)
+#endif
+
+/*
+ * Protects client linkage and the freelist
+ */
+#define CLIENT_HASH_SIZE 64
+
+#pragma align 64(client_hash)
+static client_bucket_t client_hash[CLIENT_HASH_SIZE];
+
+static uu_list_pool_t *entity_pool;
+static uu_list_pool_t *iter_pool;
+static uu_list_pool_t *client_pool;
+
+#define CLIENT_HASH(id) (&client_hash[((id) & (CLIENT_HASH_SIZE - 1))])
+
+uint_t request_log_size = 1024; /* tunable, before we start */
+
+static pthread_mutex_t request_log_lock = PTHREAD_MUTEX_INITIALIZER;
+static uint_t request_log_cur;
+request_log_entry_t *request_log;
+
+static uint32_t client_maxid;
+static pthread_mutex_t client_lock; /* protects client_maxid */
+
+static request_log_entry_t *
+get_log(void)
+{
+ thread_info_t *ti = thread_self();
+ return (&ti->ti_log);
+}
+
+void
+log_enter(request_log_entry_t *rlp)
+{
+ if (rlp->rl_start != 0 && request_log != NULL) {
+ request_log_entry_t *logrlp;
+
+ (void) pthread_mutex_lock(&request_log_lock);
+ assert(request_log_cur < request_log_size);
+ logrlp = &request_log[request_log_cur++];
+ if (request_log_cur == request_log_size)
+ request_log_cur = 0;
+ (void) memcpy(logrlp, rlp, sizeof (*rlp));
+ (void) pthread_mutex_unlock(&request_log_lock);
+ }
+}
+
+/*
+ * Note that the svc.configd dmod will join all of the per-thread log entries
+ * with the main log, so that even if the log is disabled, there is some
+ * information available.
+ */
+static request_log_entry_t *
+start_log(uint32_t clientid)
+{
+ request_log_entry_t *rlp = get_log();
+
+ log_enter(rlp);
+
+ (void) memset(rlp, 0, sizeof (*rlp));
+ rlp->rl_start = gethrtime();
+ rlp->rl_tid = pthread_self();
+ rlp->rl_clientid = clientid;
+
+ return (rlp);
+}
+
+void
+end_log(void)
+{
+ request_log_entry_t *rlp = get_log();
+
+ rlp->rl_end = gethrtime();
+}
+
+static void
+add_log_ptr(request_log_entry_t *rlp, enum rc_ptr_type type, uint32_t id,
+ void *ptr)
+{
+ request_log_ptr_t *rpp;
+
+ if (rlp == NULL)
+ return;
+
+ if (rlp->rl_num_ptrs >= MAX_PTRS)
+ return;
+
+ rpp = &rlp->rl_ptrs[rlp->rl_num_ptrs++];
+ rpp->rlp_type = type;
+ rpp->rlp_id = id;
+ rpp->rlp_ptr = ptr;
+
+ /*
+ * For entities, it's useful to have the node pointer at the start
+ * of the request.
+ */
+ if (type == RC_PTR_TYPE_ENTITY && ptr != NULL)
+ rpp->rlp_data = ((repcache_entity_t *)ptr)->re_node.rnp_node;
+}
+
+int
+client_is_privileged(void)
+{
+ thread_info_t *ti = thread_self();
+
+ ucred_t *uc;
+
+ if (ti->ti_active_client != NULL &&
+ ti->ti_active_client->rc_all_auths)
+ return (1);
+
+ if ((uc = get_ucred()) == NULL)
+ return (0);
+
+ return (ucred_is_privileged(uc));
+}
+
+/*ARGSUSED*/
+static int
+client_compare(const void *lc_arg, const void *rc_arg, void *private)
+{
+ uint32_t l_id = ((const repcache_client_t *)lc_arg)->rc_id;
+ uint32_t r_id = ((const repcache_client_t *)rc_arg)->rc_id;
+
+ if (l_id > r_id)
+ return (1);
+ if (l_id < r_id)
+ return (-1);
+ return (0);
+}
+
+/*ARGSUSED*/
+static int
+entity_compare(const void *lc_arg, const void *rc_arg, void *private)
+{
+ uint32_t l_id = ((const repcache_entity_t *)lc_arg)->re_id;
+ uint32_t r_id = ((const repcache_entity_t *)rc_arg)->re_id;
+
+ if (l_id > r_id)
+ return (1);
+ if (l_id < r_id)
+ return (-1);
+ return (0);
+}
+
+/*ARGSUSED*/
+static int
+iter_compare(const void *lc_arg, const void *rc_arg, void *private)
+{
+ uint32_t l_id = ((const repcache_iter_t *)lc_arg)->ri_id;
+ uint32_t r_id = ((const repcache_iter_t *)rc_arg)->ri_id;
+
+ if (l_id > r_id)
+ return (1);
+ if (l_id < r_id)
+ return (-1);
+ return (0);
+}
+
+static int
+client_hash_init(void)
+{
+ int x;
+
+ assert_nolint(offsetof(repcache_entity_t, re_id) == 0);
+ entity_pool = uu_list_pool_create("repcache_entitys",
+ sizeof (repcache_entity_t), offsetof(repcache_entity_t, re_link),
+ entity_compare, UU_LIST_POOL_DEBUG);
+
+ assert_nolint(offsetof(repcache_iter_t, ri_id) == 0);
+ iter_pool = uu_list_pool_create("repcache_iters",
+ sizeof (repcache_iter_t), offsetof(repcache_iter_t, ri_link),
+ iter_compare, UU_LIST_POOL_DEBUG);
+
+ assert_nolint(offsetof(repcache_client_t, rc_id) == 0);
+ client_pool = uu_list_pool_create("repcache_clients",
+ sizeof (repcache_client_t), offsetof(repcache_client_t, rc_link),
+ client_compare, UU_LIST_POOL_DEBUG);
+
+ if (entity_pool == NULL || iter_pool == NULL || client_pool == NULL)
+ return (0);
+
+ for (x = 0; x < CLIENT_HASH_SIZE; x++) {
+ uu_list_t *lp = uu_list_create(client_pool, &client_hash[x],
+ UU_LIST_SORTED);
+ if (lp == NULL)
+ return (0);
+
+ (void) pthread_mutex_init(&client_hash[x].cb_lock, NULL);
+ client_hash[x].cb_list = lp;
+ }
+
+ return (1);
+}
+
+static repcache_client_t *
+client_alloc(void)
+{
+ repcache_client_t *cp;
+ cp = uu_zalloc(sizeof (*cp));
+ if (cp == NULL)
+ return (NULL);
+
+ cp->rc_entity_list = uu_list_create(entity_pool, cp, UU_LIST_SORTED);
+ if (cp->rc_entity_list == NULL)
+ goto fail;
+
+ cp->rc_iter_list = uu_list_create(iter_pool, cp, UU_LIST_SORTED);
+ if (cp->rc_iter_list == NULL)
+ goto fail;
+
+ uu_list_node_init(cp, &cp->rc_link, client_pool);
+
+ cp->rc_doorfd = -1;
+ cp->rc_doorid = INVALID_DOORID;
+
+ (void) pthread_mutex_init(&cp->rc_lock, NULL);
+
+ rc_node_ptr_init(&cp->rc_notify_ptr);
+
+ return (cp);
+
+fail:
+ if (cp->rc_iter_list != NULL)
+ uu_list_destroy(cp->rc_iter_list);
+ if (cp->rc_entity_list != NULL)
+ uu_list_destroy(cp->rc_entity_list);
+ uu_free(cp);
+ return (NULL);
+}
+
+static void
+client_free(repcache_client_t *cp)
+{
+ assert(cp->rc_insert_thr == 0);
+ assert(cp->rc_refcnt == 0);
+ assert(cp->rc_doorfd == -1);
+ assert(cp->rc_doorid == INVALID_DOORID);
+ assert(uu_list_first(cp->rc_entity_list) == NULL);
+ assert(uu_list_first(cp->rc_iter_list) == NULL);
+ uu_list_destroy(cp->rc_entity_list);
+ uu_list_destroy(cp->rc_iter_list);
+ uu_list_node_fini(cp, &cp->rc_link, client_pool);
+ (void) pthread_mutex_destroy(&cp->rc_lock);
+ uu_free(cp);
+}
+
+static void
+client_insert(repcache_client_t *cp)
+{
+ client_bucket_t *bp = CLIENT_HASH(cp->rc_id);
+ uu_list_index_t idx;
+
+ assert(cp->rc_id > 0);
+
+ (void) pthread_mutex_lock(&bp->cb_lock);
+ /*
+ * We assume it does not already exist
+ */
+ (void) uu_list_find(bp->cb_list, cp, NULL, &idx);
+ uu_list_insert(bp->cb_list, cp, idx);
+
+ (void) pthread_mutex_unlock(&bp->cb_lock);
+}
+
+static repcache_client_t *
+client_lookup(uint32_t id)
+{
+ client_bucket_t *bp = CLIENT_HASH(id);
+ repcache_client_t *cp;
+
+ (void) pthread_mutex_lock(&bp->cb_lock);
+
+ cp = uu_list_find(bp->cb_list, &id, NULL, NULL);
+
+ /*
+ * Bump the reference count
+ */
+ if (cp != NULL) {
+ (void) pthread_mutex_lock(&cp->rc_lock);
+ assert(!(cp->rc_flags & RC_CLIENT_DEAD));
+ cp->rc_refcnt++;
+ (void) pthread_mutex_unlock(&cp->rc_lock);
+ }
+ (void) pthread_mutex_unlock(&bp->cb_lock);
+
+ return (cp);
+}
+
+static void
+client_release(repcache_client_t *cp)
+{
+ (void) pthread_mutex_lock(&cp->rc_lock);
+ assert(cp->rc_refcnt > 0);
+ assert(cp->rc_insert_thr != pthread_self());
+
+ --cp->rc_refcnt;
+ (void) pthread_cond_broadcast(&cp->rc_cv);
+ (void) pthread_mutex_unlock(&cp->rc_lock);
+}
+
+/*
+ * We only allow one thread to be inserting at a time, to prevent
+ * insert/insert races.
+ */
+static void
+client_start_insert(repcache_client_t *cp)
+{
+ (void) pthread_mutex_lock(&cp->rc_lock);
+ assert(cp->rc_refcnt > 0);
+
+ while (cp->rc_insert_thr != 0) {
+ assert(cp->rc_insert_thr != pthread_self());
+ (void) pthread_cond_wait(&cp->rc_cv, &cp->rc_lock);
+ }
+ cp->rc_insert_thr = pthread_self();
+ (void) pthread_mutex_unlock(&cp->rc_lock);
+}
+
+static void
+client_end_insert(repcache_client_t *cp)
+{
+ (void) pthread_mutex_lock(&cp->rc_lock);
+ assert(cp->rc_insert_thr == pthread_self());
+ cp->rc_insert_thr = 0;
+ (void) pthread_cond_broadcast(&cp->rc_cv);
+ (void) pthread_mutex_unlock(&cp->rc_lock);
+}
+
+/*ARGSUSED*/
+static repcache_entity_t *
+entity_alloc(repcache_client_t *cp)
+{
+ repcache_entity_t *ep = uu_zalloc(sizeof (repcache_entity_t));
+ if (ep != NULL) {
+ uu_list_node_init(ep, &ep->re_link, entity_pool);
+ }
+ return (ep);
+}
+
+static void
+entity_add(repcache_client_t *cp, repcache_entity_t *ep)
+{
+ uu_list_index_t idx;
+
+ (void) pthread_mutex_lock(&cp->rc_lock);
+ assert(cp->rc_insert_thr == pthread_self());
+
+ (void) uu_list_find(cp->rc_entity_list, ep, NULL, &idx);
+ uu_list_insert(cp->rc_entity_list, ep, idx);
+
+ (void) pthread_mutex_unlock(&cp->rc_lock);
+}
+
+static repcache_entity_t *
+entity_find(repcache_client_t *cp, uint32_t id)
+{
+ repcache_entity_t *ep;
+
+ (void) pthread_mutex_lock(&cp->rc_lock);
+ ep = uu_list_find(cp->rc_entity_list, &id, NULL, NULL);
+ if (ep != NULL) {
+ add_log_ptr(get_log(), RC_PTR_TYPE_ENTITY, id, ep);
+ (void) pthread_mutex_lock(&ep->re_lock);
+ }
+ (void) pthread_mutex_unlock(&cp->rc_lock);
+
+ return (ep);
+}
+
+/*
+ * Fails with
+ * _DUPLICATE_ID - the ids are equal
+ * _UNKNOWN_ID - an id does not designate an active register
+ */
+static int
+entity_find2(repcache_client_t *cp, uint32_t id1, repcache_entity_t **out1,
+ uint32_t id2, repcache_entity_t **out2)
+{
+ repcache_entity_t *e1, *e2;
+ request_log_entry_t *rlp;
+
+ if (id1 == id2)
+ return (REP_PROTOCOL_FAIL_DUPLICATE_ID);
+
+ (void) pthread_mutex_lock(&cp->rc_lock);
+ e1 = uu_list_find(cp->rc_entity_list, &id1, NULL, NULL);
+ e2 = uu_list_find(cp->rc_entity_list, &id2, NULL, NULL);
+ if (e1 == NULL || e2 == NULL) {
+ (void) pthread_mutex_unlock(&cp->rc_lock);
+ return (REP_PROTOCOL_FAIL_UNKNOWN_ID);
+ }
+
+ assert(e1 != e2);
+
+ /*
+ * locks are ordered by id number
+ */
+ if (id1 < id2) {
+ (void) pthread_mutex_lock(&e1->re_lock);
+ (void) pthread_mutex_lock(&e2->re_lock);
+ } else {
+ (void) pthread_mutex_lock(&e2->re_lock);
+ (void) pthread_mutex_lock(&e1->re_lock);
+ }
+ *out1 = e1;
+ *out2 = e2;
+
+ (void) pthread_mutex_unlock(&cp->rc_lock);
+
+ if ((rlp = get_log()) != NULL) {
+ add_log_ptr(rlp, RC_PTR_TYPE_ENTITY, id1, e1);
+ add_log_ptr(rlp, RC_PTR_TYPE_ENTITY, id2, e2);
+ }
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+static void
+entity_release(repcache_entity_t *ep)
+{
+ assert(ep->re_node.rnp_node == NULL ||
+ !MUTEX_HELD(&ep->re_node.rnp_node->rn_lock));
+ (void) pthread_mutex_unlock(&ep->re_lock);
+}
+
+static void
+entity_destroy(repcache_entity_t *entity)
+{
+ (void) pthread_mutex_lock(&entity->re_lock);
+ rc_node_clear(&entity->re_node, 0);
+ (void) pthread_mutex_unlock(&entity->re_lock);
+
+ uu_list_node_fini(entity, &entity->re_link, entity_pool);
+ (void) pthread_mutex_destroy(&entity->re_lock);
+ uu_free(entity);
+}
+
+static void
+entity_remove(repcache_client_t *cp, uint32_t id)
+{
+ repcache_entity_t *entity;
+
+ (void) pthread_mutex_lock(&cp->rc_lock);
+ entity = uu_list_find(cp->rc_entity_list, &id, NULL, NULL);
+ if (entity != NULL)
+ uu_list_remove(cp->rc_entity_list, entity);
+ (void) pthread_mutex_unlock(&cp->rc_lock);
+
+ if (entity != NULL)
+ entity_destroy(entity);
+}
+
+static void
+entity_cleanup(repcache_client_t *cp)
+{
+ repcache_entity_t *ep;
+ void *cookie = NULL;
+
+ (void) pthread_mutex_lock(&cp->rc_lock);
+ while ((ep = uu_list_teardown(cp->rc_entity_list, &cookie)) != NULL) {
+ (void) pthread_mutex_unlock(&cp->rc_lock);
+ entity_destroy(ep);
+ (void) pthread_mutex_lock(&cp->rc_lock);
+ }
+ (void) pthread_mutex_unlock(&cp->rc_lock);
+}
+
+/*ARGSUSED*/
+static repcache_iter_t *
+iter_alloc(repcache_client_t *cp)
+{
+ repcache_iter_t *iter;
+ iter = uu_zalloc(sizeof (repcache_iter_t));
+ if (iter != NULL)
+ uu_list_node_init(iter, &iter->ri_link, iter_pool);
+ return (iter);
+}
+
+static void
+iter_add(repcache_client_t *cp, repcache_iter_t *iter)
+{
+ uu_list_index_t idx;
+
+ (void) pthread_mutex_lock(&cp->rc_lock);
+ assert(cp->rc_insert_thr == pthread_self());
+
+ (void) uu_list_find(cp->rc_iter_list, iter, NULL, &idx);
+ uu_list_insert(cp->rc_iter_list, iter, idx);
+
+ (void) pthread_mutex_unlock(&cp->rc_lock);
+}
+
+static repcache_iter_t *
+iter_find(repcache_client_t *cp, uint32_t id)
+{
+ repcache_iter_t *iter;
+
+ (void) pthread_mutex_lock(&cp->rc_lock);
+
+ iter = uu_list_find(cp->rc_iter_list, &id, NULL, NULL);
+ if (iter != NULL) {
+ add_log_ptr(get_log(), RC_PTR_TYPE_ITER, id, iter);
+ (void) pthread_mutex_lock(&iter->ri_lock);
+ }
+ (void) pthread_mutex_unlock(&cp->rc_lock);
+
+ return (iter);
+}
+
+/*
+ * Fails with
+ * _UNKNOWN_ID - iter_id or entity_id does not designate an active register
+ */
+static int
+iter_find_w_entity(repcache_client_t *cp, uint32_t iter_id,
+ repcache_iter_t **iterp, uint32_t entity_id, repcache_entity_t **epp)
+{
+ repcache_iter_t *iter;
+ repcache_entity_t *ep;
+ request_log_entry_t *rlp;
+
+ (void) pthread_mutex_lock(&cp->rc_lock);
+ iter = uu_list_find(cp->rc_iter_list, &iter_id, NULL, NULL);
+ ep = uu_list_find(cp->rc_entity_list, &entity_id, NULL, NULL);
+
+ assert(iter == NULL || !MUTEX_HELD(&iter->ri_lock));
+ assert(ep == NULL || !MUTEX_HELD(&ep->re_lock));
+
+ if (iter == NULL || ep == NULL) {
+ (void) pthread_mutex_unlock(&cp->rc_lock);
+ return (REP_PROTOCOL_FAIL_UNKNOWN_ID);
+ }
+
+ (void) pthread_mutex_lock(&iter->ri_lock);
+ (void) pthread_mutex_lock(&ep->re_lock);
+
+ (void) pthread_mutex_unlock(&cp->rc_lock);
+
+ *iterp = iter;
+ *epp = ep;
+
+ if ((rlp = get_log()) != NULL) {
+ add_log_ptr(rlp, RC_PTR_TYPE_ENTITY, entity_id, ep);
+ add_log_ptr(rlp, RC_PTR_TYPE_ITER, iter_id, iter);
+ }
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+static void
+iter_release(repcache_iter_t *iter)
+{
+ (void) pthread_mutex_unlock(&iter->ri_lock);
+}
+
+static void
+iter_destroy(repcache_iter_t *iter)
+{
+ (void) pthread_mutex_lock(&iter->ri_lock);
+ rc_iter_destroy(&iter->ri_iter);
+ (void) pthread_mutex_unlock(&iter->ri_lock);
+
+ uu_list_node_fini(iter, &iter->ri_link, iter_pool);
+ (void) pthread_mutex_destroy(&iter->ri_lock);
+ uu_free(iter);
+}
+
+static void
+iter_remove(repcache_client_t *cp, uint32_t id)
+{
+ repcache_iter_t *iter;
+
+ (void) pthread_mutex_lock(&cp->rc_lock);
+ iter = uu_list_find(cp->rc_iter_list, &id, NULL, NULL);
+ if (iter != NULL)
+ uu_list_remove(cp->rc_iter_list, iter);
+ (void) pthread_mutex_unlock(&cp->rc_lock);
+
+ if (iter != NULL)
+ iter_destroy(iter);
+}
+
+static void
+iter_cleanup(repcache_client_t *cp)
+{
+ repcache_iter_t *iter;
+ void *cookie = NULL;
+
+ (void) pthread_mutex_lock(&cp->rc_lock);
+ while ((iter = uu_list_teardown(cp->rc_iter_list, &cookie)) != NULL) {
+ (void) pthread_mutex_unlock(&cp->rc_lock);
+ iter_destroy(iter);
+ (void) pthread_mutex_lock(&cp->rc_lock);
+ }
+ (void) pthread_mutex_unlock(&cp->rc_lock);
+}
+
+/*
+ * Ensure that the passed client id is no longer usable, wait for any
+ * outstanding invocations to complete, then destroy the client
+ * structure.
+ */
+static void
+client_destroy(uint32_t id)
+{
+ client_bucket_t *bp = CLIENT_HASH(id);
+ repcache_client_t *cp;
+
+ (void) pthread_mutex_lock(&bp->cb_lock);
+
+ cp = uu_list_find(bp->cb_list, &id, NULL, NULL);
+
+ if (cp == NULL) {
+ (void) pthread_mutex_unlock(&bp->cb_lock);
+ return;
+ }
+
+ uu_list_remove(bp->cb_list, cp);
+
+ (void) pthread_mutex_unlock(&bp->cb_lock);
+
+ /* kick the waiters out */
+ rc_notify_info_fini(&cp->rc_notify_info);
+
+ (void) pthread_mutex_lock(&cp->rc_lock);
+ assert(!(cp->rc_flags & RC_CLIENT_DEAD));
+ cp->rc_flags |= RC_CLIENT_DEAD;
+
+ if (cp->rc_doorfd != -1) {
+ if (door_revoke(cp->rc_doorfd) < 0)
+ perror("door_revoke");
+ cp->rc_doorfd = -1;
+ cp->rc_doorid = INVALID_DOORID;
+ }
+
+ while (cp->rc_refcnt > 0)
+ (void) pthread_cond_wait(&cp->rc_cv, &cp->rc_lock);
+
+ assert(cp->rc_insert_thr == 0 && cp->rc_notify_thr == 0);
+ (void) pthread_mutex_unlock(&cp->rc_lock);
+
+ /*
+ * destroy outstanding objects
+ */
+ entity_cleanup(cp);
+ iter_cleanup(cp);
+
+ /*
+ * clean up notifications
+ */
+ rc_pg_notify_fini(&cp->rc_pg_notify);
+
+ client_free(cp);
+}
+
+/*
+ * Fails with
+ * _TYPE_MISMATCH - the entity is already set up with a different type
+ * _NO_RESOURCES - out of memory
+ */
+static int
+entity_setup(repcache_client_t *cp, struct rep_protocol_entity_setup *rpr)
+{
+ repcache_entity_t *ep;
+ uint32_t type;
+
+ client_start_insert(cp);
+
+ if ((ep = entity_find(cp, rpr->rpr_entityid)) != NULL) {
+ type = ep->re_type;
+ entity_release(ep);
+
+ client_end_insert(cp);
+
+ if (type != rpr->rpr_entitytype)
+ return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
+ return (REP_PROTOCOL_SUCCESS);
+ }
+
+ switch (type = rpr->rpr_entitytype) {
+ case REP_PROTOCOL_ENTITY_SCOPE:
+ case REP_PROTOCOL_ENTITY_SERVICE:
+ case REP_PROTOCOL_ENTITY_INSTANCE:
+ case REP_PROTOCOL_ENTITY_SNAPSHOT:
+ case REP_PROTOCOL_ENTITY_SNAPLEVEL:
+ case REP_PROTOCOL_ENTITY_PROPERTYGRP:
+ case REP_PROTOCOL_ENTITY_PROPERTY:
+ break;
+ default:
+		client_end_insert(cp);
+		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+ }
+
+ ep = entity_alloc(cp);
+ if (ep == NULL) {
+ client_end_insert(cp);
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+ }
+
+ ep->re_id = rpr->rpr_entityid;
+ ep->re_changeid = INVALID_CHANGEID;
+
+ ep->re_type = type;
+ rc_node_ptr_init(&ep->re_node);
+
+ entity_add(cp, ep);
+ client_end_insert(cp);
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+/*ARGSUSED*/
+static void
+entity_name(repcache_client_t *cp, const void *in, size_t insz, void *out_arg,
+ size_t *outsz, void *arg)
+{
+ const struct rep_protocol_entity_name *rpr = in;
+ struct rep_protocol_name_response *out = out_arg;
+ repcache_entity_t *ep;
+ size_t sz = sizeof (out->rpr_name);
+
+ assert(*outsz == sizeof (*out));
+
+ ep = entity_find(cp, rpr->rpr_entityid);
+
+ if (ep == NULL) {
+ out->rpr_response = REP_PROTOCOL_FAIL_UNKNOWN_ID;
+ *outsz = sizeof (out->rpr_response);
+ return;
+ }
+ out->rpr_response = rc_node_name(&ep->re_node, out->rpr_name,
+ sz, rpr->rpr_answertype, &sz);
+ entity_release(ep);
+
+ /*
+ * If we fail, we only return the response code.
+ * If we succeed, we don't return anything after the '\0' in rpr_name.
+ */
+ if (out->rpr_response != REP_PROTOCOL_SUCCESS)
+ *outsz = sizeof (out->rpr_response);
+ else
+ *outsz = offsetof(struct rep_protocol_name_response,
+ rpr_name[sz + 1]);
+}
+
+/*ARGSUSED*/
+static void
+entity_parent_type(repcache_client_t *cp, const void *in, size_t insz,
+ void *out_arg, size_t *outsz, void *arg)
+{
+ const struct rep_protocol_entity_name *rpr = in;
+ struct rep_protocol_integer_response *out = out_arg;
+ repcache_entity_t *ep;
+
+ assert(*outsz == sizeof (*out));
+
+ ep = entity_find(cp, rpr->rpr_entityid);
+
+ if (ep == NULL) {
+ out->rpr_response = REP_PROTOCOL_FAIL_UNKNOWN_ID;
+ *outsz = sizeof (out->rpr_response);
+ return;
+ }
+
+ out->rpr_response = rc_node_parent_type(&ep->re_node, &out->rpr_value);
+ entity_release(ep);
+
+ if (out->rpr_response != REP_PROTOCOL_SUCCESS)
+ *outsz = sizeof (out->rpr_response);
+}
+
+/*
+ * Fails with
+ * _DUPLICATE_ID - the ids are equal
+ * _UNKNOWN_ID - an id does not designate an active register
+ * _INVALID_TYPE - type is invalid
+ * _TYPE_MISMATCH - np doesn't carry children of type type
+ * _DELETED - np has been deleted
+ * _NOT_FOUND - no child with that name/type combo found
+ * _NO_RESOURCES
+ * _BACKEND_ACCESS
+ */
+static int
+entity_get_child(repcache_client_t *cp,
+ struct rep_protocol_entity_get_child *rpr)
+{
+ repcache_entity_t *parent, *child;
+ int result;
+
+ uint32_t parentid = rpr->rpr_entityid;
+ uint32_t childid = rpr->rpr_childid;
+
+ result = entity_find2(cp, childid, &child, parentid, &parent);
+ if (result != REP_PROTOCOL_SUCCESS)
+ return (result);
+
+ rpr->rpr_name[sizeof (rpr->rpr_name) - 1] = 0;
+
+ result = rc_node_get_child(&parent->re_node, rpr->rpr_name,
+ child->re_type, &child->re_node);
+
+ entity_release(child);
+ entity_release(parent);
+
+ return (result);
+}
+
+/*
+ * Fails with
+ * _DUPLICATE_ID - the ids are equal
+ * _UNKNOWN_ID - an id does not designate an active register
+ * _NOT_SET - child is not set
+ * _DELETED - child has been deleted
+ * _TYPE_MISMATCH - child's parent does not match that of the parent register
+ * _NOT_FOUND - child has no parent (and is a scope)
+ */
+static int
+entity_get_parent(repcache_client_t *cp, struct rep_protocol_entity_parent *rpr)
+{
+ repcache_entity_t *child, *parent;
+ int result;
+
+ uint32_t childid = rpr->rpr_entityid;
+ uint32_t outid = rpr->rpr_outid;
+
+ result = entity_find2(cp, childid, &child, outid, &parent);
+ if (result != REP_PROTOCOL_SUCCESS)
+ return (result);
+
+ result = rc_node_get_parent(&child->re_node, parent->re_type,
+ &parent->re_node);
+
+ entity_release(child);
+ entity_release(parent);
+
+ return (result);
+}
+
+static int
+entity_get(repcache_client_t *cp, struct rep_protocol_entity_get *rpr)
+{
+ repcache_entity_t *ep;
+ int result;
+
+ ep = entity_find(cp, rpr->rpr_entityid);
+
+ if (ep == NULL)
+ return (REP_PROTOCOL_FAIL_UNKNOWN_ID);
+
+ switch (rpr->rpr_object) {
+ case RP_ENTITY_GET_INVALIDATE:
+ rc_node_clear(&ep->re_node, 0);
+ result = REP_PROTOCOL_SUCCESS;
+ break;
+ case RP_ENTITY_GET_MOST_LOCAL_SCOPE:
+ result = rc_local_scope(ep->re_type, &ep->re_node);
+ break;
+ default:
+ result = REP_PROTOCOL_FAIL_BAD_REQUEST;
+ break;
+ }
+
+ entity_release(ep);
+
+ return (result);
+}
+
+static int
+entity_update(repcache_client_t *cp, struct rep_protocol_entity_update *rpr)
+{
+ repcache_entity_t *ep;
+ int result;
+
+ if (rpr->rpr_changeid == INVALID_CHANGEID)
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ ep = entity_find(cp, rpr->rpr_entityid);
+
+ if (ep == NULL)
+ return (REP_PROTOCOL_FAIL_UNKNOWN_ID);
+
+ if (ep->re_changeid == rpr->rpr_changeid) {
+ result = REP_PROTOCOL_DONE;
+ } else {
+ result = rc_node_update(&ep->re_node);
+ if (result == REP_PROTOCOL_DONE)
+ ep->re_changeid = rpr->rpr_changeid;
+ }
+
+ entity_release(ep);
+
+ return (result);
+}
+
+static int
+entity_reset(repcache_client_t *cp, struct rep_protocol_entity_reset *rpr)
+{
+ repcache_entity_t *ep;
+
+ ep = entity_find(cp, rpr->rpr_entityid);
+ if (ep == NULL)
+ return (REP_PROTOCOL_FAIL_UNKNOWN_ID);
+
+ rc_node_clear(&ep->re_node, 0);
+ ep->re_txstate = REPCACHE_TX_INIT;
+
+ entity_release(ep);
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+/*
+ * Fails with
+ * _BAD_REQUEST - request has invalid changeid
+ * rpr_name is invalid
+ * cannot create children for parent's type of node
+ * _DUPLICATE_ID - request has duplicate ids
+ * _UNKNOWN_ID - request has unknown id
+ * _DELETED - parent has been deleted
+ * _NOT_SET - parent is reset
+ * _NOT_APPLICABLE - rpr_childtype is _PROPERTYGRP
+ * _INVALID_TYPE - parent is corrupt or rpr_childtype is invalid
+ * _TYPE_MISMATCH - parent cannot have children of type rpr_childtype
+ * _NO_RESOURCES
+ * _PERMISSION_DENIED
+ * _BACKEND_ACCESS
+ * _BACKEND_READONLY
+ * _EXISTS - child already exists
+ * _NOT_FOUND - could not allocate new id
+ */
+static int
+entity_create_child(repcache_client_t *cp,
+ struct rep_protocol_entity_create_child *rpr)
+{
+ repcache_entity_t *parent;
+ repcache_entity_t *child;
+
+ uint32_t parentid = rpr->rpr_entityid;
+ uint32_t childid = rpr->rpr_childid;
+
+ int result;
+
+ if (rpr->rpr_changeid == INVALID_CHANGEID)
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ result = entity_find2(cp, parentid, &parent, childid, &child);
+ if (result != REP_PROTOCOL_SUCCESS)
+ return (result);
+
+ rpr->rpr_name[sizeof (rpr->rpr_name) - 1] = 0;
+
+ if (child->re_changeid == rpr->rpr_changeid) {
+ result = REP_PROTOCOL_SUCCESS;
+ } else {
+ result = rc_node_create_child(&parent->re_node,
+ rpr->rpr_childtype, rpr->rpr_name, &child->re_node);
+ if (result == REP_PROTOCOL_SUCCESS)
+ child->re_changeid = rpr->rpr_changeid;
+ }
+
+ entity_release(parent);
+ entity_release(child);
+
+ return (result);
+}
+
+static int
+entity_create_pg(repcache_client_t *cp,
+ struct rep_protocol_entity_create_pg *rpr)
+{
+ repcache_entity_t *parent;
+ repcache_entity_t *child;
+
+ uint32_t parentid = rpr->rpr_entityid;
+ uint32_t childid = rpr->rpr_childid;
+
+ int result;
+
+ if (rpr->rpr_changeid == INVALID_CHANGEID)
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ result = entity_find2(cp, parentid, &parent, childid, &child);
+ if (result != REP_PROTOCOL_SUCCESS)
+ return (result);
+
+ rpr->rpr_name[sizeof (rpr->rpr_name) - 1] = 0;
+ rpr->rpr_type[sizeof (rpr->rpr_type) - 1] = 0;
+
+ if (child->re_changeid == rpr->rpr_changeid) {
+ result = REP_PROTOCOL_SUCCESS;
+ } else {
+ result = rc_node_create_child_pg(&parent->re_node,
+ child->re_type, rpr->rpr_name, rpr->rpr_type,
+ rpr->rpr_flags, &child->re_node);
+ if (result == REP_PROTOCOL_SUCCESS)
+ child->re_changeid = rpr->rpr_changeid;
+ }
+
+ entity_release(parent);
+ entity_release(child);
+
+ return (result);
+}
+
+static int
+entity_delete(repcache_client_t *cp,
+ struct rep_protocol_entity_delete *rpr)
+{
+ repcache_entity_t *entity;
+
+ uint32_t entityid = rpr->rpr_entityid;
+
+ int result;
+
+ if (rpr->rpr_changeid == INVALID_CHANGEID)
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ entity = entity_find(cp, entityid);
+
+ if (entity == NULL)
+ return (REP_PROTOCOL_FAIL_UNKNOWN_ID);
+
+ if (entity->re_changeid == rpr->rpr_changeid) {
+ result = REP_PROTOCOL_SUCCESS;
+ } else {
+ result = rc_node_delete(&entity->re_node);
+ if (result == REP_PROTOCOL_SUCCESS)
+ entity->re_changeid = rpr->rpr_changeid;
+ }
+
+ entity_release(entity);
+
+ return (result);
+}
+
+static rep_protocol_responseid_t
+entity_teardown(repcache_client_t *cp, struct rep_protocol_entity_teardown *rpr)
+{
+ entity_remove(cp, rpr->rpr_entityid);
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+/*
+ * Fails with
+ * _MISORDERED - the iterator exists and is not reset
+ * _NO_RESOURCES - out of memory
+ */
+static int
+iter_setup(repcache_client_t *cp, struct rep_protocol_iter_request *rpr)
+{
+ repcache_iter_t *iter;
+ uint32_t sequence;
+
+ client_start_insert(cp);
+ /*
+ * If the iter already exists, and hasn't been read from,
+ * we assume the previous call succeeded.
+ */
+ if ((iter = iter_find(cp, rpr->rpr_iterid)) != NULL) {
+ sequence = iter->ri_sequence;
+ iter_release(iter);
+
+ client_end_insert(cp);
+
+ if (sequence != 0)
+ return (REP_PROTOCOL_FAIL_MISORDERED);
+ return (REP_PROTOCOL_SUCCESS);
+ }
+
+ iter = iter_alloc(cp);
+ if (iter == NULL) {
+ client_end_insert(cp);
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+ }
+
+ iter->ri_id = rpr->rpr_iterid;
+ iter->ri_type = REP_PROTOCOL_TYPE_INVALID;
+ iter->ri_sequence = 0;
+ iter_add(cp, iter);
+
+ client_end_insert(cp);
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+/*
+ * Fails with
+ * _UNKNOWN_ID
+ * _MISORDERED - iterator has already been started
+ * _NOT_SET
+ * _DELETED
+ * _TYPE_MISMATCH - entity cannot have type children
+ * _BAD_REQUEST - rpr_flags is invalid
+ * rpr_pattern is invalid
+ * _NO_RESOURCES
+ * _INVALID_TYPE
+ * _BACKEND_ACCESS
+ */
+static int
+iter_start(repcache_client_t *cp, struct rep_protocol_iter_start *rpr)
+{
+ int result;
+ repcache_iter_t *iter;
+ repcache_entity_t *ep;
+
+ result = iter_find_w_entity(cp, rpr->rpr_iterid, &iter,
+ rpr->rpr_entity, &ep);
+
+ if (result != REP_PROTOCOL_SUCCESS)
+ return (REP_PROTOCOL_FAIL_UNKNOWN_ID);
+
+ if (iter->ri_sequence > 1) {
+ result = REP_PROTOCOL_FAIL_MISORDERED;
+ goto end;
+ }
+
+ if (iter->ri_sequence == 1) {
+ result = REP_PROTOCOL_SUCCESS;
+ goto end;
+ }
+
+ rpr->rpr_pattern[sizeof (rpr->rpr_pattern) - 1] = 0;
+
+ result = rc_node_setup_iter(&ep->re_node, &iter->ri_iter,
+ rpr->rpr_itertype, rpr->rpr_flags, rpr->rpr_pattern);
+
+ if (result == REP_PROTOCOL_SUCCESS)
+ iter->ri_sequence++;
+
+end:
+ iter_release(iter);
+ entity_release(ep);
+ return (result);
+}
+
+/*
+ * Returns
+ * _UNKNOWN_ID
+ * _NOT_SET - iter has not been started
+ * _MISORDERED
+ * _BAD_REQUEST - iter walks values
+ * _TYPE_MISMATCH - iter does not walk type entities
+ * _DELETED - parent was deleted
+ * _NO_RESOURCES
+ * _INVALID_TYPE - type is invalid
+ * _DONE
+ * _SUCCESS
+ *
+ * For composed property group iterators, can also return
+ * _TYPE_MISMATCH - parent cannot have type children
+ * _BACKEND_ACCESS
+ */
+static rep_protocol_responseid_t
+iter_read(repcache_client_t *cp, struct rep_protocol_iter_read *rpr)
+{
+ rep_protocol_responseid_t result;
+ repcache_iter_t *iter;
+ repcache_entity_t *ep;
+ uint32_t sequence;
+
+ result = iter_find_w_entity(cp, rpr->rpr_iterid, &iter,
+ rpr->rpr_entityid, &ep);
+
+ if (result != REP_PROTOCOL_SUCCESS)
+ return (result);
+
+ sequence = rpr->rpr_sequence;
+
+ if (iter->ri_sequence == 0) {
+ iter_release(iter);
+ entity_release(ep);
+ return (REP_PROTOCOL_FAIL_NOT_SET);
+ }
+
+ if (sequence == 1) {
+ iter_release(iter);
+ entity_release(ep);
+ return (REP_PROTOCOL_FAIL_MISORDERED);
+ }
+
+ if (sequence == iter->ri_sequence) {
+ iter_release(iter);
+ entity_release(ep);
+ return (REP_PROTOCOL_SUCCESS);
+ }
+
+ if (sequence == iter->ri_sequence + 1) {
+ result = rc_iter_next(iter->ri_iter, &ep->re_node,
+ ep->re_type);
+
+ if (result == REP_PROTOCOL_SUCCESS)
+ iter->ri_sequence++;
+
+ iter_release(iter);
+ entity_release(ep);
+
+ return (result);
+ }
+
+ iter_release(iter);
+ entity_release(ep);
+ return (REP_PROTOCOL_FAIL_MISORDERED);
+}
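+
+/*
+ * A note on the rpr_sequence handshake used by iter_read() above and
+ * iter_read_value() below: iter_start() leaves ri_sequence at 1, so the
+ * first read must carry sequence 2.  A request whose sequence matches
+ * ri_sequence is treated as a retransmission of the previous, already
+ * successful read, while ri_sequence + 1 advances the iterator; anything
+ * else fails with _MISORDERED.  Presumably this is what lets a client
+ * safely re-send a read whose reply was lost.
+ */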
+
+/*ARGSUSED*/
+static void
+iter_read_value(repcache_client_t *cp, const void *in, size_t insz,
+ void *out_arg, size_t *outsz, void *arg)
+{
+ const struct rep_protocol_iter_read_value *rpr = in;
+ struct rep_protocol_value_response *out = out_arg;
+ rep_protocol_responseid_t result;
+
+ repcache_iter_t *iter;
+ uint32_t sequence;
+ int repeat;
+
+ assert(*outsz == sizeof (*out));
+
+ iter = iter_find(cp, rpr->rpr_iterid);
+
+ if (iter == NULL) {
+ result = REP_PROTOCOL_FAIL_UNKNOWN_ID;
+ goto out;
+ }
+
+ sequence = rpr->rpr_sequence;
+
+ if (iter->ri_sequence == 0) {
+ iter_release(iter);
+ result = REP_PROTOCOL_FAIL_NOT_SET;
+ goto out;
+ }
+
+ repeat = (sequence == iter->ri_sequence);
+
+ if (sequence == 1 || (!repeat && sequence != iter->ri_sequence + 1)) {
+ iter_release(iter);
+ result = REP_PROTOCOL_FAIL_MISORDERED;
+ goto out;
+ }
+
+ result = rc_iter_next_value(iter->ri_iter, out, outsz, repeat);
+
+ if (!repeat && result == REP_PROTOCOL_SUCCESS)
+ iter->ri_sequence++;
+
+ iter_release(iter);
+
+out:
+ /*
+ * If we fail, we only return the response code.
+ * If we succeed, rc_iter_next_value has shortened *outsz
+ * to only include the value bytes needed.
+ */
+ if (result != REP_PROTOCOL_SUCCESS && result != REP_PROTOCOL_DONE)
+ *outsz = sizeof (out->rpr_response);
+
+ out->rpr_response = result;
+}
+
+static int
+iter_reset(repcache_client_t *cp, struct rep_protocol_iter_request *rpr)
+{
+ repcache_iter_t *iter = iter_find(cp, rpr->rpr_iterid);
+
+ if (iter == NULL)
+ return (REP_PROTOCOL_FAIL_UNKNOWN_ID);
+
+ if (iter->ri_sequence != 0) {
+ iter->ri_sequence = 0;
+ rc_iter_destroy(&iter->ri_iter);
+ }
+ iter_release(iter);
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+static rep_protocol_responseid_t
+iter_teardown(repcache_client_t *cp, struct rep_protocol_iter_request *rpr)
+{
+ iter_remove(cp, rpr->rpr_iterid);
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+static rep_protocol_responseid_t
+tx_start(repcache_client_t *cp, struct rep_protocol_transaction_start *rpr)
+{
+ repcache_entity_t *tx;
+ repcache_entity_t *ep;
+ rep_protocol_responseid_t result;
+
+ uint32_t txid = rpr->rpr_entityid_tx;
+ uint32_t epid = rpr->rpr_entityid;
+
+ result = entity_find2(cp, txid, &tx, epid, &ep);
+ if (result != REP_PROTOCOL_SUCCESS)
+ return (result);
+
+ if (tx->re_txstate == REPCACHE_TX_SETUP) {
+ result = REP_PROTOCOL_SUCCESS;
+ goto end;
+ }
+ if (tx->re_txstate != REPCACHE_TX_INIT) {
+ result = REP_PROTOCOL_FAIL_MISORDERED;
+ goto end;
+ }
+
+ result = rc_node_setup_tx(&ep->re_node, &tx->re_node);
+
+end:
+ if (result == REP_PROTOCOL_SUCCESS)
+ tx->re_txstate = REPCACHE_TX_SETUP;
+ else
+ rc_node_clear(&tx->re_node, 0);
+
+ entity_release(ep);
+ entity_release(tx);
+ return (result);
+}
+
+/*ARGSUSED*/
+static void
+tx_commit(repcache_client_t *cp, const void *in, size_t insz,
+ void *out_arg, size_t *outsz, void *arg)
+{
+ struct rep_protocol_response *out = out_arg;
+ const struct rep_protocol_transaction_commit *rpr = in;
+ repcache_entity_t *tx;
+
+ assert(*outsz == sizeof (*out));
+ assert(insz >= REP_PROTOCOL_TRANSACTION_COMMIT_MIN_SIZE);
+
+ if (rpr->rpr_size != insz) {
+ out->rpr_response = REP_PROTOCOL_FAIL_BAD_REQUEST;
+ return;
+ }
+
+ tx = entity_find(cp, rpr->rpr_entityid);
+
+ if (tx == NULL) {
+ out->rpr_response = REP_PROTOCOL_FAIL_UNKNOWN_ID;
+ return;
+ }
+
+ switch (tx->re_txstate) {
+ case REPCACHE_TX_INIT:
+ out->rpr_response = REP_PROTOCOL_FAIL_MISORDERED;
+ break;
+
+ case REPCACHE_TX_SETUP:
+ out->rpr_response = rc_tx_commit(&tx->re_node, rpr->rpr_cmd,
+ insz - REP_PROTOCOL_TRANSACTION_COMMIT_MIN_SIZE);
+
+ if (out->rpr_response == REP_PROTOCOL_SUCCESS) {
+ tx->re_txstate = REPCACHE_TX_COMMITTED;
+ rc_node_clear(&tx->re_node, 0);
+ }
+
+ break;
+ case REPCACHE_TX_COMMITTED:
+ out->rpr_response = REP_PROTOCOL_SUCCESS;
+ break;
+ default:
+ assert(0); /* CAN'T HAPPEN */
+ break;
+ }
+
+ entity_release(tx);
+}
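+
+/*
+ * Taken together, tx_start() and tx_commit() implement this state machine
+ * on the transaction entity:
+ *
+ *	REPCACHE_TX_INIT  --(rc_node_setup_tx ok)-->  REPCACHE_TX_SETUP
+ *	REPCACHE_TX_SETUP --(rc_tx_commit ok)-->  REPCACHE_TX_COMMITTED
+ *
+ * Re-issuing TX_START on a SETUP transaction, or TX_COMMIT on a COMMITTED
+ * one, simply returns _SUCCESS, so both requests are idempotent.
+ */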
+
+static rep_protocol_responseid_t
+next_snaplevel(repcache_client_t *cp, struct rep_protocol_entity_pair *rpr)
+{
+ repcache_entity_t *src;
+ repcache_entity_t *dest;
+
+ uint32_t srcid = rpr->rpr_entity_src;
+ uint32_t destid = rpr->rpr_entity_dst;
+
+ int result;
+
+ result = entity_find2(cp, srcid, &src, destid, &dest);
+ if (result != REP_PROTOCOL_SUCCESS)
+ return (result);
+
+ result = rc_node_next_snaplevel(&src->re_node, &dest->re_node);
+
+ entity_release(src);
+ entity_release(dest);
+
+ return (result);
+}
+
+static rep_protocol_responseid_t
+snapshot_take(repcache_client_t *cp, struct rep_protocol_snapshot_take *rpr)
+{
+ repcache_entity_t *src;
+ uint32_t srcid = rpr->rpr_entityid_src;
+ repcache_entity_t *dest;
+ uint32_t destid = rpr->rpr_entityid_dest;
+
+ int result;
+
+ result = entity_find2(cp, srcid, &src, destid, &dest);
+ if (result != REP_PROTOCOL_SUCCESS)
+ return (result);
+
+ if (dest->re_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
+ result = REP_PROTOCOL_FAIL_TYPE_MISMATCH;
+ } else {
+ rpr->rpr_name[sizeof (rpr->rpr_name) - 1] = 0;
+
+ if (rpr->rpr_flags == REP_SNAPSHOT_NEW)
+ result = rc_snapshot_take_new(&src->re_node, NULL,
+ NULL, rpr->rpr_name, &dest->re_node);
+ else if (rpr->rpr_flags == REP_SNAPSHOT_ATTACH &&
+ rpr->rpr_name[0] == 0)
+ result = rc_snapshot_take_attach(&src->re_node,
+ &dest->re_node);
+ else
+ result = REP_PROTOCOL_FAIL_BAD_REQUEST;
+ }
+ entity_release(src);
+ entity_release(dest);
+
+ return (result);
+}
+
+static rep_protocol_responseid_t
+snapshot_take_named(repcache_client_t *cp,
+ struct rep_protocol_snapshot_take_named *rpr)
+{
+ repcache_entity_t *src;
+ uint32_t srcid = rpr->rpr_entityid_src;
+ repcache_entity_t *dest;
+ uint32_t destid = rpr->rpr_entityid_dest;
+
+ int result;
+
+ result = entity_find2(cp, srcid, &src, destid, &dest);
+ if (result != REP_PROTOCOL_SUCCESS)
+ return (result);
+
+ if (dest->re_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
+ result = REP_PROTOCOL_FAIL_TYPE_MISMATCH;
+ } else {
+ rpr->rpr_svcname[sizeof (rpr->rpr_svcname) - 1] = 0;
+ rpr->rpr_instname[sizeof (rpr->rpr_instname) - 1] = 0;
+ rpr->rpr_name[sizeof (rpr->rpr_name) - 1] = 0;
+
+ result = rc_snapshot_take_new(&src->re_node, rpr->rpr_svcname,
+ rpr->rpr_instname, rpr->rpr_name, &dest->re_node);
+ }
+ entity_release(src);
+ entity_release(dest);
+
+ return (result);
+}
+
+static rep_protocol_responseid_t
+snapshot_attach(repcache_client_t *cp, struct rep_protocol_snapshot_attach *rpr)
+{
+ repcache_entity_t *src;
+ uint32_t srcid = rpr->rpr_entityid_src;
+ repcache_entity_t *dest;
+ uint32_t destid = rpr->rpr_entityid_dest;
+
+ int result;
+
+ result = entity_find2(cp, srcid, &src, destid, &dest);
+ if (result != REP_PROTOCOL_SUCCESS)
+ return (result);
+
+ result = rc_snapshot_attach(&src->re_node, &dest->re_node);
+
+ entity_release(src);
+ entity_release(dest);
+
+ return (result);
+}
+
+/*ARGSUSED*/
+static void
+property_get_type(repcache_client_t *cp, const void *in, size_t insz,
+ void *out_arg, size_t *outsz, void *arg)
+{
+ const struct rep_protocol_property_request *rpr = in;
+ struct rep_protocol_integer_response *out = out_arg;
+ repcache_entity_t *ep;
+ rep_protocol_value_type_t t = 0;
+
+ assert(*outsz == sizeof (*out));
+
+ ep = entity_find(cp, rpr->rpr_entityid);
+
+ if (ep == NULL) {
+ out->rpr_response = REP_PROTOCOL_FAIL_UNKNOWN_ID;
+ *outsz = sizeof (out->rpr_response);
+ return;
+ }
+
+ out->rpr_response = rc_node_get_property_type(&ep->re_node, &t);
+
+ entity_release(ep);
+
+ if (out->rpr_response != REP_PROTOCOL_SUCCESS)
+ *outsz = sizeof (out->rpr_response);
+ else
+ out->rpr_value = t;
+}
+
+/*
+ * Fails with:
+ * _UNKNOWN_ID - an id does not designate an active register
+ * _NOT_SET - The property is not set
+ * _DELETED - The property has been deleted
+ * _TYPE_MISMATCH - The object is not a property
+ * _NOT_FOUND - The property has no values.
+ *
+ * Succeeds with:
+ * _SUCCESS - The property has 1 value.
+ * _TRUNCATED - The property has >1 value.
+ */
+/*ARGSUSED*/
+static void
+property_get_value(repcache_client_t *cp, const void *in, size_t insz,
+ void *out_arg, size_t *outsz, void *arg)
+{
+ const struct rep_protocol_property_request *rpr = in;
+ struct rep_protocol_value_response *out = out_arg;
+ repcache_entity_t *ep;
+
+ assert(*outsz == sizeof (*out));
+
+ ep = entity_find(cp, rpr->rpr_entityid);
+ if (ep == NULL) {
+ out->rpr_response = REP_PROTOCOL_FAIL_UNKNOWN_ID;
+ *outsz = sizeof (out->rpr_response);
+ return;
+ }
+
+ out->rpr_response = rc_node_get_property_value(&ep->re_node, out,
+ outsz);
+
+ entity_release(ep);
+
+ /*
+ * If we fail, we only return the response code.
+ * If we succeed, rc_node_get_property_value has shortened *outsz
+ * to only include the value bytes needed.
+ */
+ if (out->rpr_response != REP_PROTOCOL_SUCCESS &&
+ out->rpr_response != REP_PROTOCOL_FAIL_TRUNCATED)
+ *outsz = sizeof (out->rpr_response);
+}
+
+static rep_protocol_responseid_t
+propertygrp_notify(repcache_client_t *cp,
+ struct rep_protocol_propertygrp_request *rpr, int *out_fd)
+{
+ int fds[2];
+ int ours, theirs;
+
+ rep_protocol_responseid_t result;
+ repcache_entity_t *ep;
+
+ if (pipe(fds) < 0)
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+
+ ours = fds[0];
+ theirs = fds[1];
+
+ if ((ep = entity_find(cp, rpr->rpr_entityid)) == NULL) {
+ result = REP_PROTOCOL_FAIL_UNKNOWN_ID;
+ goto fail;
+ }
+
+ /*
+ * While the following can race with other threads setting up a
+ * notification, the worst that can happen is that our fd has
+ * already been closed before we return.
+ */
+ result = rc_pg_notify_setup(&cp->rc_pg_notify, &ep->re_node,
+ ours);
+
+ entity_release(ep);
+
+ if (result != REP_PROTOCOL_SUCCESS)
+ goto fail;
+
+ *out_fd = theirs;
+ return (REP_PROTOCOL_SUCCESS);
+
+fail:
+ (void) close(ours);
+ (void) close(theirs);
+
+ return (result);
+}
+
+static rep_protocol_responseid_t
+client_add_notify(repcache_client_t *cp,
+ struct rep_protocol_notify_request *rpr)
+{
+ rpr->rpr_pattern[sizeof (rpr->rpr_pattern) - 1] = 0;
+
+ switch (rpr->rpr_type) {
+ case REP_PROTOCOL_NOTIFY_PGNAME:
+ return (rc_notify_info_add_name(&cp->rc_notify_info,
+ rpr->rpr_pattern));
+
+ case REP_PROTOCOL_NOTIFY_PGTYPE:
+ return (rc_notify_info_add_type(&cp->rc_notify_info,
+ rpr->rpr_pattern));
+
+ default:
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+ }
+}
+
+/*ARGSUSED*/
+static void
+client_wait(repcache_client_t *cp, const void *in, size_t insz,
+ void *out_arg, size_t *outsz, void *arg)
+{
+ int result;
+ repcache_entity_t *ep;
+ const struct rep_protocol_wait_request *rpr = in;
+ struct rep_protocol_fmri_response *out = out_arg;
+
+ assert(*outsz == sizeof (*out));
+
+ (void) pthread_mutex_lock(&cp->rc_lock);
+ if (cp->rc_notify_thr != 0) {
+ (void) pthread_mutex_unlock(&cp->rc_lock);
+ out->rpr_response = REP_PROTOCOL_FAIL_EXISTS;
+ *outsz = sizeof (out->rpr_response);
+ return;
+ }
+ cp->rc_notify_thr = pthread_self();
+ (void) pthread_mutex_unlock(&cp->rc_lock);
+
+ result = rc_notify_info_wait(&cp->rc_notify_info, &cp->rc_notify_ptr,
+ out->rpr_fmri, sizeof (out->rpr_fmri));
+
+ if (result == REP_PROTOCOL_SUCCESS) {
+ if ((ep = entity_find(cp, rpr->rpr_entityid)) != NULL) {
+ if (ep->re_type == REP_PROTOCOL_ENTITY_PROPERTYGRP) {
+ rc_node_ptr_assign(&ep->re_node,
+ &cp->rc_notify_ptr);
+ } else {
+ result = REP_PROTOCOL_FAIL_TYPE_MISMATCH;
+ }
+ entity_release(ep);
+ } else {
+ result = REP_PROTOCOL_FAIL_UNKNOWN_ID;
+ }
+ rc_node_clear(&cp->rc_notify_ptr, 0);
+ }
+
+ (void) pthread_mutex_lock(&cp->rc_lock);
+ assert(cp->rc_notify_thr == pthread_self());
+ cp->rc_notify_thr = 0;
+ (void) pthread_mutex_unlock(&cp->rc_lock);
+
+ out->rpr_response = result;
+ if (result != REP_PROTOCOL_SUCCESS)
+ *outsz = sizeof (out->rpr_response);
+}
+
+/*
+ * Can return:
+ *	_PERMISSION_DENIED	not enough privileges to perform the request
+ *	_BAD_REQUEST		name is invalid or reserved
+ * _TRUNCATED name is too long for current repository path
+ * _UNKNOWN failed for unknown reason (details written to
+ * console)
+ * _BACKEND_READONLY backend is not writable
+ *
+ * _SUCCESS Backup completed successfully.
+ */
+static rep_protocol_responseid_t
+backup_repository(repcache_client_t *cp,
+ struct rep_protocol_backup_request *rpr)
+{
+ rep_protocol_responseid_t result;
+ ucred_t *uc = get_ucred();
+
+ if (!client_is_privileged() && (uc == NULL || ucred_geteuid(uc) != 0))
+ return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
+
+ rpr->rpr_name[REP_PROTOCOL_NAME_LEN - 1] = 0;
+ if (strcmp(rpr->rpr_name, REPOSITORY_BOOT_BACKUP) == 0)
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ (void) pthread_mutex_lock(&cp->rc_lock);
+ if (rpr->rpr_changeid != cp->rc_changeid) {
+ result = backend_create_backup(rpr->rpr_name);
+ if (result == REP_PROTOCOL_SUCCESS)
+ cp->rc_changeid = rpr->rpr_changeid;
+ } else {
+ result = REP_PROTOCOL_SUCCESS;
+ }
+ (void) pthread_mutex_unlock(&cp->rc_lock);
+
+ return (result);
+}
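+
+/*
+ * backup_repository() uses the same rpr_changeid convention as
+ * entity_create_pg() and entity_delete() above: when the incoming changeid
+ * matches the one recorded on the last successful request, the work is
+ * assumed to have already been done and _SUCCESS is returned immediately.
+ * Presumably this lets a client re-send a request whose reply was lost
+ * without, say, taking a second backup.
+ */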
+
+
+typedef rep_protocol_responseid_t protocol_simple_f(repcache_client_t *cp,
+ const void *rpr);
+
+/*ARGSUSED*/
+static void
+simple_handler(repcache_client_t *cp, const void *in, size_t insz,
+ void *out_arg, size_t *outsz, void *arg)
+{
+ protocol_simple_f *f = (protocol_simple_f *)arg;
+ rep_protocol_response_t *out = out_arg;
+
+ assert(*outsz == sizeof (*out));
+ assert(f != NULL);
+
+ out->rpr_response = (*f)(cp, in);
+}
+
+typedef rep_protocol_responseid_t protocol_simple_fd_f(repcache_client_t *cp,
+ const void *rpr, int *out_fd);
+
+/*ARGSUSED*/
+static void
+simple_fd_handler(repcache_client_t *cp, const void *in, size_t insz,
+ void *out_arg, size_t *outsz, void *arg, int *out_fd)
+{
+ protocol_simple_fd_f *f = (protocol_simple_fd_f *)arg;
+ rep_protocol_response_t *out = out_arg;
+
+ assert(*outsz == sizeof (*out));
+ assert(f != NULL);
+
+ out->rpr_response = (*f)(cp, in, out_fd);
+}
+
+typedef void protocol_handler_f(repcache_client_t *, const void *in,
+ size_t insz, void *out, size_t *outsz, void *arg);
+
+typedef void protocol_handler_fdret_f(repcache_client_t *, const void *in,
+ size_t insz, void *out, size_t *outsz, void *arg, int *fd_out);
+
+#define PROTO(p, f, in) { \
+ p, #p, simple_handler, (void *)(&f), NULL, \
+ sizeof (in), sizeof (rep_protocol_response_t), 0 \
+ }
+
+#define PROTO_FD_OUT(p, f, in) { \
+ p, #p, NULL, (void *)(&f), simple_fd_handler, \
+ sizeof (in), \
+ sizeof (rep_protocol_response_t), \
+ PROTO_FLAG_RETFD \
+ }
+
+#define PROTO_VARIN(p, f, insz) { \
+ p, #p, &(f), NULL, NULL, \
+ insz, sizeof (rep_protocol_response_t), \
+ PROTO_FLAG_VARINPUT \
+ }
+
+#define PROTO_UINT_OUT(p, f, in) { \
+ p, #p, &(f), NULL, NULL, \
+ sizeof (in), \
+ sizeof (struct rep_protocol_integer_response), 0 \
+ }
+
+#define PROTO_NAME_OUT(p, f, in) { \
+ p, #p, &(f), NULL, NULL, \
+ sizeof (in), \
+ sizeof (struct rep_protocol_name_response), 0 \
+ }
+
+#define PROTO_FMRI_OUT(p, f, in) { \
+ p, #p, &(f), NULL, NULL, \
+ sizeof (in), \
+ sizeof (struct rep_protocol_fmri_response), 0 \
+ }
+
+#define PROTO_VALUE_OUT(p, f, in) { \
+ p, #p, &(f), NULL, NULL, \
+ sizeof (in), \
+ sizeof (struct rep_protocol_value_response), 0 \
+ }
+
+#define PROTO_PANIC(p) { p, #p, NULL, NULL, NULL, 0, 0, PROTO_FLAG_PANIC }
+#define PROTO_END() { 0, NULL, NULL, NULL, NULL, 0, 0, PROTO_FLAG_PANIC }
+
+#define PROTO_FLAG_PANIC 0x00000001 /* should never be called */
+#define PROTO_FLAG_VARINPUT 0x00000004 /* in_size is minimum size */
+#define PROTO_FLAG_RETFD 0x00000008 /* can also return an FD */
+
+#define PROTO_ALL_FLAGS 0x0000000f /* all flags */
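+
+/*
+ * As an example of how these macros are used, the table entry
+ *
+ *	PROTO(REP_PROTOCOL_ENTITY_DELETE, entity_delete,
+ *	    struct rep_protocol_entity_delete)
+ *
+ * expands to
+ *
+ *	{
+ *		REP_PROTOCOL_ENTITY_DELETE, "REP_PROTOCOL_ENTITY_DELETE",
+ *		simple_handler, (void *)(&entity_delete), NULL,
+ *		sizeof (struct rep_protocol_entity_delete),
+ *		sizeof (rep_protocol_response_t), 0
+ *	}
+ *
+ * client_switcher() checks the request size against pt_in_size and then
+ * dispatches to simple_handler(), which calls entity_delete() and stores
+ * its return value as the response code.
+ */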
+
+static struct protocol_entry {
+ enum rep_protocol_requestid pt_request;
+ const char *pt_name;
+ protocol_handler_f *pt_handler;
+ void *pt_arg;
+ protocol_handler_fdret_f *pt_fd_handler;
+ size_t pt_in_size;
+ size_t pt_out_max;
+ uint32_t pt_flags;
+} protocol_table[] = {
+ PROTO_PANIC(REP_PROTOCOL_CLOSE), /* special case */
+
+ PROTO(REP_PROTOCOL_ENTITY_SETUP, entity_setup,
+ struct rep_protocol_entity_setup),
+ PROTO_NAME_OUT(REP_PROTOCOL_ENTITY_NAME, entity_name,
+ struct rep_protocol_entity_name),
+ PROTO_UINT_OUT(REP_PROTOCOL_ENTITY_PARENT_TYPE, entity_parent_type,
+ struct rep_protocol_entity_parent_type),
+ PROTO(REP_PROTOCOL_ENTITY_GET_CHILD, entity_get_child,
+ struct rep_protocol_entity_get_child),
+ PROTO(REP_PROTOCOL_ENTITY_GET_PARENT, entity_get_parent,
+ struct rep_protocol_entity_parent),
+ PROTO(REP_PROTOCOL_ENTITY_GET, entity_get,
+ struct rep_protocol_entity_get),
+ PROTO(REP_PROTOCOL_ENTITY_UPDATE, entity_update,
+ struct rep_protocol_entity_update),
+ PROTO(REP_PROTOCOL_ENTITY_CREATE_CHILD, entity_create_child,
+ struct rep_protocol_entity_create_child),
+ PROTO(REP_PROTOCOL_ENTITY_CREATE_PG, entity_create_pg,
+ struct rep_protocol_entity_create_pg),
+ PROTO(REP_PROTOCOL_ENTITY_DELETE, entity_delete,
+ struct rep_protocol_entity_delete),
+ PROTO(REP_PROTOCOL_ENTITY_RESET, entity_reset,
+ struct rep_protocol_entity_reset),
+ PROTO(REP_PROTOCOL_ENTITY_TEARDOWN, entity_teardown,
+ struct rep_protocol_entity_teardown),
+
+ PROTO(REP_PROTOCOL_ITER_SETUP, iter_setup,
+ struct rep_protocol_iter_request),
+ PROTO(REP_PROTOCOL_ITER_START, iter_start,
+ struct rep_protocol_iter_start),
+ PROTO(REP_PROTOCOL_ITER_READ, iter_read,
+ struct rep_protocol_iter_read),
+ PROTO_VALUE_OUT(REP_PROTOCOL_ITER_READ_VALUE, iter_read_value,
+ struct rep_protocol_iter_read_value),
+ PROTO(REP_PROTOCOL_ITER_RESET, iter_reset,
+ struct rep_protocol_iter_request),
+ PROTO(REP_PROTOCOL_ITER_TEARDOWN, iter_teardown,
+ struct rep_protocol_iter_request),
+
+ PROTO(REP_PROTOCOL_NEXT_SNAPLEVEL, next_snaplevel,
+ struct rep_protocol_entity_pair),
+
+ PROTO(REP_PROTOCOL_SNAPSHOT_TAKE, snapshot_take,
+ struct rep_protocol_snapshot_take),
+ PROTO(REP_PROTOCOL_SNAPSHOT_TAKE_NAMED, snapshot_take_named,
+ struct rep_protocol_snapshot_take_named),
+ PROTO(REP_PROTOCOL_SNAPSHOT_ATTACH, snapshot_attach,
+ struct rep_protocol_snapshot_attach),
+
+ PROTO_UINT_OUT(REP_PROTOCOL_PROPERTY_GET_TYPE, property_get_type,
+ struct rep_protocol_property_request),
+ PROTO_VALUE_OUT(REP_PROTOCOL_PROPERTY_GET_VALUE, property_get_value,
+ struct rep_protocol_property_request),
+
+ PROTO_FD_OUT(REP_PROTOCOL_PROPERTYGRP_SETUP_WAIT, propertygrp_notify,
+ struct rep_protocol_propertygrp_request),
+ PROTO(REP_PROTOCOL_PROPERTYGRP_TX_START, tx_start,
+ struct rep_protocol_transaction_start),
+ PROTO_VARIN(REP_PROTOCOL_PROPERTYGRP_TX_COMMIT, tx_commit,
+ REP_PROTOCOL_TRANSACTION_COMMIT_MIN_SIZE),
+
+ PROTO(REP_PROTOCOL_CLIENT_ADD_NOTIFY, client_add_notify,
+ struct rep_protocol_notify_request),
+ PROTO_FMRI_OUT(REP_PROTOCOL_CLIENT_WAIT, client_wait,
+ struct rep_protocol_wait_request),
+
+ PROTO(REP_PROTOCOL_BACKUP, backup_repository,
+ struct rep_protocol_backup_request),
+
+ PROTO_END()
+};
+#undef PROTO
+#undef PROTO_FMRI_OUT
+#undef PROTO_NAME_OUT
+#undef PROTO_UINT_OUT
+#undef PROTO_PANIC
+#undef PROTO_END
+
+/*
+ * The number of entries, sans PROTO_END()
+ */
+#define PROTOCOL_ENTRIES \
+ (sizeof (protocol_table) / sizeof (*protocol_table) - 1)
+
+#define PROTOCOL_PREFIX "REP_PROTOCOL_"
+
+int
+client_init(void)
+{
+ int i;
+ struct protocol_entry *e;
+
+ if (!client_hash_init())
+ return (0);
+
+ if (request_log_size > 0) {
+ request_log = uu_zalloc(request_log_size *
+ sizeof (request_log_entry_t));
+ }
+
+ /*
+ * update the names to not include REP_PROTOCOL_
+ */
+ for (i = 0; i < PROTOCOL_ENTRIES; i++) {
+ e = &protocol_table[i];
+ assert(strncmp(e->pt_name, PROTOCOL_PREFIX,
+ strlen(PROTOCOL_PREFIX)) == 0);
+ e->pt_name += strlen(PROTOCOL_PREFIX);
+ }
+ /*
+ * verify the protocol table is consistent
+ */
+ for (i = 0; i < PROTOCOL_ENTRIES; i++) {
+ e = &protocol_table[i];
+ assert(e->pt_request == (REP_PROTOCOL_BASE + i));
+
+ assert((e->pt_flags & ~PROTO_ALL_FLAGS) == 0);
+
+ if (e->pt_flags & PROTO_FLAG_PANIC)
+ assert(e->pt_in_size == 0 && e->pt_out_max == 0 &&
+ e->pt_handler == NULL);
+ else
+ assert(e->pt_in_size != 0 && e->pt_out_max != 0 &&
+ (e->pt_handler != NULL ||
+ e->pt_fd_handler != NULL));
+ }
+ assert((REP_PROTOCOL_BASE + i) == REP_PROTOCOL_MAX_REQUEST);
+
+ assert(protocol_table[i].pt_request == 0);
+
+ return (1);
+}
+
+static void
+client_switcher(void *cookie, char *argp, size_t arg_size, door_desc_t *desc_in,
+ uint_t n_desc)
+{
+ thread_info_t *ti = thread_self();
+
+ repcache_client_t *cp;
+ uint32_t id = (uint32_t)cookie;
+ enum rep_protocol_requestid request_code;
+
+ rep_protocol_responseid_t result = INVALID_RESULT;
+
+ struct protocol_entry *e;
+
+ char *retval = NULL;
+ size_t retsize = 0;
+
+ int retfd = -1;
+ door_desc_t desc;
+ request_log_entry_t *rlp;
+
+ rlp = start_log(id);
+
+ if (n_desc != 0)
+ uu_die("can't happen: %d descriptors @%p (cookie %p)",
+ n_desc, desc_in, cookie);
+
+ if (argp == DOOR_UNREF_DATA) {
+ client_destroy(id);
+ goto bad_end;
+ }
+
+ thread_newstate(ti, TI_CLIENT_CALL);
+
+ /*
+ * To simplify returning just a result code, we set up for
+ * that case here.
+ */
+ retval = (char *)&result;
+ retsize = sizeof (result);
+
+ if (arg_size < sizeof (request_code)) {
+ result = REP_PROTOCOL_FAIL_BAD_REQUEST;
+ goto end_unheld;
+ }
+
+ ti->ti_client_request = (void *)argp;
+
+ /* LINTED alignment */
+ request_code = *(uint32_t *)argp;
+
+ if (rlp != NULL) {
+ rlp->rl_request = request_code;
+ }
+ /*
+ * In order to avoid locking problems on removal, we handle the
+ * "close" case before doing a lookup.
+ */
+ if (request_code == REP_PROTOCOL_CLOSE) {
+ client_destroy(id);
+ result = REP_PROTOCOL_SUCCESS;
+ goto end_unheld;
+ }
+
+ cp = client_lookup(id);
+ /*
+ * cp is held
+ */
+
+ if (cp == NULL)
+ goto bad_end;
+
+ if (rlp != NULL)
+ rlp->rl_client = cp;
+
+ ti->ti_active_client = cp;
+
+ if (request_code < REP_PROTOCOL_BASE ||
+ request_code >= REP_PROTOCOL_BASE + PROTOCOL_ENTRIES) {
+ result = REP_PROTOCOL_FAIL_BAD_REQUEST;
+ goto end;
+ }
+
+ e = &protocol_table[request_code - REP_PROTOCOL_BASE];
+
+ assert(!(e->pt_flags & PROTO_FLAG_PANIC));
+
+ if (e->pt_flags & PROTO_FLAG_VARINPUT) {
+ if (arg_size < e->pt_in_size) {
+ result = REP_PROTOCOL_FAIL_BAD_REQUEST;
+ goto end;
+ }
+ } else if (arg_size != e->pt_in_size) {
+ result = REP_PROTOCOL_FAIL_BAD_REQUEST;
+ goto end;
+ }
+
+ if (retsize != e->pt_out_max) {
+ retsize = e->pt_out_max;
+ retval = alloca(retsize);
+ }
+
+ if (e->pt_flags & PROTO_FLAG_RETFD)
+ e->pt_fd_handler(cp, argp, arg_size, retval, &retsize,
+ e->pt_arg, &retfd);
+ else
+ e->pt_handler(cp, argp, arg_size, retval, &retsize, e->pt_arg);
+
+end:
+ ti->ti_active_client = NULL;
+ client_release(cp);
+
+end_unheld:
+ if (rlp != NULL) {
+ /* LINTED alignment */
+ rlp->rl_response = *(uint32_t *)retval;
+ end_log();
+ rlp = NULL;
+ }
+ ti->ti_client_request = NULL;
+ thread_newstate(ti, TI_DOOR_RETURN);
+
+ if (retval == (char *)&result) {
+ assert(result != INVALID_RESULT && retsize == sizeof (result));
+ } else {
+ /* LINTED alignment */
+ result = *(uint32_t *)retval;
+ }
+ if (retfd != -1) {
+ desc.d_attributes = DOOR_DESCRIPTOR | DOOR_RELEASE;
+ desc.d_data.d_desc.d_descriptor = retfd;
+ (void) door_return(retval, retsize, &desc, 1);
+ } else {
+ (void) door_return(retval, retsize, NULL, 0);
+ }
+bad_end:
+ if (rlp != NULL) {
+ rlp->rl_response = -1;
+ end_log();
+ rlp = NULL;
+ }
+ (void) door_return(NULL, 0, NULL, 0);
+}
+
+int
+create_client(pid_t pid, uint32_t debugflags, int privileged, int *out_fd)
+{
+ int fd;
+
+ repcache_client_t *cp;
+
+ struct door_info info;
+
+ int door_flags = DOOR_UNREF | DOOR_REFUSE_DESC;
+#ifdef DOOR_NO_CANCEL
+ door_flags |= DOOR_NO_CANCEL;
+#endif
+
+ cp = client_alloc();
+ if (cp == NULL)
+ return (REPOSITORY_DOOR_FAIL_NO_RESOURCES);
+
+ (void) pthread_mutex_lock(&client_lock);
+ cp->rc_id = ++client_maxid;
+ (void) pthread_mutex_unlock(&client_lock);
+
+ cp->rc_all_auths = privileged;
+ cp->rc_pid = pid;
+ cp->rc_debug = debugflags;
+
+ cp->rc_doorfd = door_create(client_switcher, (void *)cp->rc_id,
+ door_flags);
+
+ if (cp->rc_doorfd < 0) {
+ client_free(cp);
+ return (REPOSITORY_DOOR_FAIL_NO_RESOURCES);
+ }
+#ifdef DOOR_PARAM_DATA_MIN
+ (void) door_setparam(cp->rc_doorfd, DOOR_PARAM_DATA_MIN,
+ sizeof (enum rep_protocol_requestid));
+#endif
+
+ if ((fd = dup(cp->rc_doorfd)) < 0 ||
+ door_info(cp->rc_doorfd, &info) < 0) {
+ if (fd >= 0)
+ (void) close(fd);
+ (void) door_revoke(cp->rc_doorfd);
+ cp->rc_doorfd = -1;
+ client_free(cp);
+ return (REPOSITORY_DOOR_FAIL_NO_RESOURCES);
+ }
+
+ rc_pg_notify_init(&cp->rc_pg_notify);
+ rc_notify_info_init(&cp->rc_notify_info);
+
+ client_insert(cp);
+
+ cp->rc_doorid = info.di_uniquifier;
+ *out_fd = fd;
+
+ return (REPOSITORY_DOOR_SUCCESS);
+}
diff --git a/usr/src/cmd/svc/configd/configd.c b/usr/src/cmd/svc/configd/configd.c
new file mode 100644
index 0000000000..32f6f318c6
--- /dev/null
+++ b/usr/src/cmd/svc/configd/configd.c
@@ -0,0 +1,701 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <assert.h>
+#include <door.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <priv.h>
+#include <procfs.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syslog.h>
+#include <sys/corectl.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <ucontext.h>
+#include <unistd.h>
+
+#include "configd.h"
+
+/*
+ * This file manages the overall startup and shutdown of configd, as well
+ * as managing its door thread pool and per-thread datastructures.
+ *
+ * 1. Per-thread Datastructures
+ * -----------------------------
+ * Each configd thread has an associated thread_info_t which contains its
+ * current state. A pointer is kept to this in TSD, keyed by thread_info_key.
+ * The thread_info_ts for all threads in configd are kept on a single global
+ * list, thread_list. After creation, the state in the thread_info structure
+ * is only modified by the associated thread, so no locking is needed. A TSD
+ * destructor removes the thread_info from the global list and frees it at
+ * pthread_exit() time.
+ *
+ * Threads access their per-thread data using thread_self()
+ *
+ * The thread_list is protected by thread_lock, a leaf lock.
+ *
+ * 2. Door Thread Pool Management
+ * ------------------------------
+ * Whenever door_return(3door) returns from the kernel and there are no
+ * other configd threads waiting for requests, libdoor automatically
+ * invokes a function registered with door_server_create(), to request a new
+ * door server thread. The default function just creates a thread that calls
+ * door_return(3door). Unfortunately, since it can take a while for the new
+ * thread to *get* to door_return(3door), a stream of requests can cause a
+ * large number of threads to be created, even though they aren't all needed.
+ *
+ * In our callback, new_thread_needed(), we limit ourselves to two new
+ * threads at a time -- this logic is handled in reserve_new_thread().
+ * This keeps us from creating an absurd number of threads in response to
+ * spikes in load.
+ */
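+/*
+ * In terms of the functions below, the life cycle of a door server thread
+ * is roughly:
+ *
+ *	new_thread_needed() -> reserve_new_thread() -> pthread_create()
+ *	    -> thread_start() -> door_return() loop
+ *	    -> thread_exiting(), via the thread_info_key TSD destructor,
+ *	       at pthread_exit() time
+ */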
+static pthread_key_t thread_info_key;
+static pthread_attr_t thread_attr;
+
+static pthread_mutex_t thread_lock = PTHREAD_MUTEX_INITIALIZER;
+int num_started; /* number actually running */
+int num_servers; /* number in-progress or running */
+static uu_list_pool_t *thread_pool;
+uu_list_t *thread_list;
+
+static thread_info_t main_thread_info;
+
+static int finished;
+
+static pid_t privileged_pid = 0;
+static int privileged_psinfo_fd = -1;
+
+static int privileged_user = 0;
+
+static priv_set_t *privileged_privs;
+
+static int log_to_syslog = 0;
+
+int is_main_repository = 1;
+
+int max_repository_backups = 4;
+
+#define CONFIGD_MAX_FDS 262144
+
+/*
+ * Thanks, Mike
+ */
+void
+abort_handler(int sig, siginfo_t *sip, ucontext_t *ucp)
+{
+ struct sigaction act;
+
+ (void) sigemptyset(&act.sa_mask);
+ act.sa_handler = SIG_DFL;
+ act.sa_flags = 0;
+ (void) sigaction(sig, &act, NULL);
+
+ (void) printstack(2);
+
+ if (sip != NULL && SI_FROMUSER(sip))
+ (void) pthread_kill(pthread_self(), sig);
+ (void) sigfillset(&ucp->uc_sigmask);
+ (void) sigdelset(&ucp->uc_sigmask, sig);
+ ucp->uc_flags |= UC_SIGMASK;
+ (void) setcontext(ucp);
+}
+
+/*
+ * Don't want to have more than a couple thread creates outstanding
+ */
+static int
+reserve_new_thread(void)
+{
+ (void) pthread_mutex_lock(&thread_lock);
+ assert(num_started >= 0);
+ if (num_servers > num_started + 1) {
+ (void) pthread_mutex_unlock(&thread_lock);
+ return (0);
+ }
+ ++num_servers;
+ (void) pthread_mutex_unlock(&thread_lock);
+ return (1);
+}
+
+static void
+thread_info_free(thread_info_t *ti)
+{
+ uu_list_node_fini(ti, &ti->ti_node, thread_pool);
+ if (ti->ti_ucred != NULL)
+ uu_free(ti->ti_ucred);
+ uu_free(ti);
+}
+
+static void
+thread_exiting(void *arg)
+{
+ thread_info_t *ti = arg;
+
+	/* ti is NULL when called from new_thread_needed()'s failure path */
+	if (ti != NULL)
+		log_enter(&ti->ti_log);
+
+ (void) pthread_mutex_lock(&thread_lock);
+ if (ti != NULL) {
+ num_started--;
+ uu_list_remove(thread_list, ti);
+ }
+ assert(num_servers > 0);
+ --num_servers;
+
+ if (num_servers == 0) {
+ configd_critical("no door server threads\n");
+ abort();
+ }
+ (void) pthread_mutex_unlock(&thread_lock);
+
+ if (ti != NULL && ti != &main_thread_info)
+ thread_info_free(ti);
+}
+
+void
+thread_newstate(thread_info_t *ti, thread_state_t newstate)
+{
+ ti->ti_ucred_read = 0; /* invalidate cached ucred */
+ if (newstate != ti->ti_state) {
+ ti->ti_prev_state = ti->ti_state;
+ ti->ti_state = newstate;
+ ti->ti_lastchange = gethrtime();
+ }
+}
+
+thread_info_t *
+thread_self(void)
+{
+ return (pthread_getspecific(thread_info_key));
+}
+
+ucred_t *
+get_ucred(void)
+{
+ thread_info_t *ti = thread_self();
+ ucred_t **ret = &ti->ti_ucred;
+
+ if (ti->ti_ucred_read)
+ return (*ret); /* cached value */
+
+ if (door_ucred(ret) != 0)
+ return (NULL);
+ ti->ti_ucred_read = 1;
+
+ return (*ret);
+}
+
+int
+ucred_is_privileged(ucred_t *uc)
+{
+ const priv_set_t *ps;
+
+ if ((ps = ucred_getprivset(uc, PRIV_EFFECTIVE)) != NULL) {
+ if (priv_isfullset(ps))
+ return (1); /* process has all privs */
+
+ if (privileged_privs != NULL &&
+ priv_issubset(privileged_privs, ps))
+ return (1); /* process has zone privs */
+ }
+
+ return (0);
+}
+
+static void *
+thread_start(void *arg)
+{
+ thread_info_t *ti = arg;
+
+ (void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
+
+ (void) pthread_mutex_lock(&thread_lock);
+ num_started++;
+ (void) uu_list_insert_after(thread_list, uu_list_last(thread_list),
+ ti);
+ (void) pthread_mutex_unlock(&thread_lock);
+ (void) pthread_setspecific(thread_info_key, ti);
+
+ thread_newstate(ti, TI_DOOR_RETURN);
+
+ /*
+ * Start handling door calls
+ */
+ (void) door_return(NULL, 0, NULL, 0);
+ return (arg);
+}
+
+static void
+new_thread_needed(door_info_t *dip)
+{
+ thread_info_t *ti;
+
+ sigset_t new, old;
+
+ assert(dip == NULL);
+
+ if (!reserve_new_thread())
+ return;
+
+ if ((ti = uu_zalloc(sizeof (*ti))) == NULL)
+ goto fail;
+
+ uu_list_node_init(ti, &ti->ti_node, thread_pool);
+ ti->ti_state = TI_CREATED;
+ ti->ti_prev_state = TI_CREATED;
+
+ if ((ti->ti_ucred = uu_zalloc(ucred_size())) == NULL)
+ goto fail;
+
+ (void) sigfillset(&new);
+ (void) pthread_sigmask(SIG_SETMASK, &new, &old);
+ if ((errno = pthread_create(&ti->ti_thread, &thread_attr, thread_start,
+ ti)) != 0) {
+ (void) pthread_sigmask(SIG_SETMASK, &old, NULL);
+ goto fail;
+ }
+
+ (void) pthread_sigmask(SIG_SETMASK, &old, NULL);
+ return;
+
+fail:
+ /*
+ * Since the thread_info structure was never linked onto the
+ * thread list, thread_exiting() can't handle the cleanup.
+ */
+ thread_exiting(NULL);
+ if (ti != NULL)
+ thread_info_free(ti);
+}
+
+int
+create_connection(ucred_t *uc, repository_door_request_t *rp,
+ size_t rp_size, int *out_fd)
+{
+ int flags;
+ int privileged = 0;
+ uint32_t debugflags = 0;
+ psinfo_t info;
+
+ if (privileged_pid != 0) {
+ /*
+ * in privileged pid mode, we only allow connections from
+ * our original parent -- the psinfo read verifies that
+ * it is the same process which we started with.
+ */
+ if (ucred_getpid(uc) != privileged_pid ||
+ read(privileged_psinfo_fd, &info, sizeof (info)) !=
+ sizeof (info))
+ return (REPOSITORY_DOOR_FAIL_PERMISSION_DENIED);
+
+ privileged = 1; /* he gets full privileges */
+ } else if (privileged_user != 0) {
+ /*
+ * in privileged user mode, only one particular user is
+ * allowed to connect to us, and he can do anything.
+ */
+ if (ucred_geteuid(uc) != privileged_user)
+ return (REPOSITORY_DOOR_FAIL_PERMISSION_DENIED);
+
+ privileged = 1;
+ }
+
+ /*
+ * Check that rp, of size rp_size, is large enough to
+ * contain field 'f'. If so, write the value into *out, and return 1.
+ * Otherwise, return 0.
+ */
+#define GET_ARG(rp, rp_size, f, out) \
+ (((rp_size) >= offsetofend(repository_door_request_t, f)) ? \
+ ((*(out) = (rp)->f), 1) : 0)
+
+ if (!GET_ARG(rp, rp_size, rdr_flags, &flags))
+ return (REPOSITORY_DOOR_FAIL_BAD_REQUEST);
+
+#if (REPOSITORY_DOOR_FLAG_ALL != REPOSITORY_DOOR_FLAG_DEBUG)
+#error Need to update flag checks
+#endif
+
+ if (flags & ~REPOSITORY_DOOR_FLAG_ALL)
+ return (REPOSITORY_DOOR_FAIL_BAD_FLAG);
+
+ if (flags & REPOSITORY_DOOR_FLAG_DEBUG)
+ if (!GET_ARG(rp, rp_size, rdr_debug, &debugflags))
+ return (REPOSITORY_DOOR_FAIL_BAD_REQUEST);
+#undef GET_ARG
+
+ return (create_client(ucred_getpid(uc), debugflags, privileged,
+ out_fd));
+}
+
+void
+configd_vcritical(const char *message, va_list args)
+{
+ if (log_to_syslog)
+ vsyslog(LOG_CRIT, message, args);
+ else {
+ flockfile(stderr);
+ (void) fprintf(stderr, "svc.configd: Fatal error: ");
+ (void) vfprintf(stderr, message, args);
+ if (message[0] == 0 || message[strlen(message) - 1] != '\n')
+ (void) fprintf(stderr, "\n");
+ funlockfile(stderr);
+ }
+}
+
+void
+configd_critical(const char *message, ...)
+{
+ va_list args;
+ va_start(args, message);
+ configd_vcritical(message, args);
+ va_end(args);
+}
+
+static void
+usage(const char *prog, int ret)
+{
+ (void) fprintf(stderr,
+ "usage: %s [-np] [-d door_path] [-r repository_path]\n"
+ " [-t nonpersist_repository]\n", prog);
+ exit(ret);
+}
+
+/*ARGSUSED*/
+static void
+handler(int sig, siginfo_t *info, void *data)
+{
+ finished = 1;
+}
+
+static int pipe_fd = -1;
+
+static int
+daemonize_start(void)
+{
+ char data;
+ int status;
+
+ int filedes[2];
+ pid_t pid;
+
+ (void) close(0);
+ (void) dup2(2, 1); /* stderr only */
+
+ if (pipe(filedes) < 0)
+ return (-1);
+
+ if ((pid = fork1()) < 0)
+ return (-1);
+
+ if (pid != 0) {
+ /*
+ * parent
+ */
+ struct sigaction act;
+
+ act.sa_sigaction = SIG_DFL;
+ (void) sigemptyset(&act.sa_mask);
+ act.sa_flags = 0;
+
+ (void) sigaction(SIGPIPE, &act, NULL); /* ignore SIGPIPE */
+
+ (void) close(filedes[1]);
+ if (read(filedes[0], &data, 1) == 1) {
+ /* presume success */
+ _exit(CONFIGD_EXIT_OKAY);
+ }
+
+ status = -1;
+ (void) wait4(pid, &status, 0, NULL);
+ if (WIFEXITED(status))
+ _exit(WEXITSTATUS(status));
+ else
+ _exit(-1);
+ }
+
+ /*
+ * child
+ */
+ pipe_fd = filedes[1];
+ (void) close(filedes[0]);
+
+ /*
+ * generic Unix setup
+ */
+ (void) setsid();
+ (void) umask(0077);
+
+ return (0);
+}
+
+static void
+daemonize_ready(void)
+{
+ char data = '\0';
+
+ /*
+ * wake the parent
+ */
+ (void) write(pipe_fd, &data, 1);
+ (void) close(pipe_fd);
+}
+
+int
+main(int argc, char *argv[])
+{
+ thread_info_t *ti = &main_thread_info;
+
+ char pidpath[sizeof ("/proc/" "/psinfo") + 10];
+
+ struct rlimit fd_new;
+
+ const char *endptr;
+ sigset_t myset;
+ int c;
+ int ret;
+ int fd;
+ const char *dbpath = NULL;
+ const char *npdbpath = NULL;
+ const char *doorpath = REPOSITORY_DOOR_NAME;
+ struct sigaction act;
+
+ int daemonize = 1; /* default to daemonizing */
+ int have_npdb = 1;
+
+ closefrom(3); /* get rid of extraneous fds */
+
+ while ((c = getopt(argc, argv, "Dnpd:r:t:")) != -1) {
+ switch (c) {
+ case 'n':
+ daemonize = 0;
+ break;
+ case 'd':
+ doorpath = optarg;
+ is_main_repository = 0;
+ have_npdb = 0; /* default to no non-persist */
+ break;
+ case 'p':
+ log_to_syslog = 0; /* don't use syslog */
+ is_main_repository = 0;
+
+ /*
+ * If our parent exits while we're opening its /proc
+ * psinfo, we're vulnerable to a pid wrapping. To
+ * protect against that, re-check our ppid after
+ * opening it.
+ */
+ privileged_pid = getppid();
+ (void) snprintf(pidpath, sizeof (pidpath),
+ "/proc/%d/psinfo", privileged_pid);
+ if ((fd = open(pidpath, O_RDONLY)) < 0 ||
+ getppid() != privileged_pid) {
+ (void) fprintf(stderr,
+ "%s: unable to get parent info\n", argv[0]);
+ exit(CONFIGD_EXIT_BAD_ARGS);
+ }
+ privileged_psinfo_fd = fd;
+ break;
+ case 'r':
+ dbpath = optarg;
+ is_main_repository = 0;
+ break;
+ case 't':
+ npdbpath = optarg;
+ is_main_repository = 0;
+ break;
+ default:
+ usage(argv[0], CONFIGD_EXIT_BAD_ARGS);
+ break;
+ }
+ }
+
+ /*
+ * If we're not running as root, allow our euid full access, and
+ * everyone else no access.
+ */
+ if (privileged_pid == 0 && geteuid() != 0) {
+ privileged_user = geteuid();
+ }
+
+ privileged_privs = priv_str_to_set("zone", "", &endptr);
+ if (endptr != NULL && privileged_privs != NULL) {
+ priv_freeset(privileged_privs);
+ privileged_privs = NULL;
+ }
+
+ openlog("svc.configd", LOG_PID | LOG_CONS, LOG_DAEMON);
+ (void) setlogmask(LOG_UPTO(LOG_NOTICE));
+
+ /*
+ * if a non-persist db is specified, always enable it
+ */
+ if (npdbpath)
+ have_npdb = 1;
+
+ if (optind != argc)
+ usage(argv[0], CONFIGD_EXIT_BAD_ARGS);
+
+ if (daemonize) {
+ if (getuid() == 0)
+ (void) chdir("/");
+ if (daemonize_start() < 0) {
+ (void) perror("unable to daemonize");
+ exit(CONFIGD_EXIT_INIT_FAILED);
+ }
+ }
+ if (getuid() == 0)
+ (void) core_set_process_path(CONFIGD_CORE,
+ strlen(CONFIGD_CORE) + 1, getpid());
+
+ /*
+ * this should be enabled once we can drop privileges and still get
+ * a core dump.
+ */
+#if 0
+ /* turn off basic privileges we do not need */
+ (void) priv_set(PRIV_OFF, PRIV_PERMITTED, PRIV_FILE_LINK_ANY,
+ PRIV_PROC_EXEC, PRIV_PROC_FORK, PRIV_PROC_SESSION, NULL);
+#endif
+
+ /* not that we can exec, but to be safe, shut them all off... */
+ (void) priv_set(PRIV_SET, PRIV_INHERITABLE, NULL);
+
+ (void) sigfillset(&act.sa_mask);
+
+ /* signals to ignore */
+ act.sa_sigaction = SIG_IGN;
+ act.sa_flags = 0;
+ (void) sigaction(SIGPIPE, &act, NULL);
+ (void) sigaction(SIGALRM, &act, NULL);
+ (void) sigaction(SIGUSR1, &act, NULL);
+ (void) sigaction(SIGUSR2, &act, NULL);
+ (void) sigaction(SIGPOLL, &act, NULL);
+
+ /* signals to abort on */
+ act.sa_sigaction = (void (*)(int, siginfo_t *, void *))&abort_handler;
+ act.sa_flags = SA_SIGINFO;
+
+ (void) sigaction(SIGABRT, &act, NULL);
+
+ /* signals to handle */
+ act.sa_sigaction = &handler;
+ act.sa_flags = SA_SIGINFO;
+
+ (void) sigaction(SIGHUP, &act, NULL);
+ (void) sigaction(SIGINT, &act, NULL);
+ (void) sigaction(SIGTERM, &act, NULL);
+
+ (void) sigemptyset(&myset);
+ (void) sigaddset(&myset, SIGHUP);
+ (void) sigaddset(&myset, SIGINT);
+ (void) sigaddset(&myset, SIGTERM);
+
+ if ((errno = pthread_attr_init(&thread_attr)) != 0) {
+ (void) perror("initializing");
+ exit(CONFIGD_EXIT_INIT_FAILED);
+ }
+
+ /*
+ * Set the hard and soft limits to CONFIGD_MAX_FDS.
+ */
+ fd_new.rlim_max = fd_new.rlim_cur = CONFIGD_MAX_FDS;
+ (void) setrlimit(RLIMIT_NOFILE, &fd_new);
+
+ if ((ret = backend_init(dbpath, npdbpath, have_npdb)) !=
+ CONFIGD_EXIT_OKAY)
+ exit(ret);
+
+ if (!client_init())
+ exit(CONFIGD_EXIT_INIT_FAILED);
+
+ if (!rc_node_init())
+ exit(CONFIGD_EXIT_INIT_FAILED);
+
+ (void) pthread_attr_setdetachstate(&thread_attr,
+ PTHREAD_CREATE_DETACHED);
+ (void) pthread_attr_setscope(&thread_attr, PTHREAD_SCOPE_SYSTEM);
+
+ if ((errno = pthread_key_create(&thread_info_key,
+ thread_exiting)) != 0) {
+ perror("pthread_key_create");
+ exit(CONFIGD_EXIT_INIT_FAILED);
+ }
+
+ if ((thread_pool = uu_list_pool_create("thread_pool",
+ sizeof (thread_info_t), offsetof(thread_info_t, ti_node),
+ NULL, UU_LIST_POOL_DEBUG)) == NULL) {
+ configd_critical("uu_list_pool_create: %s\n",
+ uu_strerror(uu_error()));
+ exit(CONFIGD_EXIT_INIT_FAILED);
+ }
+
+ if ((thread_list = uu_list_create(thread_pool, NULL, 0)) == NULL) {
+ configd_critical("uu_list_create: %s\n",
+ uu_strerror(uu_error()));
+ exit(CONFIGD_EXIT_INIT_FAILED);
+ }
+
+ (void) memset(ti, '\0', sizeof (*ti));
+ uu_list_node_init(ti, &ti->ti_node, thread_pool);
+ (void) uu_list_insert_before(thread_list, uu_list_first(thread_list),
+ ti);
+
+ ti->ti_thread = pthread_self();
+ ti->ti_state = TI_SIGNAL_WAIT;
+ ti->ti_prev_state = TI_SIGNAL_WAIT;
+
+ (void) pthread_setspecific(thread_info_key, ti);
+
+ (void) door_server_create(new_thread_needed);
+
+ if (!setup_main_door(doorpath)) {
+ configd_critical("Setting up main door failed.\n");
+ exit(CONFIGD_EXIT_DOOR_INIT_FAILED);
+ }
+
+ if (daemonize)
+ daemonize_ready();
+
+ (void) pthread_sigmask(SIG_BLOCK, &myset, NULL);
+ while (!finished) {
+ int sig = sigwait(&myset);
+ if (sig > 0) {
+ break;
+ }
+ }
+
+ backend_fini();
+
+ return (CONFIGD_EXIT_OKAY);
+}
diff --git a/usr/src/cmd/svc/configd/configd.h b/usr/src/cmd/svc/configd/configd.h
new file mode 100644
index 0000000000..f00bb6467b
--- /dev/null
+++ b/usr/src/cmd/svc/configd/configd.h
@@ -0,0 +1,743 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _CONFIGD_H
+#define _CONFIGD_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <door.h>
+#include <pthread.h>
+#include <string.h>
+#include <sys/types.h>
+
+#include <libscf.h>
+#include <repcache_protocol.h>
+#include <libuutil.h>
+
+#include <configd_exit.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Lock order:
+ *
+ * client lock
+ * iter locks, in ID order
+ * entity locks, in ID order
+ *
+ * (any iter/entity locks)
+ * backend locks (NORMAL, then NONPERSIST)
+ * rc_node lock
+ * children's rc_node lock
+ * cache bucket lock
+ * rc_node lock[*]
+ *
+ * * only one node may be grabbed while holding a bucket lock
+ *
+ * leaf locks: (no other locks may be acquired while holding one)
+ * rc_pg_notify_lock
+ */
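+
+/*
+ * So, for example, a request that touches two entities and then the
+ * rc_node tree takes the client lock first, then the two entity locks in
+ * increasing ID order, and only then descends into backend and rc_node
+ * locks; a lock from an earlier line above is never acquired while one
+ * from a later line is held.
+ */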
+
+/*
+ * Returns the minimum size for a structure of type 't' such
+ * that it is safe to access field 'f'.
+ */
+#define offsetofend(t, f) (offsetof(t, f) + sizeof (((t *)0)->f))
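+
+/*
+ * E.g., offsetofend(repository_door_request_t, rdr_flags) is the smallest
+ * request size from which rdr_flags can safely be read -- see GET_ARG()
+ * in configd.c, which presumably uses this to keep accepting shorter
+ * requests from older clients.
+ */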
+
+/*
+ * We want MUTEX_HELD, but we also want pthreads. So we're stuck with this.
+ */
+struct _lwp_mutex_t;
+extern int _mutex_held(struct _lwp_mutex_t *);
+#define MUTEX_HELD(m) _mutex_held((struct _lwp_mutex_t *)(m))
+
+/*
+ * Maximum levels of composition.
+ */
+#define COMPOSITION_DEPTH 2
+
+/*
+ * The default locations of the repository dbs
+ */
+#define REPOSITORY_DB "/etc/svc/repository.db"
+#define NONPERSIST_DB "/etc/svc/volatile/svc_nonpersist.db"
+
+#define CONFIGD_CORE "core.%f.%t.%p"
+
+#ifndef NDEBUG
+#define bad_error(f, e) \
+ uu_warn("%s:%d: %s() returned bad error %d. Aborting.\n", \
+ __FILE__, __LINE__, f, e); \
+ abort()
+#else
+#define bad_error(f, e) abort()
+#endif
+
+typedef enum backend_type {
+ BACKEND_TYPE_NORMAL = 0,
+ BACKEND_TYPE_NONPERSIST,
+ BACKEND_TYPE_TOTAL /* backend use only */
+} backend_type_t;
+
+/*
+ * pre-declare rc_* types
+ */
+typedef struct rc_node rc_node_t;
+typedef struct rc_snapshot rc_snapshot_t;
+typedef struct rc_snaplevel rc_snaplevel_t;
+
+/*
+ * notification layer -- protected by rc_pg_notify_lock
+ */
+typedef struct rc_notify_info rc_notify_info_t;
+typedef struct rc_notify_delete rc_notify_delete_t;
+
+#define RC_NOTIFY_MAX_NAMES 4 /* enough for now */
+
+typedef struct rc_notify {
+ uu_list_node_t rcn_list_node;
+ rc_node_t *rcn_node;
+ rc_notify_info_t *rcn_info;
+ rc_notify_delete_t *rcn_delete;
+} rc_notify_t;
+
+struct rc_notify_delete {
+ rc_notify_t rnd_notify;
+ char rnd_fmri[REP_PROTOCOL_FMRI_LEN];
+};
+
+struct rc_notify_info {
+ uu_list_node_t rni_list_node;
+ rc_notify_t rni_notify;
+ const char *rni_namelist[RC_NOTIFY_MAX_NAMES];
+ const char *rni_typelist[RC_NOTIFY_MAX_NAMES];
+
+ int rni_flags;
+ int rni_waiters;
+ pthread_cond_t rni_cv;
+};
+#define RC_NOTIFY_ACTIVE 0x00000001
+#define RC_NOTIFY_DRAIN 0x00000002
+#define RC_NOTIFY_EMPTYING 0x00000004
+
+typedef struct rc_node_pg_notify {
+ uu_list_node_t rnpn_node;
+ int rnpn_fd;
+ rc_node_t *rnpn_pg;
+} rc_node_pg_notify_t;
+
+/*
+ * cache layer
+ */
+
+/*
+ * The 'key' for the main object hash. main_id is the main object
+ * identifier. The rl_ids array contains:
+ *
+ * TYPE RL_IDS
+ * scope unused
+ * service unused
+ * instance {service_id}
+ * snapshot {service_id, instance_id}
+ * snaplevel {service_id, instance_id, name_id, snapshot_id}
+ * propertygroup {service_id, (instance_id or 0), (name_id or 0),
+ * (snapshot_id or 0), (l_id or 0)}
+ * property {service_id, (instance_id or 0), (name_id or 0),
+ * (snapshot_id or 0), (l_id or 0), pg_id, gen_id}
+ */
+#define ID_SERVICE 0
+#define ID_INSTANCE 1
+#define ID_NAME 2
+#define ID_SNAPSHOT 3
+#define ID_LEVEL 4
+#define ID_PG 5
+#define ID_GEN 6
+#define MAX_IDS 7
+typedef struct rc_node_lookup {
+ uint16_t rl_type; /* REP_PROTOCOL_ENTITY_* */
+ uint16_t rl_backend; /* BACKEND_TYPE_* */
+ uint32_t rl_main_id; /* primary identifier */
+ uint32_t rl_ids[MAX_IDS]; /* context */
+} rc_node_lookup_t;
+
+struct rc_node {
+ /*
+ * read-only data
+ */
+ rc_node_lookup_t rn_id; /* must be first */
+ uint32_t rn_hash;
+ const char *rn_name;
+
+ /*
+ * type-specific state
+ * (if space becomes an issue, these can become a union)
+ */
+
+ /*
+ * Used by instances, snapshots, and "composed property groups" only.
+ * These are the entities whose properties should appear composed when
+ * this entity is traversed by a composed iterator. 0 is the top-most
+ * entity, down to COMPOSITION_DEPTH - 1.
+ */
+ rc_node_t *rn_cchain[COMPOSITION_DEPTH];
+
+ /*
+ * used by property groups only
+ */
+ const char *rn_type;
+ uint32_t rn_pgflags;
+ uint32_t rn_gen_id;
+ uu_list_t *rn_pg_notify_list; /* prot by rc_pg_notify_lock */
+ rc_notify_t rn_notify; /* prot by rc_pg_notify_lock */
+
+ /*
+ * used by properties only
+ */
+ rep_protocol_value_type_t rn_valtype;
+ const char *rn_values; /* protected by rn_lock */
+ size_t rn_values_count; /* protected by rn_lock */
+ size_t rn_values_size; /* protected by rn_lock */
+
+ /*
+ * used by snapshots only
+ */
+ uint32_t rn_snapshot_id;
+ rc_snapshot_t *rn_snapshot; /* protected by rn_lock */
+
+ /*
+ * used by snaplevels only
+ */
+ rc_snaplevel_t *rn_snaplevel;
+
+ /*
+ * mutable state
+ */
+ pthread_mutex_t rn_lock;
+ pthread_cond_t rn_cv;
+ uint32_t rn_flags;
+ uint32_t rn_refs; /* reference count */
+ uint32_t rn_other_refs; /* atomic refcount */
+ uint32_t rn_other_refs_held; /* for 1->0 transitions */
+
+ uu_list_t *rn_children;
+ uu_list_node_t rn_sibling_node;
+
+ rc_node_t *rn_parent; /* set if on child list */
+ rc_node_t *rn_former; /* next former node */
+ rc_node_t *rn_parent_ref; /* reference count target */
+
+ /*
+ * external state (protected by hash chain lock)
+ */
+ rc_node_t *rn_hash_next;
+};
+
+/*
+ * flag ordering:
+ *	RC_NODE_DYING
+ * RC_NODE_CHILDREN_CHANGING
+ * RC_NODE_CREATING_CHILD
+ * RC_NODE_USING_PARENT
+ * RC_NODE_IN_TX
+ *
+ * RC_NODE_USING_PARENT is special, because it lets you proceed up the tree,
+ * in the reverse of the usual locking order. Because of this, there are
+ * limitations on what you can do while holding it. While holding
+ * RC_NODE_USING_PARENT, you may:
+ * bump or release your parent's reference count
+ * access fields in your parent
+ * hold RC_NODE_USING_PARENT in the parent, proceeding recursively.
+ *
+ * If you are only holding *one* node's RC_NODE_USING_PARENT, and:
+ * you are *not* proceeding recursively, you can hold your
+ * immediate parent's RC_NODE_CHILDREN_CHANGING flag.
+ * you hold your parent's RC_NODE_CHILDREN_CHANGING flag, you can add
+ * RC_NODE_IN_TX to your flags.
+ * you want to grab a flag in your parent, you must lock your parent,
+ * lock yourself, drop RC_NODE_USING_PARENT, unlock yourself,
+ * then proceed to manipulate the parent.
+ */
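+/*
+ * For example (an illustrative sketch, not a specific code path): to grab
+ * a flag in np's parent pp while holding only np's RC_NODE_USING_PARENT,
+ * the last rule above amounts to
+ *
+ *	pp = np->rn_parent;		(stable while USING_PARENT is held)
+ *	(void) pthread_mutex_lock(&pp->rn_lock);
+ *	(void) pthread_mutex_lock(&np->rn_lock);
+ *	np->rn_flags &= ~RC_NODE_USING_PARENT;	(waking any waiters)
+ *	(void) pthread_mutex_unlock(&np->rn_lock);
+ *	... pp's flags may now be manipulated under pp->rn_lock ...
+ */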
+#define RC_NODE_CHILDREN_CHANGING 0x00000001 /* child list in flux */
+#define RC_NODE_HAS_CHILDREN 0x00000002 /* child list is accurate */
+
+#define RC_NODE_IN_PARENT 0x00000004 /* I'm in my parent's list */
+#define RC_NODE_USING_PARENT 0x00000008 /* parent ptr in use */
+#define RC_NODE_CREATING_CHILD 0x00000010 /* a create is in progress */
+#define	RC_NODE_IN_TX		0x00000020	/* a tx is in progress */
+
+#define RC_NODE_OLD 0x00000400 /* out-of-date object */
+#define RC_NODE_ON_FORMER 0x00000800 /* on an rn_former list */
+
+#define RC_NODE_PARENT_REF 0x00001000 /* parent_ref in use */
+#define RC_NODE_UNREFED 0x00002000 /* unref processing active */
+#define RC_NODE_DYING 0x00004000 /* node is being deleted */
+#define RC_NODE_DEAD 0x00008000 /* node has been deleted */
+
+#define RC_NODE_DYING_FLAGS \
+ (RC_NODE_CHILDREN_CHANGING | RC_NODE_IN_TX | RC_NODE_DYING | \
+ RC_NODE_CREATING_CHILD)
+
+#define RC_NODE_WAITING_FLAGS \
+ (RC_NODE_DYING_FLAGS | RC_NODE_USING_PARENT)
+
+
+typedef struct rc_node_ptr {
+ rc_node_t *rnp_node;
+ char rnp_authorized; /* transaction pre-authed */
+ char rnp_deleted; /* object was deleted */
+} rc_node_ptr_t;
+
+#define NODE_PTR_NOT_HELD(npp) \
+ ((npp)->rnp_node == NULL || !MUTEX_HELD(&(npp)->rnp_node->rn_lock))
+
+typedef int rc_iter_filter_func(rc_node_t *, void *);
+
+typedef struct rc_node_iter {
+ rc_node_t *rni_parent;
+ int rni_clevel; /* index into rni_parent->rn_cchain[] */
+ rc_node_t *rni_iter_node;
+ uu_list_walk_t *rni_iter;
+ uint32_t rni_type;
+
+ /*
+ * for normal walks
+ */
+ rc_iter_filter_func *rni_filter;
+ void *rni_filter_arg;
+
+ /*
+ * for value walks
+ */
+ uint32_t rni_offset; /* next value offset */
+ uint32_t rni_last_offset; /* previous value offset */
+} rc_node_iter_t;
+
+typedef struct rc_node_tx {
+ rc_node_ptr_t rnt_ptr;
+ int rnt_authorized; /* No need to check anymore. */
+} rc_node_tx_t;
+
+
+typedef struct cache_bucket {
+ pthread_mutex_t cb_lock;
+ rc_node_t *cb_head;
+
+ char cb_pad[64 - sizeof (pthread_mutex_t) -
+ 2 * sizeof (rc_node_t *)];
+} cache_bucket_t;
+
+/*
+ * Snapshots
+ */
+struct rc_snapshot {
+ uint32_t rs_snap_id;
+
+ pthread_mutex_t rs_lock;
+ pthread_cond_t rs_cv;
+
+ uint32_t rs_flags;
+ uint32_t rs_refcnt; /* references from rc_nodes */
+ uint32_t rs_childref; /* references to children */
+
+ rc_snaplevel_t *rs_levels; /* list of levels */
+ rc_snapshot_t *rs_hash_next;
+};
+#define RC_SNAPSHOT_FILLING 0x00000001 /* rs_levels changing */
+#define RC_SNAPSHOT_READY 0x00000002
+#define RC_SNAPSHOT_DEAD 0x00000004 /* no resources */
+
+typedef struct rc_snaplevel_pgs {
+ uint32_t rsp_pg_id;
+ uint32_t rsp_gen_id;
+} rc_snaplevel_pgs_t;
+
+struct rc_snaplevel {
+ rc_snapshot_t *rsl_parent;
+ uint32_t rsl_level_num;
+ uint32_t rsl_level_id;
+
+ uint32_t rsl_service_id;
+ uint32_t rsl_instance_id;
+
+ const char *rsl_scope;
+ const char *rsl_service;
+ const char *rsl_instance;
+
+ rc_snaplevel_t *rsl_next;
+};
+
+/*
+ * Client layer -- the ID fields must be first, in order for the search
+ * routines to work correctly.
+ */
+enum repcache_txstate {
+ REPCACHE_TX_INIT,
+ REPCACHE_TX_SETUP,
+ REPCACHE_TX_COMMITTED
+};
+
+typedef struct repcache_entity {
+ uint32_t re_id;
+ uu_list_node_t re_link;
+ uint32_t re_changeid;
+
+ pthread_mutex_t re_lock;
+ uint32_t re_type;
+ rc_node_ptr_t re_node;
+ enum repcache_txstate re_txstate; /* property groups only */
+} repcache_entity_t;
+
+typedef struct repcache_iter {
+ uint32_t ri_id;
+ uu_list_node_t ri_link;
+
+ uint32_t ri_type; /* result type */
+
+ pthread_mutex_t ri_lock;
+ uint32_t ri_sequence;
+ rc_node_iter_t *ri_iter;
+} repcache_iter_t;
+
+typedef struct repcache_client {
+ /*
+ * constants
+ */
+ uint32_t rc_id; /* must be first */
+ int rc_all_auths; /* bypass auth checks */
+ uint32_t rc_debug; /* debug flags */
+ pid_t rc_pid; /* pid of opening process */
+ door_id_t rc_doorid; /* a globally unique identifier */
+ int rc_doorfd; /* our door's FD */
+
+ /*
+ * client list linkage, protected by hash chain lock
+ */
+ uu_list_node_t rc_link;
+
+ /*
+ * notification information, protected by rc_node layer
+ */
+ rc_node_pg_notify_t rc_pg_notify;
+ rc_notify_info_t rc_notify_info;
+
+ /*
+ * client_wait output, only usable by rc_notify_thr
+ */
+ rc_node_ptr_t rc_notify_ptr;
+
+ /*
+ * register lists, protected by rc_lock
+ */
+ uu_list_t *rc_entity_list; /* entities */
+ uu_list_t *rc_iter_list; /* iters */
+
+ /*
+ * Variables, protected by rc_lock
+ */
+ int rc_refcnt; /* in-progress door calls */
+ int rc_flags; /* state */
+ uint32_t rc_changeid; /* used to make backups idempotent */
+ pthread_t rc_insert_thr; /* single thread trying to insert */
+ pthread_t rc_notify_thr; /* single thread waiting for notify */
+ pthread_cond_t rc_cv;
+ pthread_mutex_t rc_lock;
+} repcache_client_t;
+#define RC_CLIENT_DEAD 0x00000001
+
+typedef struct client_bucket {
+ pthread_mutex_t cb_lock;
+ uu_list_t *cb_list;
+ char ch_pad[64 - sizeof (pthread_mutex_t) - sizeof (uu_list_t *)];
+} client_bucket_t;
+
+enum rc_ptr_type {
+ RC_PTR_TYPE_ENTITY = 1,
+ RC_PTR_TYPE_ITER
+};
+
+typedef struct request_log_ptr {
+ enum rc_ptr_type rlp_type;
+ uint32_t rlp_id;
+ void *rlp_ptr; /* repcache_{entity,iter}_t */
+ void *rlp_data; /* rc_node, for ENTITY only */
+} request_log_ptr_t;
+
+#define MAX_PTRS 3
+
+/*
+ * rl_start through rl_client cannot move without changing start_log()
+ */
+typedef struct request_log_entry {
+ hrtime_t rl_start;
+ hrtime_t rl_end;
+ pthread_t rl_tid;
+ uint32_t rl_clientid;
+ repcache_client_t *rl_client;
+ enum rep_protocol_requestid rl_request;
+ rep_protocol_responseid_t rl_response;
+ int rl_num_ptrs;
+ request_log_ptr_t rl_ptrs[MAX_PTRS];
+} request_log_entry_t;
+
+/*
+ * thread information
+ */
+typedef enum thread_state {
+ TI_CREATED,
+ TI_DOOR_RETURN,
+ TI_SIGNAL_WAIT,
+ TI_MAIN_DOOR_CALL,
+ TI_CLIENT_CALL
+} thread_state_t;
+
+typedef struct thread_info {
+ pthread_t ti_thread;
+ uu_list_node_t ti_node; /* for list of all threads */
+
+ /*
+ * per-thread globals
+ */
+ ucred_t *ti_ucred; /* for credential lookups */
+ int ti_ucred_read; /* ucred holds current creds */
+
+ /*
+ * per-thread state information, for debuggers
+ */
+ hrtime_t ti_lastchange;
+
+ thread_state_t ti_state;
+ thread_state_t ti_prev_state;
+
+ repcache_client_t *ti_active_client;
+ request_log_entry_t ti_log;
+
+ struct rep_protocol_request *ti_client_request;
+ repository_door_request_t *ti_main_door_request;
+
+} thread_info_t;
+
+/*
+ * Backend layer
+ */
+typedef struct backend_query backend_query_t;
+typedef struct backend_tx backend_tx_t;
+
+/*
+ * configd.c
+ */
+int create_connection(ucred_t *cred, repository_door_request_t *rp,
+ size_t rp_size, int *out_fd);
+
+thread_info_t *thread_self(void);
+void thread_newstate(thread_info_t *, thread_state_t);
+ucred_t *get_ucred(void);
+int ucred_is_privileged(ucred_t *);
+
+void configd_critical(const char *, ...);
+void configd_vcritical(const char *, va_list);
+
+extern int is_main_repository;
+extern int max_repository_backups;
+
+/*
+ * maindoor.c
+ */
+int setup_main_door(const char *);
+
+/*
+ * client.c
+ */
+int create_client(pid_t, uint32_t, int, int *);
+int client_init(void);
+int client_is_privileged(void);
+void log_enter(request_log_entry_t *);
+
+/*
+ * rc_node.c, backend/cache interfaces (rc_node_t)
+ */
+int rc_node_init();
+int rc_check_type_name(uint32_t, const char *);
+
+void rc_node_rele(rc_node_t *);
+rc_node_t *rc_node_setup(rc_node_t *, rc_node_lookup_t *,
+ const char *, rc_node_t *);
+rc_node_t *rc_node_setup_pg(rc_node_t *, rc_node_lookup_t *, const char *,
+ const char *, uint32_t, uint32_t, rc_node_t *);
+rc_node_t *rc_node_setup_snapshot(rc_node_t *, rc_node_lookup_t *, const char *,
+ uint32_t, rc_node_t *);
+rc_node_t *rc_node_setup_snaplevel(rc_node_t *, rc_node_lookup_t *,
+ rc_snaplevel_t *, rc_node_t *);
+int rc_node_create_property(rc_node_t *, rc_node_lookup_t *,
+ const char *, rep_protocol_value_type_t, const char *, size_t, size_t);
+
+rc_node_t *rc_node_alloc(void);
+void rc_node_destroy(rc_node_t *);
+
+/*
+ * rc_node.c, client interface (rc_node_ptr_t, rc_node_iter_t)
+ */
+void rc_node_ptr_init(rc_node_ptr_t *);
+int rc_local_scope(uint32_t, rc_node_ptr_t *);
+
+void rc_node_clear(rc_node_ptr_t *, int);
+void rc_node_ptr_assign(rc_node_ptr_t *, const rc_node_ptr_t *);
+int rc_node_name(rc_node_ptr_t *, char *, size_t, uint32_t, size_t *);
+int rc_node_fmri(rc_node_ptr_t *, char *, size_t, size_t *);
+int rc_node_parent_type(rc_node_ptr_t *, uint32_t *);
+int rc_node_get_child(rc_node_ptr_t *, const char *, uint32_t, rc_node_ptr_t *);
+int rc_node_get_parent(rc_node_ptr_t *, uint32_t, rc_node_ptr_t *);
+int rc_node_get_property_type(rc_node_ptr_t *, rep_protocol_value_type_t *);
+int rc_node_get_property_value(rc_node_ptr_t *,
+ struct rep_protocol_value_response *, size_t *);
+int rc_node_create_child(rc_node_ptr_t *, uint32_t, const char *,
+ rc_node_ptr_t *);
+int rc_node_create_child_pg(rc_node_ptr_t *, uint32_t, const char *,
+ const char *, uint32_t, rc_node_ptr_t *);
+int rc_node_update(rc_node_ptr_t *);
+int rc_node_delete(rc_node_ptr_t *);
+int rc_node_next_snaplevel(rc_node_ptr_t *, rc_node_ptr_t *);
+
+int rc_node_setup_iter(rc_node_ptr_t *, rc_node_iter_t **, uint32_t,
+ size_t, const char *);
+
+int rc_iter_next(rc_node_iter_t *, rc_node_ptr_t *, uint32_t);
+int rc_iter_next_value(rc_node_iter_t *, struct rep_protocol_value_response *,
+ size_t *, int);
+void rc_iter_destroy(rc_node_iter_t **);
+
+int rc_node_setup_tx(rc_node_ptr_t *, rc_node_ptr_t *);
+int rc_tx_commit(rc_node_ptr_t *, const void *, size_t);
+
+void rc_pg_notify_init(rc_node_pg_notify_t *);
+int rc_pg_notify_setup(rc_node_pg_notify_t *, rc_node_ptr_t *, int);
+void rc_pg_notify_fini(rc_node_pg_notify_t *);
+
+void rc_notify_info_init(rc_notify_info_t *);
+int rc_notify_info_add_name(rc_notify_info_t *, const char *);
+int rc_notify_info_add_type(rc_notify_info_t *, const char *);
+int rc_notify_info_wait(rc_notify_info_t *, rc_node_ptr_t *, char *, size_t);
+void rc_notify_info_fini(rc_notify_info_t *);
+
+int rc_snapshot_take_new(rc_node_ptr_t *, const char *,
+ const char *, const char *, rc_node_ptr_t *);
+int rc_snapshot_take_attach(rc_node_ptr_t *, rc_node_ptr_t *);
+int rc_snapshot_attach(rc_node_ptr_t *, rc_node_ptr_t *);
+
+/*
+ * file_object.c
+ */
+int object_fill_children(rc_node_t *);
+int object_create(rc_node_t *, uint32_t, const char *, rc_node_t **);
+int object_create_pg(rc_node_t *, uint32_t, const char *, const char *,
+ uint32_t, rc_node_t **);
+
+int object_delete(rc_node_t *);
+void object_free_values(const char *, uint32_t, size_t, size_t);
+
+int object_fill_snapshot(rc_snapshot_t *sp);
+
+int object_snapshot_take_new(rc_node_t *, const char *, const char *,
+ const char *, rc_node_t **);
+int object_snapshot_attach(rc_node_lookup_t *, uint32_t *, int);
+
+/*
+ * object.c
+ */
+int object_tx_commit(rc_node_lookup_t *, const void *, size_t, uint32_t *);
+
+/*
+ * snapshot.c
+ */
+int rc_snapshot_get(uint32_t, rc_snapshot_t **);
+void rc_snapshot_rele(rc_snapshot_t *);
+void rc_snaplevel_hold(rc_snaplevel_t *);
+void rc_snaplevel_rele(rc_snaplevel_t *);
+
+/*
+ * backend.c
+ */
+int backend_init(const char *, const char *, int);
+void backend_fini(void);
+
+rep_protocol_responseid_t backend_create_backup(const char *);
+
+/*
+ * call on any database inconsistency -- cleans up state as best it can,
+ * and exits with a "Database Bad" error code.
+ */
+void backend_panic(const char *, ...);
+#pragma rarely_called(backend_panic)
+
+backend_query_t *backend_query_alloc(void);
+void backend_query_append(backend_query_t *, const char *);
+void backend_query_add(backend_query_t *, const char *, ...);
+void backend_query_free(backend_query_t *);
+
+typedef int backend_run_callback_f(void *data, int columns, char **vals,
+ char **names);
+#define BACKEND_CALLBACK_CONTINUE 0
+#define BACKEND_CALLBACK_ABORT 1
+
+backend_run_callback_f backend_fail_if_seen; /* aborts TX if called */
+
+int backend_run(backend_type_t, backend_query_t *,
+ backend_run_callback_f *, void *);
+
+int backend_tx_begin(backend_type_t, backend_tx_t **);
+int backend_tx_begin_ro(backend_type_t, backend_tx_t **);
+void backend_tx_end_ro(backend_tx_t *);
+
+enum id_space {
+ BACKEND_ID_SERVICE_INSTANCE,
+ BACKEND_ID_PROPERTYGRP,
+ BACKEND_ID_GENERATION,
+ BACKEND_ID_PROPERTY,
+ BACKEND_ID_VALUE,
+ BACKEND_ID_SNAPNAME,
+ BACKEND_ID_SNAPSHOT,
+ BACKEND_ID_SNAPLEVEL,
+ BACKEND_ID_INVALID /* always illegal */
+};
+
+uint32_t backend_new_id(backend_tx_t *, enum id_space);
+int backend_tx_run_update(backend_tx_t *, const char *, ...);
+int backend_tx_run_update_changed(backend_tx_t *, const char *, ...);
+int backend_tx_run_single_int(backend_tx_t *tx, backend_query_t *q,
+ uint32_t *buf);
+int backend_tx_run(backend_tx_t *, backend_query_t *,
+ backend_run_callback_f *, void *);
+
+int backend_tx_commit(backend_tx_t *);
+void backend_tx_rollback(backend_tx_t *);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _CONFIGD_H */
diff --git a/usr/src/cmd/svc/configd/file_object.c b/usr/src/cmd/svc/configd/file_object.c
new file mode 100644
index 0000000000..52bff4858f
--- /dev/null
+++ b/usr/src/cmd/svc/configd/file_object.c
@@ -0,0 +1,2174 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * file_object.c - enter objects into and load them from the backend
+ *
+ * The primary entry points in this layer are object_create(),
+ * object_create_pg(), object_delete(), and object_fill_children(). They each
+ * take an rc_node_t and use the functions in the object_info_t info array for
+ * the node's type.
+ */
+
+#include <assert.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+
+#include "configd.h"
+#include "repcache_protocol.h"
+
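+/*
+ * child_info_t is the context handed to the fill_*_callback() routines:
+ * ci_parent is the node whose children are being filled in, ci_tx is the
+ * read-only transaction used when property values must be fetched, and
+ * ci_base_nl is a template lookup whose rl_main_id is set per result row.
+ */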
+typedef struct child_info {
+ rc_node_t *ci_parent;
+ backend_tx_t *ci_tx; /* only for properties */
+ rc_node_lookup_t ci_base_nl;
+} child_info_t;
+
+typedef struct delete_ent delete_ent_t;
+typedef struct delete_stack delete_stack_t;
+typedef struct delete_info delete_info_t;
+
+typedef int delete_cb_func(delete_info_t *, const delete_ent_t *);
+
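+/*
+ * Object deletion is driven by an explicit stack of delete_ent_t entries.
+ * Each entry records a backend, an object id (plus a generation id for
+ * property groups), and a callback which deletes that object and may push
+ * further entries for its children.  delete_info_t carries the open
+ * transactions and the stack itself.
+ */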
+struct delete_ent {
+ delete_cb_func *de_cb; /* callback */
+ uint32_t de_backend;
+ uint32_t de_id;
+ uint32_t de_gen; /* only for property groups */
+};
+
+struct delete_stack {
+ struct delete_stack *ds_next;
+ uint32_t ds_size; /* number of elements */
+ uint32_t ds_cur; /* current offset */
+ delete_ent_t ds_buf[1]; /* actually ds_size */
+};
+#define DELETE_STACK_SIZE(x) offsetof(delete_stack_t, ds_buf[(x)])
+
+struct delete_info {
+ backend_tx_t *di_tx;
+ backend_tx_t *di_np_tx;
+ delete_stack_t *di_stack;
+ delete_stack_t *di_free;
+};
+
+typedef struct object_info {
+ uint32_t obj_type;
+ enum id_space obj_id_space;
+
+ int (*obj_fill_children)(rc_node_t *);
+ int (*obj_setup_child_info)(rc_node_t *, uint32_t, child_info_t *);
+ int (*obj_query_child)(backend_query_t *, rc_node_lookup_t *,
+ const char *);
+ int (*obj_insert_child)(backend_tx_t *, rc_node_lookup_t *,
+ const char *);
+ int (*obj_insert_pg_child)(backend_tx_t *, rc_node_lookup_t *,
+ const char *, const char *, uint32_t, uint32_t);
+ int (*obj_delete_start)(rc_node_t *, delete_info_t *);
+} object_info_t;
+
+#define NUM_NEEDED 50
+
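+/*
+ * Push a new entry onto the delete stack.  Stack space is allocated in
+ * chunks of NUM_NEEDED entries; a single emptied chunk is kept on
+ * di_free for reuse by later pushes.
+ */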
+static int
+delete_stack_push(delete_info_t *dip, uint32_t be, delete_cb_func *cb,
+ uint32_t id, uint32_t gen)
+{
+ delete_stack_t *cur = dip->di_stack;
+ delete_ent_t *ent;
+
+ if (cur == NULL || cur->ds_cur == cur->ds_size) {
+ delete_stack_t *new = dip->di_free;
+ dip->di_free = NULL;
+ if (new == NULL) {
+ new = uu_zalloc(DELETE_STACK_SIZE(NUM_NEEDED));
+ if (new == NULL)
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+ new->ds_size = NUM_NEEDED;
+ }
+ new->ds_cur = 0;
+ new->ds_next = dip->di_stack;
+ dip->di_stack = new;
+ cur = new;
+ }
+ assert(cur->ds_cur < cur->ds_size);
+ ent = &cur->ds_buf[cur->ds_cur++];
+
+ ent->de_backend = be;
+ ent->de_cb = cb;
+ ent->de_id = id;
+ ent->de_gen = gen;
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+static int
+delete_stack_pop(delete_info_t *dip, delete_ent_t *out)
+{
+ delete_stack_t *cur = dip->di_stack;
+ delete_ent_t *ent;
+
+ if (cur == NULL)
+ return (0);
+ assert(cur->ds_cur > 0 && cur->ds_cur <= cur->ds_size);
+ ent = &cur->ds_buf[--cur->ds_cur];
+ if (cur->ds_cur == 0) {
+ dip->di_stack = cur->ds_next;
+ cur->ds_next = NULL;
+
+ if (dip->di_free != NULL)
+ uu_free(dip->di_free);
+ dip->di_free = cur;
+ }
+ if (ent == NULL)
+ return (0);
+
+ *out = *ent;
+ return (1);
+}
+
+static void
+delete_stack_cleanup(delete_info_t *dip)
+{
+ delete_stack_t *cur;
+ while ((cur = dip->di_stack) != NULL) {
+ dip->di_stack = cur->ds_next;
+
+ uu_free(cur);
+ }
+
+ if ((cur = dip->di_free) != NULL) {
+ assert(cur->ds_next == NULL); /* should only be one */
+ uu_free(cur);
+ dip->di_free = NULL;
+ }
+}
+
+struct delete_cb_info {
+ delete_info_t *dci_dip;
+ uint32_t dci_be;
+ delete_cb_func *dci_cb;
+ int dci_result;
+};
+
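+/*
+ * Query callback: each result row is expected to be (id, generation).
+ * Pushes a delete_ent_t for the row using the callback in dci_cb; on
+ * failure the error is left in dci_result and the query is aborted.
+ */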
+/*ARGSUSED*/
+static int
+push_delete_callback(void *data, int columns, char **vals, char **names)
+{
+ struct delete_cb_info *info = data;
+
+ const char *id_str = *vals++;
+ const char *gen_str = *vals++;
+
+ uint32_t id;
+ uint32_t gen;
+
+ assert(columns == 2);
+
+ if (uu_strtouint(id_str, &id, sizeof (id), 0, 0, 0) == -1)
+ backend_panic("invalid integer in database");
+ if (uu_strtouint(gen_str, &gen, sizeof (gen), 0, 0, 0) == -1)
+ backend_panic("invalid integer in database");
+
+ info->dci_result = delete_stack_push(info->dci_dip, info->dci_be,
+ info->dci_cb, id, gen);
+
+ if (info->dci_result != REP_PROTOCOL_SUCCESS)
+ return (BACKEND_CALLBACK_ABORT);
+ return (BACKEND_CALLBACK_CONTINUE);
+}
+
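+/*
+ * Delete a row from value_tbl, but only if no property in prop_lnk_tbl
+ * still references it -- the leading SELECT makes backend_fail_if_seen
+ * abort the batch before the DELETE runs.
+ */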
+static int
+value_delete(delete_info_t *dip, const delete_ent_t *ent)
+{
+ uint32_t be = ent->de_backend;
+ int r;
+
+ backend_query_t *q;
+
+ backend_tx_t *tx = (be == BACKEND_TYPE_NORMAL)? dip->di_tx :
+ dip->di_np_tx;
+
+ q = backend_query_alloc();
+
+ backend_query_add(q,
+ "SELECT 1 FROM prop_lnk_tbl WHERE (lnk_val_id = %d); "
+ "DELETE FROM value_tbl WHERE (value_id = %d); ",
+ ent->de_id, ent->de_id);
+ r = backend_tx_run(tx, q, backend_fail_if_seen, NULL);
+ backend_query_free(q);
+ if (r == REP_PROTOCOL_DONE)
+ return (REP_PROTOCOL_SUCCESS); /* still in use */
+ return (r);
+}
+
+static int
+pg_lnk_tbl_delete(delete_info_t *dip, const delete_ent_t *ent)
+{
+ struct delete_cb_info info;
+ uint32_t be = ent->de_backend;
+ int r;
+
+ backend_query_t *q;
+
+ backend_tx_t *tx = (be == BACKEND_TYPE_NORMAL)? dip->di_tx :
+ dip->di_np_tx;
+
+ /*
+ * For non-persistent backends, we can only have one parent, and
+ * it has already been deleted.
+ *
+ * For normal backends, we need to check to see if we're in
+ * a snapshot or are the active generation for the property
+ * group. If we are, there's nothing to be done.
+ */
+ if (be == BACKEND_TYPE_NORMAL) {
+ q = backend_query_alloc();
+ backend_query_add(q,
+ "SELECT 1 "
+ "FROM pg_tbl "
+ "WHERE (pg_id = %d AND pg_gen_id = %d); "
+ "SELECT 1 "
+ "FROM snaplevel_lnk_tbl "
+ "WHERE (snaplvl_pg_id = %d AND snaplvl_gen_id = %d);",
+ ent->de_id, ent->de_gen,
+ ent->de_id, ent->de_gen);
+ r = backend_tx_run(tx, q, backend_fail_if_seen, NULL);
+ backend_query_free(q);
+
+ if (r == REP_PROTOCOL_DONE)
+ return (REP_PROTOCOL_SUCCESS); /* still in use */
+ }
+
+ info.dci_dip = dip;
+ info.dci_be = be;
+ info.dci_cb = &value_delete;
+ info.dci_result = REP_PROTOCOL_SUCCESS;
+
+ q = backend_query_alloc();
+ backend_query_add(q,
+ "SELECT DISTINCT lnk_val_id, 0 FROM prop_lnk_tbl "
+ "WHERE "
+ " (lnk_pg_id = %d AND lnk_gen_id = %d AND lnk_val_id NOTNULL); "
+ "DELETE FROM prop_lnk_tbl "
+ "WHERE (lnk_pg_id = %d AND lnk_gen_id = %d)",
+ ent->de_id, ent->de_gen, ent->de_id, ent->de_gen);
+
+ r = backend_tx_run(tx, q, push_delete_callback, &info);
+ backend_query_free(q);
+
+ if (r == REP_PROTOCOL_DONE) {
+ assert(info.dci_result != REP_PROTOCOL_SUCCESS);
+ return (info.dci_result);
+ }
+ return (r);
+}
+
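+/*
+ * Delete a property group: remove its pg_tbl row, then queue deletion of
+ * the properties of its current generation via pg_lnk_tbl_delete.
+ */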
+static int
+propertygrp_delete(delete_info_t *dip, const delete_ent_t *ent)
+{
+ uint32_t be = ent->de_backend;
+ backend_query_t *q;
+ uint32_t gen;
+
+ int r;
+
+ backend_tx_t *tx = (be == BACKEND_TYPE_NORMAL)? dip->di_tx :
+ dip->di_np_tx;
+
+ q = backend_query_alloc();
+ backend_query_add(q,
+ "SELECT pg_gen_id FROM pg_tbl WHERE pg_id = %d; "
+ "DELETE FROM pg_tbl WHERE pg_id = %d",
+ ent->de_id, ent->de_id);
+ r = backend_tx_run_single_int(tx, q, &gen);
+ backend_query_free(q);
+
+ if (r != REP_PROTOCOL_SUCCESS)
+ return (r);
+
+ return (delete_stack_push(dip, be, &pg_lnk_tbl_delete,
+ ent->de_id, gen));
+}
+
+static int
+snaplevel_lnk_delete(delete_info_t *dip, const delete_ent_t *ent)
+{
+ uint32_t be = ent->de_backend;
+ backend_query_t *q;
+ struct delete_cb_info info;
+
+ int r;
+
+ backend_tx_t *tx = (be == BACKEND_TYPE_NORMAL)? dip->di_tx :
+ dip->di_np_tx;
+
+ info.dci_dip = dip;
+ info.dci_be = be;
+ info.dci_cb = &pg_lnk_tbl_delete;
+ info.dci_result = REP_PROTOCOL_SUCCESS;
+
+ q = backend_query_alloc();
+ backend_query_add(q,
+ "SELECT snaplvl_pg_id, snaplvl_gen_id "
+ " FROM snaplevel_lnk_tbl "
+ " WHERE snaplvl_level_id = %d; "
+ "DELETE FROM snaplevel_lnk_tbl WHERE snaplvl_level_id = %d",
+ ent->de_id, ent->de_id);
+ r = backend_tx_run(tx, q, push_delete_callback, &info);
+ backend_query_free(q);
+
+ if (r == REP_PROTOCOL_DONE) {
+ assert(info.dci_result != REP_PROTOCOL_SUCCESS);
+ return (info.dci_result);
+ }
+ return (r);
+}
+
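+/*
+ * Delete a snapshot's snaplevels, but only if no snapshot name
+ * (snapshot_lnk_tbl row) still refers to the snap_id; if one does, the
+ * levels are still in use and are left alone.
+ */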
+static int
+snaplevel_tbl_delete(delete_info_t *dip, const delete_ent_t *ent)
+{
+ uint32_t be = ent->de_backend;
+ backend_tx_t *tx = (be == BACKEND_TYPE_NORMAL)? dip->di_tx :
+ dip->di_np_tx;
+
+ struct delete_cb_info info;
+ backend_query_t *q;
+ int r;
+
+ assert(be == BACKEND_TYPE_NORMAL);
+
+ q = backend_query_alloc();
+ backend_query_add(q,
+ "SELECT 1 FROM snapshot_lnk_tbl WHERE lnk_snap_id = %d",
+ ent->de_id);
+ r = backend_tx_run(tx, q, backend_fail_if_seen, NULL);
+ backend_query_free(q);
+
+ if (r == REP_PROTOCOL_DONE)
+ return (REP_PROTOCOL_SUCCESS); /* still in use */
+
+ info.dci_dip = dip;
+ info.dci_be = be;
+ info.dci_cb = &snaplevel_lnk_delete;
+ info.dci_result = REP_PROTOCOL_SUCCESS;
+
+ q = backend_query_alloc();
+ backend_query_add(q,
+ "SELECT snap_level_id, 0 FROM snaplevel_tbl WHERE snap_id = %d;"
+ "DELETE FROM snaplevel_tbl WHERE snap_id = %d",
+ ent->de_id, ent->de_id);
+ r = backend_tx_run(tx, q, push_delete_callback, &info);
+ backend_query_free(q);
+
+ if (r == REP_PROTOCOL_DONE) {
+ assert(info.dci_result != REP_PROTOCOL_SUCCESS);
+ return (info.dci_result);
+ }
+ return (r);
+}
+
+static int
+snapshot_lnk_delete(delete_info_t *dip, const delete_ent_t *ent)
+{
+ uint32_t be = ent->de_backend;
+ backend_tx_t *tx = (be == BACKEND_TYPE_NORMAL)? dip->di_tx :
+ dip->di_np_tx;
+
+ backend_query_t *q;
+ uint32_t snapid;
+ int r;
+
+ assert(be == BACKEND_TYPE_NORMAL);
+
+ q = backend_query_alloc();
+ backend_query_add(q,
+ "SELECT lnk_snap_id FROM snapshot_lnk_tbl WHERE lnk_id = %d; "
+ "DELETE FROM snapshot_lnk_tbl WHERE lnk_id = %d",
+ ent->de_id, ent->de_id);
+ r = backend_tx_run_single_int(tx, q, &snapid);
+ backend_query_free(q);
+
+ if (r != REP_PROTOCOL_SUCCESS)
+ return (r);
+
+ return (delete_stack_push(dip, be, &snaplevel_tbl_delete, snapid, 0));
+}
+
+static int
+pgparent_delete_add_pgs(delete_info_t *dip, uint32_t parent_id)
+{
+ struct delete_cb_info info;
+ backend_query_t *q;
+ int r;
+
+ info.dci_dip = dip;
+ info.dci_be = BACKEND_TYPE_NORMAL;
+ info.dci_cb = &propertygrp_delete;
+ info.dci_result = REP_PROTOCOL_SUCCESS;
+
+ q = backend_query_alloc();
+ backend_query_add(q,
+ "SELECT pg_id, 0 FROM pg_tbl WHERE pg_parent_id = %d",
+ parent_id);
+
+ r = backend_tx_run(dip->di_tx, q, push_delete_callback, &info);
+
+ if (r == REP_PROTOCOL_DONE) {
+ assert(info.dci_result != REP_PROTOCOL_SUCCESS);
+ backend_query_free(q);
+ return (info.dci_result);
+ }
+ if (r != REP_PROTOCOL_SUCCESS) {
+ backend_query_free(q);
+ return (r);
+ }
+
+ if (dip->di_np_tx != NULL) {
+ info.dci_be = BACKEND_TYPE_NONPERSIST;
+
+ r = backend_tx_run(dip->di_np_tx, q, push_delete_callback,
+ &info);
+
+ if (r == REP_PROTOCOL_DONE) {
+ assert(info.dci_result != REP_PROTOCOL_SUCCESS);
+ backend_query_free(q);
+ return (info.dci_result);
+ }
+ if (r != REP_PROTOCOL_SUCCESS) {
+ backend_query_free(q);
+ return (r);
+ }
+ }
+ backend_query_free(q);
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+static int
+service_delete(delete_info_t *dip, const delete_ent_t *ent)
+{
+ int r;
+
+ r = backend_tx_run_update_changed(dip->di_tx,
+ "DELETE FROM service_tbl WHERE svc_id = %d", ent->de_id);
+ if (r != REP_PROTOCOL_SUCCESS)
+ return (r);
+
+ return (pgparent_delete_add_pgs(dip, ent->de_id));
+}
+
+static int
+instance_delete(delete_info_t *dip, const delete_ent_t *ent)
+{
+ struct delete_cb_info info;
+ int r;
+ backend_query_t *q;
+
+ r = backend_tx_run_update_changed(dip->di_tx,
+ "DELETE FROM instance_tbl WHERE instance_id = %d", ent->de_id);
+ if (r != REP_PROTOCOL_SUCCESS)
+ return (r);
+
+ r = pgparent_delete_add_pgs(dip, ent->de_id);
+ if (r != REP_PROTOCOL_SUCCESS)
+ return (r);
+
+ info.dci_dip = dip;
+ info.dci_be = BACKEND_TYPE_NORMAL;
+ info.dci_cb = &snapshot_lnk_delete;
+ info.dci_result = REP_PROTOCOL_SUCCESS;
+
+ q = backend_query_alloc();
+ backend_query_add(q,
+ "SELECT lnk_id, 0 FROM snapshot_lnk_tbl WHERE lnk_inst_id = %d",
+ ent->de_id);
+ r = backend_tx_run(dip->di_tx, q, push_delete_callback, &info);
+ backend_query_free(q);
+
+ if (r == REP_PROTOCOL_DONE) {
+ assert(info.dci_result != REP_PROTOCOL_SUCCESS);
+ return (info.dci_result);
+ }
+ return (r);
+}
+
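+/*
+ * Query callback for scope and service children: each row is (name, id).
+ * Allocates an rc_node_t, fills in the lookup from the template in the
+ * child_info_t, and hands the node to rc_node_setup().
+ */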
+/*ARGSUSED*/
+static int
+fill_child_callback(void *data, int columns, char **vals, char **names)
+{
+ child_info_t *cp = data;
+ rc_node_t *np;
+ uint32_t main_id;
+ const char *name;
+ const char *cur;
+ rc_node_lookup_t *lp = &cp->ci_base_nl;
+
+ assert(columns == 2);
+
+ name = *vals++;
+ columns--;
+
+ cur = *vals++;
+ columns--;
+ if (uu_strtouint(cur, &main_id, sizeof (main_id), 0, 0, 0) == -1)
+ backend_panic("invalid integer in database");
+
+ lp->rl_main_id = main_id;
+
+ if ((np = rc_node_alloc()) == NULL)
+ return (BACKEND_CALLBACK_ABORT);
+
+ np = rc_node_setup(np, lp, name, cp->ci_parent);
+ rc_node_rele(np);
+
+ return (BACKEND_CALLBACK_CONTINUE);
+}
+
+/*ARGSUSED*/
+static int
+fill_snapshot_callback(void *data, int columns, char **vals, char **names)
+{
+ child_info_t *cp = data;
+ rc_node_t *np;
+ uint32_t main_id;
+ uint32_t snap_id;
+ const char *name;
+ const char *cur;
+ const char *snap;
+ rc_node_lookup_t *lp = &cp->ci_base_nl;
+
+ assert(columns == 3);
+
+ name = *vals++;
+ columns--;
+
+ cur = *vals++;
+ columns--;
+ snap = *vals++;
+ columns--;
+ if (uu_strtouint(cur, &main_id, sizeof (main_id), 0, 0, 0) == -1 ||
+ uu_strtouint(snap, &snap_id, sizeof (snap_id), 0, 0, 0) == -1)
+ backend_panic("invalid integer in database");
+
+ lp->rl_main_id = main_id;
+
+ if ((np = rc_node_alloc()) == NULL)
+ return (BACKEND_CALLBACK_ABORT);
+
+ np = rc_node_setup_snapshot(np, lp, name, snap_id, cp->ci_parent);
+ rc_node_rele(np);
+
+ return (BACKEND_CALLBACK_CONTINUE);
+}
+
+/*ARGSUSED*/
+static int
+fill_pg_callback(void *data, int columns, char **vals, char **names)
+{
+ child_info_t *cip = data;
+ const char *name;
+ const char *type;
+ const char *cur;
+ uint32_t main_id;
+ uint32_t flags;
+ uint32_t gen_id;
+
+ rc_node_lookup_t *lp = &cip->ci_base_nl;
+ rc_node_t *newnode, *pg;
+
+ assert(columns == 5);
+
+ name = *vals++; /* pg_name */
+ columns--;
+
+ cur = *vals++; /* pg_id */
+ columns--;
+ if (uu_strtouint(cur, &main_id, sizeof (main_id), 0, 0, 0) == -1)
+ backend_panic("invalid integer in database");
+
+ lp->rl_main_id = main_id;
+
+ cur = *vals++; /* pg_gen_id */
+ columns--;
+ if (uu_strtouint(cur, &gen_id, sizeof (gen_id), 0, 0, 0) == -1)
+ backend_panic("invalid integer in database");
+
+ type = *vals++; /* pg_type */
+ columns--;
+
+ cur = *vals++; /* pg_flags */
+ columns--;
+ if (uu_strtouint(cur, &flags, sizeof (flags), 0, 0, 0) == -1)
+ backend_panic("invalid integer in database");
+
+ if ((newnode = rc_node_alloc()) == NULL)
+ return (BACKEND_CALLBACK_ABORT);
+
+ pg = rc_node_setup_pg(newnode, lp, name, type, flags, gen_id,
+ cip->ci_parent);
+ if (pg == NULL) {
+ rc_node_destroy(newnode);
+ return (BACKEND_CALLBACK_ABORT);
+ }
+
+ rc_node_rele(pg);
+
+ return (BACKEND_CALLBACK_CONTINUE);
+}
+
+struct property_value_info {
+ char *pvi_base;
+ size_t pvi_pos;
+ size_t pvi_size;
+ size_t pvi_count;
+};
+
+/*ARGSUSED*/
+static int
+property_value_size_cb(void *data, int columns, char **vals, char **names)
+{
+ struct property_value_info *info = data;
+ assert(columns == 1);
+
+ info->pvi_size += strlen(vals[0]) + 1; /* count the '\0' */
+
+ return (BACKEND_CALLBACK_CONTINUE);
+}
+
+/*ARGSUSED*/
+static int
+property_value_cb(void *data, int columns, char **vals, char **names)
+{
+ struct property_value_info *info = data;
+ size_t pos, left, len;
+
+ assert(columns == 1);
+ pos = info->pvi_pos;
+ left = info->pvi_size - pos;
+
+ if ((len = strlcpy(&info->pvi_base[pos], vals[0], left)) >= left) {
+ /*
+ * since we preallocated, above, this shouldn't happen
+ */
+ backend_panic("unexpected database change");
+ }
+
+ len += 1; /* count the '\0' */
+
+ info->pvi_pos += len;
+ info->pvi_count++;
+
+ return (BACKEND_CALLBACK_CONTINUE);
+}
+
+/*ARGSUSED*/
+void
+object_free_values(const char *vals, uint32_t type, size_t count, size_t size)
+{
+ if (vals != NULL)
+ uu_free((void *)vals);
+}
+
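+/*
+ * Query callback for properties: each row is (name, id, type, value_id).
+ * If the property has values they are read in two passes within ci_tx --
+ * once to size a single buffer, once to copy the strings into it --
+ * before rc_node_create_property() is called.
+ */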
+/*ARGSUSED*/
+static int
+fill_property_callback(void *data, int columns, char **vals, char **names)
+{
+ child_info_t *cp = data;
+ backend_tx_t *tx = cp->ci_tx;
+ uint32_t main_id;
+ const char *name;
+ const char *cur;
+ rep_protocol_value_type_t type;
+ rc_node_lookup_t *lp = &cp->ci_base_nl;
+ struct property_value_info info;
+ int rc;
+
+ assert(columns == 4);
+ assert(tx != NULL);
+
+ info.pvi_base = NULL;
+ info.pvi_pos = 0;
+ info.pvi_size = 0;
+ info.pvi_count = 0;
+
+ name = *vals++;
+
+ cur = *vals++;
+ if (uu_strtouint(cur, &main_id, sizeof (main_id), 0, 0, 0) == -1)
+ backend_panic("invalid integer in database");
+
+ cur = *vals++;
+ assert((('a' <= cur[0] && 'z' >= cur[0]) ||
+ ('A' <= cur[0] && 'Z' >= cur[0])) &&
+ (cur[1] == 0 || ('a' <= cur[1] && 'z' >= cur[1]) ||
+ ('A' <= cur[1] && 'Z' >= cur[1])));
+ type = cur[0] | (cur[1] << 8);
+
+ lp->rl_main_id = main_id;
+
+ /*
+ * fill in the values, if any
+ */
+ if ((cur = *vals++) != NULL) {
+ rep_protocol_responseid_t r;
+ backend_query_t *q = backend_query_alloc();
+
+ backend_query_add(q,
+ "SELECT value_value FROM value_tbl "
+ "WHERE (value_id = '%q')", cur);
+
+ switch (r = backend_tx_run(tx, q, property_value_size_cb,
+ &info)) {
+ case REP_PROTOCOL_SUCCESS:
+ break;
+
+ case REP_PROTOCOL_FAIL_NO_RESOURCES:
+ backend_query_free(q);
+ return (BACKEND_CALLBACK_ABORT);
+
+ case REP_PROTOCOL_DONE:
+ default:
+ backend_panic("backend_tx_run() returned %d", r);
+ }
+ if (info.pvi_size > 0) {
+ info.pvi_base = uu_zalloc(info.pvi_size);
+ if (info.pvi_base == NULL) {
+ backend_query_free(q);
+ return (BACKEND_CALLBACK_ABORT);
+ }
+ switch (r = backend_tx_run(tx, q, property_value_cb,
+ &info)) {
+ case REP_PROTOCOL_SUCCESS:
+ break;
+
+ case REP_PROTOCOL_FAIL_NO_RESOURCES:
+ uu_free(info.pvi_base);
+ backend_query_free(q);
+ return (BACKEND_CALLBACK_ABORT);
+
+ case REP_PROTOCOL_DONE:
+ default:
+ backend_panic("backend_tx_run() returned %d",
+ r);
+ }
+ }
+ backend_query_free(q);
+ }
+
+ rc = rc_node_create_property(cp->ci_parent, lp, name, type,
+ info.pvi_base, info.pvi_count, info.pvi_size);
+ if (rc != REP_PROTOCOL_SUCCESS) {
+ assert(rc == REP_PROTOCOL_FAIL_NO_RESOURCES);
+ return (BACKEND_CALLBACK_ABORT);
+ }
+
+ return (BACKEND_CALLBACK_CONTINUE);
+}
+
+/*
+ * The *_setup_child_info() functions fill in a child_info_t structure with the
+ * information for the children of np with type type.
+ *
+ * They fail with
+ * _TYPE_MISMATCH - object cannot have children of type type
+ */
+
+static int
+scope_setup_child_info(rc_node_t *np, uint32_t type, child_info_t *cip)
+{
+ if (type != REP_PROTOCOL_ENTITY_SERVICE)
+ return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
+
+ bzero(cip, sizeof (*cip));
+ cip->ci_parent = np;
+ cip->ci_base_nl.rl_type = type;
+ cip->ci_base_nl.rl_backend = np->rn_id.rl_backend;
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+static int
+service_setup_child_info(rc_node_t *np, uint32_t type, child_info_t *cip)
+{
+ switch (type) {
+ case REP_PROTOCOL_ENTITY_INSTANCE:
+ case REP_PROTOCOL_ENTITY_PROPERTYGRP:
+ break;
+ default:
+ return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
+ }
+
+ bzero(cip, sizeof (*cip));
+ cip->ci_parent = np;
+ cip->ci_base_nl.rl_type = type;
+ cip->ci_base_nl.rl_backend = np->rn_id.rl_backend;
+ cip->ci_base_nl.rl_ids[ID_SERVICE] = np->rn_id.rl_main_id;
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+static int
+instance_setup_child_info(rc_node_t *np, uint32_t type, child_info_t *cip)
+{
+ switch (type) {
+ case REP_PROTOCOL_ENTITY_PROPERTYGRP:
+ case REP_PROTOCOL_ENTITY_SNAPSHOT:
+ break;
+ default:
+ return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
+ }
+
+ bzero(cip, sizeof (*cip));
+ cip->ci_parent = np;
+ cip->ci_base_nl.rl_type = type;
+ cip->ci_base_nl.rl_backend = np->rn_id.rl_backend;
+ cip->ci_base_nl.rl_ids[ID_SERVICE] = np->rn_id.rl_ids[ID_SERVICE];
+ cip->ci_base_nl.rl_ids[ID_INSTANCE] = np->rn_id.rl_main_id;
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+static int
+snaplevel_setup_child_info(rc_node_t *np, uint32_t type, child_info_t *cip)
+{
+ if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
+ return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
+
+ bzero(cip, sizeof (*cip));
+ cip->ci_parent = np;
+ cip->ci_base_nl.rl_type = type;
+ cip->ci_base_nl.rl_backend = np->rn_id.rl_backend;
+ cip->ci_base_nl.rl_ids[ID_SERVICE] = np->rn_id.rl_ids[ID_SERVICE];
+ cip->ci_base_nl.rl_ids[ID_INSTANCE] = np->rn_id.rl_ids[ID_INSTANCE];
+ cip->ci_base_nl.rl_ids[ID_NAME] = np->rn_id.rl_ids[ID_NAME];
+ cip->ci_base_nl.rl_ids[ID_SNAPSHOT] = np->rn_id.rl_ids[ID_SNAPSHOT];
+ cip->ci_base_nl.rl_ids[ID_LEVEL] = np->rn_id.rl_main_id;
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+static int
+propertygrp_setup_child_info(rc_node_t *pg, uint32_t type, child_info_t *cip)
+{
+ if (type != REP_PROTOCOL_ENTITY_PROPERTY)
+ return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
+
+ bzero(cip, sizeof (*cip));
+ cip->ci_parent = pg;
+ cip->ci_base_nl.rl_type = type;
+ cip->ci_base_nl.rl_backend = pg->rn_id.rl_backend;
+ cip->ci_base_nl.rl_ids[ID_SERVICE] = pg->rn_id.rl_ids[ID_SERVICE];
+ cip->ci_base_nl.rl_ids[ID_INSTANCE] = pg->rn_id.rl_ids[ID_INSTANCE];
+ cip->ci_base_nl.rl_ids[ID_PG] = pg->rn_id.rl_main_id;
+ cip->ci_base_nl.rl_ids[ID_GEN] = pg->rn_gen_id;
+ cip->ci_base_nl.rl_ids[ID_NAME] = pg->rn_id.rl_ids[ID_NAME];
+ cip->ci_base_nl.rl_ids[ID_SNAPSHOT] = pg->rn_id.rl_ids[ID_SNAPSHOT];
+ cip->ci_base_nl.rl_ids[ID_LEVEL] = pg->rn_id.rl_ids[ID_LEVEL];
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+/*
+ * The *_fill_children() functions populate the children of the given rc_node_t
+ * by querying the database and calling rc_node_setup_*() functions (usually
+ * via a fill_*_callback()).
+ *
+ * They fail with
+ * _NO_RESOURCES
+ */
+
+/*
+ * Returns
+ * _NO_RESOURCES
+ * _SUCCESS
+ */
+static int
+scope_fill_children(rc_node_t *np)
+{
+ backend_query_t *q;
+ child_info_t ci;
+ int res;
+
+ (void) scope_setup_child_info(np, REP_PROTOCOL_ENTITY_SERVICE, &ci);
+
+ q = backend_query_alloc();
+ backend_query_append(q, "SELECT svc_name, svc_id FROM service_tbl");
+ res = backend_run(BACKEND_TYPE_NORMAL, q, fill_child_callback, &ci);
+ backend_query_free(q);
+
+ if (res == REP_PROTOCOL_DONE)
+ res = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ return (res);
+}
+
+/*
+ * Returns
+ * _NO_RESOURCES
+ * _SUCCESS
+ */
+static int
+service_fill_children(rc_node_t *np)
+{
+ backend_query_t *q;
+ child_info_t ci;
+ int res;
+
+ assert(np->rn_id.rl_backend == BACKEND_TYPE_NORMAL);
+
+ (void) service_setup_child_info(np, REP_PROTOCOL_ENTITY_INSTANCE, &ci);
+
+ q = backend_query_alloc();
+ backend_query_add(q,
+ "SELECT instance_name, instance_id FROM instance_tbl"
+ " WHERE (instance_svc = %d)",
+ np->rn_id.rl_main_id);
+ res = backend_run(BACKEND_TYPE_NORMAL, q, fill_child_callback, &ci);
+ backend_query_free(q);
+
+ if (res == REP_PROTOCOL_DONE)
+ res = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ if (res != REP_PROTOCOL_SUCCESS)
+ return (res);
+
+ (void) service_setup_child_info(np, REP_PROTOCOL_ENTITY_PROPERTYGRP,
+ &ci);
+
+ q = backend_query_alloc();
+ backend_query_add(q,
+ "SELECT pg_name, pg_id, pg_gen_id, pg_type, pg_flags FROM pg_tbl"
+ " WHERE (pg_parent_id = %d)",
+ np->rn_id.rl_main_id);
+
+ ci.ci_base_nl.rl_backend = BACKEND_TYPE_NORMAL;
+ res = backend_run(BACKEND_TYPE_NORMAL, q, fill_pg_callback, &ci);
+ if (res == REP_PROTOCOL_SUCCESS) {
+ ci.ci_base_nl.rl_backend = BACKEND_TYPE_NONPERSIST;
+ res = backend_run(BACKEND_TYPE_NONPERSIST, q,
+ fill_pg_callback, &ci);
+ /* non-persistent database may not exist */
+ if (res == REP_PROTOCOL_FAIL_BACKEND_ACCESS)
+ res = REP_PROTOCOL_SUCCESS;
+ }
+ if (res == REP_PROTOCOL_DONE)
+ res = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ backend_query_free(q);
+
+ return (res);
+}
+
+/*
+ * Returns
+ * _NO_RESOURCES
+ * _SUCCESS
+ */
+static int
+instance_fill_children(rc_node_t *np)
+{
+ backend_query_t *q;
+ child_info_t ci;
+ int res;
+
+ assert(np->rn_id.rl_backend == BACKEND_TYPE_NORMAL);
+
+ /* Get child property groups */
+ (void) instance_setup_child_info(np, REP_PROTOCOL_ENTITY_PROPERTYGRP,
+ &ci);
+
+ q = backend_query_alloc();
+ backend_query_add(q,
+ "SELECT pg_name, pg_id, pg_gen_id, pg_type, pg_flags FROM pg_tbl"
+ " WHERE (pg_parent_id = %d)",
+ np->rn_id.rl_main_id);
+ ci.ci_base_nl.rl_backend = BACKEND_TYPE_NORMAL;
+ res = backend_run(BACKEND_TYPE_NORMAL, q, fill_pg_callback, &ci);
+ if (res == REP_PROTOCOL_SUCCESS) {
+ ci.ci_base_nl.rl_backend = BACKEND_TYPE_NONPERSIST;
+ res = backend_run(BACKEND_TYPE_NONPERSIST, q,
+ fill_pg_callback, &ci);
+ /* non-persistent database may not exist */
+ if (res == REP_PROTOCOL_FAIL_BACKEND_ACCESS)
+ res = REP_PROTOCOL_SUCCESS;
+ }
+ if (res == REP_PROTOCOL_DONE)
+ res = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ backend_query_free(q);
+
+ if (res != REP_PROTOCOL_SUCCESS)
+ return (res);
+
+ /* Get child snapshots */
+ (void) instance_setup_child_info(np, REP_PROTOCOL_ENTITY_SNAPSHOT,
+ &ci);
+
+ q = backend_query_alloc();
+ backend_query_add(q,
+ "SELECT lnk_snap_name, lnk_id, lnk_snap_id FROM snapshot_lnk_tbl"
+ " WHERE (lnk_inst_id = %d)",
+ np->rn_id.rl_main_id);
+ res = backend_run(BACKEND_TYPE_NORMAL, q, fill_snapshot_callback, &ci);
+ if (res == REP_PROTOCOL_DONE)
+ res = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ backend_query_free(q);
+
+ return (res);
+}
+
+/*
+ * Returns
+ * _NO_RESOURCES
+ * _SUCCESS
+ */
+static int
+snapshot_fill_children(rc_node_t *np)
+{
+ rc_node_t *nnp;
+ rc_snapshot_t *sp, *oldsp;
+ rc_snaplevel_t *lvl;
+ rc_node_lookup_t nl;
+ int r;
+
+ /* Get the rc_snapshot_t (& its rc_snaplevel_t's). */
+ (void) pthread_mutex_lock(&np->rn_lock);
+ sp = np->rn_snapshot;
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ if (sp == NULL) {
+ r = rc_snapshot_get(np->rn_snapshot_id, &sp);
+ if (r != REP_PROTOCOL_SUCCESS) {
+ assert(r == REP_PROTOCOL_FAIL_NO_RESOURCES);
+ return (r);
+ }
+ (void) pthread_mutex_lock(&np->rn_lock);
+ oldsp = np->rn_snapshot;
+ assert(oldsp == NULL || oldsp == sp);
+ np->rn_snapshot = sp;
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ if (oldsp != NULL)
+ rc_snapshot_rele(oldsp);
+ }
+
+ bzero(&nl, sizeof (nl));
+ nl.rl_type = REP_PROTOCOL_ENTITY_SNAPLEVEL;
+ nl.rl_backend = np->rn_id.rl_backend;
+ nl.rl_ids[ID_SERVICE] = np->rn_id.rl_ids[ID_SERVICE];
+ nl.rl_ids[ID_INSTANCE] = np->rn_id.rl_ids[ID_INSTANCE];
+ nl.rl_ids[ID_NAME] = np->rn_id.rl_main_id;
+ nl.rl_ids[ID_SNAPSHOT] = np->rn_snapshot_id;
+
+ /* Create rc_node_t's for the snapshot's rc_snaplevel_t's. */
+ for (lvl = sp->rs_levels; lvl != NULL; lvl = lvl->rsl_next) {
+ nnp = rc_node_alloc();
+ assert(nnp != NULL);
+ nl.rl_main_id = lvl->rsl_level_id;
+ nnp = rc_node_setup_snaplevel(nnp, &nl, lvl, np);
+ rc_node_rele(nnp);
+ }
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+/*
+ * Returns
+ * _NO_RESOURCES
+ * _SUCCESS
+ */
+static int
+snaplevel_fill_children(rc_node_t *np)
+{
+ rc_snaplevel_t *lvl = np->rn_snaplevel;
+ child_info_t ci;
+ int res;
+ backend_query_t *q;
+
+ (void) snaplevel_setup_child_info(np, REP_PROTOCOL_ENTITY_PROPERTYGRP,
+ &ci);
+
+ q = backend_query_alloc();
+ backend_query_add(q,
+ "SELECT snaplvl_pg_name, snaplvl_pg_id, snaplvl_gen_id, "
+ " snaplvl_pg_type, snaplvl_pg_flags "
+ " FROM snaplevel_lnk_tbl "
+ " WHERE (snaplvl_level_id = %d)",
+ lvl->rsl_level_id);
+ res = backend_run(BACKEND_TYPE_NORMAL, q, fill_pg_callback, &ci);
+ if (res == REP_PROTOCOL_DONE)
+ res = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ backend_query_free(q);
+
+ return (res);
+}
+
+/*
+ * Returns
+ * _NO_RESOURCES
+ * _SUCCESS
+ */
+static int
+propertygrp_fill_children(rc_node_t *np)
+{
+ backend_query_t *q;
+ child_info_t ci;
+ int res;
+ backend_tx_t *tx;
+
+ backend_type_t backend = np->rn_id.rl_backend;
+
+ (void) propertygrp_setup_child_info(np, REP_PROTOCOL_ENTITY_PROPERTY,
+ &ci);
+
+ res = backend_tx_begin_ro(backend, &tx);
+ if (res != REP_PROTOCOL_SUCCESS) {
+ /*
+ * If the backend didn't exist, we wouldn't have got this
+ * property group.
+ */
+ assert(res != REP_PROTOCOL_FAIL_BACKEND_ACCESS);
+ return (res);
+ }
+
+ ci.ci_tx = tx;
+
+ q = backend_query_alloc();
+ backend_query_add(q,
+ "SELECT lnk_prop_name, lnk_prop_id, lnk_prop_type, lnk_val_id "
+ "FROM prop_lnk_tbl "
+ "WHERE (lnk_pg_id = %d AND lnk_gen_id = %d)",
+ np->rn_id.rl_main_id, np->rn_gen_id);
+ res = backend_tx_run(tx, q, fill_property_callback, &ci);
+ if (res == REP_PROTOCOL_DONE)
+ res = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ backend_query_free(q);
+ backend_tx_end_ro(tx);
+
+ return (res);
+}
+
+/*
+ * Fails with
+ * _TYPE_MISMATCH - lp is not for a service
+ * _INVALID_TYPE - lp has invalid type
+ * _BAD_REQUEST - name is invalid
+ */
+static int
+scope_query_child(backend_query_t *q, rc_node_lookup_t *lp, const char *name)
+{
+ uint32_t type = lp->rl_type;
+ int rc;
+
+ if (type != REP_PROTOCOL_ENTITY_SERVICE)
+ return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
+
+ if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS)
+ return (rc);
+
+ backend_query_add(q,
+ "SELECT svc_id FROM service_tbl "
+ "WHERE svc_name = '%q'",
+ name);
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+/*
+ * Fails with
+ * _NO_RESOURCES - out of memory
+ */
+static int
+scope_insert_child(backend_tx_t *tx, rc_node_lookup_t *lp, const char *name)
+{
+ return (backend_tx_run_update(tx,
+ "INSERT INTO service_tbl (svc_id, svc_name) "
+ "VALUES (%d, '%q')",
+ lp->rl_main_id, name));
+}
+
+/*
+ * Fails with
+ * _TYPE_MISMATCH - lp is not for an instance or property group
+ * _INVALID_TYPE - lp has invalid type
+ * _BAD_REQUEST - name is invalid
+ */
+static int
+service_query_child(backend_query_t *q, rc_node_lookup_t *lp, const char *name)
+{
+ uint32_t type = lp->rl_type;
+ int rc;
+
+ if (type != REP_PROTOCOL_ENTITY_INSTANCE &&
+ type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
+ return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
+
+ if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS)
+ return (rc);
+
+ switch (type) {
+ case REP_PROTOCOL_ENTITY_INSTANCE:
+ backend_query_add(q,
+ "SELECT instance_id FROM instance_tbl "
+ "WHERE instance_name = '%q' AND instance_svc = %d",
+ name, lp->rl_ids[ID_SERVICE]);
+ break;
+ case REP_PROTOCOL_ENTITY_PROPERTYGRP:
+ backend_query_add(q,
+ "SELECT pg_id FROM pg_tbl "
+ " WHERE pg_name = '%q' AND pg_parent_id = %d",
+ name, lp->rl_ids[ID_SERVICE]);
+ break;
+ default:
+ assert(0);
+ abort();
+ }
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+/*
+ * Fails with
+ * _NO_RESOURCES - out of memory
+ */
+static int
+service_insert_child(backend_tx_t *tx, rc_node_lookup_t *lp, const char *name)
+{
+ return (backend_tx_run_update(tx,
+ "INSERT INTO instance_tbl "
+ " (instance_id, instance_name, instance_svc) "
+ "VALUES (%d, '%q', %d)",
+ lp->rl_main_id, name, lp->rl_ids[ID_SERVICE]));
+}
+
+/*
+ * Fails with
+ * _NO_RESOURCES - out of memory
+ */
+static int
+instance_insert_child(backend_tx_t *tx, rc_node_lookup_t *lp, const char *name)
+{
+ return (backend_tx_run_update(tx,
+ "INSERT INTO snapshot_lnk_tbl "
+ " (lnk_id, lnk_inst_id, lnk_snap_name, lnk_snap_id) "
+ "VALUES (%d, %d, '%q', 0)",
+ lp->rl_main_id, lp->rl_ids[ID_INSTANCE], name));
+}
+
+/*
+ * Fails with
+ * _TYPE_MISMATCH - lp is not for a property group or snapshot
+ * _INVALID_TYPE - lp has invalid type
+ * _BAD_REQUEST - name is invalid
+ */
+static int
+instance_query_child(backend_query_t *q, rc_node_lookup_t *lp, const char *name)
+{
+ uint32_t type = lp->rl_type;
+ int rc;
+
+ if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP &&
+ type != REP_PROTOCOL_ENTITY_SNAPSHOT)
+ return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
+
+ if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS)
+ return (rc);
+
+ switch (type) {
+ case REP_PROTOCOL_ENTITY_PROPERTYGRP:
+ backend_query_add(q,
+ "SELECT pg_id FROM pg_tbl "
+ " WHERE pg_name = '%q' AND pg_parent_id = %d",
+ name, lp->rl_ids[ID_INSTANCE]);
+ break;
+ case REP_PROTOCOL_ENTITY_SNAPSHOT:
+ backend_query_add(q,
+ "SELECT lnk_id FROM snapshot_lnk_tbl "
+ " WHERE lnk_snap_name = '%q' AND lnk_inst_id = %d",
+ name, lp->rl_ids[ID_INSTANCE]);
+ break;
+ default:
+ assert(0);
+ abort();
+ }
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+static int
+generic_insert_pg_child(backend_tx_t *tx, rc_node_lookup_t *lp,
+ const char *name, const char *pgtype, uint32_t flags, uint32_t gen)
+{
+ int parent_id = (lp->rl_ids[ID_INSTANCE] != 0)?
+ lp->rl_ids[ID_INSTANCE] : lp->rl_ids[ID_SERVICE];
+ return (backend_tx_run_update(tx,
+ "INSERT INTO pg_tbl "
+ " (pg_id, pg_name, pg_parent_id, pg_type, pg_flags, pg_gen_id) "
+ "VALUES (%d, '%q', %d, '%q', %d, %d)",
+ lp->rl_main_id, name, parent_id, pgtype, flags, gen));
+}
+
+static int
+service_delete_start(rc_node_t *np, delete_info_t *dip)
+{
+ int r;
+ backend_query_t *q = backend_query_alloc();
+
+ /*
+ * Check for child instances, and refuse to delete if they exist.
+ */
+ backend_query_add(q,
+ "SELECT 1 FROM instance_tbl WHERE instance_svc = %d",
+ np->rn_id.rl_main_id);
+
+ r = backend_tx_run(dip->di_tx, q, backend_fail_if_seen, NULL);
+ backend_query_free(q);
+
+ if (r == REP_PROTOCOL_DONE)
+ return (REP_PROTOCOL_FAIL_EXISTS); /* instances exist */
+
+ return (delete_stack_push(dip, BACKEND_TYPE_NORMAL, &service_delete,
+ np->rn_id.rl_main_id, 0));
+}
+
+static int
+instance_delete_start(rc_node_t *np, delete_info_t *dip)
+{
+ return (delete_stack_push(dip, BACKEND_TYPE_NORMAL, &instance_delete,
+ np->rn_id.rl_main_id, 0));
+}
+
+static int
+snapshot_delete_start(rc_node_t *np, delete_info_t *dip)
+{
+ return (delete_stack_push(dip, BACKEND_TYPE_NORMAL,
+ &snapshot_lnk_delete, np->rn_id.rl_main_id, 0));
+}
+
+static int
+propertygrp_delete_start(rc_node_t *np, delete_info_t *dip)
+{
+ return (delete_stack_push(dip, np->rn_id.rl_backend,
+ &propertygrp_delete, np->rn_id.rl_main_id, 0));
+}
+
+static object_info_t info[] = {
+ {REP_PROTOCOL_ENTITY_NONE},
+ {REP_PROTOCOL_ENTITY_SCOPE,
+ BACKEND_ID_INVALID,
+ scope_fill_children,
+ scope_setup_child_info,
+ scope_query_child,
+ scope_insert_child,
+ NULL,
+ NULL,
+ },
+ {REP_PROTOCOL_ENTITY_SERVICE,
+ BACKEND_ID_SERVICE_INSTANCE,
+ service_fill_children,
+ service_setup_child_info,
+ service_query_child,
+ service_insert_child,
+ generic_insert_pg_child,
+ service_delete_start,
+ },
+ {REP_PROTOCOL_ENTITY_INSTANCE,
+ BACKEND_ID_SERVICE_INSTANCE,
+ instance_fill_children,
+ instance_setup_child_info,
+ instance_query_child,
+ instance_insert_child,
+ generic_insert_pg_child,
+ instance_delete_start,
+ },
+ {REP_PROTOCOL_ENTITY_SNAPSHOT,
+ BACKEND_ID_SNAPNAME,
+ snapshot_fill_children,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ snapshot_delete_start,
+ },
+ {REP_PROTOCOL_ENTITY_SNAPLEVEL,
+ BACKEND_ID_SNAPLEVEL,
+ snaplevel_fill_children,
+ snaplevel_setup_child_info,
+ },
+ {REP_PROTOCOL_ENTITY_PROPERTYGRP,
+ BACKEND_ID_PROPERTYGRP,
+ propertygrp_fill_children,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ propertygrp_delete_start,
+ },
+ {REP_PROTOCOL_ENTITY_PROPERTY},
+ {-1UL}
+};
+#define NUM_INFO (sizeof (info) / sizeof (*info))
+
+/*
+ * object_fill_children() populates the child list of an rc_node_t by calling
+ * the appropriate <type>_fill_children() which runs backend queries that
+ * call an appropriate fill_*_callback() which takes a row of results,
+ * decodes them, and calls an rc_node_setup*() function in rc_node.c to create
+ * a child.
+ *
+ * Fails with
+ * _NO_RESOURCES
+ */
+int
+object_fill_children(rc_node_t *pp)
+{
+ uint32_t type = pp->rn_id.rl_type;
+ assert(type > 0 && type < NUM_INFO);
+
+ return ((*info[type].obj_fill_children)(pp));
+}
+
+int
+object_delete(rc_node_t *pp)
+{
+ int rc;
+
+ delete_info_t dip;
+ delete_ent_t de;
+
+ uint32_t type = pp->rn_id.rl_type;
+ assert(type > 0 && type < NUM_INFO);
+
+ if (info[type].obj_delete_start == NULL)
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ (void) memset(&dip, '\0', sizeof (dip));
+ rc = backend_tx_begin(BACKEND_TYPE_NORMAL, &dip.di_tx);
+ if (rc != REP_PROTOCOL_SUCCESS)
+ return (rc);
+
+ rc = backend_tx_begin(BACKEND_TYPE_NONPERSIST, &dip.di_np_tx);
+ if (rc == REP_PROTOCOL_FAIL_BACKEND_ACCESS ||
+ rc == REP_PROTOCOL_FAIL_BACKEND_READONLY)
+ dip.di_np_tx = NULL;
+ else if (rc != REP_PROTOCOL_SUCCESS) {
+ backend_tx_rollback(dip.di_tx);
+ return (rc);
+ }
+
+ if ((rc = (*info[type].obj_delete_start)(pp, &dip)) !=
+ REP_PROTOCOL_SUCCESS) {
+ goto fail;
+ }
+
+ while (delete_stack_pop(&dip, &de)) {
+ rc = (*de.de_cb)(&dip, &de);
+ if (rc != REP_PROTOCOL_SUCCESS)
+ goto fail;
+ }
+
+ rc = backend_tx_commit(dip.di_tx);
+ if (rc != REP_PROTOCOL_SUCCESS)
+ backend_tx_rollback(dip.di_np_tx);
+ else if (dip.di_np_tx)
+ (void) backend_tx_commit(dip.di_np_tx);
+
+ delete_stack_cleanup(&dip);
+
+ return (rc);
+
+fail:
+ backend_tx_rollback(dip.di_tx);
+ backend_tx_rollback(dip.di_np_tx);
+ delete_stack_cleanup(&dip);
+ return (rc);
+}
+
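+/*
+ * Common create path for non-pg children: check that the parent can have
+ * a child of this type, verify that no child with the same name already
+ * exists, allocate a new id, and insert the row.  The caller owns the
+ * transaction and finishes the node with rc_node_setup() after committing.
+ */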
+int
+object_do_create(backend_tx_t *tx, child_info_t *cip, rc_node_t *pp,
+ uint32_t type, const char *name, rc_node_t **cpp)
+{
+ uint32_t ptype = pp->rn_id.rl_type;
+
+ backend_query_t *q;
+ uint32_t id;
+ rc_node_t *np = NULL;
+ int rc;
+ object_info_t *ip;
+
+ rc_node_lookup_t *lp = &cip->ci_base_nl;
+
+ assert(ptype > 0 && ptype < NUM_INFO);
+
+ ip = &info[ptype];
+
+ if (type == REP_PROTOCOL_ENTITY_PROPERTYGRP)
+ return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
+
+ if (ip->obj_setup_child_info == NULL ||
+ ip->obj_query_child == NULL ||
+ ip->obj_insert_child == NULL)
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ if ((rc = (*ip->obj_setup_child_info)(pp, type, cip)) !=
+ REP_PROTOCOL_SUCCESS)
+ return (rc);
+
+ q = backend_query_alloc();
+ if ((rc = (*ip->obj_query_child)(q, lp, name)) !=
+ REP_PROTOCOL_SUCCESS) {
+ assert(rc == REP_PROTOCOL_FAIL_BAD_REQUEST);
+ backend_query_free(q);
+ return (rc);
+ }
+
+ rc = backend_tx_run_single_int(tx, q, &id);
+ backend_query_free(q);
+
+ if (rc == REP_PROTOCOL_SUCCESS)
+ return (REP_PROTOCOL_FAIL_EXISTS);
+ else if (rc != REP_PROTOCOL_FAIL_NOT_FOUND)
+ return (rc);
+
+ if ((lp->rl_main_id = backend_new_id(tx,
+ info[type].obj_id_space)) == 0) {
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+ }
+
+ if ((np = rc_node_alloc()) == NULL)
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+
+ if ((rc = (*ip->obj_insert_child)(tx, lp, name)) !=
+ REP_PROTOCOL_SUCCESS) {
+ rc_node_destroy(np);
+ return (rc);
+ }
+
+ *cpp = np;
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+/*
+ * Fails with
+ * _NOT_APPLICABLE - type is _PROPERTYGRP
+ * _BAD_REQUEST - cannot create children for this type of node
+ * name is invalid
+ * _TYPE_MISMATCH - object cannot have children of type type
+ * _NO_RESOURCES - out of memory, or could not allocate new id
+ * _BACKEND_READONLY
+ * _BACKEND_ACCESS
+ * _EXISTS - child already exists
+ */
+int
+object_create(rc_node_t *pp, uint32_t type, const char *name, rc_node_t **cpp)
+{
+ backend_tx_t *tx;
+ rc_node_t *np = NULL;
+ child_info_t ci;
+ int rc;
+
+ if ((rc = backend_tx_begin(pp->rn_id.rl_backend, &tx)) !=
+ REP_PROTOCOL_SUCCESS) {
+ return (rc);
+ }
+
+ if ((rc = object_do_create(tx, &ci, pp, type, name, &np)) !=
+ REP_PROTOCOL_SUCCESS) {
+ backend_tx_rollback(tx);
+ return (rc);
+ }
+
+ rc = backend_tx_commit(tx);
+ if (rc != REP_PROTOCOL_SUCCESS) {
+ rc_node_destroy(np);
+ return (rc);
+ }
+
+ *cpp = rc_node_setup(np, &ci.ci_base_nl, name, ci.ci_parent);
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
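+/*
+ * Property groups can live in either the persistent or the non-persistent
+ * repository, selected by SCF_PG_FLAG_NONPERSISTENT.  The name is checked
+ * for collisions in both backends before the row is inserted into the one
+ * chosen by the flags.
+ */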
+/*ARGSUSED*/
+int
+object_create_pg(rc_node_t *pp, uint32_t type, const char *name,
+ const char *pgtype, uint32_t flags, rc_node_t **cpp)
+{
+ uint32_t ptype = pp->rn_id.rl_type;
+ backend_tx_t *tx_ro, *tx_wr;
+ backend_query_t *q;
+ uint32_t id;
+ uint32_t gen = 0;
+ rc_node_t *np = NULL;
+ int rc;
+ int rc_wr;
+ int rc_ro;
+ object_info_t *ip;
+
+ int nonpersist = (flags & SCF_PG_FLAG_NONPERSISTENT);
+
+ child_info_t ci;
+ rc_node_lookup_t *lp = &ci.ci_base_nl;
+
+ assert(ptype > 0 && ptype < NUM_INFO);
+
+ if (ptype != REP_PROTOCOL_ENTITY_SERVICE &&
+ ptype != REP_PROTOCOL_ENTITY_INSTANCE)
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ ip = &info[ptype];
+
+ assert(ip->obj_setup_child_info != NULL &&
+ ip->obj_query_child != NULL &&
+ ip->obj_insert_pg_child != NULL);
+
+ if ((rc = (*ip->obj_setup_child_info)(pp, type, &ci)) !=
+ REP_PROTOCOL_SUCCESS)
+ return (rc);
+
+ q = backend_query_alloc();
+ if ((rc = (*ip->obj_query_child)(q, lp, name)) !=
+ REP_PROTOCOL_SUCCESS) {
+ backend_query_free(q);
+ return (rc);
+ }
+
+ if (!nonpersist) {
+ lp->rl_backend = BACKEND_TYPE_NORMAL;
+ rc_wr = backend_tx_begin(BACKEND_TYPE_NORMAL, &tx_wr);
+ rc_ro = backend_tx_begin_ro(BACKEND_TYPE_NONPERSIST, &tx_ro);
+ } else {
+ lp->rl_backend = BACKEND_TYPE_NONPERSIST;
+ rc_ro = backend_tx_begin_ro(BACKEND_TYPE_NORMAL, &tx_ro);
+ rc_wr = backend_tx_begin(BACKEND_TYPE_NONPERSIST, &tx_wr);
+ }
+
+ if (rc_wr != REP_PROTOCOL_SUCCESS) {
+ rc = rc_wr;
+ goto fail;
+ }
+ if (rc_ro != REP_PROTOCOL_SUCCESS &&
+ rc_ro != REP_PROTOCOL_FAIL_BACKEND_ACCESS) {
+ rc = rc_ro;
+ goto fail;
+ }
+
+ if (tx_ro != NULL) {
+ rc = backend_tx_run_single_int(tx_ro, q, &id);
+
+ if (rc == REP_PROTOCOL_SUCCESS) {
+ backend_query_free(q);
+ rc = REP_PROTOCOL_FAIL_EXISTS;
+ goto fail;
+ } else if (rc != REP_PROTOCOL_FAIL_NOT_FOUND) {
+ backend_query_free(q);
+ goto fail;
+ }
+ }
+
+ rc = backend_tx_run_single_int(tx_wr, q, &id);
+ backend_query_free(q);
+
+ if (rc == REP_PROTOCOL_SUCCESS) {
+ rc = REP_PROTOCOL_FAIL_EXISTS;
+ goto fail;
+ } else if (rc != REP_PROTOCOL_FAIL_NOT_FOUND) {
+ goto fail;
+ }
+
+ if (tx_ro != NULL)
+ backend_tx_end_ro(tx_ro);
+ tx_ro = NULL;
+
+ if ((lp->rl_main_id = backend_new_id(tx_wr,
+ info[type].obj_id_space)) == 0) {
+ rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ goto fail;
+ }
+
+ if ((np = rc_node_alloc()) == NULL) {
+ rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ goto fail;
+ }
+
+ if ((rc = (*ip->obj_insert_pg_child)(tx_wr, lp, name, pgtype, flags,
+ gen)) != REP_PROTOCOL_SUCCESS) {
+ rc_node_destroy(np);
+ goto fail;
+ }
+
+ rc = backend_tx_commit(tx_wr);
+ if (rc != REP_PROTOCOL_SUCCESS) {
+ rc_node_destroy(np);
+ return (rc);
+ }
+
+ *cpp = rc_node_setup_pg(np, lp, name, pgtype, flags, gen, ci.ci_parent);
+
+ return (REP_PROTOCOL_SUCCESS);
+
+fail:
+ if (tx_ro != NULL)
+ backend_tx_end_ro(tx_ro);
+ if (tx_wr != NULL)
+ backend_tx_rollback(tx_wr);
+ return (rc);
+}
+
+/*
+ * Given a row of snaplevel number, snaplevel id, service id, service name,
+ * instance id, & instance name, create an rc_snaplevel_t & prepend it onto the
+ * rs_levels list of the rc_snapshot_t passed in as data.
+ * Returns _CONTINUE on success or _ABORT if any allocations fail.
+ */
+/*ARGSUSED*/
+static int
+fill_snapshot_cb(void *data, int columns, char **vals, char **names)
+{
+ rc_snapshot_t *sp = data;
+ rc_snaplevel_t *lvl;
+ char *num = vals[0];
+ char *id = vals[1];
+ char *service_id = vals[2];
+ char *service = vals[3];
+ char *instance_id = vals[4];
+ char *instance = vals[5];
+ assert(columns == 6);
+
+ lvl = uu_zalloc(sizeof (*lvl));
+ if (lvl == NULL)
+ return (BACKEND_CALLBACK_ABORT);
+ lvl->rsl_parent = sp;
+ lvl->rsl_next = sp->rs_levels;
+ sp->rs_levels = lvl;
+
+ if (uu_strtouint(num, &lvl->rsl_level_num,
+ sizeof (lvl->rsl_level_num), 0, 0, 0) == -1 ||
+ uu_strtouint(id, &lvl->rsl_level_id,
+ sizeof (lvl->rsl_level_id), 0, 0, 0) == -1 ||
+ uu_strtouint(service_id, &lvl->rsl_service_id,
+ sizeof (lvl->rsl_service_id), 0, 0, 0) == -1 ||
+ (instance_id != NULL &&
+ uu_strtouint(instance_id, &lvl->rsl_instance_id,
+ sizeof (lvl->rsl_instance_id), 0, 0, 0) == -1)) {
+ backend_panic("invalid integer in database");
+ }
+
+ lvl->rsl_scope = (const char *)"localhost";
+ lvl->rsl_service = strdup(service);
+ if (lvl->rsl_service == NULL) {
+ uu_free(lvl);
+ return (BACKEND_CALLBACK_ABORT);
+ }
+ if (instance) {
+ assert(lvl->rsl_instance_id != 0);
+ lvl->rsl_instance = strdup(instance);
+ if (lvl->rsl_instance == NULL) {
+ free((void *)lvl->rsl_service);
+ uu_free(lvl);
+ return (BACKEND_CALLBACK_ABORT);
+ }
+ } else {
+ assert(lvl->rsl_instance_id == 0);
+ }
+
+ return (BACKEND_CALLBACK_CONTINUE);
+}
+
+/*
+ * Populate sp's rs_levels list from the snaplevel_tbl table.
+ * Fails with
+ * _NO_RESOURCES
+ */
+int
+object_fill_snapshot(rc_snapshot_t *sp)
+{
+ backend_query_t *q;
+ rc_snaplevel_t *sl;
+ int result;
+ int i;
+
+ q = backend_query_alloc();
+ backend_query_add(q,
+ "SELECT snap_level_num, snap_level_id, "
+ " snap_level_service_id, snap_level_service, "
+ " snap_level_instance_id, snap_level_instance "
+ "FROM snaplevel_tbl "
+ "WHERE snap_id = %d "
+ "ORDER BY snap_level_id DESC",
+ sp->rs_snap_id);
+
+ result = backend_run(BACKEND_TYPE_NORMAL, q, fill_snapshot_cb, sp);
+ if (result == REP_PROTOCOL_DONE)
+ result = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ backend_query_free(q);
+
+ if (result == REP_PROTOCOL_SUCCESS) {
+ i = 0;
+ for (sl = sp->rs_levels; sl != NULL; sl = sl->rsl_next) {
+ if (sl->rsl_level_num != ++i) {
+ backend_panic("snaplevels corrupt; expected "
+ "level %d, got %d", i, sl->rsl_level_num);
+ }
+ }
+ }
+ return (result);
+}
+
+/*ARGSUSED*/
+static int
+object_copy_string(void *data_arg, int columns, char **vals, char **names)
+{
+ char **data = data_arg;
+
+ assert(columns == 1);
+
+ if (*data != NULL)
+ free(*data);
+ *data = NULL;
+
+ if (vals[0] != NULL) {
+ if ((*data = strdup(vals[0])) == NULL)
+ return (BACKEND_CALLBACK_ABORT);
+ }
+
+ return (BACKEND_CALLBACK_CONTINUE);
+}
+
+struct snaplevel_add_info {
+ backend_query_t *sai_q;
+ uint32_t sai_level_id;
+ int sai_used; /* sai_q has been used */
+};
+
+/*ARGSUSED*/
+static int
+object_snaplevel_process_pg(void *data_arg, int columns, char **vals,
+ char **names)
+{
+ struct snaplevel_add_info *data = data_arg;
+
+ assert(columns == 5);
+
+ backend_query_add(data->sai_q,
+ "INSERT INTO snaplevel_lnk_tbl "
+ " (snaplvl_level_id, snaplvl_pg_id, snaplvl_pg_name, "
+ " snaplvl_pg_type, snaplvl_pg_flags, snaplvl_gen_id)"
+ "VALUES (%d, %s, '%q', '%q', %s, %s);",
+ data->sai_level_id, vals[0], vals[1], vals[2], vals[3], vals[4]);
+
+ data->sai_used = 1;
+
+ return (BACKEND_CALLBACK_CONTINUE);
+}
+
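+/*
+ * Add one snaplevel to a snapshot: create the snaplevel_tbl row, then copy
+ * the current pg_tbl rows of the instance (level 1) or service (level 2)
+ * into snaplevel_lnk_tbl.
+ */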
+/*ARGSUSED*/
+static int
+object_snapshot_add_level(backend_tx_t *tx, uint32_t snap_id,
+ uint32_t snap_level_num, uint32_t svc_id, const char *svc_name,
+ uint32_t inst_id, const char *inst_name)
+{
+ struct snaplevel_add_info data;
+ backend_query_t *q;
+ int result;
+
+ assert((snap_level_num == 1 && inst_name != NULL) ||
+ (snap_level_num == 2 && inst_name == NULL));
+
+ data.sai_level_id = backend_new_id(tx, BACKEND_ID_SNAPLEVEL);
+ if (data.sai_level_id == 0) {
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+ }
+
+ result = backend_tx_run_update(tx,
+ "INSERT INTO snaplevel_tbl "
+ " (snap_id, snap_level_num, snap_level_id, "
+ " snap_level_service_id, snap_level_service, "
+ " snap_level_instance_id, snap_level_instance) "
+ "VALUES (%d, %d, %d, %d, %Q, %d, %Q);",
+ snap_id, snap_level_num, data.sai_level_id, svc_id, svc_name,
+ inst_id, inst_name);
+
+ q = backend_query_alloc();
+ backend_query_add(q,
+ "SELECT pg_id, pg_name, pg_type, pg_flags, pg_gen_id FROM pg_tbl "
+ "WHERE (pg_parent_id = %d);",
+ (inst_name != NULL)? inst_id : svc_id);
+
+ data.sai_q = backend_query_alloc();
+ data.sai_used = 0;
+ result = backend_tx_run(tx, q, object_snaplevel_process_pg,
+ &data);
+ backend_query_free(q);
+
+ if (result == REP_PROTOCOL_SUCCESS && data.sai_used != 0)
+ result = backend_tx_run(tx, data.sai_q, NULL, NULL);
+ backend_query_free(data.sai_q);
+
+ return (result);
+}
+
+/*
+ * Fails with:
+ * _NO_RESOURCES - no new id or out of disk space
+ * _BACKEND_READONLY - persistent backend is read-only
+ */
+static int
+object_snapshot_do_take(uint32_t instid, const char *inst_name,
+ uint32_t svcid, const char *svc_name,
+ backend_tx_t **tx_out, uint32_t *snapid_out)
+{
+ backend_tx_t *tx;
+ backend_query_t *q;
+ int result;
+
+ char *svc_name_alloc = NULL;
+ char *inst_name_alloc = NULL;
+ uint32_t snapid;
+
+ result = backend_tx_begin(BACKEND_TYPE_NORMAL, &tx);
+ if (result != REP_PROTOCOL_SUCCESS)
+ return (result);
+
+ snapid = backend_new_id(tx, BACKEND_ID_SNAPSHOT);
+ if (snapid == 0) {
+ result = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ goto fail;
+ }
+
+ if (svc_name == NULL) {
+ q = backend_query_alloc();
+ backend_query_add(q,
+ "SELECT svc_name FROM service_tbl "
+ "WHERE (svc_id = %d)", svcid);
+ result = backend_tx_run(tx, q, object_copy_string,
+ &svc_name_alloc);
+ backend_query_free(q);
+
+ svc_name = svc_name_alloc;
+
+ if (result == REP_PROTOCOL_DONE) {
+ result = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ goto fail;
+ }
+ if (result == REP_PROTOCOL_SUCCESS && svc_name == NULL)
+ backend_panic("unable to find name for svc id %d\n",
+ svcid);
+
+ if (result != REP_PROTOCOL_SUCCESS)
+ goto fail;
+ }
+
+ if (inst_name == NULL) {
+ q = backend_query_alloc();
+ backend_query_add(q,
+ "SELECT instance_name FROM instance_tbl "
+ "WHERE (instance_id = %d)", instid);
+ result = backend_tx_run(tx, q, object_copy_string,
+ &inst_name_alloc);
+ backend_query_free(q);
+
+ inst_name = inst_name_alloc;
+
+ if (result == REP_PROTOCOL_DONE) {
+ result = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ goto fail;
+ }
+
+ if (result == REP_PROTOCOL_SUCCESS && inst_name == NULL)
+ backend_panic(
+ "unable to find name for instance id %d\n", instid);
+
+ if (result != REP_PROTOCOL_SUCCESS)
+ goto fail;
+ }
+
+ result = object_snapshot_add_level(tx, snapid, 1,
+ svcid, svc_name, instid, inst_name);
+
+ if (result != REP_PROTOCOL_SUCCESS)
+ goto fail;
+
+ result = object_snapshot_add_level(tx, snapid, 2,
+ svcid, svc_name, 0, NULL);
+
+ if (result != REP_PROTOCOL_SUCCESS)
+ goto fail;
+
+ *snapid_out = snapid;
+ *tx_out = tx;
+
+ free(svc_name_alloc);
+ free(inst_name_alloc);
+
+ return (REP_PROTOCOL_SUCCESS);
+
+fail:
+ backend_tx_rollback(tx);
+ free(svc_name_alloc);
+ free(inst_name_alloc);
+ return (result);
+}
+
+/*
+ * Fails with:
+ * _TYPE_MISMATCH - pp is not an instance
+ * _NO_RESOURCES - no new id or out of disk space
+ * _BACKEND_READONLY - persistent backend is read-only
+ */
+int
+object_snapshot_take_new(rc_node_t *pp,
+ const char *svc_name, const char *inst_name,
+ const char *name, rc_node_t **outp)
+{
+ rc_node_lookup_t *insti = &pp->rn_id;
+
+ uint32_t instid = insti->rl_main_id;
+ uint32_t svcid = insti->rl_ids[ID_SERVICE];
+ uint32_t snapid = 0;
+ backend_tx_t *tx = NULL;
+ child_info_t ci;
+ rc_node_t *np;
+ int result;
+
+ if (insti->rl_type != REP_PROTOCOL_ENTITY_INSTANCE)
+ return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
+
+ result = object_snapshot_do_take(instid, inst_name, svcid, svc_name,
+ &tx, &snapid);
+ if (result != REP_PROTOCOL_SUCCESS)
+ return (result);
+
+ if ((result = object_do_create(tx, &ci, pp,
+ REP_PROTOCOL_ENTITY_SNAPSHOT, name, &np)) != REP_PROTOCOL_SUCCESS) {
+ backend_tx_rollback(tx);
+ return (result);
+ }
+
+ /*
+ * link the new object to the new snapshot.
+ */
+ np->rn_snapshot_id = snapid;
+
+ result = backend_tx_run_update(tx,
+ "UPDATE snapshot_lnk_tbl SET lnk_snap_id = %d WHERE lnk_id = %d;",
+ snapid, ci.ci_base_nl.rl_main_id);
+ if (result != REP_PROTOCOL_SUCCESS) {
+ backend_tx_rollback(tx);
+ rc_node_destroy(np);
+ return (result);
+ }
+ result = backend_tx_commit(tx);
+ if (result != REP_PROTOCOL_SUCCESS) {
+ rc_node_destroy(np);
+ return (result);
+ }
+
+ *outp = rc_node_setup(np, &ci.ci_base_nl, name, ci.ci_parent);
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+/*
+ * Fails with:
+ *   _TYPE_MISMATCH - snapi is not a snapshot
+ * _NO_RESOURCES - no new id or out of disk space
+ * _BACKEND_READONLY - persistent backend is read-only
+ */
+int
+object_snapshot_attach(rc_node_lookup_t *snapi, uint32_t *snapid_ptr,
+ int takesnap)
+{
+ uint32_t svcid = snapi->rl_ids[ID_SERVICE];
+ uint32_t instid = snapi->rl_ids[ID_INSTANCE];
+ uint32_t snapid = *snapid_ptr;
+ uint32_t oldsnapid = 0;
+ backend_tx_t *tx = NULL;
+ backend_query_t *q;
+ int result;
+
+ delete_info_t dip;
+ delete_ent_t de;
+
+ if (snapi->rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT)
+ return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
+
+ if (takesnap) {
+ result = object_snapshot_do_take(instid, NULL,
+ svcid, NULL, &tx, &snapid);
+ if (result != REP_PROTOCOL_SUCCESS)
+ return (result);
+ } else {
+ result = backend_tx_begin(BACKEND_TYPE_NORMAL, &tx);
+ if (result != REP_PROTOCOL_SUCCESS)
+ return (result);
+ }
+
+ q = backend_query_alloc();
+ backend_query_add(q,
+ "SELECT lnk_snap_id FROM snapshot_lnk_tbl WHERE lnk_id = %d; "
+ "UPDATE snapshot_lnk_tbl SET lnk_snap_id = %d WHERE lnk_id = %d;",
+ snapi->rl_main_id, snapid, snapi->rl_main_id);
+ result = backend_tx_run_single_int(tx, q, &oldsnapid);
+ backend_query_free(q);
+
+ if (result == REP_PROTOCOL_FAIL_NOT_FOUND) {
+ backend_tx_rollback(tx);
+ backend_panic("unable to find snapshot id %d",
+ snapi->rl_main_id);
+ }
+ if (result != REP_PROTOCOL_SUCCESS)
+ goto fail;
+
+ /*
+ * Now we use the delete stack to handle the possible unreferencing
+ * of oldsnapid.
+ */
+ (void) memset(&dip, 0, sizeof (dip));
+ dip.di_tx = tx;
+ dip.di_np_tx = NULL; /* no need for non-persistant backend */
+
+ if ((result = delete_stack_push(&dip, BACKEND_TYPE_NORMAL,
+ &snaplevel_tbl_delete, oldsnapid, 0)) != REP_PROTOCOL_SUCCESS)
+ goto fail;
+
+ while (delete_stack_pop(&dip, &de)) {
+ result = (*de.de_cb)(&dip, &de);
+ if (result != REP_PROTOCOL_SUCCESS)
+ goto fail;
+ }
+
+ result = backend_tx_commit(tx);
+ if (result != REP_PROTOCOL_SUCCESS)
+ goto fail;
+
+ delete_stack_cleanup(&dip);
+ *snapid_ptr = snapid;
+ return (REP_PROTOCOL_SUCCESS);
+
+fail:
+ backend_tx_rollback(tx);
+ delete_stack_cleanup(&dip);
+ return (result);
+}
diff --git a/usr/src/cmd/svc/configd/maindoor.c b/usr/src/cmd/svc/configd/maindoor.c
new file mode 100644
index 0000000000..b25ba02239
--- /dev/null
+++ b/usr/src/cmd/svc/configd/maindoor.c
@@ -0,0 +1,190 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <assert.h>
+#include <door.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <ucred.h>
+
+#include "repcache_protocol.h"
+#include "configd.h"
+
+#define INVALID_RESULT ((uint32_t)-1U)
+
+static int main_door_fd = -1;
+
+/*ARGSUSED*/
+static void
+main_switcher(void *cookie, char *argp, size_t arg_size, door_desc_t *desc,
+ uint_t n_desc)
+{
+ repository_door_request_t *request;
+ repository_door_response_t reply;
+ door_desc_t reply_desc;
+
+ thread_info_t *ti = thread_self();
+
+ int send_desc = 0;
+ int fd;
+
+ thread_newstate(ti, TI_MAIN_DOOR_CALL);
+ ti->ti_main_door_request = (void *)argp;
+
+ assert(cookie == REPOSITORY_DOOR_COOKIE);
+
+ reply.rdr_status = INVALID_RESULT;
+
+ if (argp == DOOR_UNREF_DATA) {
+ backend_fini();
+
+ exit(CONFIGD_EXIT_LOST_MAIN_DOOR);
+ }
+
+ /*
+ * No file descriptors allowed
+ */
+ assert(n_desc == 0);
+
+ /*
+ * first, we just check the version
+ */
+ if (arg_size < offsetofend(repository_door_request_t, rdr_version)) {
+ reply.rdr_status = REPOSITORY_DOOR_FAIL_BAD_REQUEST;
+ goto fail;
+ }
+
+ /* LINTED alignment */
+ request = (repository_door_request_t *)argp;
+ ti->ti_main_door_request = request;
+
+ if (request->rdr_version != REPOSITORY_DOOR_VERSION) {
+ reply.rdr_status = REPOSITORY_DOOR_FAIL_VERSION_MISMATCH;
+ goto fail;
+ }
+
+ /*
+ * Now, check that the argument is of the minimum required size
+ */
+ if (arg_size < offsetofend(repository_door_request_t, rdr_request)) {
+ reply.rdr_status = REPOSITORY_DOOR_FAIL_BAD_REQUEST;
+ goto fail;
+ }
+
+ if (door_ucred(&ti->ti_ucred) != 0) {
+ reply.rdr_status = REPOSITORY_DOOR_FAIL_PERMISSION_DENIED;
+ goto fail;
+ }
+
+ switch (request->rdr_request) {
+ case REPOSITORY_DOOR_REQUEST_CONNECT:
+ fd = -1;
+ reply.rdr_status = create_connection(ti->ti_ucred, request,
+ arg_size, &fd);
+ if (reply.rdr_status != REPOSITORY_DOOR_SUCCESS) {
+ assert(fd == -1);
+ goto fail;
+ }
+ assert(fd != -1);
+ reply_desc.d_attributes = DOOR_DESCRIPTOR | DOOR_RELEASE;
+ reply_desc.d_data.d_desc.d_descriptor = fd;
+ send_desc = 1;
+ break;
+
+ default:
+ reply.rdr_status = REPOSITORY_DOOR_FAIL_BAD_REQUEST;
+ goto fail;
+ }
+
+fail:
+ assert(reply.rdr_status != INVALID_RESULT);
+
+ thread_newstate(ti, TI_DOOR_RETURN);
+ ti->ti_main_door_request = NULL;
+
+ (void) door_return((char *)&reply, sizeof (reply),
+ &reply_desc, (send_desc)? 1:0);
+ (void) door_return(NULL, 0, NULL, 0);
+}
+
+int
+setup_main_door(const char *doorpath)
+{
+ mode_t oldmask;
+ int fd;
+
+ int door_flags = DOOR_UNREF | DOOR_REFUSE_DESC;
+#ifdef DOOR_NO_CANCEL
+ door_flags |= DOOR_NO_CANCEL;
+#endif
+ if ((main_door_fd = door_create(main_switcher, REPOSITORY_DOOR_COOKIE,
+ door_flags)) < 0) {
+ perror("door_create");
+ return (0);
+ }
+
+#ifdef DOOR_PARAM_DATA_MIN
+ if (door_setparam(main_door_fd, DOOR_PARAM_DATA_MIN,
+ offsetofend(repository_door_request_t, rdr_request)) == -1 ||
+ door_setparam(main_door_fd, DOOR_PARAM_DATA_MAX,
+ sizeof (repository_door_request_t)) == -1) {
+ perror("door_setparam");
+ return (0);
+ }
+#endif /* DOOR_PARAM_DATA_MIN */
+
+ /*
+ * Create the file if it doesn't exist. Ignore errors, since
+ * fattach(3C) will catch any real problems.
+ */
+ oldmask = umask(000); /* disable umask temporarily */
+ fd = open(doorpath, O_RDWR | O_CREAT | O_EXCL, 0644);
+ (void) umask(oldmask);
+
+ if (fd >= 0)
+ (void) close(fd);
+
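+	/*
+	 * fattach(3C) fails with EBUSY if something is already attached to
+	 * doorpath (e.g. a stale door left by a previous configd); in that
+	 * case, detach it and retry the attach once before giving up.
+	 */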
+ if (fattach(main_door_fd, doorpath) < 0) {
+ if ((errno != EBUSY) ||
+ (fdetach(doorpath) < 0) ||
+ (fattach(main_door_fd, doorpath) < 0)) {
+ perror("fattach");
+ (void) door_revoke(main_door_fd);
+ main_door_fd = -1;
+ return (0);
+ }
+ }
+
+ return (1);
+}
diff --git a/usr/src/cmd/svc/configd/object.c b/usr/src/cmd/svc/configd/object.c
new file mode 100644
index 0000000000..7bc4155033
--- /dev/null
+++ b/usr/src/cmd/svc/configd/object.c
@@ -0,0 +1,559 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * This file only contains the transaction commit logic.
+ */
+
+#include <assert.h>
+#include <alloca.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <sys/sysmacros.h>
+#include "configd.h"
+
+#define INVALID_OBJ_ID ((uint32_t)-1)
+#define INVALID_TYPE ((uint32_t)-1)
+
+struct tx_cmd {
+ const struct rep_protocol_transaction_cmd *tx_cmd;
+ const char *tx_prop;
+ uint32_t *tx_values;
+ uint32_t tx_nvalues;
+ uint32_t tx_orig_value_id;
+ char tx_found;
+ char tx_processed;
+ char tx_bad;
+};
+
+static int
+tx_cmd_compare(const void *key, const void *elem_arg)
+{
+ const struct tx_cmd *elem = elem_arg;
+
+ return (strcmp((const char *)key, elem->tx_prop));
+}
+
+struct tx_commit_data {
+ uint32_t txc_pg_id;
+ uint32_t txc_gen;
+ uint32_t txc_oldgen;
+ short txc_backend;
+ backend_tx_t *txc_tx;
+ backend_query_t *txc_inserts;
+ size_t txc_count;
+ rep_protocol_responseid_t txc_result;
+ struct tx_cmd txc_cmds[1]; /* actually txc_count */
+};
+#define TX_COMMIT_DATA_SIZE(count) \
+ offsetof(struct tx_commit_data, txc_cmds[count])
+
+/*ARGSUSED*/
+static int
+tx_check_genid(void *data_arg, int columns, char **vals, char **names)
+{
+ struct tx_commit_data *data = data_arg;
+ assert(columns == 1);
+ if (atoi(vals[0]) != data->txc_oldgen)
+ data->txc_result = REP_PROTOCOL_FAIL_NOT_LATEST;
+ else
+ data->txc_result = REP_PROTOCOL_SUCCESS;
+ return (BACKEND_CALLBACK_CONTINUE);
+}
+
+/*
+ * tx_process_property() is called once for each property in current
+ * property group generation. Its purpose is threefold:
+ *
+ * 1. copy properties not mentioned in the transaction over unchanged.
+ * 2. mark DELETEd properties as seen (they will be left out of the new
+ * generation).
+ * 3. consistency-check NEW, CLEAR, and REPLACE commands.
+ *
+ * Any consistency problems set tx_bad, and seen properties are marked
+ * tx_found.  These are used later, in tx_process_cmds().
+ */
+/*ARGSUSED*/
+static int
+tx_process_property(void *data_arg, int columns, char **vals, char **names)
+{
+ struct tx_commit_data *data = data_arg;
+ struct tx_cmd *elem;
+
+ const char *prop_name = vals[0];
+ const char *prop_type = vals[1];
+ const char *lnk_val_id = vals[2];
+
+ char *endptr;
+
+ assert(columns == 3);
+
+ elem = bsearch(prop_name, data->txc_cmds, data->txc_count,
+ sizeof (*data->txc_cmds), tx_cmd_compare);
+
+ if (elem == NULL) {
+ backend_query_add(data->txc_inserts,
+ "INSERT INTO prop_lnk_tbl"
+ " (lnk_pg_id, lnk_gen_id, lnk_prop_name, lnk_prop_type,"
+ " lnk_val_id) "
+ "VALUES ( %d, %d, '%q', '%q', %Q );",
+ data->txc_pg_id, data->txc_gen, prop_name, prop_type,
+ lnk_val_id);
+ } else {
+ assert(!elem->tx_found);
+ elem->tx_found = 1;
+
+ if (lnk_val_id != NULL) {
+ errno = 0;
+ elem->tx_orig_value_id =
+ strtoul(lnk_val_id, &endptr, 10);
+ if (elem->tx_orig_value_id == 0 || *endptr != 0 ||
+ errno != 0) {
+ return (BACKEND_CALLBACK_ABORT);
+ }
+ } else {
+ elem->tx_orig_value_id = 0;
+ }
+
+ switch (elem->tx_cmd->rptc_action) {
+ case REP_PROTOCOL_TX_ENTRY_NEW:
+ elem->tx_bad = 1;
+ data->txc_result = REP_PROTOCOL_FAIL_EXISTS;
+ break;
+ case REP_PROTOCOL_TX_ENTRY_CLEAR:
+ if (REP_PROTOCOL_BASE_TYPE(elem->tx_cmd->rptc_type) !=
+ prop_type[0] &&
+ REP_PROTOCOL_SUBTYPE(elem->tx_cmd->rptc_type) !=
+ prop_type[1]) {
+ elem->tx_bad = 1;
+ data->txc_result =
+ REP_PROTOCOL_FAIL_TYPE_MISMATCH;
+ }
+ break;
+ case REP_PROTOCOL_TX_ENTRY_REPLACE:
+ break;
+ case REP_PROTOCOL_TX_ENTRY_DELETE:
+ elem->tx_processed = 1;
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ }
+ return (BACKEND_CALLBACK_CONTINUE);
+}
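+
+/*
+ * The bsearch() in tx_process_property() requires the txc_cmds array to be
+ * sorted by property name; tx_check_and_setup() rejects any transaction
+ * whose commands are not in strictly increasing strcmp() order.  A minimal
+ * sketch of the lookup ("myprop" is a hypothetical property name):
+ *
+ *	struct tx_cmd *elem;
+ *
+ *	elem = bsearch("myprop", data->txc_cmds, data->txc_count,
+ *	    sizeof (*data->txc_cmds), tx_cmd_compare);
+ *	if (elem == NULL)
+ *		... "myprop" is not part of this transaction ...
+ */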
+
+/*
+ * tx_process_cmds() finishes the job tx_process_property() started:
+ *
+ * 1. if tx_process_property() marked a command as bad, we skip it.
+ * 2. if a DELETE, REPLACE, or CLEAR operated on a non-existent property,
+ * we mark it as bad.
+ * 3. we complete the work of NEW, REPLACE, and CLEAR, by inserting the
+ * appropriate values into the database.
+ * 4. we delete all replaced data, if it is no longer referenced.
+ *
+ * Finally, we check all of the commands, and fail if anything was marked bad.
+ */
+static int
+tx_process_cmds(struct tx_commit_data *data)
+{
+ int idx;
+ int r;
+ int count = data->txc_count;
+ struct tx_cmd *elem;
+ uint32_t val_id = 0;
+ uint8_t type[3];
+
+ backend_query_t *q;
+ int do_delete;
+
+ /*
+ * For persistent pgs, we use backend_fail_if_seen to abort the
+ * deletion if there is a snapshot using our current state.
+ *
+ * All of the deletions in this function are safe, since
+ * rc_tx_commit() guarantees that all the data is in-cache.
+ */
+ q = backend_query_alloc();
+
+ if (data->txc_backend != BACKEND_TYPE_NONPERSIST) {
+ backend_query_add(q,
+ "SELECT 1 FROM snaplevel_lnk_tbl "
+ " WHERE (snaplvl_pg_id = %d AND snaplvl_gen_id = %d); ",
+ data->txc_pg_id, data->txc_oldgen);
+ }
+ backend_query_add(q,
+ "DELETE FROM prop_lnk_tbl"
+ " WHERE (lnk_pg_id = %d AND lnk_gen_id = %d)",
+ data->txc_pg_id, data->txc_oldgen);
+ r = backend_tx_run(data->txc_tx, q, backend_fail_if_seen, NULL);
+ backend_query_free(q);
+
+ if (r == REP_PROTOCOL_SUCCESS)
+ do_delete = 1;
+ else if (r == REP_PROTOCOL_DONE)
+ do_delete = 0; /* old gen_id is in use */
+ else
+ return (r);
+
+ for (idx = 0; idx < count; idx++) {
+ elem = &data->txc_cmds[idx];
+
+ if (elem->tx_bad)
+ continue;
+
+ switch (elem->tx_cmd->rptc_action) {
+ case REP_PROTOCOL_TX_ENTRY_DELETE:
+ case REP_PROTOCOL_TX_ENTRY_REPLACE:
+ case REP_PROTOCOL_TX_ENTRY_CLEAR:
+ if (!elem->tx_found) {
+ elem->tx_bad = 1;
+ continue;
+ }
+ break;
+ case REP_PROTOCOL_TX_ENTRY_NEW:
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ if (do_delete &&
+ elem->tx_cmd->rptc_action != REP_PROTOCOL_TX_ENTRY_NEW &&
+ elem->tx_orig_value_id != 0) {
+ /*
+ * delete the old values, if they are not in use
+ */
+ q = backend_query_alloc();
+ backend_query_add(q,
+ "SELECT 1 FROM prop_lnk_tbl "
+ " WHERE (lnk_val_id = %d); "
+ "DELETE FROM value_tbl"
+ " WHERE (value_id = %d)",
+ elem->tx_orig_value_id, elem->tx_orig_value_id);
+ r = backend_tx_run(data->txc_tx, q,
+ backend_fail_if_seen, NULL);
+ backend_query_free(q);
+ if (r != REP_PROTOCOL_SUCCESS && r != REP_PROTOCOL_DONE)
+ return (r);
+ }
+
+ if (elem->tx_cmd->rptc_action == REP_PROTOCOL_TX_ENTRY_DELETE)
+ continue; /* no further work to do */
+
+ type[0] = REP_PROTOCOL_BASE_TYPE(elem->tx_cmd->rptc_type);
+ type[1] = REP_PROTOCOL_SUBTYPE(elem->tx_cmd->rptc_type);
+ type[2] = 0;
+
+ if (elem->tx_nvalues == 0) {
+ r = backend_tx_run_update(data->txc_tx,
+ "INSERT INTO prop_lnk_tbl"
+ " (lnk_pg_id, lnk_gen_id, "
+ " lnk_prop_name, lnk_prop_type, lnk_val_id) "
+ "VALUES ( %d, %d, '%q', '%q', NULL );",
+ data->txc_pg_id, data->txc_gen, elem->tx_prop,
+ type);
+ } else {
+ uint32_t *v;
+ const char *str;
+
+ val_id = backend_new_id(data->txc_tx, BACKEND_ID_VALUE);
+ if (val_id == 0)
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+ r = backend_tx_run_update(data->txc_tx,
+ "INSERT INTO prop_lnk_tbl "
+ " (lnk_pg_id, lnk_gen_id, "
+ " lnk_prop_name, lnk_prop_type, lnk_val_id) "
+ "VALUES ( %d, %d, '%q', '%q', %d );",
+ data->txc_pg_id, data->txc_gen, elem->tx_prop,
+ type, val_id);
+
+ v = elem->tx_values;
+
+ while (r == REP_PROTOCOL_SUCCESS &&
+ elem->tx_nvalues--) {
+ str = (const char *)&v[1];
+
+ r = backend_tx_run_update(data->txc_tx,
+ "INSERT INTO value_tbl "
+ " (value_id, value_type, value_value) "
+ "VALUES (%d, '%c', '%q');\n",
+ val_id, elem->tx_cmd->rptc_type, str);
+
+ /*LINTED alignment*/
+ v = (uint32_t *)((caddr_t)str + TX_SIZE(*v));
+ }
+ }
+ if (r != REP_PROTOCOL_SUCCESS)
+ return (REP_PROTOCOL_FAIL_UNKNOWN);
+ elem->tx_processed = 1;
+ }
+
+ for (idx = 0; idx < count; idx++) {
+ elem = &data->txc_cmds[idx];
+
+ if (elem->tx_bad)
+ return (REP_PROTOCOL_FAIL_BAD_TX);
+ }
+ return (REP_PROTOCOL_SUCCESS);
+}
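+
+/*
+ * The "delete only if unreferenced" idiom used above pairs a SELECT with a
+ * DELETE in one query and runs it with backend_fail_if_seen as the callback:
+ * if the SELECT returns a row, the query is aborted before the DELETE runs
+ * and backend_tx_run() returns REP_PROTOCOL_DONE.  A minimal sketch (the
+ * table and column names here are placeholders):
+ *
+ *	q = backend_query_alloc();
+ *	backend_query_add(q,
+ *	    "SELECT 1 FROM ref_tbl WHERE (ref_id = %d); "
+ *	    "DELETE FROM obj_tbl WHERE (obj_id = %d)", id, id);
+ *	r = backend_tx_run(tx, q, backend_fail_if_seen, NULL);
+ *	backend_query_free(q);
+ *	if (r == REP_PROTOCOL_DONE)
+ *		... still referenced; nothing was deleted ...
+ */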
+
+static boolean_t
+check_string(uintptr_t loc, uint32_t len, uint32_t sz)
+{
+ const char *ptr = (const char *)loc;
+
+ if (len == 0 || len > sz || ptr[len - 1] != 0 || strlen(ptr) != len - 1)
+ return (0);
+ return (1);
+}
+
+static int
+tx_check_and_setup(struct tx_commit_data *data, const void *cmds_arg,
+ uint32_t count)
+{
+ const struct rep_protocol_transaction_cmd *cmds;
+ struct tx_cmd *cur;
+ struct tx_cmd *prev = NULL;
+
+ uintptr_t loc;
+ uint32_t sz, len;
+ int idx;
+
+ loc = (uintptr_t)cmds_arg;
+
+ for (idx = 0; idx < count; idx++) {
+ cur = &data->txc_cmds[idx];
+
+ cmds = (struct rep_protocol_transaction_cmd *)loc;
+ cur->tx_cmd = cmds;
+
+ sz = cmds->rptc_size;
+
+ loc += REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE;
+ sz -= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE;
+
+ len = cmds->rptc_name_len;
+ if (len <= 1 || !check_string(loc, len, sz)) {
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+ }
+ cur->tx_prop = (const char *)loc;
+
+ len = TX_SIZE(len);
+ loc += len;
+ sz -= len;
+
+ cur->tx_nvalues = 0;
+ cur->tx_values = (uint32_t *)loc;
+
+ while (sz > 0) {
+ if (sz < sizeof (uint32_t))
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ cur->tx_nvalues++;
+
+ len = *(uint32_t *)loc;
+ loc += sizeof (uint32_t);
+ sz -= sizeof (uint32_t);
+
+ if (!check_string(loc, len, sz))
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ /*
+ * XXX here, we should be checking that the values
+ * match the purported type
+ */
+
+ len = TX_SIZE(len);
+
+ if (len > sz)
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ loc += len;
+ sz -= len;
+ }
+
+ if (prev != NULL && strcmp(prev->tx_prop, cur->tx_prop) >= 0)
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ prev = cur;
+ }
+ return (REP_PROTOCOL_SUCCESS);
+}
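+
+/*
+ * On-the-wire layout of a single transaction command, as inferred from the
+ * parsing in tx_check_and_setup() and object_tx_commit() (every piece is
+ * padded out to a TX_SIZE() boundary):
+ *
+ *	rep_protocol_transaction_cmd header (rptc_action, rptc_type,
+ *	    rptc_size, rptc_name_len, ...)
+ *	property name: rptc_name_len bytes, NUL-terminated
+ *	zero or more values, each:
+ *		uint32_t length
+ *		value string: length bytes, NUL-terminated
+ */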
+
+int
+object_tx_commit(rc_node_lookup_t *lp, const void *cmds_arg, size_t cmds_sz,
+ uint32_t *gen)
+{
+ const struct rep_protocol_transaction_cmd *cmds;
+ uintptr_t loc;
+
+ struct tx_commit_data *data;
+ uint32_t count, sz;
+ uint32_t new_gen;
+
+ int ret;
+
+ rep_protocol_responseid_t r;
+
+ backend_tx_t *tx;
+ backend_query_t *q;
+
+ int backend = lp->rl_backend;
+
+ /*
+ * First, verify that the reported sizes make sense, and count
+ * the number of commands.
+ */
+ count = 0;
+ loc = (uintptr_t)cmds_arg;
+
+ while (cmds_sz > 0) {
+ cmds = (struct rep_protocol_transaction_cmd *)loc;
+
+ if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ sz = cmds->rptc_size;
+ if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ sz = TX_SIZE(sz);
+ if (sz > cmds_sz)
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ loc += sz;
+ cmds_sz -= sz;
+ count++;
+ }
+
+ data = alloca(TX_COMMIT_DATA_SIZE(count));
+ (void) memset(data, 0, TX_COMMIT_DATA_SIZE(count));
+
+ /*
+ * verify that everything looks okay, and set up our command
+ * datastructures.
+ */
+ ret = tx_check_and_setup(data, cmds_arg, count);
+ if (ret != REP_PROTOCOL_SUCCESS)
+ return (ret);
+
+ ret = backend_tx_begin(backend, &tx);
+ if (ret != REP_PROTOCOL_SUCCESS)
+ return (ret);
+
+ /* Make sure the pg is up-to-date. */
+ data->txc_oldgen = *gen;
+ data->txc_backend = backend;
+ data->txc_result = REP_PROTOCOL_FAIL_NOT_FOUND;
+
+ q = backend_query_alloc();
+ backend_query_add(q, "SELECT pg_gen_id FROM pg_tbl WHERE (pg_id = %d);",
+ lp->rl_main_id);
+ r = backend_tx_run(tx, q, tx_check_genid, data);
+ backend_query_free(q);
+
+ if (r != REP_PROTOCOL_SUCCESS ||
+ (r = data->txc_result) != REP_PROTOCOL_SUCCESS) {
+ backend_tx_rollback(tx);
+ goto end;
+ }
+
+ /* If the transaction is empty, cut out early. */
+ if (count == 0) {
+ backend_tx_rollback(tx);
+ r = REP_PROTOCOL_DONE;
+ goto end;
+ }
+
+ new_gen = backend_new_id(tx, BACKEND_ID_GENERATION);
+ if (new_gen == 0) {
+ backend_tx_rollback(tx);
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+ }
+
+ data->txc_pg_id = lp->rl_main_id;
+ data->txc_gen = new_gen;
+ data->txc_tx = tx;
+ data->txc_count = count;
+
+ r = backend_tx_run_update(tx,
+ "UPDATE pg_tbl SET pg_gen_id = %d "
+ " WHERE (pg_id = %d AND pg_gen_id = %d);",
+ new_gen, lp->rl_main_id, *gen);
+
+ if (r != REP_PROTOCOL_SUCCESS) {
+ backend_tx_rollback(tx);
+ goto end;
+ }
+
+ q = backend_query_alloc();
+
+ backend_query_add(q,
+ "SELECT lnk_prop_name, lnk_prop_type, lnk_val_id "
+ "FROM prop_lnk_tbl "
+ "WHERE (lnk_pg_id = %d AND lnk_gen_id = %d)",
+ lp->rl_main_id, *gen);
+
+ data->txc_inserts = backend_query_alloc();
+ r = backend_tx_run(tx, q, tx_process_property, data);
+ backend_query_free(q);
+
+ if (r == REP_PROTOCOL_DONE)
+ r = REP_PROTOCOL_FAIL_UNKNOWN; /* corruption */
+
+ if (r != REP_PROTOCOL_SUCCESS ||
+ (r = data->txc_result) != REP_PROTOCOL_SUCCESS) {
+ backend_query_free(data->txc_inserts);
+ backend_tx_rollback(tx);
+ goto end;
+ }
+
+ r = backend_tx_run(tx, data->txc_inserts, NULL, NULL);
+ backend_query_free(data->txc_inserts);
+
+ if (r != REP_PROTOCOL_SUCCESS) {
+ backend_tx_rollback(tx);
+ goto end;
+ }
+
+ r = tx_process_cmds(data);
+ if (r != REP_PROTOCOL_SUCCESS) {
+ backend_tx_rollback(tx);
+ goto end;
+ }
+ r = backend_tx_commit(tx);
+
+ if (r == REP_PROTOCOL_SUCCESS)
+ *gen = new_gen;
+end:
+ return (r);
+}
diff --git a/usr/src/cmd/svc/configd/rc_node.c b/usr/src/cmd/svc/configd/rc_node.c
new file mode 100644
index 0000000000..acdd3c754b
--- /dev/null
+++ b/usr/src/cmd/svc/configd/rc_node.c
@@ -0,0 +1,5345 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * rc_node.c - object management primitives
+ *
+ * This layer manages entities, their data structures and locking, iterators,
+ * transactions, and change notification requests. Entities (scopes,
+ * services, instances, snapshots, snaplevels, property groups, "composed"
+ * property groups (see composition below), and properties) are represented by
+ * rc_node_t's and are kept in the cache_hash hash table. (Property values
+ * are kept in the rn_values member of the respective property -- not as
+ * separate objects.) Iterators are represented by rc_node_iter_t's.
+ * Transactions are represented by rc_node_tx_t's and are only allocated as
+ * part of repcache_tx_t's in the client layer (client.c). Change
+ * notification requests are represented by rc_notify_t structures and are
+ * described below.
+ *
+ * The entity tree is rooted at rc_scope, which rc_node_init() initializes to
+ * the "localhost" scope. The tree is filled in from the database on-demand
+ * by rc_node_fill_children(), usually from rc_iter_create() since iterators
+ * are the only way to find the children of an entity.
+ *
+ * Each rc_node_t is protected by its rn_lock member. Operations which can
+ * take too long, however, should serialize on an RC_NODE_WAITING_FLAGS bit in
+ * rn_flags with the rc_node_{hold,rele}_flag() functions. And since pointers
+ * to rc_node_t's are allowed, rn_refs is a reference count maintained by
+ * rc_node_{hold,rele}(). See configd.h for locking order information.
+ *
+ * When a node (property group or snapshot) is updated, a new node takes the
+ * place of the old node in the global hash, and the old node is hung off of
+ * the rn_former list of the new node. At the same time, all of its children
+ * have their rn_parent_ref pointer set, and any holds they have are reflected
+ * in the old node's rn_other_refs count. This is automatically kept up
+ * to date, until the final reference to the subgraph is dropped, at which
+ * point the node is unrefed and destroyed, along with all of its children.
+ *
+ * Locking rules: To dereference an rc_node_t * (usually to lock it), you must
+ * have a hold (rc_node_hold()) on it or otherwise be sure that it hasn't been
+ * rc_node_destroy()ed (hold a lock on its parent or child, hold a flag,
+ * etc.). Once you have locked an rc_node_t you must check its rn_flags for
+ * RC_NODE_DEAD before you can use it. This is usually done with the
+ * rc_node_{wait,hold}_flag() functions (often via the rc_node_check_*()
+ * functions & RC_NODE_*() macros), which fail if the object has died.
+ *
+ * An ITER_START for a non-ENTITY_VALUE induces an rc_node_fill_children()
+ * call via rc_node_setup_iter() to populate the rn_children uu_list of the
+ * rc_node_t * in question and a call to uu_list_walk_start() on that list. For
+ * ITER_READ, rc_iter_next() uses uu_list_walk_next() to find the next
+ * appropriate child.
+ *
+ * An ITER_START for an ENTITY_VALUE makes sure the node has its values
+ * filled, and sets up the iterator. An ITER_READ_VALUE just copies out
+ * the proper values and updates the offset information.
+ *
+ * When a property group gets changed by a transaction, it sticks around as
+ * a child of its replacement property group, but is removed from the parent.
+ *
+ * To allow aliases, snapshots are implemented with a level of indirection.
+ * A snapshot rc_node_t has a snapid which refers to an rc_snapshot_t in
+ * snapshot.c which contains the authoritative snaplevel information. The
+ * snapid is "assigned" by rc_attach_snapshot().
+ *
+ * We provide the client layer with rc_node_ptr_t's to reference objects.
+ * Objects referred to by them are automatically held & released by
+ * rc_node_assign() & rc_node_clear(). The RC_NODE_PTR_*() macros are used at
+ * client.c entry points to read the pointers. They fetch the pointer to the
+ * object, return (from the function) if it is dead, and lock, hold, or hold
+ * a flag of the object.
+ */
+
+/*
+ * Permission checking is authorization-based: some operations may only
+ * proceed if the user has been assigned at least one of a set of
+ * authorization strings. The set of enabling authorizations depends on the
+ * operation and the target object. The set of authorizations assigned to
+ * a user is determined by reading /etc/security/policy.conf, querying the
+ * user_attr database, and possibly querying the prof_attr database, as per
+ * chkauthattr() in libsecdb.
+ *
+ * The fastest way to decide whether the two sets intersect is by entering the
+ * strings into a hash table and detecting collisions, which takes linear time
+ * in the total size of the sets.  The exception is authorization patterns,
+ * which may be assigned to users; without advanced pattern-matching
+ * algorithms, each pattern takes O(n) time in the number of enabling
+ * authorizations.
+ *
+ * We can achieve some practical speed-ups by noting that if we enter all of
+ * the authorizations from one of the sets into the hash table we can merely
+ * check the elements of the second set for existence without adding them.
+ * This reduces memory requirements and hash table clutter. The enabling set
+ * is well suited for this because it is internal to configd (for now, at
+ * least). Combine this with short-circuiting and we can even minimize the
+ * number of queries to the security databases (user_attr & prof_attr).
+ *
+ * To force this usage onto clients we provide functions for adding
+ * authorizations to the enabling set of a permission context structure
+ * (perm_add_*()) and one to decide whether the user associated with the
+ * current door call client possesses any of them (perm_granted()).
+ *
+ * At some point, a generic version of this should move to libsecdb.
+ */
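+
+/*
+ * A minimal sketch of how these pieces fit together (AUTH_MANAGE here is
+ * just a placeholder; real callers build the enabling set based on the
+ * operation and target object, as described above):
+ *
+ *	permcheck_t *pcp = pc_create();
+ *	if (pcp == NULL)
+ *		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+ *	ret = perm_add_enabling(pcp, AUTH_MANAGE);
+ *	if (ret == REP_PROTOCOL_SUCCESS)
+ *		granted = perm_granted(pcp);	(1 = yes, 0 = no, -1 = error)
+ *	pc_free(pcp);
+ */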
+
+/*
+ * Composition is the combination of sets of properties. The sets are ordered
+ * and properties in higher sets obscure properties of the same name in lower
+ * sets. Here we present a composed view of an instance's properties as the
+ * union of its properties and its service's properties. Similarly the
+ * properties of snaplevels are combined to form a composed view of the
+ * properties of a snapshot (which should match the composed view of the
+ * properties of the instance when the snapshot was taken).
+ *
+ * In terms of the client interface, the client may request that a property
+ * group iterator for an instance or snapshot be composed. Property groups
+ * traversed by such an iterator may not have the target entity as a parent.
+ * Similarly, the properties traversed by a property iterator for those
+ * property groups may not have the property groups iterated as parents.
+ *
+ * Implementation requires that iterators for instances and snapshots be
+ * composition-savvy, and that we have a "composed property group" entity
+ * which represents the composition of a number of property groups. Iteration
+ * over "composed property groups" yields properties which may have different
+ * parents, but for all other operations a composed property group behaves
+ * like the top-most property group it represents.
+ *
+ * The implementation is based on the rn_cchain[] array of rc_node_t pointers
+ * in rc_node_t. For instances, the pointers point to the instance and its
+ * parent service. For snapshots they point to the child snaplevels, and for
+ * composed property groups they point to property groups. A composed
+ * iterator carries an index into rn_cchain[]. Thus most of the magic ends up
+ * in the rc_iter_*() code.
+ */
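+
+/*
+ * For example, a composed property lookup consults the rn_cchain[] entries
+ * in order and the first property group or property found wins (sketch
+ * only; the real traversal lives in the rc_iter_*() code):
+ *
+ *	for (i = 0; i < COMPOSITION_DEPTH; i++) {
+ *		if (rn_cchain[i] != NULL && name is found in rn_cchain[i])
+ *			return it;
+ *	}
+ */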
+
+#include <assert.h>
+#include <atomic.h>
+#include <errno.h>
+#include <libuutil.h>
+#include <libscf.h>
+#include <libscf_priv.h>
+#include <prof_attr.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <user_attr.h>
+
+#include "configd.h"
+
+#define AUTH_PREFIX "solaris.smf."
+#define AUTH_MANAGE AUTH_PREFIX "manage"
+#define AUTH_MODIFY AUTH_PREFIX "modify"
+#define AUTH_MODIFY_PREFIX AUTH_MODIFY "."
+#define AUTH_PG_ACTIONS SCF_PG_RESTARTER_ACTIONS
+#define AUTH_PG_ACTIONS_TYPE SCF_PG_RESTARTER_ACTIONS_TYPE
+#define AUTH_PG_GENERAL SCF_PG_GENERAL
+#define AUTH_PG_GENERAL_TYPE SCF_PG_GENERAL_TYPE
+#define AUTH_PG_GENERAL_OVR SCF_PG_GENERAL_OVR
+#define AUTH_PG_GENERAL_OVR_TYPE SCF_PG_GENERAL_OVR_TYPE
+#define AUTH_PROP_ACTION "action_authorization"
+#define AUTH_PROP_ENABLED "enabled"
+#define AUTH_PROP_MODIFY "modify_authorization"
+#define AUTH_PROP_VALUE "value_authorization"
+/* libsecdb should take care of this. */
+#define RBAC_AUTH_SEP ","
+
+#define MAX_VALID_CHILDREN 3
+
+typedef struct rc_type_info {
+ uint32_t rt_type; /* matches array index */
+ uint32_t rt_num_ids;
+ uint32_t rt_name_flags;
+ uint32_t rt_valid_children[MAX_VALID_CHILDREN];
+} rc_type_info_t;
+
+#define RT_NO_NAME -1U
+
+static rc_type_info_t rc_types[] = {
+ {REP_PROTOCOL_ENTITY_NONE, 0, RT_NO_NAME},
+ {REP_PROTOCOL_ENTITY_SCOPE, 0, 0,
+ {REP_PROTOCOL_ENTITY_SERVICE, REP_PROTOCOL_ENTITY_SCOPE}},
+ {REP_PROTOCOL_ENTITY_SERVICE, 0, UU_NAME_DOMAIN | UU_NAME_PATH,
+	dip.di_np_tx = NULL;		/* no need for non-persistent backend */
+ {REP_PROTOCOL_ENTITY_INSTANCE, 1, UU_NAME_DOMAIN,
+ {REP_PROTOCOL_ENTITY_SNAPSHOT, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
+ {REP_PROTOCOL_ENTITY_SNAPSHOT, 2, UU_NAME_DOMAIN,
+ {REP_PROTOCOL_ENTITY_SNAPLEVEL, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
+ {REP_PROTOCOL_ENTITY_SNAPLEVEL, 4, RT_NO_NAME,
+ {REP_PROTOCOL_ENTITY_PROPERTYGRP}},
+ {REP_PROTOCOL_ENTITY_PROPERTYGRP, 5, UU_NAME_DOMAIN,
+ {REP_PROTOCOL_ENTITY_PROPERTY}},
+ {REP_PROTOCOL_ENTITY_CPROPERTYGRP, 0, UU_NAME_DOMAIN,
+ {REP_PROTOCOL_ENTITY_PROPERTY}},
+ {REP_PROTOCOL_ENTITY_PROPERTY, 7, UU_NAME_DOMAIN},
+ {-1UL}
+};
+#define NUM_TYPES ((sizeof (rc_types) / sizeof (*rc_types)))
+
+/* Element of a permcheck_t hash table. */
+struct pc_elt {
+ struct pc_elt *pce_next;
+ char pce_auth[1];
+};
+
+/* An authorization set hash table. */
+typedef struct {
+ struct pc_elt **pc_buckets;
+ uint_t pc_bnum; /* number of buckets */
+ uint_t pc_enum; /* number of elements */
+} permcheck_t;
+
+static uu_list_pool_t *rc_children_pool;
+static uu_list_pool_t *rc_pg_notify_pool;
+static uu_list_pool_t *rc_notify_pool;
+static uu_list_pool_t *rc_notify_info_pool;
+
+static rc_node_t *rc_scope;
+
+static pthread_mutex_t rc_pg_notify_lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t rc_pg_notify_cv = PTHREAD_COND_INITIALIZER;
+static uint_t rc_notify_in_use; /* blocks removals */
+
+static pthread_mutex_t perm_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static void rc_node_unrefed(rc_node_t *np);
+
+/*
+ * We support an arbitrary number of clients interested in events for certain
+ * types of changes. Each client is represented by an rc_notify_info_t, and
+ * all clients are chained onto the rc_notify_info_list.
+ *
+ * The rc_notify_list is the global notification list. Each entry is of
+ * type rc_notify_t, which is embedded in one of three other structures:
+ *
+ * rc_node_t property group update notification
+ * rc_notify_delete_t object deletion notification
+ * rc_notify_info_t notification clients
+ *
+ * Which type of object is determined by which pointer in the rc_notify_t is
+ * non-NULL.
+ *
+ * New notifications and clients are added to the end of the list.
+ * Notifications no-one is interested in are never added to the list.
+ *
+ * Clients use their position in the list to track which notifications they
+ * have not yet reported. As they process notifications, they move forward
+ * in the list past them. There is always a client at the beginning of the
+ * list -- as he moves past notifications, he removes them from the list and
+ * cleans them up.
+ *
+ * The rc_pg_notify_lock protects all notification state. The rc_pg_notify_cv
+ * is used for global signalling, and each client has a cv which he waits for
+ * events of interest on.
+ */
+static uu_list_t *rc_notify_info_list;
+static uu_list_t *rc_notify_list;
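+
+/*
+ * Which of the three containing structures owns a given rc_notify_t can be
+ * determined from its pointers (sketch; the real checks are in
+ * rc_notify_info_interested()):
+ *
+ *	rcn_node != NULL	property group update
+ *	rcn_delete != NULL	object deletion
+ *	rcn_info != NULL	a notification client
+ */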
+
+#define HASH_SIZE 512
+#define HASH_MASK (HASH_SIZE - 1)
+
+#pragma align 64(cache_hash)
+static cache_bucket_t cache_hash[HASH_SIZE];
+
+#define CACHE_BUCKET(h) (&cache_hash[(h) & HASH_MASK])
+
+static uint32_t
+rc_node_hash(rc_node_lookup_t *lp)
+{
+ uint32_t type = lp->rl_type;
+ uint32_t backend = lp->rl_backend;
+ uint32_t main = lp->rl_main_id;
+ uint32_t *ids = lp->rl_ids;
+
+ rc_type_info_t *tp = &rc_types[type];
+ uint32_t num_ids;
+ uint32_t left;
+ uint32_t hash;
+
+ assert(backend == BACKEND_TYPE_NORMAL ||
+ backend == BACKEND_TYPE_NONPERSIST);
+
+ assert(type > 0 && type < NUM_TYPES);
+ num_ids = tp->rt_num_ids;
+
+ left = MAX_IDS - num_ids;
+ assert(num_ids <= MAX_IDS);
+
+ hash = type * 7 + main * 5 + backend;
+
+ while (num_ids-- > 0)
+ hash = hash * 11 + *ids++ * 7;
+
+ /*
+ * the rest should be zeroed
+ */
+ while (left-- > 0)
+ assert(*ids++ == 0);
+
+ return (hash);
+}
+
+static int
+rc_node_match(rc_node_t *np, rc_node_lookup_t *l)
+{
+ rc_node_lookup_t *r = &np->rn_id;
+ rc_type_info_t *tp;
+ uint32_t type;
+ uint32_t num_ids;
+
+ if (r->rl_main_id != l->rl_main_id)
+ return (0);
+
+ type = r->rl_type;
+ if (type != l->rl_type)
+ return (0);
+
+ assert(type > 0 && type < NUM_TYPES);
+
+ tp = &rc_types[r->rl_type];
+ num_ids = tp->rt_num_ids;
+
+ assert(num_ids <= MAX_IDS);
+ while (num_ids-- > 0)
+ if (r->rl_ids[num_ids] != l->rl_ids[num_ids])
+ return (0);
+
+ return (1);
+}
+
+/*
+ * the "other" references on a node are maintained in an atomically
+ * updated refcount, rn_other_refs. This can be bumped from arbitrary
+ * context, and tracks references to a possibly out-of-date node's children.
+ *
+ * To prevent the node from disappearing between the final drop of
+ * rn_other_refs and the unref handling, rn_other_refs_held is bumped on
+ * 0->1 transitions and decremented (with the node lock held) on 1->0
+ * transitions.
+ */
+static void
+rc_node_hold_other(rc_node_t *np)
+{
+ if (atomic_add_32_nv(&np->rn_other_refs, 1) == 1) {
+ atomic_add_32(&np->rn_other_refs_held, 1);
+ assert(np->rn_other_refs_held > 0);
+ }
+ assert(np->rn_other_refs > 0);
+}
+
+/*
+ * No node locks may be held
+ */
+static void
+rc_node_rele_other(rc_node_t *np)
+{
+ assert(np->rn_other_refs > 0);
+ if (atomic_add_32_nv(&np->rn_other_refs, -1) == 0) {
+ (void) pthread_mutex_lock(&np->rn_lock);
+ assert(np->rn_other_refs_held > 0);
+ if (atomic_add_32_nv(&np->rn_other_refs_held, -1) == 0 &&
+ np->rn_refs == 0 && (np->rn_flags & RC_NODE_OLD))
+ rc_node_unrefed(np);
+ else
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ }
+}
+
+static void
+rc_node_hold_locked(rc_node_t *np)
+{
+ assert(MUTEX_HELD(&np->rn_lock));
+
+ if (np->rn_refs == 0 && (np->rn_flags & RC_NODE_PARENT_REF))
+ rc_node_hold_other(np->rn_parent_ref);
+ np->rn_refs++;
+ assert(np->rn_refs > 0);
+}
+
+static void
+rc_node_hold(rc_node_t *np)
+{
+ (void) pthread_mutex_lock(&np->rn_lock);
+ rc_node_hold_locked(np);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+}
+
+static void
+rc_node_rele_locked(rc_node_t *np)
+{
+ int unref = 0;
+ rc_node_t *par_ref = NULL;
+
+ assert(MUTEX_HELD(&np->rn_lock));
+ assert(np->rn_refs > 0);
+
+ if (--np->rn_refs == 0) {
+ if (np->rn_flags & RC_NODE_PARENT_REF)
+ par_ref = np->rn_parent_ref;
+
+ if ((np->rn_flags & (RC_NODE_DEAD|RC_NODE_OLD)) &&
+ np->rn_other_refs == 0 && np->rn_other_refs_held == 0)
+ unref = 1;
+ }
+
+ if (unref)
+ rc_node_unrefed(np);
+ else
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+ if (par_ref != NULL)
+ rc_node_rele_other(par_ref);
+}
+
+void
+rc_node_rele(rc_node_t *np)
+{
+ (void) pthread_mutex_lock(&np->rn_lock);
+ rc_node_rele_locked(np);
+}
+
+static cache_bucket_t *
+cache_hold(uint32_t h)
+{
+ cache_bucket_t *bp = CACHE_BUCKET(h);
+ (void) pthread_mutex_lock(&bp->cb_lock);
+ return (bp);
+}
+
+static void
+cache_release(cache_bucket_t *bp)
+{
+ (void) pthread_mutex_unlock(&bp->cb_lock);
+}
+
+static rc_node_t *
+cache_lookup_unlocked(cache_bucket_t *bp, rc_node_lookup_t *lp)
+{
+ uint32_t h = rc_node_hash(lp);
+ rc_node_t *np;
+
+ assert(MUTEX_HELD(&bp->cb_lock));
+ assert(bp == CACHE_BUCKET(h));
+
+ for (np = bp->cb_head; np != NULL; np = np->rn_hash_next) {
+ if (np->rn_hash == h && rc_node_match(np, lp)) {
+ rc_node_hold(np);
+ return (np);
+ }
+ }
+
+ return (NULL);
+}
+
+static rc_node_t *
+cache_lookup(rc_node_lookup_t *lp)
+{
+ uint32_t h;
+ cache_bucket_t *bp;
+ rc_node_t *np;
+
+ h = rc_node_hash(lp);
+ bp = cache_hold(h);
+
+ np = cache_lookup_unlocked(bp, lp);
+
+ cache_release(bp);
+
+ return (np);
+}
+
+static void
+cache_insert_unlocked(cache_bucket_t *bp, rc_node_t *np)
+{
+ assert(MUTEX_HELD(&bp->cb_lock));
+ assert(np->rn_hash == rc_node_hash(&np->rn_id));
+ assert(bp == CACHE_BUCKET(np->rn_hash));
+
+ assert(np->rn_hash_next == NULL);
+
+ np->rn_hash_next = bp->cb_head;
+ bp->cb_head = np;
+}
+
+static void
+cache_remove_unlocked(cache_bucket_t *bp, rc_node_t *np)
+{
+ rc_node_t **npp;
+
+ assert(MUTEX_HELD(&bp->cb_lock));
+ assert(np->rn_hash == rc_node_hash(&np->rn_id));
+ assert(bp == CACHE_BUCKET(np->rn_hash));
+
+ for (npp = &bp->cb_head; *npp != NULL; npp = &(*npp)->rn_hash_next)
+ if (*npp == np)
+ break;
+
+ assert(*npp == np);
+ *npp = np->rn_hash_next;
+ np->rn_hash_next = NULL;
+}
+
+/*
+ * verify that the 'parent' type can have a child typed 'child'
+ * Fails with
+ * _INVALID_TYPE - argument is invalid
+ * _TYPE_MISMATCH - parent type cannot have children of type child
+ */
+static int
+rc_check_parent_child(uint32_t parent, uint32_t child)
+{
+ int idx;
+ uint32_t type;
+
+ if (parent == 0 || parent >= NUM_TYPES ||
+ child == 0 || child >= NUM_TYPES)
+ return (REP_PROTOCOL_FAIL_INVALID_TYPE); /* invalid types */
+
+ for (idx = 0; idx < MAX_VALID_CHILDREN; idx++) {
+ type = rc_types[parent].rt_valid_children[idx];
+ if (type == child)
+ return (REP_PROTOCOL_SUCCESS);
+ }
+
+ return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
+}
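+
+/*
+ * For example, a service may contain instances, so
+ *
+ *	rc_check_parent_child(REP_PROTOCOL_ENTITY_SERVICE,
+ *	    REP_PROTOCOL_ENTITY_INSTANCE)
+ *
+ * returns _SUCCESS, while the reverse nesting fails with _TYPE_MISMATCH.
+ */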
+
+/*
+ * Fails with
+ * _INVALID_TYPE - type is invalid
+ * _BAD_REQUEST - name is an invalid name for a node of type type
+ */
+int
+rc_check_type_name(uint32_t type, const char *name)
+{
+ if (type == 0 || type >= NUM_TYPES)
+ return (REP_PROTOCOL_FAIL_INVALID_TYPE); /* invalid types */
+
+ if (uu_check_name(name, rc_types[type].rt_name_flags) == -1)
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+static int
+rc_check_pgtype_name(const char *name)
+{
+ if (uu_check_name(name, UU_NAME_DOMAIN) == -1)
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+static int
+rc_notify_info_interested(rc_notify_info_t *rnip, rc_notify_t *np)
+{
+ rc_node_t *nnp = np->rcn_node;
+ int i;
+
+ assert(MUTEX_HELD(&rc_pg_notify_lock));
+
+ if (np->rcn_delete != NULL) {
+ assert(np->rcn_info == NULL && np->rcn_node == NULL);
+ return (1); /* everyone likes deletes */
+ }
+ if (np->rcn_node == NULL) {
+ assert(np->rcn_info != NULL || np->rcn_delete != NULL);
+ return (0);
+ }
+ assert(np->rcn_info == NULL);
+
+ for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
+ if (rnip->rni_namelist[i] != NULL) {
+ if (strcmp(nnp->rn_name, rnip->rni_namelist[i]) == 0)
+ return (1);
+ }
+ if (rnip->rni_typelist[i] != NULL) {
+ if (strcmp(nnp->rn_type, rnip->rni_typelist[i]) == 0)
+ return (1);
+ }
+ }
+ return (0);
+}
+
+static void
+rc_notify_insert_node(rc_node_t *nnp)
+{
+ rc_notify_t *np = &nnp->rn_notify;
+ rc_notify_info_t *nip;
+ int found = 0;
+
+ assert(np->rcn_info == NULL);
+
+ if (nnp->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
+ return;
+
+ (void) pthread_mutex_lock(&rc_pg_notify_lock);
+ np->rcn_node = nnp;
+ for (nip = uu_list_first(rc_notify_info_list); nip != NULL;
+ nip = uu_list_next(rc_notify_info_list, nip)) {
+ if (rc_notify_info_interested(nip, np)) {
+ (void) pthread_cond_broadcast(&nip->rni_cv);
+ found++;
+ }
+ }
+ if (found)
+ (void) uu_list_insert_before(rc_notify_list, NULL, np);
+ else
+ np->rcn_node = NULL;
+
+ (void) pthread_mutex_unlock(&rc_pg_notify_lock);
+}
+
+static void
+rc_notify_deletion(rc_notify_delete_t *ndp, const char *service,
+ const char *instance, const char *pg)
+{
+ rc_notify_info_t *nip;
+
+ uu_list_node_init(&ndp->rnd_notify, &ndp->rnd_notify.rcn_list_node,
+ rc_notify_pool);
+ ndp->rnd_notify.rcn_delete = ndp;
+
+ (void) snprintf(ndp->rnd_fmri, sizeof (ndp->rnd_fmri),
+ "svc:/%s%s%s%s%s", service,
+ (instance != NULL)? ":" : "", (instance != NULL)? instance : "",
+ (pg != NULL)? "/:properties/" : "", (pg != NULL)? pg : "");
+
+ /*
+ * add to notification list, notify watchers
+ */
+ (void) pthread_mutex_lock(&rc_pg_notify_lock);
+ for (nip = uu_list_first(rc_notify_info_list); nip != NULL;
+ nip = uu_list_next(rc_notify_info_list, nip))
+ (void) pthread_cond_broadcast(&nip->rni_cv);
+ (void) uu_list_insert_before(rc_notify_list, NULL, ndp);
+ (void) pthread_mutex_unlock(&rc_pg_notify_lock);
+}
+
+static void
+rc_notify_remove_node(rc_node_t *nnp)
+{
+ rc_notify_t *np = &nnp->rn_notify;
+
+ assert(np->rcn_info == NULL);
+ assert(!MUTEX_HELD(&nnp->rn_lock));
+
+ (void) pthread_mutex_lock(&rc_pg_notify_lock);
+ while (np->rcn_node != NULL) {
+ if (rc_notify_in_use) {
+ (void) pthread_cond_wait(&rc_pg_notify_cv,
+ &rc_pg_notify_lock);
+ continue;
+ }
+ (void) uu_list_remove(rc_notify_list, np);
+ np->rcn_node = NULL;
+ break;
+ }
+ (void) pthread_mutex_unlock(&rc_pg_notify_lock);
+}
+
+static void
+rc_notify_remove_locked(rc_notify_t *np)
+{
+ assert(MUTEX_HELD(&rc_pg_notify_lock));
+ assert(rc_notify_in_use == 0);
+
+ (void) uu_list_remove(rc_notify_list, np);
+ if (np->rcn_node) {
+ np->rcn_node = NULL;
+ } else if (np->rcn_delete) {
+ uu_free(np->rcn_delete);
+ } else {
+ assert(0); /* CAN'T HAPPEN */
+ }
+}
+
+/*
+ * Permission checking functions. See comment atop this file.
+ */
+#ifndef NATIVE_BUILD
+static permcheck_t *
+pc_create()
+{
+ permcheck_t *p;
+
+ p = uu_zalloc(sizeof (*p));
+ if (p == NULL)
+ return (NULL);
+ p->pc_bnum = 8; /* Normal case will only have 2 elts. */
+ p->pc_buckets = uu_zalloc(sizeof (*p->pc_buckets) * p->pc_bnum);
+ if (p->pc_buckets == NULL) {
+ uu_free(p);
+ return (NULL);
+ }
+
+ p->pc_enum = 0;
+ return (p);
+}
+
+static void
+pc_free(permcheck_t *pcp)
+{
+ uint_t i;
+ struct pc_elt *ep, *next;
+
+ for (i = 0; i < pcp->pc_bnum; ++i) {
+ for (ep = pcp->pc_buckets[i]; ep != NULL; ep = next) {
+ next = ep->pce_next;
+ free(ep);
+ }
+ }
+
+ free(pcp->pc_buckets);
+ free(pcp);
+}
+
+static uint32_t
+pc_hash(const char *auth)
+{
+ uint32_t h = 0, g;
+ const char *p;
+
+ /*
+ * Generic hash function from uts/common/os/modhash.c.
+ */
+ for (p = auth; *p != '\0'; ++p) {
+ h = (h << 4) + *p;
+ g = (h & 0xf0000000);
+ if (g != 0) {
+ h ^= (g >> 24);
+ h ^= g;
+ }
+ }
+
+ return (h);
+}
+
+static int
+pc_exists(const permcheck_t *pcp, const char *auth)
+{
+ uint32_t h;
+ struct pc_elt *ep;
+
+ h = pc_hash(auth);
+ for (ep = pcp->pc_buckets[h & (pcp->pc_bnum - 1)];
+ ep != NULL;
+ ep = ep->pce_next) {
+ if (strcmp(auth, ep->pce_auth) == 0)
+ return (1);
+ }
+
+ return (0);
+}
+
+static int
+pc_match(const permcheck_t *pcp, const char *pattern)
+{
+ uint_t i;
+ struct pc_elt *ep;
+
+ for (i = 0; i < pcp->pc_bnum; ++i) {
+ for (ep = pcp->pc_buckets[i]; ep != NULL; ep = ep->pce_next) {
+ if (_auth_match(pattern, ep->pce_auth))
+ return (1);
+ }
+ }
+
+ return (0);
+}
+
+static int
+pc_grow(permcheck_t *pcp)
+{
+ uint_t new_bnum, i, j;
+ struct pc_elt **new_buckets;
+ struct pc_elt *ep, *next;
+
+ new_bnum = pcp->pc_bnum * 2;
+ if (new_bnum < pcp->pc_bnum)
+ /* Homey don't play that. */
+ return (-1);
+
+ new_buckets = uu_zalloc(sizeof (*new_buckets) * new_bnum);
+ if (new_buckets == NULL)
+ return (-1);
+
+ for (i = 0; i < pcp->pc_bnum; ++i) {
+ for (ep = pcp->pc_buckets[i]; ep != NULL; ep = next) {
+ next = ep->pce_next;
+ j = pc_hash(ep->pce_auth) & (new_bnum - 1);
+ ep->pce_next = new_buckets[j];
+ new_buckets[j] = ep;
+ }
+ }
+
+ uu_free(pcp->pc_buckets);
+ pcp->pc_buckets = new_buckets;
+ pcp->pc_bnum = new_bnum;
+
+ return (0);
+}
+
+static int
+pc_add(permcheck_t *pcp, const char *auth)
+{
+ struct pc_elt *ep;
+ uint_t i;
+
+ ep = uu_zalloc(offsetof(struct pc_elt, pce_auth) + strlen(auth) + 1);
+ if (ep == NULL)
+ return (-1);
+
+ /* Grow if pc_enum / pc_bnum > 3/4. */
+ if (pcp->pc_enum * 4 > 3 * pcp->pc_bnum)
+ /* Failure is not a stopper; we'll try again next time. */
+ (void) pc_grow(pcp);
+
+ (void) strcpy(ep->pce_auth, auth);
+
+ i = pc_hash(auth) & (pcp->pc_bnum - 1);
+ ep->pce_next = pcp->pc_buckets[i];
+ pcp->pc_buckets[i] = ep;
+
+ ++pcp->pc_enum;
+
+ return (0);
+}
+
+/*
+ * For the type of a property group, return the authorization which may be
+ * used to modify it.
+ */
+static const char *
+perm_auth_for_pgtype(const char *pgtype)
+{
+ if (strcmp(pgtype, SCF_GROUP_METHOD) == 0)
+ return (AUTH_MODIFY_PREFIX "method");
+ else if (strcmp(pgtype, SCF_GROUP_DEPENDENCY) == 0)
+ return (AUTH_MODIFY_PREFIX "dependency");
+ else if (strcmp(pgtype, SCF_GROUP_APPLICATION) == 0)
+ return (AUTH_MODIFY_PREFIX "application");
+ else if (strcmp(pgtype, SCF_GROUP_FRAMEWORK) == 0)
+ return (AUTH_MODIFY_PREFIX "framework");
+ else
+ return (NULL);
+}
+
+/*
+ * Fails with
+ * _NO_RESOURCES - out of memory
+ */
+static int
+perm_add_enabling(permcheck_t *pcp, const char *auth)
+{
+ return (pc_add(pcp, auth) == 0 ? REP_PROTOCOL_SUCCESS :
+ REP_PROTOCOL_FAIL_NO_RESOURCES);
+}
+
+/* Note that perm_add_enabling_values() is defined below. */
+
+/*
+ * perm_granted() returns 1 if the current door caller has one of the enabling
+ * authorizations in pcp, 0 if it doesn't, and -1 if an error (usually lack of
+ * memory) occurs. check_auth_list() checks an RBAC_AUTH_SEP-separated list
+ * of authorizations for existence in pcp, and check_prof_list() checks the
+ * authorizations granted to an RBAC_AUTH_SEP-separated list of profiles.
+ */
+static int
+check_auth_list(const permcheck_t *pcp, char *authlist)
+{
+ char *auth, *lasts;
+ int ret;
+
+ for (auth = (char *)strtok_r(authlist, RBAC_AUTH_SEP, &lasts);
+ auth != NULL;
+ auth = (char *)strtok_r(NULL, RBAC_AUTH_SEP, &lasts)) {
+ if (strchr(auth, KV_WILDCHAR) == NULL)
+ ret = pc_exists(pcp, auth);
+ else
+ ret = pc_match(pcp, auth);
+
+ if (ret)
+ return (ret);
+ }
+
+ return (0);
+}
+
+static int
+check_prof_list(const permcheck_t *pcp, char *proflist)
+{
+ char *prof, *lasts, *authlist, *subproflist;
+ profattr_t *pap;
+ int ret = 0;
+
+ for (prof = strtok_r(proflist, RBAC_AUTH_SEP, &lasts);
+ prof != NULL;
+ prof = strtok_r(NULL, RBAC_AUTH_SEP, &lasts)) {
+ pap = getprofnam(prof);
+ if (pap == NULL)
+ continue;
+
+ authlist = kva_match(pap->attr, PROFATTR_AUTHS_KW);
+ if (authlist != NULL)
+ ret = check_auth_list(pcp, authlist);
+
+ if (!ret) {
+ subproflist = kva_match(pap->attr, PROFATTR_PROFS_KW);
+ if (subproflist != NULL)
+				/* depth check to avoid infinite recursion? */
+ ret = check_prof_list(pcp, subproflist);
+ }
+
+ free_profattr(pap);
+ if (ret)
+ return (ret);
+ }
+
+ return (ret);
+}
+
+static int
+perm_granted(const permcheck_t *pcp)
+{
+ ucred_t *uc;
+
+ int ret = 0;
+ uid_t uid;
+ userattr_t *uap;
+ char *authlist, *proflist, *def_prof = NULL;
+
+ /*
+ * Get generic authorizations from policy.conf
+ *
+ * Note that _get_auth_policy is not threadsafe, so we single-thread
+ * access to it.
+ */
+ (void) pthread_mutex_lock(&perm_lock);
+ ret = _get_auth_policy(&authlist, &def_prof);
+ (void) pthread_mutex_unlock(&perm_lock);
+
+ if (ret != 0)
+ return (-1);
+
+ if (authlist != NULL) {
+ ret = check_auth_list(pcp, authlist);
+ free(authlist);
+
+ if (ret) {
+ free(def_prof);
+ return (ret);
+ }
+ }
+
+ /*
+	 * Defer checking def_prof until later in an attempt to consolidate
+ * prof_attr accesses.
+ */
+
+ /* Get the uid */
+ if ((uc = get_ucred()) == NULL) {
+ free(def_prof);
+
+ if (errno == EINVAL) {
+ /*
+ * Client is no longer waiting for our response (e.g.,
+ * it received a signal & resumed with EINTR).
+ * Punting with door_return() would be nice but we
+ * need to release all of the locks & references we
+ * hold. And we must report failure to the client
+ * layer to keep it from ignoring retries as
+ * already-done (idempotency & all that). None of the
+ * error codes fit very well, so we might as well
+ * force the return of _PERMISSION_DENIED since we
+ * couldn't determine the user.
+ */
+ return (0);
+ }
+ assert(0);
+ abort();
+ }
+
+ uid = ucred_geteuid(uc);
+ assert(uid != -1);
+
+ uap = getuseruid(uid);
+ if (uap != NULL) {
+ /* Get the authorizations from user_attr. */
+ authlist = kva_match(uap->attr, USERATTR_AUTHS_KW);
+ if (authlist != NULL)
+ ret = check_auth_list(pcp, authlist);
+ }
+
+ if (!ret && def_prof != NULL) {
+ /* Check generic profiles. */
+ ret = check_prof_list(pcp, def_prof);
+ }
+
+ if (!ret && uap != NULL) {
+ proflist = kva_match(uap->attr, USERATTR_PROFILES_KW);
+ if (proflist != NULL)
+ ret = check_prof_list(pcp, proflist);
+ }
+
+ if (def_prof != NULL)
+ free(def_prof);
+ if (uap != NULL)
+ free_userattr(uap);
+
+ return (ret);
+}
+#endif /* NATIVE_BUILD */
+
+/*
+ * flags in RC_NODE_WAITING_FLAGS are broadcast when unset, and are used to
+ * serialize certain actions, and to wait for certain operations to complete
+ *
+ * The waiting flags are:
+ * RC_NODE_CHILDREN_CHANGING
+ * The child list is being built or changed (due to creation
+ * or deletion). All iterators pause.
+ *
+ * RC_NODE_USING_PARENT
+ * Someone is actively using the parent pointer, so we can't
+ * be removed from the parent list.
+ *
+ * RC_NODE_CREATING_CHILD
+ * A child is being created -- locks out other creations, to
+ * prevent insert-insert races.
+ *
+ * RC_NODE_IN_TX
+ * This object is running a transaction.
+ *
+ * RC_NODE_DYING
+ *		This node might be dying.  These flags are always set together,
+ *		using RC_NODE_DYING_FLAGS (which is everything but
+ *		RC_NODE_USING_PARENT).
+ */
+static int
+rc_node_hold_flag(rc_node_t *np, uint32_t flag)
+{
+ assert(MUTEX_HELD(&np->rn_lock));
+ assert((flag & ~RC_NODE_WAITING_FLAGS) == 0);
+
+ while (!(np->rn_flags & RC_NODE_DEAD) && (np->rn_flags & flag)) {
+ (void) pthread_cond_wait(&np->rn_cv, &np->rn_lock);
+ }
+ if (np->rn_flags & RC_NODE_DEAD)
+ return (0);
+
+ np->rn_flags |= flag;
+ return (1);
+}
+
+static void
+rc_node_rele_flag(rc_node_t *np, uint32_t flag)
+{
+ assert((flag & ~RC_NODE_WAITING_FLAGS) == 0);
+ assert(MUTEX_HELD(&np->rn_lock));
+ assert((np->rn_flags & flag) == flag);
+ np->rn_flags &= ~flag;
+ (void) pthread_cond_broadcast(&np->rn_cv);
+}
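+
+/*
+ * Typical use of the waiting flags (sketch only; what a caller does when
+ * the node turns out to be dead depends on the caller):
+ *
+ *	(void) pthread_mutex_lock(&np->rn_lock);
+ *	if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
+ *		... np is dead; unlock and bail out ...
+ *	}
+ *	... np->rn_parent may now be followed safely ...
+ *	rc_node_rele_flag(np, RC_NODE_USING_PARENT);
+ *	(void) pthread_mutex_unlock(&np->rn_lock);
+ */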
+
+/*
+ * wait until a particular flag has cleared. Fails if the object dies.
+ */
+static int
+rc_node_wait_flag(rc_node_t *np, uint32_t flag)
+{
+ assert(MUTEX_HELD(&np->rn_lock));
+ while (!(np->rn_flags & RC_NODE_DEAD) && (np->rn_flags & flag))
+ (void) pthread_cond_wait(&np->rn_cv, &np->rn_lock);
+
+ return (!(np->rn_flags & RC_NODE_DEAD));
+}
+
+/*
+ * On entry, np's lock must be held, and this thread must be holding
+ * RC_NODE_USING_PARENT. On return, both of them are released.
+ *
+ * If the return value is NULL, np either does not have a parent, or
+ * the parent has been marked DEAD.
+ *
+ * If the return value is non-NULL, it is the parent of np, and both
+ * its lock and the requested flags are held.
+ */
+static rc_node_t *
+rc_node_hold_parent_flag(rc_node_t *np, uint32_t flag)
+{
+ rc_node_t *pp;
+
+ assert(MUTEX_HELD(&np->rn_lock));
+ assert(np->rn_flags & RC_NODE_USING_PARENT);
+
+ if ((pp = np->rn_parent) == NULL) {
+ rc_node_rele_flag(np, RC_NODE_USING_PARENT);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (NULL);
+ }
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
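+	/*
+	 * Take pp's lock before re-taking np's; this preserves the
+	 * parent-before-child lock ordering.  RC_NODE_USING_PARENT keeps
+	 * pp from disappearing while np's lock is dropped.
+	 */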
+ (void) pthread_mutex_lock(&pp->rn_lock);
+ (void) pthread_mutex_lock(&np->rn_lock);
+ rc_node_rele_flag(np, RC_NODE_USING_PARENT);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+ if (!rc_node_hold_flag(pp, flag)) {
+ (void) pthread_mutex_unlock(&pp->rn_lock);
+ return (NULL);
+ }
+ return (pp);
+}
+
+rc_node_t *
+rc_node_alloc(void)
+{
+ rc_node_t *np = uu_zalloc(sizeof (*np));
+
+ if (np == NULL)
+ return (NULL);
+
+ (void) pthread_mutex_init(&np->rn_lock, NULL);
+ (void) pthread_cond_init(&np->rn_cv, NULL);
+
+ np->rn_children = uu_list_create(rc_children_pool, np, 0);
+ np->rn_pg_notify_list = uu_list_create(rc_pg_notify_pool, np, 0);
+
+ uu_list_node_init(np, &np->rn_sibling_node, rc_children_pool);
+
+ uu_list_node_init(&np->rn_notify, &np->rn_notify.rcn_list_node,
+ rc_notify_pool);
+
+ return (np);
+}
+
+void
+rc_node_destroy(rc_node_t *np)
+{
+ int i;
+
+ if (np->rn_flags & RC_NODE_UNREFED)
+ return; /* being handled elsewhere */
+
+ assert(np->rn_refs == 0 && np->rn_other_refs == 0);
+ assert(np->rn_former == NULL);
+
+ if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
+ /* Release the holds from rc_iter_next(). */
+ for (i = 0; i < COMPOSITION_DEPTH; ++i) {
+ /* rn_cchain[i] may be NULL for empty snapshots. */
+ if (np->rn_cchain[i] != NULL)
+ rc_node_rele(np->rn_cchain[i]);
+ }
+ }
+
+ if (np->rn_name != NULL)
+ free((void *)np->rn_name);
+ np->rn_name = NULL;
+ if (np->rn_type != NULL)
+ free((void *)np->rn_type);
+ np->rn_type = NULL;
+ if (np->rn_values != NULL)
+ object_free_values(np->rn_values, np->rn_valtype,
+ np->rn_values_count, np->rn_values_size);
+ np->rn_values = NULL;
+
+ if (np->rn_snaplevel != NULL)
+ rc_snaplevel_rele(np->rn_snaplevel);
+ np->rn_snaplevel = NULL;
+
+ uu_list_node_fini(np, &np->rn_sibling_node, rc_children_pool);
+
+ uu_list_node_fini(&np->rn_notify, &np->rn_notify.rcn_list_node,
+ rc_notify_pool);
+
+ assert(uu_list_first(np->rn_children) == NULL);
+ uu_list_destroy(np->rn_children);
+ uu_list_destroy(np->rn_pg_notify_list);
+
+ (void) pthread_mutex_destroy(&np->rn_lock);
+ (void) pthread_cond_destroy(&np->rn_cv);
+
+ uu_free(np);
+}
+
+/*
+ * Link in a child node.
+ *
+ * Because of the lock ordering, cp has to already be in the hash table with
+ * its lock dropped before we get it. To prevent anyone from noticing that
+ * it is parentless, the creation code sets the RC_NODE_USING_PARENT flag.  Once
+ * we've linked it in, we release the flag.
+ */
+static void
+rc_node_link_child(rc_node_t *np, rc_node_t *cp)
+{
+ assert(!MUTEX_HELD(&np->rn_lock));
+ assert(!MUTEX_HELD(&cp->rn_lock));
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+ (void) pthread_mutex_lock(&cp->rn_lock);
+ assert(!(cp->rn_flags & RC_NODE_IN_PARENT) &&
+ (cp->rn_flags & RC_NODE_USING_PARENT));
+
+ assert(rc_check_parent_child(np->rn_id.rl_type, cp->rn_id.rl_type) ==
+ REP_PROTOCOL_SUCCESS);
+
+ cp->rn_parent = np;
+ cp->rn_flags |= RC_NODE_IN_PARENT;
+ (void) uu_list_insert_before(np->rn_children, NULL, cp);
+
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+ rc_node_rele_flag(cp, RC_NODE_USING_PARENT);
+ (void) pthread_mutex_unlock(&cp->rn_lock);
+}
+
+/*
+ * Sets the rn_parent_ref field of all the children of np to pp.  This is
+ * always invoked initially as rc_node_setup_parent_ref(np, np); it then
+ * recurses through the children.
+ *
+ * This is used when we mark a node RC_NODE_OLD, so that when the object and
+ * its children are no longer referenced, they will all be deleted as a unit.
+ */
+static void
+rc_node_setup_parent_ref(rc_node_t *np, rc_node_t *pp)
+{
+ rc_node_t *cp;
+
+ assert(MUTEX_HELD(&np->rn_lock));
+
+ for (cp = uu_list_first(np->rn_children); cp != NULL;
+ cp = uu_list_next(np->rn_children, cp)) {
+ (void) pthread_mutex_lock(&cp->rn_lock);
+ if (cp->rn_flags & RC_NODE_PARENT_REF) {
+ assert(cp->rn_parent_ref == pp);
+ } else {
+ assert(cp->rn_parent_ref == NULL);
+
+ cp->rn_flags |= RC_NODE_PARENT_REF;
+ cp->rn_parent_ref = pp;
+ if (cp->rn_refs != 0)
+ rc_node_hold_other(pp);
+ }
+ rc_node_setup_parent_ref(cp, pp); /* recurse */
+ (void) pthread_mutex_unlock(&cp->rn_lock);
+ }
+}
+
+/*
+ * Atomically replace 'np' with 'newp', with a parent of 'pp'.
+ *
+ * Requirements:
+ * *no* node locks may be held.
+ * pp must be held with RC_NODE_CHILDREN_CHANGING
+ * newp and np must be held with RC_NODE_IN_TX
+ * np must be marked RC_NODE_IN_PARENT, newp must not be
+ * np must be marked RC_NODE_OLD
+ *
+ * Afterwards:
+ * pp's RC_NODE_CHILDREN_CHANGING is dropped
+ * newp and np's RC_NODE_IN_TX is dropped
+ * newp->rn_former = np;
+ * newp is RC_NODE_IN_PARENT, np is not.
+ * interested notify subscribers have been notified of newp's new status.
+ */
+static void
+rc_node_relink_child(rc_node_t *pp, rc_node_t *np, rc_node_t *newp)
+{
+ cache_bucket_t *bp;
+ /*
+	 * First, swap np and newp in the cache.  newp's RC_NODE_IN_TX flag
+ * keeps rc_node_update() from seeing it until we are done.
+ */
+ bp = cache_hold(newp->rn_hash);
+ cache_remove_unlocked(bp, np);
+ cache_insert_unlocked(bp, newp);
+ cache_release(bp);
+
+ /*
+ * replace np with newp in pp's list, and attach it to newp's rn_former
+ * link.
+ */
+ (void) pthread_mutex_lock(&pp->rn_lock);
+ assert(pp->rn_flags & RC_NODE_CHILDREN_CHANGING);
+
+ (void) pthread_mutex_lock(&newp->rn_lock);
+ assert(!(newp->rn_flags & RC_NODE_IN_PARENT));
+ assert(newp->rn_flags & RC_NODE_IN_TX);
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+ assert(np->rn_flags & RC_NODE_IN_PARENT);
+ assert(np->rn_flags & RC_NODE_OLD);
+ assert(np->rn_flags & RC_NODE_IN_TX);
+
+ newp->rn_parent = pp;
+ newp->rn_flags |= RC_NODE_IN_PARENT;
+
+ /*
+ * Note that we carefully add newp before removing np -- this
+ * keeps iterators on the list from missing us.
+ */
+ (void) uu_list_insert_after(pp->rn_children, np, newp);
+ (void) uu_list_remove(pp->rn_children, np);
+
+ /*
+ * re-set np
+ */
+ newp->rn_former = np;
+ np->rn_parent = NULL;
+ np->rn_flags &= ~RC_NODE_IN_PARENT;
+ np->rn_flags |= RC_NODE_ON_FORMER;
+
+ rc_notify_insert_node(newp);
+
+ rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
+ (void) pthread_mutex_unlock(&pp->rn_lock);
+ rc_node_rele_flag(newp, RC_NODE_USING_PARENT | RC_NODE_IN_TX);
+ (void) pthread_mutex_unlock(&newp->rn_lock);
+ rc_node_setup_parent_ref(np, np);
+ rc_node_rele_flag(np, RC_NODE_IN_TX);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+}
+
+/*
+ * makes sure a node with lookup 'nip', name 'name', and parent 'pp' exists.
+ * 'cp' is used (and returned) if the node does not yet exist. If it does
+ * exist, 'cp' is freed, and the existent node is returned instead.
+ */
+rc_node_t *
+rc_node_setup(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
+ rc_node_t *pp)
+{
+ rc_node_t *np;
+ cache_bucket_t *bp;
+ uint32_t h = rc_node_hash(nip);
+
+ assert(cp->rn_refs == 0);
+
+ bp = cache_hold(h);
+ if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
+ cache_release(bp);
+
+ /*
+ * make sure it matches our expectations
+ */
+ assert(np->rn_parent == pp);
+ assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
+ assert(strcmp(np->rn_name, name) == 0);
+ assert(np->rn_type == NULL);
+ assert(np->rn_flags & RC_NODE_IN_PARENT);
+
+ rc_node_destroy(cp);
+ return (np);
+ }
+
+ /*
+ * No one is there -- create a new node.
+ */
+ np = cp;
+ rc_node_hold(np);
+ np->rn_id = *nip;
+ np->rn_hash = h;
+ np->rn_name = strdup(name);
+
+ np->rn_flags |= RC_NODE_USING_PARENT;
+
+ if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE) {
+#if COMPOSITION_DEPTH == 2
+ np->rn_cchain[0] = np;
+ np->rn_cchain[1] = pp;
+#else
+#error This code must be updated.
+#endif
+ }
+
+ cache_insert_unlocked(bp, np);
+ cache_release(bp); /* we are now visible */
+
+ rc_node_link_child(pp, np);
+
+ return (np);
+}
+
+/*
+ * makes sure a snapshot with lookup 'nip', name 'name', and parent 'pp' exists.
+ * 'cp' is used (and returned) if the node does not yet exist. If it does
+ * exist, 'cp' is freed, and the existent node is returned instead.
+ */
+rc_node_t *
+rc_node_setup_snapshot(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
+ uint32_t snap_id, rc_node_t *pp)
+{
+ rc_node_t *np;
+ cache_bucket_t *bp;
+ uint32_t h = rc_node_hash(nip);
+
+ assert(cp->rn_refs == 0);
+
+ bp = cache_hold(h);
+ if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
+ cache_release(bp);
+
+ /*
+ * make sure it matches our expectations
+ */
+ assert(np->rn_parent == pp);
+ assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
+ assert(strcmp(np->rn_name, name) == 0);
+ assert(np->rn_type == NULL);
+ assert(np->rn_flags & RC_NODE_IN_PARENT);
+
+ rc_node_destroy(cp);
+ return (np);
+ }
+
+ /*
+ * No one is there -- create a new node.
+ */
+ np = cp;
+ rc_node_hold(np);
+ np->rn_id = *nip;
+ np->rn_hash = h;
+ np->rn_name = strdup(name);
+ np->rn_snapshot_id = snap_id;
+
+ np->rn_flags |= RC_NODE_USING_PARENT;
+
+ cache_insert_unlocked(bp, np);
+ cache_release(bp); /* we are now visible */
+
+ rc_node_link_child(pp, np);
+
+ return (np);
+}
+
+/*
+ * makes sure a snaplevel with lookup 'nip' and parent 'pp' exists. 'cp' is
+ * used (and returned) if the node does not yet exist. If it does exist, 'cp'
+ * is freed, and the existent node is returned instead.
+ */
+rc_node_t *
+rc_node_setup_snaplevel(rc_node_t *cp, rc_node_lookup_t *nip,
+ rc_snaplevel_t *lvl, rc_node_t *pp)
+{
+ rc_node_t *np;
+ cache_bucket_t *bp;
+ uint32_t h = rc_node_hash(nip);
+
+ assert(cp->rn_refs == 0);
+
+ bp = cache_hold(h);
+ if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
+ cache_release(bp);
+
+ /*
+ * make sure it matches our expectations
+ */
+ assert(np->rn_parent == pp);
+ assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
+ assert(np->rn_name == NULL);
+ assert(np->rn_type == NULL);
+ assert(np->rn_flags & RC_NODE_IN_PARENT);
+
+ rc_node_destroy(cp);
+ return (np);
+ }
+
+ /*
+ * No one is there -- create a new node.
+ */
+ np = cp;
+ rc_node_hold(np); /* released in snapshot_fill_children() */
+ np->rn_id = *nip;
+ np->rn_hash = h;
+
+ rc_snaplevel_hold(lvl);
+ np->rn_snaplevel = lvl;
+
+ np->rn_flags |= RC_NODE_USING_PARENT;
+
+ cache_insert_unlocked(bp, np);
+ cache_release(bp); /* we are now visible */
+
+ /* Add this snaplevel to the snapshot's composition chain. */
+ assert(pp->rn_cchain[lvl->rsl_level_num - 1] == NULL);
+ pp->rn_cchain[lvl->rsl_level_num - 1] = np;
+
+ rc_node_link_child(pp, np);
+
+ return (np);
+}
+
+/*
+ * Returns NULL if strdup() fails.
+ */
+rc_node_t *
+rc_node_setup_pg(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
+ const char *type, uint32_t flags, uint32_t gen_id, rc_node_t *pp)
+{
+ rc_node_t *np;
+ cache_bucket_t *bp;
+
+ uint32_t h = rc_node_hash(nip);
+ bp = cache_hold(h);
+ if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
+ cache_release(bp);
+
+ /*
+ * make sure it matches our expectations (don't check
+ * the generation number or parent, since someone could
+ * have gotten a transaction through while we weren't
+ * looking)
+ */
+ assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
+ assert(strcmp(np->rn_name, name) == 0);
+ assert(strcmp(np->rn_type, type) == 0);
+ assert(np->rn_pgflags == flags);
+ assert(np->rn_flags & RC_NODE_IN_PARENT);
+
+ rc_node_destroy(cp);
+ return (np);
+ }
+
+ np = cp;
+ rc_node_hold(np); /* released in fill_pg_callback() */
+ np->rn_id = *nip;
+ np->rn_hash = h;
+ np->rn_name = strdup(name);
+ if (np->rn_name == NULL) {
+ rc_node_rele(np);
+ return (NULL);
+ }
+ np->rn_type = strdup(type);
+ if (np->rn_type == NULL) {
+ free((void *)np->rn_name);
+ rc_node_rele(np);
+ return (NULL);
+ }
+ np->rn_pgflags = flags;
+ np->rn_gen_id = gen_id;
+
+ np->rn_flags |= RC_NODE_USING_PARENT;
+
+ cache_insert_unlocked(bp, np);
+ cache_release(bp); /* we are now visible */
+
+ rc_node_link_child(pp, np);
+
+ return (np);
+}
+
+#if COMPOSITION_DEPTH == 2
+/*
+ * Initialize a "composed property group" which represents the composition of
+ * property groups pg1 & pg2.  It is ephemeral: it is created and returned
+ * for a single ITER_READ request, and is kept out of cache_hash and all
+ * child lists so that it cannot be looked up later.  Operations other than
+ * iteration are passed through to pg1.
+ *
+ * pg1 & pg2 should be held before entering this function. They will be
+ * released in rc_node_destroy().
+ */
+static int
+rc_node_setup_cpg(rc_node_t *cpg, rc_node_t *pg1, rc_node_t *pg2)
+{
+ if (strcmp(pg1->rn_type, pg2->rn_type) != 0)
+ return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
+
+ cpg->rn_id.rl_type = REP_PROTOCOL_ENTITY_CPROPERTYGRP;
+ cpg->rn_name = strdup(pg1->rn_name);
+ if (cpg->rn_name == NULL)
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+
+ cpg->rn_cchain[0] = pg1;
+ cpg->rn_cchain[1] = pg2;
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+#else
+#error This code must be updated.
+#endif
+
+/*
+ * Fails with _NO_RESOURCES.
+ */
+int
+rc_node_create_property(rc_node_t *pp, rc_node_lookup_t *nip,
+ const char *name, rep_protocol_value_type_t type,
+ const char *vals, size_t count, size_t size)
+{
+ rc_node_t *np;
+ cache_bucket_t *bp;
+
+ uint32_t h = rc_node_hash(nip);
+ bp = cache_hold(h);
+ if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
+ cache_release(bp);
+ /*
+ * make sure it matches our expectations
+ */
+ (void) pthread_mutex_lock(&np->rn_lock);
+ if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
+ assert(np->rn_parent == pp);
+ assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
+ assert(strcmp(np->rn_name, name) == 0);
+ assert(np->rn_valtype == type);
+ assert(np->rn_values_count == count);
+ assert(np->rn_values_size == size);
+ assert(vals == NULL ||
+ memcmp(np->rn_values, vals, size) == 0);
+ assert(np->rn_flags & RC_NODE_IN_PARENT);
+ rc_node_rele_flag(np, RC_NODE_USING_PARENT);
+ }
+ rc_node_rele_locked(np);
+ object_free_values(vals, type, count, size);
+ return (REP_PROTOCOL_SUCCESS);
+ }
+
+ /*
+ * No one is there -- create a new node.
+ */
+ np = rc_node_alloc();
+ if (np == NULL) {
+ cache_release(bp);
+ object_free_values(vals, type, count, size);
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+ }
+ np->rn_id = *nip;
+ np->rn_hash = h;
+ np->rn_name = strdup(name);
+ if (np->rn_name == NULL) {
+ cache_release(bp);
+ object_free_values(vals, type, count, size);
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+ }
+
+ np->rn_valtype = type;
+ np->rn_values = vals;
+ np->rn_values_count = count;
+ np->rn_values_size = size;
+
+ np->rn_flags |= RC_NODE_USING_PARENT;
+
+ cache_insert_unlocked(bp, np);
+ cache_release(bp); /* we are now visible */
+
+ rc_node_link_child(pp, np);
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+int
+rc_node_init(void)
+{
+ rc_node_t *np;
+ cache_bucket_t *bp;
+
+ rc_children_pool = uu_list_pool_create("rc_children_pool",
+ sizeof (rc_node_t), offsetof(rc_node_t, rn_sibling_node),
+ NULL, UU_LIST_POOL_DEBUG);
+
+ rc_pg_notify_pool = uu_list_pool_create("rc_pg_notify_pool",
+ sizeof (rc_node_pg_notify_t),
+ offsetof(rc_node_pg_notify_t, rnpn_node),
+ NULL, UU_LIST_POOL_DEBUG);
+
+ rc_notify_pool = uu_list_pool_create("rc_notify_pool",
+ sizeof (rc_notify_t), offsetof(rc_notify_t, rcn_list_node),
+ NULL, UU_LIST_POOL_DEBUG);
+
+ rc_notify_info_pool = uu_list_pool_create("rc_notify_info_pool",
+ sizeof (rc_notify_info_t),
+ offsetof(rc_notify_info_t, rni_list_node),
+ NULL, UU_LIST_POOL_DEBUG);
+
+ if (rc_children_pool == NULL || rc_pg_notify_pool == NULL ||
+ rc_notify_pool == NULL || rc_notify_info_pool == NULL)
+ uu_die("out of memory");
+
+ rc_notify_list = uu_list_create(rc_notify_pool,
+ &rc_notify_list, 0);
+
+ rc_notify_info_list = uu_list_create(rc_notify_info_pool,
+ &rc_notify_info_list, 0);
+
+ if (rc_notify_list == NULL || rc_notify_info_list == NULL)
+ uu_die("out of memory");
+
+ if ((np = rc_node_alloc()) == NULL)
+ uu_die("out of memory");
+
+ rc_node_hold(np);
+ np->rn_id.rl_type = REP_PROTOCOL_ENTITY_SCOPE;
+ np->rn_id.rl_backend = BACKEND_TYPE_NORMAL;
+ np->rn_hash = rc_node_hash(&np->rn_id);
+ np->rn_name = "localhost";
+
+ bp = cache_hold(np->rn_hash);
+ cache_insert_unlocked(bp, np);
+ cache_release(bp);
+
+ rc_scope = np;
+ return (1);
+}
+
+/*
+ * Fails with
+ * _INVALID_TYPE - type is invalid
+ * _TYPE_MISMATCH - np doesn't carry children of type type
+ * _DELETED - np has been deleted
+ * _NO_RESOURCES
+ */
+static int
+rc_node_fill_children(rc_node_t *np, uint32_t type)
+{
+ int rc;
+
+ assert(MUTEX_HELD(&np->rn_lock));
+
+ if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
+ REP_PROTOCOL_SUCCESS)
+ return (rc);
+
+ if (!rc_node_hold_flag(np, RC_NODE_CHILDREN_CHANGING))
+ return (REP_PROTOCOL_FAIL_DELETED);
+
+ if (np->rn_flags & RC_NODE_HAS_CHILDREN) {
+ rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);
+ return (REP_PROTOCOL_SUCCESS);
+ }
+
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ rc = object_fill_children(np);
+ (void) pthread_mutex_lock(&np->rn_lock);
+
+ if (rc == REP_PROTOCOL_SUCCESS) {
+ np->rn_flags |= RC_NODE_HAS_CHILDREN;
+ }
+ rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);
+
+ return (rc);
+}
+
+/*
+ * Returns
+ * _INVALID_TYPE - type is invalid
+ * _TYPE_MISMATCH - np doesn't carry children of type type
+ * _DELETED - np has been deleted
+ * _NO_RESOURCES
+ * _SUCCESS - if *cpp is not NULL, it is held
+ */
+static int
+rc_node_find_named_child(rc_node_t *np, const char *name, uint32_t type,
+ rc_node_t **cpp)
+{
+ int ret;
+ rc_node_t *cp;
+
+ assert(MUTEX_HELD(&np->rn_lock));
+ assert(np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP);
+
+ ret = rc_node_fill_children(np, type);
+ if (ret != REP_PROTOCOL_SUCCESS)
+ return (ret);
+
+ for (cp = uu_list_first(np->rn_children);
+ cp != NULL;
+ cp = uu_list_next(np->rn_children, cp)) {
+ if (cp->rn_id.rl_type == type && strcmp(cp->rn_name, name) == 0)
+ break;
+ }
+
+ if (cp != NULL)
+ rc_node_hold(cp);
+ *cpp = cp;
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+#ifndef NATIVE_BUILD
+static int rc_node_parent(rc_node_t *, rc_node_t **);
+
+/*
+ * If the propname property exists in pg, and it is of type string, add its
+ * values as authorizations to pcp. pg must not be locked on entry, and it is
+ * returned unlocked. Returns
+ * _DELETED - pg was deleted
+ * _NO_RESOURCES
+ * _NOT_FOUND - pg has no property named propname
+ * _SUCCESS
+ */
+static int
+perm_add_pg_prop_values(permcheck_t *pcp, rc_node_t *pg, const char *propname)
+{
+ rc_node_t *prop;
+ int result;
+
+ uint_t count;
+ const char *cp;
+
+ assert(!MUTEX_HELD(&pg->rn_lock));
+ assert(pg->rn_id.rl_type == REP_PROTOCOL_ENTITY_PROPERTYGRP);
+ assert(pg->rn_id.rl_ids[ID_SNAPSHOT] == 0);
+
+ (void) pthread_mutex_lock(&pg->rn_lock);
+ result = rc_node_find_named_child(pg, propname,
+ REP_PROTOCOL_ENTITY_PROPERTY, &prop);
+ (void) pthread_mutex_unlock(&pg->rn_lock);
+ if (result != REP_PROTOCOL_SUCCESS) {
+ switch (result) {
+ case REP_PROTOCOL_FAIL_DELETED:
+ case REP_PROTOCOL_FAIL_NO_RESOURCES:
+ return (result);
+
+ case REP_PROTOCOL_FAIL_INVALID_TYPE:
+ case REP_PROTOCOL_FAIL_TYPE_MISMATCH:
+ default:
+ bad_error("rc_node_find_named_child", result);
+ }
+ }
+
+ if (prop == NULL)
+ return (REP_PROTOCOL_FAIL_NOT_FOUND);
+
+ /* rn_valtype is immutable, so no locking. */
+ if (prop->rn_valtype != REP_PROTOCOL_TYPE_STRING) {
+ rc_node_rele(prop);
+ return (REP_PROTOCOL_SUCCESS);
+ }
+
+ (void) pthread_mutex_lock(&prop->rn_lock);
+ for (count = prop->rn_values_count, cp = prop->rn_values;
+ count > 0;
+ --count) {
+ result = perm_add_enabling(pcp, cp);
+ if (result != REP_PROTOCOL_SUCCESS)
+ break;
+
+ cp = strchr(cp, '\0') + 1;
+ }
+
+ rc_node_rele_locked(prop);
+
+ return (result);
+}
+
+/*
+ * Assuming that ent is a service or instance node, if the pgname property
+ * group has type pgtype, and it has a propname property with string type, add
+ * its values as authorizations to pcp. If pgtype is NULL, it is not checked.
+ * Returns
+ * _SUCCESS
+ * _DELETED - ent was deleted
+ * _NO_RESOURCES - no resources
+ * _NOT_FOUND - ent does not have pgname pg or propname property
+ */
+static int
+perm_add_ent_prop_values(permcheck_t *pcp, rc_node_t *ent, const char *pgname,
+ const char *pgtype, const char *propname)
+{
+ int r;
+ rc_node_t *pg;
+
+ assert(!MUTEX_HELD(&ent->rn_lock));
+
+ (void) pthread_mutex_lock(&ent->rn_lock);
+ r = rc_node_find_named_child(ent, pgname,
+ REP_PROTOCOL_ENTITY_PROPERTYGRP, &pg);
+ (void) pthread_mutex_unlock(&ent->rn_lock);
+
+ switch (r) {
+ case REP_PROTOCOL_SUCCESS:
+ break;
+
+ case REP_PROTOCOL_FAIL_DELETED:
+ case REP_PROTOCOL_FAIL_NO_RESOURCES:
+ return (r);
+
+ default:
+ bad_error("rc_node_find_named_child", r);
+ }
+
+ if (pg == NULL)
+ return (REP_PROTOCOL_FAIL_NOT_FOUND);
+
+ if (pgtype == NULL || strcmp(pg->rn_type, pgtype) == 0) {
+ r = perm_add_pg_prop_values(pcp, pg, propname);
+ switch (r) {
+ case REP_PROTOCOL_FAIL_DELETED:
+ r = REP_PROTOCOL_FAIL_NOT_FOUND;
+ break;
+
+ case REP_PROTOCOL_FAIL_NO_RESOURCES:
+ case REP_PROTOCOL_SUCCESS:
+ case REP_PROTOCOL_FAIL_NOT_FOUND:
+ break;
+
+ default:
+ bad_error("perm_add_pg_prop_values", r);
+ }
+ }
+
+ rc_node_rele(pg);
+
+ return (r);
+}
+
+/*
+ * If pg has a property named propname, and it is string typed, add its values as
+ * authorizations to pcp. If pg has no such property, and its parent is an
+ * instance, walk up to the service and try doing the same with the property
+ * of the same name from the property group of the same name. Returns
+ * _SUCCESS
+ * _NO_RESOURCES
+ * _DELETED - pg (or an ancestor) was deleted
+ */
+static int
+perm_add_enabling_values(permcheck_t *pcp, rc_node_t *pg, const char *propname)
+{
+ int r;
+
+ r = perm_add_pg_prop_values(pcp, pg, propname);
+
+ if (r == REP_PROTOCOL_FAIL_NOT_FOUND) {
+ char pgname[REP_PROTOCOL_NAME_LEN + 1];
+ rc_node_t *inst, *svc;
+ size_t sz;
+
+ assert(!MUTEX_HELD(&pg->rn_lock));
+
+ if (pg->rn_id.rl_ids[ID_INSTANCE] == 0) {
+ /* not an instance pg */
+ return (REP_PROTOCOL_SUCCESS);
+ }
+
+ sz = strlcpy(pgname, pg->rn_name, sizeof (pgname));
+ assert(sz < sizeof (pgname));
+
+ /* get pg's parent */
+ r = rc_node_parent(pg, &inst);
+ if (r != REP_PROTOCOL_SUCCESS) {
+ assert(r == REP_PROTOCOL_FAIL_DELETED);
+ return (r);
+ }
+
+ assert(inst->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE);
+
+ /* get instance's parent */
+ r = rc_node_parent(inst, &svc);
+ rc_node_rele(inst);
+ if (r != REP_PROTOCOL_SUCCESS) {
+ assert(r == REP_PROTOCOL_FAIL_DELETED);
+ return (r);
+ }
+
+ assert(svc->rn_id.rl_type == REP_PROTOCOL_ENTITY_SERVICE);
+
+ r = perm_add_ent_prop_values(pcp, svc, pgname, NULL, propname);
+
+ rc_node_rele(svc);
+
+ if (r == REP_PROTOCOL_FAIL_NOT_FOUND)
+ r = REP_PROTOCOL_SUCCESS;
+ }
+
+ return (r);
+}
+
+/*
+ * Call perm_add_enabling_values() for the "action_authorization" property of
+ * the "general" property group of inst. Returns
+ * _DELETED - inst (or an ancestor) was deleted
+ * _NO_RESOURCES
+ * _SUCCESS
+ */
+static int
+perm_add_inst_action_auth(permcheck_t *pcp, rc_node_t *inst)
+{
+ int r;
+ rc_node_t *svc;
+
+ assert(inst->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE);
+
+ r = perm_add_ent_prop_values(pcp, inst, AUTH_PG_GENERAL,
+ AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION);
+
+ if (r != REP_PROTOCOL_FAIL_NOT_FOUND)
+ return (r);
+
+ r = rc_node_parent(inst, &svc);
+ if (r != REP_PROTOCOL_SUCCESS) {
+ assert(r == REP_PROTOCOL_FAIL_DELETED);
+ return (r);
+ }
+
+ r = perm_add_ent_prop_values(pcp, svc, AUTH_PG_GENERAL,
+ AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION);
+
+ return (r == REP_PROTOCOL_FAIL_NOT_FOUND ? REP_PROTOCOL_SUCCESS : r);
+}
+#endif /* NATIVE_BUILD */
+
+void
+rc_node_ptr_init(rc_node_ptr_t *out)
+{
+ out->rnp_node = NULL;
+ out->rnp_authorized = 0;
+ out->rnp_deleted = 0;
+}
+
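+/*
+ * Point 'out' at 'val', taking a hold on the new node before releasing the
+ * old one so the assignment is safe even if out already references val.
+ */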
+static void
+rc_node_assign(rc_node_ptr_t *out, rc_node_t *val)
+{
+ rc_node_t *cur = out->rnp_node;
+ if (val != NULL)
+ rc_node_hold(val);
+ out->rnp_node = val;
+ if (cur != NULL)
+ rc_node_rele(cur);
+ out->rnp_authorized = 0;
+ out->rnp_deleted = 0;
+}
+
+void
+rc_node_clear(rc_node_ptr_t *out, int deleted)
+{
+ rc_node_assign(out, NULL);
+ out->rnp_deleted = deleted;
+}
+
+void
+rc_node_ptr_assign(rc_node_ptr_t *out, const rc_node_ptr_t *val)
+{
+ rc_node_assign(out, val->rnp_node);
+}
+
+/*
+ * rc_node_check()/RC_NODE_CHECK()
+ * generic "entry" checks, run before the use of an rc_node pointer.
+ *
+ * Fails with
+ * _NOT_SET
+ * _DELETED
+ */
+static int
+rc_node_check_and_lock(rc_node_t *np)
+{
+ int result = REP_PROTOCOL_SUCCESS;
+ if (np == NULL)
+ return (REP_PROTOCOL_FAIL_NOT_SET);
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+ if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
+ result = REP_PROTOCOL_FAIL_DELETED;
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ }
+
+ return (result);
+}
+
+/*
+ * Fails with
+ * _NOT_SET - ptr is reset
+ * _DELETED - node has been deleted
+ */
+static rc_node_t *
+rc_node_ptr_check_and_lock(rc_node_ptr_t *npp, int *res)
+{
+ rc_node_t *np = npp->rnp_node;
+ if (np == NULL) {
+ if (npp->rnp_deleted)
+ *res = REP_PROTOCOL_FAIL_DELETED;
+ else
+ *res = REP_PROTOCOL_FAIL_NOT_SET;
+ return (NULL);
+ }
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+ if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ rc_node_clear(npp, 1);
+ *res = REP_PROTOCOL_FAIL_DELETED;
+ return (NULL);
+ }
+ return (np);
+}
+
+#define RC_NODE_CHECK_AND_LOCK(n) { \
+ int rc__res; \
+ if ((rc__res = rc_node_check_and_lock(n)) != REP_PROTOCOL_SUCCESS) \
+ return (rc__res); \
+}
+
+#define RC_NODE_CHECK(n) { \
+ RC_NODE_CHECK_AND_LOCK(n); \
+ (void) pthread_mutex_unlock(&(n)->rn_lock); \
+}
+
+#define RC_NODE_CHECK_AND_HOLD(n) { \
+ RC_NODE_CHECK_AND_LOCK(n); \
+ rc_node_hold_locked(n); \
+ (void) pthread_mutex_unlock(&(n)->rn_lock); \
+}
+
+#define RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp) { \
+ int rc__res; \
+ if (((np) = rc_node_ptr_check_and_lock(npp, &rc__res)) == NULL) \
+ return (rc__res); \
+}
+
+#define RC_NODE_PTR_GET_CHECK(np, npp) { \
+ RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); \
+ (void) pthread_mutex_unlock(&(np)->rn_lock); \
+}
+
+#define RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp) { \
+ RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); \
+ rc_node_hold_locked(np); \
+ (void) pthread_mutex_unlock(&(np)->rn_lock); \
+}
+
+#define HOLD_FLAG_OR_RETURN(np, flag) { \
+ assert(MUTEX_HELD(&(np)->rn_lock)); \
+ assert(!((np)->rn_flags & RC_NODE_DEAD)); \
+ if (!rc_node_hold_flag((np), flag)) { \
+ (void) pthread_mutex_unlock(&(np)->rn_lock); \
+ return (REP_PROTOCOL_FAIL_DELETED); \
+ } \
+}
+
+#define HOLD_PTR_FLAG_OR_RETURN(np, npp, flag) { \
+ assert(MUTEX_HELD(&(np)->rn_lock)); \
+ assert(!((np)->rn_flags & RC_NODE_DEAD)); \
+ if (!rc_node_hold_flag((np), flag)) { \
+ (void) pthread_mutex_unlock(&(np)->rn_lock); \
+ assert((np) == (npp)->rnp_node); \
+ rc_node_clear(npp, 1); \
+ return (REP_PROTOCOL_FAIL_DELETED); \
+ } \
+}
+
+int
+rc_local_scope(uint32_t type, rc_node_ptr_t *out)
+{
+ if (type != REP_PROTOCOL_ENTITY_SCOPE) {
+ rc_node_clear(out, 0);
+ return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
+ }
+
+ /*
+ * the main scope never gets destroyed
+ */
+ rc_node_assign(out, rc_scope);
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+/*
+ * Fails with
+ * _NOT_SET - npp is not set
+ * _DELETED - the node npp pointed at has been deleted
+ * _TYPE_MISMATCH - type is not _SCOPE
+ * _NOT_FOUND - scope has no parent
+ */
+static int
+rc_scope_parent_scope(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out)
+{
+ rc_node_t *np;
+
+ rc_node_clear(out, 0);
+
+ RC_NODE_PTR_GET_CHECK(np, npp);
+
+ if (type != REP_PROTOCOL_ENTITY_SCOPE)
+ return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
+
+ return (REP_PROTOCOL_FAIL_NOT_FOUND);
+}
+
+/*
+ * Fails with
+ * _NOT_SET
+ * _DELETED
+ * _NOT_APPLICABLE
+ * _NOT_FOUND
+ * _BAD_REQUEST
+ * _TRUNCATED
+ */
+int
+rc_node_name(rc_node_ptr_t *npp, char *buf, size_t sz, uint32_t answertype,
+ size_t *sz_out)
+{
+ size_t actual;
+ rc_node_t *np;
+
+ assert(sz == *sz_out);
+
+ RC_NODE_PTR_GET_CHECK(np, npp);
+
+ if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
+ np = np->rn_cchain[0];
+ RC_NODE_CHECK(np);
+ }
+
+ switch (answertype) {
+ case RP_ENTITY_NAME_NAME:
+ if (np->rn_name == NULL)
+ return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
+ actual = strlcpy(buf, np->rn_name, sz);
+ break;
+ case RP_ENTITY_NAME_PGTYPE:
+ if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
+ return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
+ actual = strlcpy(buf, np->rn_type, sz);
+ break;
+ case RP_ENTITY_NAME_PGFLAGS:
+ if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
+ return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
+ actual = snprintf(buf, sz, "%d", np->rn_pgflags);
+ break;
+ case RP_ENTITY_NAME_SNAPLEVEL_SCOPE:
+ if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
+ return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
+ actual = strlcpy(buf, np->rn_snaplevel->rsl_scope, sz);
+ break;
+ case RP_ENTITY_NAME_SNAPLEVEL_SERVICE:
+ if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
+ return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
+ actual = strlcpy(buf, np->rn_snaplevel->rsl_service, sz);
+ break;
+ case RP_ENTITY_NAME_SNAPLEVEL_INSTANCE:
+ if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
+ return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
+ if (np->rn_snaplevel->rsl_instance == NULL)
+ return (REP_PROTOCOL_FAIL_NOT_FOUND);
+ actual = strlcpy(buf, np->rn_snaplevel->rsl_instance, sz);
+ break;
+ default:
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+ }
+ if (actual >= sz)
+ return (REP_PROTOCOL_FAIL_TRUNCATED);
+
+ *sz_out = actual;
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+int
+rc_node_get_property_type(rc_node_ptr_t *npp, rep_protocol_value_type_t *out)
+{
+ rc_node_t *np;
+
+ RC_NODE_PTR_GET_CHECK(np, npp);
+
+ if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
+ return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
+
+ *out = np->rn_valtype;
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+/*
+ * Get np's parent. If np is deleted, returns _DELETED. Otherwise puts a hold
+ * on the parent, returns a pointer to it in *out, and returns _SUCCESS.
+ */
+static int
+rc_node_parent(rc_node_t *np, rc_node_t **out)
+{
+ rc_node_t *pnp;
+ rc_node_t *np_orig;
+
+ if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
+ RC_NODE_CHECK_AND_LOCK(np);
+ } else {
+ np = np->rn_cchain[0];
+ RC_NODE_CHECK_AND_LOCK(np);
+ }
+
+ np_orig = np;
+ rc_node_hold_locked(np); /* simplifies the remainder */
+
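+	/*
+	 * If np has been superseded (RC_NODE_OLD), chase the current
+	 * version of the object through the cache until we find one
+	 * which is not old.
+	 */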
+ for (;;) {
+ if (!rc_node_wait_flag(np,
+ RC_NODE_IN_TX | RC_NODE_USING_PARENT)) {
+ rc_node_rele_locked(np);
+ return (REP_PROTOCOL_FAIL_DELETED);
+ }
+
+ if (!(np->rn_flags & RC_NODE_OLD))
+ break;
+
+ rc_node_rele_locked(np);
+ np = cache_lookup(&np_orig->rn_id);
+ assert(np != np_orig);
+
+ if (np == NULL)
+ goto deleted;
+ (void) pthread_mutex_lock(&np->rn_lock);
+ }
+
+ /* guaranteed to succeed without dropping the lock */
+ if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ *out = NULL;
+ rc_node_rele(np);
+ return (REP_PROTOCOL_FAIL_DELETED);
+ }
+
+ assert(np->rn_parent != NULL);
+ pnp = np->rn_parent;
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+ (void) pthread_mutex_lock(&pnp->rn_lock);
+ (void) pthread_mutex_lock(&np->rn_lock);
+ rc_node_rele_flag(np, RC_NODE_USING_PARENT);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+ rc_node_hold_locked(pnp);
+
+ (void) pthread_mutex_unlock(&pnp->rn_lock);
+
+ rc_node_rele(np);
+ *out = pnp;
+ return (REP_PROTOCOL_SUCCESS);
+
+deleted:
+ rc_node_rele(np);
+ return (REP_PROTOCOL_FAIL_DELETED);
+}
+
+/*
+ * Fails with
+ * _NOT_SET
+ * _DELETED
+ */
+static int
+rc_node_ptr_parent(rc_node_ptr_t *npp, rc_node_t **out)
+{
+ rc_node_t *np;
+
+ RC_NODE_PTR_GET_CHECK(np, npp);
+
+ return (rc_node_parent(np, out));
+}
+
+/*
+ * Fails with
+ * _NOT_SET - npp is not set
+ * _DELETED - the node npp pointed at has been deleted
+ * _TYPE_MISMATCH - npp's node's parent is not of type type
+ *
+ * If npp points to a scope, can also fail with
+ * _NOT_FOUND - scope has no parent
+ */
+int
+rc_node_get_parent(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out)
+{
+ rc_node_t *pnp;
+ int rc;
+
+ if (npp->rnp_node != NULL &&
+ npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE)
+ return (rc_scope_parent_scope(npp, type, out));
+
+ if ((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS) {
+ rc_node_clear(out, 0);
+ return (rc);
+ }
+
+ if (type != pnp->rn_id.rl_type) {
+ rc_node_rele(pnp);
+ return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
+ }
+
+ rc_node_assign(out, pnp);
+ rc_node_rele(pnp);
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+int
+rc_node_parent_type(rc_node_ptr_t *npp, uint32_t *type_out)
+{
+ rc_node_t *pnp;
+ int rc;
+
+ if (npp->rnp_node != NULL &&
+ npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE) {
+ *type_out = REP_PROTOCOL_ENTITY_SCOPE;
+ return (REP_PROTOCOL_SUCCESS);
+ }
+
+ if ((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS)
+ return (rc);
+
+ *type_out = pnp->rn_id.rl_type;
+
+ rc_node_rele(pnp);
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+/*
+ * Fails with
+ * _INVALID_TYPE - type is invalid
+ * _TYPE_MISMATCH - np doesn't carry children of type type
+ * _DELETED - np has been deleted
+ * _NOT_FOUND - no child with that name/type combo found
+ * _NO_RESOURCES
+ * _BACKEND_ACCESS
+ */
+int
+rc_node_get_child(rc_node_ptr_t *npp, const char *name, uint32_t type,
+ rc_node_ptr_t *outp)
+{
+ rc_node_t *np, *cp;
+ rc_node_t *child = NULL;
+ int ret, idx;
+
+ RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
+ if ((ret = rc_check_type_name(type, name)) == REP_PROTOCOL_SUCCESS) {
+ if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
+ ret = rc_node_find_named_child(np, name, type, &child);
+ } else {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ ret = REP_PROTOCOL_SUCCESS;
+ for (idx = 0; idx < COMPOSITION_DEPTH; idx++) {
+ cp = np->rn_cchain[idx];
+ if (cp == NULL)
+ break;
+ RC_NODE_CHECK_AND_LOCK(cp);
+ ret = rc_node_find_named_child(cp, name, type,
+ &child);
+ (void) pthread_mutex_unlock(&cp->rn_lock);
+ /*
+ * loop only if we succeeded, but no child of
+ * the correct name was found.
+ */
+ if (ret != REP_PROTOCOL_SUCCESS ||
+ child != NULL)
+ break;
+ }
+ (void) pthread_mutex_lock(&np->rn_lock);
+ }
+ }
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+ if (ret == REP_PROTOCOL_SUCCESS) {
+ rc_node_assign(outp, child);
+ if (child != NULL)
+ rc_node_rele(child);
+ else
+ ret = REP_PROTOCOL_FAIL_NOT_FOUND;
+ } else {
+ rc_node_assign(outp, NULL);
+ }
+ return (ret);
+}
+
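+/*
+ * Re-look up npp's node in the cache and, if a newer version exists
+ * (snapshots and property groups are replaced rather than modified in
+ * place), repoint npp at it.  Returns _DONE if npp was moved to a newer
+ * node, _SUCCESS if it was already current, _DELETED if the object is
+ * gone, and _BAD_REQUEST if npp is not a property group or snapshot.
+ */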
+int
+rc_node_update(rc_node_ptr_t *npp)
+{
+ cache_bucket_t *bp;
+ rc_node_t *np = npp->rnp_node;
+ rc_node_t *nnp;
+ rc_node_t *cpg = NULL;
+
+ if (np != NULL &&
+ np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
+ /*
+ * If we're updating a composed property group, actually
+ * update the top-level property group & return the
+		 * appropriate value.  But leave npp pointing at us.
+ */
+ cpg = np;
+ np = np->rn_cchain[0];
+ }
+
+ RC_NODE_CHECK(np);
+
+ if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP &&
+ np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT)
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ for (;;) {
+ bp = cache_hold(np->rn_hash);
+ nnp = cache_lookup_unlocked(bp, &np->rn_id);
+ if (nnp == NULL) {
+ cache_release(bp);
+ rc_node_clear(npp, 1);
+ return (REP_PROTOCOL_FAIL_DELETED);
+ }
+ /*
+ * grab the lock before dropping the cache bucket, so
+ * that no one else can sneak in
+ */
+ (void) pthread_mutex_lock(&nnp->rn_lock);
+ cache_release(bp);
+
+ if (!(nnp->rn_flags & RC_NODE_IN_TX) ||
+ !rc_node_wait_flag(nnp, RC_NODE_IN_TX))
+ break;
+
+ rc_node_rele_locked(nnp);
+ }
+
+ /*
+ * If it is dead, we want to update it so that it will continue to
+ * report being dead.
+ */
+ if (nnp->rn_flags & RC_NODE_DEAD) {
+ (void) pthread_mutex_unlock(&nnp->rn_lock);
+ if (nnp != np && cpg == NULL)
+ rc_node_assign(npp, nnp); /* updated */
+ rc_node_rele(nnp);
+ return (REP_PROTOCOL_FAIL_DELETED);
+ }
+
+ assert(!(nnp->rn_flags & RC_NODE_OLD));
+ (void) pthread_mutex_unlock(&nnp->rn_lock);
+
+ if (nnp != np && cpg == NULL)
+ rc_node_assign(npp, nnp); /* updated */
+
+ rc_node_rele(nnp);
+
+ return ((nnp == np)? REP_PROTOCOL_SUCCESS : REP_PROTOCOL_DONE);
+}
+
+/*
+ * does a generic modification check, for creation, deletion, and snapshot
+ * management only. Property group transactions have different checks.
+ */
+int
+rc_node_modify_permission_check(void)
+{
+ int rc = REP_PROTOCOL_SUCCESS;
+ permcheck_t *pcp;
+ int granted;
+
+ if (!client_is_privileged()) {
+#ifdef NATIVE_BUILD
+ rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
+#else
+ pcp = pc_create();
+ if (pcp != NULL) {
+ rc = perm_add_enabling(pcp, AUTH_MODIFY);
+
+ if (rc == REP_PROTOCOL_SUCCESS) {
+ granted = perm_granted(pcp);
+
+ if (granted < 0)
+ rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ }
+
+ pc_free(pcp);
+ } else {
+ rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ }
+
+ if (rc == REP_PROTOCOL_SUCCESS && !granted)
+ rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
+#endif /* NATIVE_BUILD */
+ }
+ return (rc);
+}
+
+/*
+ * Fails with
+ * _DELETED - node has been deleted
+ * _NOT_SET - npp is reset
+ * _NOT_APPLICABLE - type is _PROPERTYGRP
+ * _INVALID_TYPE - node is corrupt or type is invalid
+ * _TYPE_MISMATCH - node cannot have children of type type
+ * _BAD_REQUEST - name is invalid
+ * cannot create children for this type of node
+ * _NO_RESOURCES - out of memory, or could not allocate new id
+ * _PERMISSION_DENIED
+ * _BACKEND_ACCESS
+ * _BACKEND_READONLY
+ * _EXISTS - child already exists
+ */
+int
+rc_node_create_child(rc_node_ptr_t *npp, uint32_t type, const char *name,
+ rc_node_ptr_t *cpp)
+{
+ rc_node_t *np;
+ rc_node_t *cp = NULL;
+ int rc;
+
+ rc_node_clear(cpp, 0);
+
+ RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
+
+ /*
+ * there is a separate interface for creating property groups
+ */
+ if (type == REP_PROTOCOL_ENTITY_PROPERTYGRP) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
+ }
+
+ if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ np = np->rn_cchain[0];
+ RC_NODE_CHECK_AND_LOCK(np);
+ }
+
+ if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
+ REP_PROTOCOL_SUCCESS) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (rc);
+ }
+ if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (rc);
+ }
+
+ if ((rc = rc_node_modify_permission_check()) != REP_PROTOCOL_SUCCESS) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (rc);
+ }
+
+ HOLD_PTR_FLAG_OR_RETURN(np, npp, RC_NODE_CREATING_CHILD);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+ rc = object_create(np, type, name, &cp);
+ assert(rc != REP_PROTOCOL_FAIL_NOT_APPLICABLE);
+
+ if (rc == REP_PROTOCOL_SUCCESS) {
+ rc_node_assign(cpp, cp);
+ rc_node_rele(cp);
+ }
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+ rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+ return (rc);
+}
+
+int
+rc_node_create_child_pg(rc_node_ptr_t *npp, uint32_t type, const char *name,
+ const char *pgtype, uint32_t flags, rc_node_ptr_t *cpp)
+{
+ rc_node_t *np;
+ rc_node_t *cp;
+ int rc;
+ permcheck_t *pcp;
+ int granted;
+
+ rc_node_clear(cpp, 0);
+
+ /* verify flags is valid */
+ if (flags & ~SCF_PG_FLAG_NONPERSISTENT)
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
+
+ if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
+ rc_node_rele(np);
+ return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
+ }
+
+ if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
+ REP_PROTOCOL_SUCCESS) {
+ rc_node_rele(np);
+ return (rc);
+ }
+ if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS ||
+ (rc = rc_check_pgtype_name(pgtype)) != REP_PROTOCOL_SUCCESS) {
+ rc_node_rele(np);
+ return (rc);
+ }
+
+ if (!client_is_privileged()) {
+#ifdef NATIVE_BUILD
+ rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
+#else
+ /* Must have .smf.modify or smf.modify.<type> authorization */
+ pcp = pc_create();
+ if (pcp != NULL) {
+ rc = perm_add_enabling(pcp, AUTH_MODIFY);
+
+ if (rc == REP_PROTOCOL_SUCCESS) {
+ const char * const auth =
+ perm_auth_for_pgtype(pgtype);
+
+ if (auth != NULL)
+ rc = perm_add_enabling(pcp, auth);
+ }
+
+ /*
+ * .manage or $action_authorization can be used to
+ * create the actions pg and the general_ovr pg.
+ */
+ if (rc == REP_PROTOCOL_SUCCESS &&
+ (flags & SCF_PG_FLAG_NONPERSISTENT) != 0 &&
+ np->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE &&
+ ((strcmp(name, AUTH_PG_ACTIONS) == 0 &&
+ strcmp(pgtype, AUTH_PG_ACTIONS_TYPE) == 0) ||
+ (strcmp(name, AUTH_PG_GENERAL_OVR) == 0 &&
+ strcmp(pgtype, AUTH_PG_GENERAL_OVR_TYPE) == 0))) {
+ rc = perm_add_enabling(pcp, AUTH_MANAGE);
+
+ if (rc == REP_PROTOCOL_SUCCESS)
+ rc = perm_add_inst_action_auth(pcp, np);
+ }
+
+ if (rc == REP_PROTOCOL_SUCCESS) {
+ granted = perm_granted(pcp);
+
+ if (granted < 0)
+ rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ }
+
+ pc_free(pcp);
+ } else {
+ rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ }
+
+ if (rc == REP_PROTOCOL_SUCCESS && !granted)
+ rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
+#endif /* NATIVE_BUILD */
+
+ if (rc != REP_PROTOCOL_SUCCESS) {
+ rc_node_rele(np);
+ return (rc);
+ }
+ }
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+ HOLD_PTR_FLAG_OR_RETURN(np, npp, RC_NODE_CREATING_CHILD);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+ rc = object_create_pg(np, type, name, pgtype, flags, &cp);
+
+ if (rc == REP_PROTOCOL_SUCCESS) {
+ rc_node_assign(cpp, cp);
+ rc_node_rele(cp);
+ }
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+ rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+ return (rc);
+}
+
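+/*
+ * Detach a property group notification: remove it from its pg's notify
+ * list and close its file descriptor.  Already-fired notifications have
+ * no pg and an fd of -1.
+ */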
+static void
+rc_pg_notify_fire(rc_node_pg_notify_t *pnp)
+{
+ assert(MUTEX_HELD(&rc_pg_notify_lock));
+
+ if (pnp->rnpn_pg != NULL) {
+ uu_list_remove(pnp->rnpn_pg->rn_pg_notify_list, pnp);
+ (void) close(pnp->rnpn_fd);
+
+ pnp->rnpn_pg = NULL;
+ pnp->rnpn_fd = -1;
+ } else {
+ assert(pnp->rnpn_fd == -1);
+ }
+}
+
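+/*
+ * Walk from np_arg up through its parents, collecting the enclosing
+ * property group, instance, and service, then hand ndp to
+ * rc_notify_deletion() with their names.  If the walk fails (a parent is
+ * gone or an unexpected node type is found), ndp is freed instead.  All
+ * holds and flags taken along the way are released before returning.
+ */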
+static void
+rc_notify_node_delete(rc_notify_delete_t *ndp, rc_node_t *np_arg)
+{
+ rc_node_t *svc = NULL;
+ rc_node_t *inst = NULL;
+ rc_node_t *pg = NULL;
+ rc_node_t *np = np_arg;
+ rc_node_t *nnp;
+
+ while (svc == NULL) {
+ (void) pthread_mutex_lock(&np->rn_lock);
+ if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ goto cleanup;
+ }
+ nnp = np->rn_parent;
+ rc_node_hold_locked(np); /* hold it in place */
+
+ switch (np->rn_id.rl_type) {
+ case REP_PROTOCOL_ENTITY_PROPERTYGRP:
+ assert(pg == NULL);
+ pg = np;
+ break;
+ case REP_PROTOCOL_ENTITY_INSTANCE:
+ assert(inst == NULL);
+ inst = np;
+ break;
+ case REP_PROTOCOL_ENTITY_SERVICE:
+ assert(svc == NULL);
+ svc = np;
+ break;
+ default:
+ rc_node_rele_flag(np, RC_NODE_USING_PARENT);
+ rc_node_rele_locked(np);
+ goto cleanup;
+ }
+
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+ np = nnp;
+ if (np == NULL)
+ goto cleanup;
+ }
+
+ rc_notify_deletion(ndp,
+ svc->rn_name,
+ inst != NULL ? inst->rn_name : NULL,
+ pg != NULL ? pg->rn_name : NULL);
+
+ ndp = NULL;
+
+cleanup:
+ if (ndp != NULL)
+ uu_free(ndp);
+
+ for (;;) {
+ if (svc != NULL) {
+ np = svc;
+ svc = NULL;
+ } else if (inst != NULL) {
+ np = inst;
+ inst = NULL;
+ } else if (pg != NULL) {
+ np = pg;
+ pg = NULL;
+ } else
+ break;
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+ rc_node_rele_flag(np, RC_NODE_USING_PARENT);
+ rc_node_rele_locked(np);
+ }
+}
+
+/*
+ * N.B.: this function drops np->rn_lock on the way out.
+ */
+static void
+rc_node_delete_hold(rc_node_t *np, int andformer)
+{
+ rc_node_t *cp;
+
+again:
+ assert(MUTEX_HELD(&np->rn_lock));
+ assert((np->rn_flags & RC_NODE_DYING_FLAGS) == RC_NODE_DYING_FLAGS);
+
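+	/*
+	 * Hand-over-hand: each child is locked before np's lock is dropped.
+	 * The child list cannot change underneath us because np's
+	 * RC_NODE_CHILDREN_CHANGING flag (part of RC_NODE_DYING_FLAGS) is
+	 * held.
+	 */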
+ for (cp = uu_list_first(np->rn_children); cp != NULL;
+ cp = uu_list_next(np->rn_children, cp)) {
+ (void) pthread_mutex_lock(&cp->rn_lock);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ if (!rc_node_hold_flag(cp, RC_NODE_DYING_FLAGS)) {
+ /*
+ * already marked as dead -- can't happen, since that
+ * would require setting RC_NODE_CHILDREN_CHANGING
+ * in np, and we're holding that...
+ */
+ abort();
+ }
+ rc_node_delete_hold(cp, andformer); /* recurse, drop lock */
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+ }
+ if (andformer && (cp = np->rn_former) != NULL) {
+ (void) pthread_mutex_lock(&cp->rn_lock);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ if (!rc_node_hold_flag(cp, RC_NODE_DYING_FLAGS))
+ abort(); /* can't happen, see above */
+ np = cp;
+ goto again; /* tail-recurse down rn_former */
+ }
+ (void) pthread_mutex_unlock(&np->rn_lock);
+}
+
+/*
+ * N.B.: this function drops np->rn_lock on the way out.
+ */
+static void
+rc_node_delete_rele(rc_node_t *np, int andformer)
+{
+ rc_node_t *cp;
+
+again:
+ assert(MUTEX_HELD(&np->rn_lock));
+ assert((np->rn_flags & RC_NODE_DYING_FLAGS) == RC_NODE_DYING_FLAGS);
+
+ for (cp = uu_list_first(np->rn_children); cp != NULL;
+ cp = uu_list_next(np->rn_children, cp)) {
+ (void) pthread_mutex_lock(&cp->rn_lock);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ rc_node_delete_rele(cp, andformer); /* recurse, drop lock */
+ (void) pthread_mutex_lock(&np->rn_lock);
+ }
+ if (andformer && (cp = np->rn_former) != NULL) {
+ (void) pthread_mutex_lock(&cp->rn_lock);
+ rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+ np = cp;
+ goto again; /* tail-recurse down rn_former */
+ }
+ rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+}
+
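+/*
+ * Mark cp dead.  If cp is the current (non-OLD) version of the object, it
+ * is also detached from its parent, its pending pg notifications are
+ * fired, and it is removed from the notify list and the cache.
+ */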
+static void
+rc_node_finish_delete(rc_node_t *cp)
+{
+ cache_bucket_t *bp;
+ rc_node_pg_notify_t *pnp;
+
+ assert(MUTEX_HELD(&cp->rn_lock));
+
+ if (!(cp->rn_flags & RC_NODE_OLD)) {
+ assert(cp->rn_flags & RC_NODE_IN_PARENT);
+ if (!rc_node_wait_flag(cp, RC_NODE_USING_PARENT)) {
+ abort(); /* can't happen, see above */
+ }
+ cp->rn_flags &= ~RC_NODE_IN_PARENT;
+ cp->rn_parent = NULL;
+ }
+
+ cp->rn_flags |= RC_NODE_DEAD;
+
+ /*
+ * If this node is not out-dated, we need to remove it from
+ * the notify list and cache hash table.
+ */
+ if (!(cp->rn_flags & RC_NODE_OLD)) {
+ assert(cp->rn_refs > 0); /* can't go away yet */
+ (void) pthread_mutex_unlock(&cp->rn_lock);
+
+ (void) pthread_mutex_lock(&rc_pg_notify_lock);
+ while ((pnp = uu_list_first(cp->rn_pg_notify_list)) != NULL)
+ rc_pg_notify_fire(pnp);
+ (void) pthread_mutex_unlock(&rc_pg_notify_lock);
+ rc_notify_remove_node(cp);
+
+ bp = cache_hold(cp->rn_hash);
+ (void) pthread_mutex_lock(&cp->rn_lock);
+ cache_remove_unlocked(bp, cp);
+ cache_release(bp);
+ }
+}
+
+/*
+ * N.B.: this function drops np->rn_lock and a reference on the way out.
+ */
+static void
+rc_node_delete_children(rc_node_t *np, int andformer)
+{
+ rc_node_t *cp;
+
+again:
+ assert(np->rn_refs > 0);
+ assert(MUTEX_HELD(&np->rn_lock));
+ assert(np->rn_flags & RC_NODE_DEAD);
+
+ while ((cp = uu_list_first(np->rn_children)) != NULL) {
+ uu_list_remove(np->rn_children, cp);
+ (void) pthread_mutex_lock(&cp->rn_lock);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ rc_node_hold_locked(cp); /* hold while we recurse */
+ rc_node_finish_delete(cp);
+ rc_node_delete_children(cp, andformer); /* drops lock + ref */
+ (void) pthread_mutex_lock(&np->rn_lock);
+ }
+
+ /*
+	 * all of np's children are gone now, so we can release its
+	 * DYING_FLAGS.
+ */
+ rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
+ if (andformer && (cp = np->rn_former) != NULL) {
+ np->rn_former = NULL; /* unlink */
+ (void) pthread_mutex_lock(&cp->rn_lock);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ np->rn_flags &= ~RC_NODE_ON_FORMER;
+
+ rc_node_hold_locked(cp); /* hold while we loop */
+
+ rc_node_finish_delete(cp);
+
+ rc_node_rele(np); /* drop the old reference */
+
+ np = cp;
+ goto again; /* tail-recurse down rn_former */
+ }
+ rc_node_rele_locked(np);
+}
+
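+/*
+ * Called, with np->rn_lock held, when the last reference to np is dropped.
+ * Dead nodes are destroyed immediately.  Old nodes are unlinked from their
+ * successor's rn_former chain and destroyed, unless they pick up new
+ * references (or die by other means) while we are working.
+ */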
+static void
+rc_node_unrefed(rc_node_t *np)
+{
+ int unrefed;
+ rc_node_t *pp, *cur;
+
+ assert(MUTEX_HELD(&np->rn_lock));
+ assert(np->rn_refs == 0);
+ assert(np->rn_other_refs == 0);
+ assert(np->rn_other_refs_held == 0);
+
+ if (np->rn_flags & RC_NODE_DEAD) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ rc_node_destroy(np);
+ return;
+ }
+
+ assert(np->rn_flags & RC_NODE_OLD);
+ if (np->rn_flags & RC_NODE_UNREFED) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return;
+ }
+ np->rn_flags |= RC_NODE_UNREFED;
+
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+ /*
+	 * find the current in-hash object, and grab its RC_NODE_IN_TX
+ * flag. That protects the entire rn_former chain.
+ */
+ for (;;) {
+ pp = cache_lookup(&np->rn_id);
+ if (pp == NULL) {
+ (void) pthread_mutex_lock(&np->rn_lock);
+ if (np->rn_flags & RC_NODE_DEAD)
+ goto died;
+ /*
+ * We are trying to unreference this node, but the
+ * owner of the former list does not exist. It must
+ * be the case that another thread is deleting this
+ * entire sub-branch, but has not yet reached us.
+ * We will in short order be deleted.
+ */
+ np->rn_flags &= ~RC_NODE_UNREFED;
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return;
+ }
+ if (pp == np) {
+ /*
+ * no longer unreferenced
+ */
+ (void) pthread_mutex_lock(&np->rn_lock);
+ np->rn_flags &= ~RC_NODE_UNREFED;
+ rc_node_rele_locked(np);
+ return;
+ }
+ (void) pthread_mutex_lock(&pp->rn_lock);
+ if ((pp->rn_flags & RC_NODE_OLD) ||
+ !rc_node_hold_flag(pp, RC_NODE_IN_TX)) {
+ rc_node_rele_locked(pp);
+ continue;
+ }
+ if (!(pp->rn_flags & RC_NODE_OLD)) {
+ (void) pthread_mutex_unlock(&pp->rn_lock);
+ break;
+ }
+ rc_node_rele_flag(pp, RC_NODE_IN_TX);
+ rc_node_rele_locked(pp);
+ }
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+ if (!(np->rn_flags & (RC_NODE_OLD | RC_NODE_DEAD)) ||
+ np->rn_refs != 0 || np->rn_other_refs != 0 ||
+ np->rn_other_refs_held != 0) {
+ np->rn_flags &= ~RC_NODE_UNREFED;
+ (void) pthread_mutex_lock(&pp->rn_lock);
+
+ rc_node_rele_flag(pp, RC_NODE_IN_TX);
+ rc_node_rele_locked(pp);
+ return;
+ }
+
+ if (!rc_node_hold_flag(np, RC_NODE_DYING_FLAGS)) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+ rc_node_rele_flag(pp, RC_NODE_IN_TX);
+ rc_node_rele_locked(pp);
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+ goto died;
+ }
+
+ rc_node_delete_hold(np, 0);
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+ if (!(np->rn_flags & RC_NODE_OLD) ||
+ np->rn_refs != 0 || np->rn_other_refs != 0 ||
+ np->rn_other_refs_held != 0) {
+ np->rn_flags &= ~RC_NODE_UNREFED;
+ rc_node_delete_rele(np, 0);
+
+ (void) pthread_mutex_lock(&pp->rn_lock);
+ rc_node_rele_flag(pp, RC_NODE_IN_TX);
+ rc_node_rele_locked(pp);
+ return;
+ }
+
+ np->rn_flags |= RC_NODE_DEAD;
+ rc_node_hold_locked(np);
+ rc_node_delete_children(np, 0);
+
+ /*
+ * It's gone -- remove it from the former chain and destroy it.
+ */
+ (void) pthread_mutex_lock(&pp->rn_lock);
+ for (cur = pp; cur != NULL && cur->rn_former != np;
+ cur = cur->rn_former)
+ ;
+ assert(cur != NULL && cur != np);
+
+ cur->rn_former = np->rn_former;
+ np->rn_former = NULL;
+
+ rc_node_rele_flag(pp, RC_NODE_IN_TX);
+ rc_node_rele_locked(pp);
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+ assert(np->rn_flags & RC_NODE_ON_FORMER);
+ np->rn_flags &= ~(RC_NODE_UNREFED | RC_NODE_ON_FORMER);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ rc_node_destroy(np);
+ return;
+
+died:
+ np->rn_flags &= ~RC_NODE_UNREFED;
+ unrefed = (np->rn_refs == 0 && np->rn_other_refs == 0 &&
+ np->rn_other_refs_held == 0);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ if (unrefed)
+ rc_node_destroy(np);
+}
+
+/*
+ * Fails with
+ * _NOT_SET
+ * _DELETED
+ * _BAD_REQUEST
+ * _PERMISSION_DENIED
+ * _NO_RESOURCES
+ * and whatever object_delete() fails with.
+ */
+int
+rc_node_delete(rc_node_ptr_t *npp)
+{
+ rc_node_t *np, *np_orig;
+ rc_node_t *pp = NULL;
+ int rc;
+ rc_node_pg_notify_t *pnp;
+ cache_bucket_t *bp;
+ rc_notify_delete_t *ndp;
+ permcheck_t *pcp;
+ int granted;
+
+ RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
+
+ switch (np->rn_id.rl_type) {
+ case REP_PROTOCOL_ENTITY_SERVICE:
+ case REP_PROTOCOL_ENTITY_INSTANCE:
+ case REP_PROTOCOL_ENTITY_SNAPSHOT:
+ break; /* deletable */
+
+ case REP_PROTOCOL_ENTITY_SCOPE:
+ case REP_PROTOCOL_ENTITY_SNAPLEVEL:
+ /* Scopes and snaplevels are indelible. */
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ np = np->rn_cchain[0];
+ RC_NODE_CHECK_AND_LOCK(np);
+ break;
+
+ case REP_PROTOCOL_ENTITY_PROPERTYGRP:
+ if (np->rn_id.rl_ids[ID_SNAPSHOT] == 0)
+ break;
+
+ /* Snapshot property groups are indelible. */
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
+
+ case REP_PROTOCOL_ENTITY_PROPERTY:
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ default:
+ assert(0);
+ abort();
+ break;
+ }
+
+ np_orig = np;
+ rc_node_hold_locked(np); /* simplifies rest of the code */
+
+again:
+ /*
+ * The following loop is to deal with the fact that snapshots and
+ * property groups are moving targets -- changes to them result
+ * in a new "child" node. Since we can only delete from the top node,
+ * we have to loop until we have a non-RC_NODE_OLD version.
+ */
+ for (;;) {
+ if (!rc_node_wait_flag(np,
+ RC_NODE_IN_TX | RC_NODE_USING_PARENT)) {
+ rc_node_rele_locked(np);
+ return (REP_PROTOCOL_FAIL_DELETED);
+ }
+
+ if (np->rn_flags & RC_NODE_OLD) {
+ rc_node_rele_locked(np);
+ np = cache_lookup(&np_orig->rn_id);
+ assert(np != np_orig);
+
+ if (np == NULL) {
+ rc = REP_PROTOCOL_FAIL_DELETED;
+ goto fail;
+ }
+ (void) pthread_mutex_lock(&np->rn_lock);
+ continue;
+ }
+
+ if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
+ rc_node_rele_locked(np);
+ rc_node_clear(npp, 1);
+ return (REP_PROTOCOL_FAIL_DELETED);
+ }
+
+ /*
+		 * Mark our parent as children changing.  This call drops our
+ * lock and the RC_NODE_USING_PARENT flag, and returns with
+ * pp's lock held
+ */
+ pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
+ if (pp == NULL) {
+ /* our parent is gone, we're going next... */
+ rc_node_rele(np);
+
+ rc_node_clear(npp, 1);
+ return (REP_PROTOCOL_FAIL_DELETED);
+ }
+
+ rc_node_hold_locked(pp); /* hold for later */
+ (void) pthread_mutex_unlock(&pp->rn_lock);
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+ if (!(np->rn_flags & RC_NODE_OLD))
+ break; /* not old -- we're done */
+
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ (void) pthread_mutex_lock(&pp->rn_lock);
+ rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
+ rc_node_rele_locked(pp);
+ (void) pthread_mutex_lock(&np->rn_lock);
+ continue; /* loop around and try again */
+ }
+ /*
+ * Everyone out of the pool -- we grab everything but
+ * RC_NODE_USING_PARENT (including RC_NODE_DYING) to keep
+ * any changes from occurring while we are attempting to
+ * delete the node.
+ */
+ if (!rc_node_hold_flag(np, RC_NODE_DYING_FLAGS)) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ rc = REP_PROTOCOL_FAIL_DELETED;
+ goto fail;
+ }
+
+ assert(!(np->rn_flags & RC_NODE_OLD));
+
+ if (!client_is_privileged()) {
+ /* permission check */
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+#ifdef NATIVE_BUILD
+ rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
+#else
+ pcp = pc_create();
+ if (pcp != NULL) {
+ rc = perm_add_enabling(pcp, AUTH_MODIFY);
+
+ /* add .smf.modify.<type> for pgs. */
+ if (rc == REP_PROTOCOL_SUCCESS && np->rn_id.rl_type ==
+ REP_PROTOCOL_ENTITY_PROPERTYGRP) {
+ const char * const auth =
+ perm_auth_for_pgtype(np->rn_type);
+
+ if (auth != NULL)
+ rc = perm_add_enabling(pcp, auth);
+ }
+
+ if (rc == REP_PROTOCOL_SUCCESS) {
+ granted = perm_granted(pcp);
+
+ if (granted < 0)
+ rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ }
+
+ pc_free(pcp);
+ } else {
+ rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ }
+
+ if (rc == REP_PROTOCOL_SUCCESS && !granted)
+ rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
+#endif /* NATIVE_BUILD */
+
+ if (rc != REP_PROTOCOL_SUCCESS) {
+ (void) pthread_mutex_lock(&np->rn_lock);
+ rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ goto fail;
+ }
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+ }
+
+ ndp = uu_zalloc(sizeof (*ndp));
+ if (ndp == NULL) {
+ rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ goto fail;
+ }
+
+ rc_node_delete_hold(np, 1); /* hold entire subgraph, drop lock */
+
+ rc = object_delete(np);
+
+ if (rc != REP_PROTOCOL_SUCCESS) {
+ (void) pthread_mutex_lock(&np->rn_lock);
+ rc_node_delete_rele(np, 1); /* drops lock */
+ uu_free(ndp);
+ goto fail;
+ }
+
+ /*
+ * Now, delicately unlink and delete the object.
+ *
+ * Create the delete notification, atomically remove
+ * from the hash table and set the NODE_DEAD flag, and
+ * remove from the parent's children list.
+ */
+ rc_notify_node_delete(ndp, np); /* frees or uses ndp */
+
+ bp = cache_hold(np->rn_hash);
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+ cache_remove_unlocked(bp, np);
+ cache_release(bp);
+
+ np->rn_flags |= RC_NODE_DEAD;
+ if (pp != NULL) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+ (void) pthread_mutex_lock(&pp->rn_lock);
+ (void) pthread_mutex_lock(&np->rn_lock);
+ uu_list_remove(pp->rn_children, np);
+ rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
+ (void) pthread_mutex_unlock(&pp->rn_lock);
+ np->rn_flags &= ~RC_NODE_IN_PARENT;
+ }
+ /*
+ * finally, propagate death to our children, handle notifications,
+ * and release our hold.
+ */
+ rc_node_hold_locked(np); /* hold for delete */
+ rc_node_delete_children(np, 1); /* drops DYING_FLAGS, lock, ref */
+
+ rc_node_clear(npp, 1);
+
+ (void) pthread_mutex_lock(&rc_pg_notify_lock);
+ while ((pnp = uu_list_first(np->rn_pg_notify_list)) != NULL)
+ rc_pg_notify_fire(pnp);
+ (void) pthread_mutex_unlock(&rc_pg_notify_lock);
+ rc_notify_remove_node(np);
+
+ rc_node_rele(np);
+
+ return (rc);
+
+fail:
+ rc_node_rele(np);
+ if (rc == REP_PROTOCOL_FAIL_DELETED)
+ rc_node_clear(npp, 1);
+ if (pp != NULL) {
+ (void) pthread_mutex_lock(&pp->rn_lock);
+ rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
+ rc_node_rele_locked(pp); /* drop ref and lock */
+ }
+ return (rc);
+}
+
+int
+rc_node_next_snaplevel(rc_node_ptr_t *npp, rc_node_ptr_t *cpp)
+{
+ rc_node_t *np;
+ rc_node_t *cp, *pp;
+ int res;
+
+ rc_node_clear(cpp, 0);
+
+ RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
+
+ if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT &&
+ np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
+ }
+
+ if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_SNAPSHOT) {
+ if ((res = rc_node_fill_children(np,
+ REP_PROTOCOL_ENTITY_SNAPLEVEL)) != REP_PROTOCOL_SUCCESS) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (res);
+ }
+
+ for (cp = uu_list_first(np->rn_children);
+ cp != NULL;
+ cp = uu_list_next(np->rn_children, cp)) {
+ if (cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
+ continue;
+ rc_node_hold(cp);
+ break;
+ }
+
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ } else {
+ HOLD_PTR_FLAG_OR_RETURN(np, npp, RC_NODE_USING_PARENT);
+ /*
+		 * Mark our parent as children changing. This call drops our
+		 * lock and the RC_NODE_USING_PARENT flag, and returns with
+		 * pp's lock held.
+ */
+ pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
+ if (pp == NULL) {
+ /* our parent is gone, we're going next... */
+
+ rc_node_clear(npp, 1);
+ return (REP_PROTOCOL_FAIL_DELETED);
+ }
+
+ /*
+ * find the next snaplevel
+ */
+ cp = np;
+ while ((cp = uu_list_next(pp->rn_children, cp)) != NULL &&
+ cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
+ ;
+
+ /* it must match the snaplevel list */
+ assert((cp == NULL && np->rn_snaplevel->rsl_next == NULL) ||
+ (cp != NULL && np->rn_snaplevel->rsl_next ==
+ cp->rn_snaplevel));
+
+ if (cp != NULL)
+ rc_node_hold(cp);
+
+ rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
+
+ (void) pthread_mutex_unlock(&pp->rn_lock);
+ }
+
+ rc_node_assign(cpp, cp);
+ if (cp != NULL) {
+ rc_node_rele(cp);
+
+ return (REP_PROTOCOL_SUCCESS);
+ }
+ return (REP_PROTOCOL_FAIL_NOT_FOUND);
+}
+
+/*
+ * This call takes a snapshot (np) and either:
+ * an existing snapid (to be associated with np), or
+ * a non-NULL parentp (from which a new snapshot is taken, and associated
+ * with np)
+ *
+ * To do the association, np is duplicated, the duplicate is made to
+ * represent the new snapid, and np is replaced with the new rc_node_t on
+ * np's parent's child list. np is placed on the new node's rn_former list,
+ * and replaces np in cache_hash (so rc_node_update() will find the new one).
+ */
+static int
+rc_attach_snapshot(rc_node_t *np, uint32_t snapid, rc_node_t *parentp)
+{
+ rc_node_t *np_orig;
+ rc_node_t *nnp, *prev;
+ rc_node_t *pp;
+ int rc;
+
+ if (parentp != NULL)
+ assert(snapid == 0);
+
+ assert(MUTEX_HELD(&np->rn_lock));
+
+ if ((rc = rc_node_modify_permission_check()) != REP_PROTOCOL_SUCCESS) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (rc);
+ }
+
+ np_orig = np;
+ rc_node_hold_locked(np); /* simplifies the remainder */
+
+ /*
+ * get the latest node, holding RC_NODE_IN_TX to keep the rn_former
+ * list from changing.
+ */
+ for (;;) {
+ if (!(np->rn_flags & RC_NODE_OLD)) {
+ if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
+ goto again;
+ }
+ pp = rc_node_hold_parent_flag(np,
+ RC_NODE_CHILDREN_CHANGING);
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+ if (pp == NULL) {
+ goto again;
+ }
+ if (np->rn_flags & RC_NODE_OLD) {
+ rc_node_rele_flag(pp,
+ RC_NODE_CHILDREN_CHANGING);
+ (void) pthread_mutex_unlock(&pp->rn_lock);
+ goto again;
+ }
+ (void) pthread_mutex_unlock(&pp->rn_lock);
+
+ if (!rc_node_hold_flag(np, RC_NODE_IN_TX)) {
+ /*
+ * Can't happen, since we're holding our
+ * parent's CHILDREN_CHANGING flag...
+ */
+ abort();
+ }
+ break; /* everything's ready */
+ }
+again:
+ rc_node_rele_locked(np);
+ np = cache_lookup(&np_orig->rn_id);
+
+ if (np == NULL)
+ return (REP_PROTOCOL_FAIL_DELETED);
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+ }
+
+ if (parentp != NULL) {
+ if (pp != parentp) {
+ rc = REP_PROTOCOL_FAIL_BAD_REQUEST;
+ goto fail;
+ }
+ nnp = NULL;
+ } else {
+ /*
+ * look for a former node with the snapid we need.
+ */
+ if (np->rn_snapshot_id == snapid) {
+ rc_node_rele_flag(np, RC_NODE_IN_TX);
+ rc_node_rele_locked(np);
+
+ (void) pthread_mutex_lock(&pp->rn_lock);
+ rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
+ (void) pthread_mutex_unlock(&pp->rn_lock);
+ return (REP_PROTOCOL_SUCCESS); /* nothing to do */
+ }
+
+ prev = np;
+ while ((nnp = prev->rn_former) != NULL) {
+ if (nnp->rn_snapshot_id == snapid) {
+ rc_node_hold(nnp);
+ break; /* existing node with that id */
+ }
+ prev = nnp;
+ }
+ }
+
+ if (nnp == NULL) {
+ prev = NULL;
+ nnp = rc_node_alloc();
+ if (nnp == NULL) {
+ rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ goto fail;
+ }
+
+ nnp->rn_id = np->rn_id; /* structure assignment */
+ nnp->rn_hash = np->rn_hash;
+ nnp->rn_name = strdup(np->rn_name);
+ nnp->rn_snapshot_id = snapid;
+ nnp->rn_flags = RC_NODE_IN_TX | RC_NODE_USING_PARENT;
+
+ if (nnp->rn_name == NULL) {
+ rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ goto fail;
+ }
+ }
+
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+ rc = object_snapshot_attach(&np->rn_id, &snapid, (parentp != NULL));
+
+ if (parentp != NULL)
+ nnp->rn_snapshot_id = snapid; /* fill in new snapid */
+ else
+ assert(nnp->rn_snapshot_id == snapid);
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+ if (rc != REP_PROTOCOL_SUCCESS)
+ goto fail;
+
+ /*
+ * fix up the former chain
+ */
+ if (prev != NULL) {
+ prev->rn_former = nnp->rn_former;
+ (void) pthread_mutex_lock(&nnp->rn_lock);
+ nnp->rn_flags &= ~RC_NODE_ON_FORMER;
+ nnp->rn_former = NULL;
+ (void) pthread_mutex_unlock(&nnp->rn_lock);
+ }
+ np->rn_flags |= RC_NODE_OLD;
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+ /*
+ * replace np with nnp
+ */
+ rc_node_relink_child(pp, np, nnp);
+
+ rc_node_rele(np);
+
+ return (REP_PROTOCOL_SUCCESS);
+
+fail:
+ rc_node_rele_flag(np, RC_NODE_IN_TX);
+ rc_node_rele_locked(np);
+ (void) pthread_mutex_lock(&pp->rn_lock);
+ rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
+ (void) pthread_mutex_unlock(&pp->rn_lock);
+
+ if (nnp != NULL) {
+ if (prev == NULL)
+ rc_node_destroy(nnp);
+ else
+ rc_node_rele(nnp);
+ }
+
+ return (rc);
+}
+
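As a rough standalone sketch of the generation swap that rc_attach_snapshot()
performs (and that rc_tx_commit() repeats later), the idea is simply that the
replacement node becomes current while the node it replaces stays reachable
through a former pointer. node_t and promote() here are hypothetical, not
part of configd:

	typedef struct node {
		struct node	*former;	/* older generations, newest first */
		int		gen;		/* generation id, illustration only */
	} node_t;

	/*
	 * Make nnp the current generation; the previously current node is
	 * still reachable as nnp->former, much as RC_NODE_OLD nodes hang
	 * off rn_former after rc_node_relink_child().
	 */
	static node_t *
	promote(node_t *current, node_t *nnp)
	{
		nnp->former = current;
		return (nnp);
	}
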
+int
+rc_snapshot_take_new(rc_node_ptr_t *npp, const char *svcname,
+ const char *instname, const char *name, rc_node_ptr_t *outpp)
+{
+ rc_node_t *np;
+ rc_node_t *outp = NULL;
+ int rc;
+
+ rc_node_clear(outpp, 0);
+
+ RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
+ if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
+ }
+
+ rc = rc_check_type_name(REP_PROTOCOL_ENTITY_SNAPSHOT, name);
+ if (rc != REP_PROTOCOL_SUCCESS) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (rc);
+ }
+
+ if (svcname != NULL && (rc =
+ rc_check_type_name(REP_PROTOCOL_ENTITY_SERVICE, svcname)) !=
+ REP_PROTOCOL_SUCCESS) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (rc);
+ }
+
+ if (instname != NULL && (rc =
+ rc_check_type_name(REP_PROTOCOL_ENTITY_INSTANCE, instname)) !=
+ REP_PROTOCOL_SUCCESS) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (rc);
+ }
+
+ if ((rc = rc_node_modify_permission_check()) != REP_PROTOCOL_SUCCESS) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (rc);
+ }
+
+ HOLD_PTR_FLAG_OR_RETURN(np, npp, RC_NODE_CREATING_CHILD);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+ rc = object_snapshot_take_new(np, svcname, instname, name, &outp);
+
+ if (rc == REP_PROTOCOL_SUCCESS) {
+ rc_node_assign(outpp, outp);
+ rc_node_rele(outp);
+ }
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+ rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+ return (rc);
+}
+
+int
+rc_snapshot_take_attach(rc_node_ptr_t *npp, rc_node_ptr_t *outpp)
+{
+ rc_node_t *np, *outp;
+
+ RC_NODE_PTR_GET_CHECK(np, npp);
+ if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE) {
+ return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
+ }
+
+ RC_NODE_PTR_GET_CHECK_AND_LOCK(outp, outpp);
+ if (outp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
+ (void) pthread_mutex_unlock(&outp->rn_lock);
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+ }
+
+ return (rc_attach_snapshot(outp, 0, np)); /* drops outp's lock */
+}
+
+int
+rc_snapshot_attach(rc_node_ptr_t *npp, rc_node_ptr_t *cpp)
+{
+ rc_node_t *np;
+ rc_node_t *cp;
+ uint32_t snapid;
+
+ RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
+ if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+ }
+ snapid = np->rn_snapshot_id;
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+ RC_NODE_PTR_GET_CHECK_AND_LOCK(cp, cpp);
+ if (cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
+ (void) pthread_mutex_unlock(&cp->rn_lock);
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+ }
+
+ return (rc_attach_snapshot(cp, snapid, NULL)); /* drops cp's lock */
+}
+
+/*
+ * Iteration
+ */
+static int
+rc_iter_filter_name(rc_node_t *np, void *s)
+{
+ const char *name = s;
+
+ return (strcmp(np->rn_name, name) == 0);
+}
+
+static int
+rc_iter_filter_type(rc_node_t *np, void *s)
+{
+ const char *type = s;
+
+ return (np->rn_type != NULL && strcmp(np->rn_type, type) == 0);
+}
+
+/*ARGSUSED*/
+static int
+rc_iter_null_filter(rc_node_t *np, void *s)
+{
+ return (1);
+}
+
+/*
+ * Allocate & initialize an rc_node_iter_t structure. Essentially, ensure
+ * np->rn_children is populated and call uu_list_walk_start(np->rn_children).
+ * If successful, leaves a hold on np & increments np->rn_other_refs
+ *
+ * If composed is true, then set up for iteration across the top level of np's
+ * composition chain. If successful, leaves a hold on np and increments
+ * rn_other_refs for the top level of np's composition chain.
+ *
+ * Fails with
+ * _NO_RESOURCES
+ * _INVALID_TYPE
+ * _TYPE_MISMATCH - np cannot carry type children
+ * _DELETED
+ */
+static int
+rc_iter_create(rc_node_iter_t **resp, rc_node_t *np, uint32_t type,
+ rc_iter_filter_func *filter, void *arg, boolean_t composed)
+{
+ rc_node_iter_t *nip;
+ int res;
+
+ assert(*resp == NULL);
+
+ nip = uu_zalloc(sizeof (*nip));
+ if (nip == NULL)
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+
+ /* np is held by the client's rc_node_ptr_t */
+ if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP)
+ composed = 1;
+
+ if (!composed) {
+ (void) pthread_mutex_lock(&np->rn_lock);
+
+ if ((res = rc_node_fill_children(np, type)) !=
+ REP_PROTOCOL_SUCCESS) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ uu_free(nip);
+ return (res);
+ }
+
+ nip->rni_clevel = -1;
+
+ nip->rni_iter = uu_list_walk_start(np->rn_children,
+ UU_WALK_ROBUST);
+ if (nip->rni_iter != NULL) {
+ nip->rni_iter_node = np;
+ rc_node_hold_other(np);
+ } else {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ uu_free(nip);
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+ }
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ } else {
+ rc_node_t *ent;
+
+ if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_SNAPSHOT) {
+ /* rn_cchain isn't valid until children are loaded. */
+ (void) pthread_mutex_lock(&np->rn_lock);
+ res = rc_node_fill_children(np,
+ REP_PROTOCOL_ENTITY_SNAPLEVEL);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ if (res != REP_PROTOCOL_SUCCESS) {
+ uu_free(nip);
+ return (res);
+ }
+
+ /* Check for an empty snapshot. */
+ if (np->rn_cchain[0] == NULL)
+ goto empty;
+ }
+
+ /* Start at the top of the composition chain. */
+ for (nip->rni_clevel = 0; ; ++nip->rni_clevel) {
+ if (nip->rni_clevel >= COMPOSITION_DEPTH) {
+ /* Empty composition chain. */
+empty:
+ nip->rni_clevel = -1;
+ nip->rni_iter = NULL;
+ /* It's ok, iter_next() will return _DONE. */
+ goto out;
+ }
+
+ ent = np->rn_cchain[nip->rni_clevel];
+ assert(ent != NULL);
+
+ if (rc_node_check_and_lock(ent) == REP_PROTOCOL_SUCCESS)
+ break;
+
+ /* Someone deleted it, so try the next one. */
+ }
+
+ res = rc_node_fill_children(ent, type);
+
+ if (res == REP_PROTOCOL_SUCCESS) {
+ nip->rni_iter = uu_list_walk_start(ent->rn_children,
+ UU_WALK_ROBUST);
+
+ if (nip->rni_iter == NULL)
+ res = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ else {
+ nip->rni_iter_node = ent;
+ rc_node_hold_other(ent);
+ }
+ }
+
+ if (res != REP_PROTOCOL_SUCCESS) {
+ (void) pthread_mutex_unlock(&ent->rn_lock);
+ uu_free(nip);
+ return (res);
+ }
+
+ (void) pthread_mutex_unlock(&ent->rn_lock);
+ }
+
+out:
+ rc_node_hold(np); /* released by rc_iter_end() */
+ nip->rni_parent = np;
+ nip->rni_type = type;
+ nip->rni_filter = (filter != NULL)? filter : rc_iter_null_filter;
+ nip->rni_filter_arg = arg;
+ *resp = nip;
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+static void
+rc_iter_end(rc_node_iter_t *iter)
+{
+ rc_node_t *np = iter->rni_parent;
+
+ if (iter->rni_clevel >= 0)
+ np = np->rn_cchain[iter->rni_clevel];
+
+ assert(MUTEX_HELD(&np->rn_lock));
+ if (iter->rni_iter != NULL)
+ uu_list_walk_end(iter->rni_iter);
+ iter->rni_iter = NULL;
+
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ rc_node_rele(iter->rni_parent);
+ if (iter->rni_iter_node != NULL)
+ rc_node_rele_other(iter->rni_iter_node);
+}
+
+/*
+ * Fails with
+ * _NOT_SET - npp is reset
+ * _DELETED - npp's node has been deleted
+ * _NOT_APPLICABLE - npp's node is not a property
+ * _NO_RESOURCES - out of memory
+ */
+static int
+rc_node_setup_value_iter(rc_node_ptr_t *npp, rc_node_iter_t **iterp)
+{
+ rc_node_t *np;
+
+ rc_node_iter_t *nip;
+
+ assert(*iterp == NULL);
+
+ RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
+
+ if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
+ }
+
+ nip = uu_zalloc(sizeof (*nip));
+ if (nip == NULL) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+ }
+
+ nip->rni_parent = np;
+ nip->rni_iter = NULL;
+ nip->rni_clevel = -1;
+ nip->rni_type = REP_PROTOCOL_ENTITY_VALUE;
+ nip->rni_offset = 0;
+ nip->rni_last_offset = 0;
+
+ rc_node_hold_locked(np);
+
+ *iterp = nip;
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+/*
+ * Returns:
+ * _NOT_SET - npp is reset
+ * _DELETED - npp's node has been deleted
+ * _TYPE_MISMATCH - npp's node is not a property
+ * _NOT_FOUND - property has no values
+ * _TRUNCATED - property has >1 values (first is written into out)
+ * _SUCCESS - property has 1 value (which is written into out)
+ *
+ * We shorten *sz_out to not include anything after the final '\0'.
+ */
+int
+rc_node_get_property_value(rc_node_ptr_t *npp,
+ struct rep_protocol_value_response *out, size_t *sz_out)
+{
+ rc_node_t *np;
+ size_t w;
+ int ret;
+
+ assert(*sz_out == sizeof (*out));
+
+ RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
+
+ if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
+ }
+
+ if (np->rn_values_size == 0) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (REP_PROTOCOL_FAIL_NOT_FOUND);
+ }
+ out->rpr_type = np->rn_valtype;
+ w = strlcpy(out->rpr_value, &np->rn_values[0],
+ sizeof (out->rpr_value));
+
+ if (w >= sizeof (out->rpr_value))
+ backend_panic("value too large");
+
+ *sz_out = offsetof(struct rep_protocol_value_response,
+ rpr_value[w + 1]);
+
+ ret = (np->rn_values_count != 1)? REP_PROTOCOL_FAIL_TRUNCATED :
+ REP_PROTOCOL_SUCCESS;
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (ret);
+}
+
+int
+rc_iter_next_value(rc_node_iter_t *iter,
+ struct rep_protocol_value_response *out, size_t *sz_out, int repeat)
+{
+ rc_node_t *np = iter->rni_parent;
+ const char *vals;
+ size_t len;
+
+ size_t start;
+ size_t w;
+
+ rep_protocol_responseid_t result;
+
+ assert(*sz_out == sizeof (*out));
+
+ (void) memset(out, '\0', *sz_out);
+
+ if (iter->rni_type != REP_PROTOCOL_ENTITY_VALUE)
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ RC_NODE_CHECK_AND_LOCK(np);
+
+ vals = np->rn_values;
+ len = np->rn_values_size;
+
+ out->rpr_type = np->rn_valtype;
+
+ start = (repeat)? iter->rni_last_offset : iter->rni_offset;
+
+ if (len == 0 || start >= len) {
+ result = REP_PROTOCOL_DONE;
+ *sz_out -= sizeof (out->rpr_value);
+ } else {
+ w = strlcpy(out->rpr_value, &vals[start],
+ sizeof (out->rpr_value));
+
+ if (w >= sizeof (out->rpr_value))
+ backend_panic("value too large");
+
+ *sz_out = offsetof(struct rep_protocol_value_response,
+ rpr_value[w + 1]);
+
+ /*
+ * update the offsets if we're not repeating
+ */
+ if (!repeat) {
+ iter->rni_last_offset = iter->rni_offset;
+ iter->rni_offset += (w + 1);
+ }
+
+ result = REP_PROTOCOL_SUCCESS;
+ }
+
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (result);
+}
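
rc_iter_next_value() above, like rc_node_get_property_value(), treats
rn_values as one packed buffer of NUL-terminated strings, rn_values_size
bytes long, and steps through it by adding strlen + 1 to an offset. A hedged
sketch of walking such a buffer, independent of configd:

	#include <stdio.h>
	#include <string.h>

	/* Print each value in a packed buffer of NUL-terminated strings. */
	static void
	walk_values(const char *vals, size_t len)
	{
		size_t off = 0;

		while (off < len) {
			size_t w = strlen(&vals[off]);

			(void) printf("%s\n", &vals[off]);
			off += w + 1;	/* skip the value and its NUL */
		}
	}

	/* walk_values("true\0false\0", 11) prints "true", then "false". */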
+
+/*
+ * Entry point for ITER_START from client.c. Validate the arguments & call
+ * rc_iter_create().
+ *
+ * Fails with
+ * _NOT_SET
+ * _DELETED
+ * _TYPE_MISMATCH - np cannot carry type children
+ * _BAD_REQUEST - flags is invalid
+ * pattern is invalid
+ * _NO_RESOURCES
+ * _INVALID_TYPE
+ * _TYPE_MISMATCH - *npp cannot have children of type
+ * _BACKEND_ACCESS
+ */
+int
+rc_node_setup_iter(rc_node_ptr_t *npp, rc_node_iter_t **iterp,
+ uint32_t type, uint32_t flags, const char *pattern)
+{
+ rc_node_t *np;
+ rc_iter_filter_func *f = NULL;
+ int rc;
+
+ RC_NODE_PTR_GET_CHECK(np, npp);
+
+ if (pattern != NULL && pattern[0] == '\0')
+ pattern = NULL;
+
+ if (type == REP_PROTOCOL_ENTITY_VALUE) {
+ if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
+ return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
+ if (flags != RP_ITER_START_ALL || pattern != NULL)
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ rc = rc_node_setup_value_iter(npp, iterp);
+ assert(rc != REP_PROTOCOL_FAIL_NOT_APPLICABLE);
+ return (rc);
+ }
+
+ if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
+ REP_PROTOCOL_SUCCESS)
+ return (rc);
+
+ if (((flags & RP_ITER_START_FILT_MASK) == RP_ITER_START_ALL) ^
+ (pattern == NULL))
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ /* Composition only works for instances & snapshots. */
+ if ((flags & RP_ITER_START_COMPOSED) &&
+ (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE &&
+ np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT))
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ if (pattern != NULL) {
+ if ((rc = rc_check_type_name(type, pattern)) !=
+ REP_PROTOCOL_SUCCESS)
+ return (rc);
+ pattern = strdup(pattern);
+ if (pattern == NULL)
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+ }
+
+ switch (flags & RP_ITER_START_FILT_MASK) {
+ case RP_ITER_START_ALL:
+ f = NULL;
+ break;
+ case RP_ITER_START_EXACT:
+ f = rc_iter_filter_name;
+ break;
+ case RP_ITER_START_PGTYPE:
+ if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
+ free((void *)pattern);
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+ }
+ f = rc_iter_filter_type;
+ break;
+ default:
+ free((void *)pattern);
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+ }
+
+ rc = rc_iter_create(iterp, np, type, f, (void *)pattern,
+ flags & RP_ITER_START_COMPOSED);
+ if (rc != REP_PROTOCOL_SUCCESS && pattern != NULL)
+ free((void *)pattern);
+
+ return (rc);
+}
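
The XOR test in rc_node_setup_iter() above just says that RP_ITER_START_ALL
must be used exactly when no pattern is supplied. A hedged restatement with
hypothetical names (is_all stands in for the masked flags comparison):

	#include <stddef.h>

	/* Valid iff "iterate everything" and "a pattern was given" disagree. */
	static int
	iter_args_ok(int is_all, const char *pattern)
	{
		return (!is_all == (pattern != NULL));
	}

	/*
	 * iter_args_ok(1, NULL) and iter_args_ok(0, "general") hold;
	 * iter_args_ok(1, "general") and iter_args_ok(0, NULL) do not.
	 */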
+
+/*
+ * Do uu_list_walk_next(iter->rni_iter) until we find a child which matches
+ * the filter.
+ * For composed iterators, also check whether there's an overlapping entity
+ * (see embedded comments). If we reach the end of the list, start over at
+ * the next level.
+ *
+ * Returns
+ * _BAD_REQUEST - iter walks values
+ * _TYPE_MISMATCH - iter does not walk type entities
+ * _DELETED - parent was deleted
+ * _NO_RESOURCES
+ * _INVALID_TYPE - type is invalid
+ * _DONE
+ * _SUCCESS
+ *
+ * For composed property group iterators, can also return
+ * _TYPE_MISMATCH - parent cannot have type children
+ */
+int
+rc_iter_next(rc_node_iter_t *iter, rc_node_ptr_t *out, uint32_t type)
+{
+ rc_node_t *np = iter->rni_parent;
+ rc_node_t *res;
+ int rc;
+
+ if (iter->rni_type == REP_PROTOCOL_ENTITY_VALUE)
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+
+ if (iter->rni_iter == NULL) {
+ rc_node_clear(out, 0);
+ return (REP_PROTOCOL_DONE);
+ }
+
+ if (iter->rni_type != type) {
+ rc_node_clear(out, 0);
+ return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
+ }
+
+ (void) pthread_mutex_lock(&np->rn_lock); /* held by _iter_create() */
+
+ if (!rc_node_wait_flag(np, RC_NODE_CHILDREN_CHANGING)) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ rc_node_clear(out, 1);
+ return (REP_PROTOCOL_FAIL_DELETED);
+ }
+
+ if (iter->rni_clevel >= 0) {
+ /* Composed iterator. Iterate over appropriate level. */
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ np = np->rn_cchain[iter->rni_clevel];
+ /*
+ * If iter->rni_parent is an instance or a snapshot, np must
+ * be valid since iter holds iter->rni_parent & possible
+ * levels (service, instance, snaplevel) cannot be destroyed
+ * while rni_parent is held. If iter->rni_parent is
+ * a composed property group then rc_node_setup_cpg() put
+ * a hold on np.
+ */
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+
+ if (!rc_node_wait_flag(np, RC_NODE_CHILDREN_CHANGING)) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ rc_node_clear(out, 1);
+ return (REP_PROTOCOL_FAIL_DELETED);
+ }
+ }
+
+ assert(np->rn_flags & RC_NODE_HAS_CHILDREN);
+
+ for (;;) {
+ res = uu_list_walk_next(iter->rni_iter);
+ if (res == NULL) {
+ rc_node_t *parent = iter->rni_parent;
+
+#if COMPOSITION_DEPTH == 2
+ if (iter->rni_clevel < 0 || iter->rni_clevel == 1) {
+ /* release walker and lock */
+ rc_iter_end(iter);
+ break;
+ }
+
+ /* Stop walking current level. */
+ uu_list_walk_end(iter->rni_iter);
+ iter->rni_iter = NULL;
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ rc_node_rele_other(iter->rni_iter_node);
+ iter->rni_iter_node = NULL;
+
+ /* Start walking next level. */
+ ++iter->rni_clevel;
+ np = parent->rn_cchain[iter->rni_clevel];
+ assert(np != NULL);
+#else
+#error This code must be updated.
+#endif
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+
+ rc = rc_node_fill_children(np, iter->rni_type);
+
+ if (rc == REP_PROTOCOL_SUCCESS) {
+ iter->rni_iter =
+ uu_list_walk_start(np->rn_children,
+ UU_WALK_ROBUST);
+
+ if (iter->rni_iter == NULL)
+ rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ else {
+ iter->rni_iter_node = np;
+ rc_node_hold_other(np);
+ }
+ }
+
+ if (rc != REP_PROTOCOL_SUCCESS) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ rc_node_clear(out, 0);
+ return (rc);
+ }
+
+ continue;
+ }
+
+ if (res->rn_id.rl_type != type ||
+ !iter->rni_filter(res, iter->rni_filter_arg))
+ continue;
+
+ /*
+ * If we're composed and not at the top level, check to see if
+ * there's an entity at a higher level with the same name. If
+ * so, skip this one.
+ */
+ if (iter->rni_clevel > 0) {
+ rc_node_t *ent = iter->rni_parent->rn_cchain[0];
+ rc_node_t *pg;
+
+#if COMPOSITION_DEPTH == 2
+ assert(iter->rni_clevel == 1);
+
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ (void) pthread_mutex_lock(&ent->rn_lock);
+ rc = rc_node_find_named_child(ent, res->rn_name, type,
+ &pg);
+ if (rc == REP_PROTOCOL_SUCCESS && pg != NULL)
+ rc_node_rele(pg);
+ (void) pthread_mutex_unlock(&ent->rn_lock);
+ if (rc != REP_PROTOCOL_SUCCESS) {
+ rc_node_clear(out, 0);
+ return (rc);
+ }
+ (void) pthread_mutex_lock(&np->rn_lock);
+
+ /* Make sure np isn't being deleted all of a sudden. */
+ if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ rc_node_clear(out, 1);
+ return (REP_PROTOCOL_FAIL_DELETED);
+ }
+
+ if (pg != NULL)
+ /* Keep going. */
+ continue;
+#else
+#error This code must be updated.
+#endif
+ }
+
+ /*
+ * If we're composed, iterating over property groups, and not
+		 * at the bottom level, check to see if there's a pg at a
+		 * lower level with the same name. If so, return a cpg.
+ */
+ if (iter->rni_clevel >= 0 &&
+ type == REP_PROTOCOL_ENTITY_PROPERTYGRP &&
+ iter->rni_clevel < COMPOSITION_DEPTH - 1) {
+#if COMPOSITION_DEPTH == 2
+ rc_node_t *pg;
+ rc_node_t *ent = iter->rni_parent->rn_cchain[1];
+
+ rc_node_hold(res); /* While we drop np->rn_lock */
+
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ (void) pthread_mutex_lock(&ent->rn_lock);
+ rc = rc_node_find_named_child(ent, res->rn_name, type,
+ &pg);
+ /* holds pg if not NULL */
+ (void) pthread_mutex_unlock(&ent->rn_lock);
+ if (rc != REP_PROTOCOL_SUCCESS) {
+ rc_node_rele(res);
+ rc_node_clear(out, 0);
+ return (rc);
+ }
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+ if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ rc_node_rele(res);
+ if (pg != NULL)
+ rc_node_rele(pg);
+ rc_node_clear(out, 1);
+ return (REP_PROTOCOL_FAIL_DELETED);
+ }
+
+ if (pg == NULL) {
+ rc_node_rele(res);
+ } else {
+ rc_node_t *cpg;
+
+ /* Keep res held for rc_node_setup_cpg(). */
+
+ cpg = rc_node_alloc();
+ if (cpg == NULL) {
+ (void) pthread_mutex_unlock(
+ &np->rn_lock);
+ rc_node_rele(res);
+ rc_node_rele(pg);
+ rc_node_clear(out, 0);
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+ }
+
+ switch (rc_node_setup_cpg(cpg, res, pg)) {
+ case REP_PROTOCOL_SUCCESS:
+ res = cpg;
+ break;
+
+ case REP_PROTOCOL_FAIL_TYPE_MISMATCH:
+ /* Nevermind. */
+ rc_node_destroy(cpg);
+ rc_node_rele(pg);
+ rc_node_rele(res);
+ break;
+
+ case REP_PROTOCOL_FAIL_NO_RESOURCES:
+ rc_node_destroy(cpg);
+ (void) pthread_mutex_unlock(
+ &np->rn_lock);
+ rc_node_rele(res);
+ rc_node_rele(pg);
+ rc_node_clear(out, 0);
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+
+ default:
+ assert(0);
+ abort();
+ }
+ }
+#else
+#error This code must be updated.
+#endif
+ }
+
+ rc_node_hold(res);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ break;
+ }
+ rc_node_assign(out, res);
+
+ if (res == NULL)
+ return (REP_PROTOCOL_DONE);
+ rc_node_rele(res);
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+void
+rc_iter_destroy(rc_node_iter_t **nipp)
+{
+ rc_node_iter_t *nip = *nipp;
+ rc_node_t *np;
+
+ if (nip == NULL)
+ return; /* already freed */
+
+ np = nip->rni_parent;
+
+ if (nip->rni_filter_arg != NULL)
+ free(nip->rni_filter_arg);
+ nip->rni_filter_arg = NULL;
+
+ if (nip->rni_type == REP_PROTOCOL_ENTITY_VALUE ||
+ nip->rni_iter != NULL) {
+ if (nip->rni_clevel < 0)
+ (void) pthread_mutex_lock(&np->rn_lock);
+ else
+ (void) pthread_mutex_lock(
+ &np->rn_cchain[nip->rni_clevel]->rn_lock);
+ rc_iter_end(nip); /* release walker and lock */
+ }
+ nip->rni_parent = NULL;
+
+ uu_free(nip);
+ *nipp = NULL;
+}
+
+int
+rc_node_setup_tx(rc_node_ptr_t *npp, rc_node_ptr_t *txp)
+{
+ rc_node_t *np;
+ permcheck_t *pcp;
+ int ret;
+ int authorized = 0;
+
+ RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
+
+ if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
+ rc_node_rele(np);
+ np = np->rn_cchain[0];
+ RC_NODE_CHECK_AND_HOLD(np);
+ }
+
+ if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
+ rc_node_rele(np);
+ return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
+ }
+
+ if (np->rn_id.rl_ids[ID_SNAPSHOT] != 0) {
+ rc_node_rele(np);
+ return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
+ }
+
+ if (client_is_privileged())
+ goto skip_checks;
+
+#ifdef NATIVE_BUILD
+ rc_node_rele(np);
+ return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
+#else
+ /* permission check */
+ pcp = pc_create();
+ if (pcp == NULL) {
+ rc_node_rele(np);
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+ }
+
+ if (np->rn_id.rl_ids[ID_INSTANCE] != 0 && /* instance pg */
+ ((strcmp(np->rn_name, AUTH_PG_ACTIONS) == 0 &&
+ strcmp(np->rn_type, AUTH_PG_ACTIONS_TYPE) == 0) ||
+ (strcmp(np->rn_name, AUTH_PG_GENERAL_OVR) == 0 &&
+ strcmp(np->rn_type, AUTH_PG_GENERAL_OVR_TYPE) == 0))) {
+ rc_node_t *instn;
+
+ /* solaris.smf.manage can be used. */
+ ret = perm_add_enabling(pcp, AUTH_MANAGE);
+
+ if (ret != REP_PROTOCOL_SUCCESS) {
+ pc_free(pcp);
+ rc_node_rele(np);
+ return (ret);
+ }
+
+ /* general/action_authorization values can be used. */
+ ret = rc_node_parent(np, &instn);
+ if (ret != REP_PROTOCOL_SUCCESS) {
+ assert(ret == REP_PROTOCOL_FAIL_DELETED);
+ rc_node_rele(np);
+ pc_free(pcp);
+ return (REP_PROTOCOL_FAIL_DELETED);
+ }
+
+ assert(instn->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE);
+
+ ret = perm_add_inst_action_auth(pcp, instn);
+ rc_node_rele(instn);
+ switch (ret) {
+ case REP_PROTOCOL_SUCCESS:
+ break;
+
+ case REP_PROTOCOL_FAIL_DELETED:
+ case REP_PROTOCOL_FAIL_NO_RESOURCES:
+ rc_node_rele(np);
+ pc_free(pcp);
+ return (ret);
+
+ default:
+ bad_error("perm_add_inst_action_auth", ret);
+ }
+
+ if (strcmp(np->rn_name, AUTH_PG_ACTIONS) == 0)
+ authorized = 1; /* Don't check on commit. */
+ } else {
+ ret = perm_add_enabling(pcp, AUTH_MODIFY);
+
+ if (ret == REP_PROTOCOL_SUCCESS) {
+ /* propertygroup-type-specific authorization */
+ /* no locking because rn_type won't change anyway */
+ const char * const auth =
+ perm_auth_for_pgtype(np->rn_type);
+
+ if (auth != NULL)
+ ret = perm_add_enabling(pcp, auth);
+ }
+
+ if (ret == REP_PROTOCOL_SUCCESS)
+ /* propertygroup/transaction-type-specific auths */
+ ret =
+ perm_add_enabling_values(pcp, np, AUTH_PROP_VALUE);
+
+ if (ret == REP_PROTOCOL_SUCCESS)
+ ret =
+ perm_add_enabling_values(pcp, np, AUTH_PROP_MODIFY);
+
+ /* AUTH_MANAGE can manipulate general/AUTH_PROP_ACTION */
+ if (ret == REP_PROTOCOL_SUCCESS &&
+ strcmp(np->rn_name, AUTH_PG_GENERAL) == 0 &&
+ strcmp(np->rn_type, AUTH_PG_GENERAL_TYPE) == 0)
+ ret = perm_add_enabling(pcp, AUTH_MANAGE);
+
+ if (ret != REP_PROTOCOL_SUCCESS) {
+ pc_free(pcp);
+ rc_node_rele(np);
+ return (ret);
+ }
+ }
+
+ ret = perm_granted(pcp);
+ if (ret != 1) {
+ pc_free(pcp);
+ rc_node_rele(np);
+ return (ret == 0 ? REP_PROTOCOL_FAIL_PERMISSION_DENIED :
+ REP_PROTOCOL_FAIL_NO_RESOURCES);
+ }
+
+ pc_free(pcp);
+#endif /* NATIVE_BUILD */
+
+skip_checks:
+ rc_node_assign(txp, np);
+ txp->rnp_authorized = authorized;
+
+ rc_node_rele(np);
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+/*
+ * Return 1 if the given transaction commands only modify the values of
+ * properties other than "modify_authorization". Return -1 if any of the
+ * commands are invalid, and 0 otherwise.
+ */
+static int
+tx_allow_value(const void *cmds_arg, size_t cmds_sz, rc_node_t *pg)
+{
+ const struct rep_protocol_transaction_cmd *cmds;
+ uintptr_t loc;
+ uint32_t sz;
+ rc_node_t *prop;
+ boolean_t ok;
+
+ assert(!MUTEX_HELD(&pg->rn_lock));
+
+ loc = (uintptr_t)cmds_arg;
+
+ while (cmds_sz > 0) {
+ cmds = (struct rep_protocol_transaction_cmd *)loc;
+
+ if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
+ return (-1);
+
+ sz = cmds->rptc_size;
+ if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
+ return (-1);
+
+ sz = TX_SIZE(sz);
+ if (sz > cmds_sz)
+ return (-1);
+
+ switch (cmds[0].rptc_action) {
+ case REP_PROTOCOL_TX_ENTRY_CLEAR:
+ break;
+
+ case REP_PROTOCOL_TX_ENTRY_REPLACE:
+ /* Check type */
+ (void) pthread_mutex_lock(&pg->rn_lock);
+ if (rc_node_find_named_child(pg,
+ (const char *)cmds[0].rptc_data,
+ REP_PROTOCOL_ENTITY_PROPERTY, &prop) ==
+ REP_PROTOCOL_SUCCESS) {
+ ok = (prop != NULL &&
+ prop->rn_valtype == cmds[0].rptc_type);
+ } else {
+ /* Return more particular error? */
+ ok = B_FALSE;
+ }
+ (void) pthread_mutex_unlock(&pg->rn_lock);
+ if (ok)
+ break;
+ return (0);
+
+ default:
+ return (0);
+ }
+
+ if (strcmp((const char *)cmds[0].rptc_data, AUTH_PROP_MODIFY)
+ == 0)
+ return (0);
+
+ loc += sz;
+ cmds_sz -= sz;
+ }
+
+ return (1);
+}
+
+/*
+ * Return 1 if any of the given transaction commands affect
+ * "action_authorization". Return -1 if any of the commands are invalid and
+ * 0 in all other cases.
+ */
+static int
+tx_modifies_action(const void *cmds_arg, size_t cmds_sz)
+{
+ const struct rep_protocol_transaction_cmd *cmds;
+ uintptr_t loc;
+ uint32_t sz;
+
+ loc = (uintptr_t)cmds_arg;
+
+ while (cmds_sz > 0) {
+ cmds = (struct rep_protocol_transaction_cmd *)loc;
+
+ if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
+ return (-1);
+
+ sz = cmds->rptc_size;
+ if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
+ return (-1);
+
+ sz = TX_SIZE(sz);
+ if (sz > cmds_sz)
+ return (-1);
+
+ if (strcmp((const char *)cmds[0].rptc_data, AUTH_PROP_ACTION)
+ == 0)
+ return (1);
+
+ loc += sz;
+ cmds_sz -= sz;
+ }
+
+ return (0);
+}
+
+/*
+ * Returns 1 if the transaction commands only modify properties named
+ * 'enabled', -1 if any of the commands are invalid, and 0 otherwise.
+ */
+static int
+tx_only_enabled(const void *cmds_arg, size_t cmds_sz)
+{
+ const struct rep_protocol_transaction_cmd *cmd;
+ uintptr_t loc;
+ uint32_t sz;
+
+ loc = (uintptr_t)cmds_arg;
+
+ while (cmds_sz > 0) {
+ cmd = (struct rep_protocol_transaction_cmd *)loc;
+
+ if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
+ return (-1);
+
+ sz = cmd->rptc_size;
+ if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
+ return (-1);
+
+ sz = TX_SIZE(sz);
+ if (sz > cmds_sz)
+ return (-1);
+
+ if (strcmp((const char *)cmd->rptc_data, AUTH_PROP_ENABLED)
+ != 0)
+ return (0);
+
+ loc += sz;
+ cmds_sz -= sz;
+ }
+
+ return (1);
+}
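
tx_allow_value(), tx_modifies_action() and tx_only_enabled() above all share
one walking loop: read a command header, validate its size, act on the
command, then advance by the aligned size until the buffer is consumed. A
hedged standalone skeleton of just that loop, where hdr_t and ALIGN8() are
hypothetical stand-ins for rep_protocol_transaction_cmd and TX_SIZE():

	#include <stdint.h>
	#include <stddef.h>

	typedef struct hdr {
		uint32_t	size;		/* total bytes in this command */
	} hdr_t;

	#define	ALIGN8(s)	(((s) + 7) & ~(uint32_t)7)

	/* Count the commands in buf; return -1 if the buffer is malformed. */
	static int
	walk_cmds(const void *buf, size_t len)
	{
		uintptr_t loc = (uintptr_t)buf;
		int count = 0;

		while (len > 0) {
			const hdr_t *h = (const hdr_t *)loc;
			uint32_t sz;

			if (len < sizeof (hdr_t))
				return (-1);	/* truncated header */
			sz = ALIGN8(h->size);
			if (sz < sizeof (hdr_t) || sz > len)
				return (-1);	/* bad size or overrun */

			count++;		/* a real caller acts on h here */
			loc += sz;
			len -= sz;
		}
		return (count);
	}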
+
+int
+rc_tx_commit(rc_node_ptr_t *txp, const void *cmds, size_t cmds_sz)
+{
+ rc_node_t *np = txp->rnp_node;
+ rc_node_t *pp;
+ rc_node_t *nnp;
+ rc_node_pg_notify_t *pnp;
+ int rc;
+ permcheck_t *pcp;
+ int granted, normal;
+
+ RC_NODE_CHECK(np);
+
+ if (!client_is_privileged() && !txp->rnp_authorized) {
+#ifdef NATIVE_BUILD
+ return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
+#else
+ /* permission check: depends on contents of transaction */
+ pcp = pc_create();
+ if (pcp == NULL)
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+
+ /* If normal is cleared, we won't do the normal checks. */
+ normal = 1;
+ rc = REP_PROTOCOL_SUCCESS;
+
+ if (strcmp(np->rn_name, AUTH_PG_GENERAL) == 0 &&
+ strcmp(np->rn_type, AUTH_PG_GENERAL_TYPE) == 0) {
+ /* Touching general[framework]/action_authorization? */
+ rc = tx_modifies_action(cmds, cmds_sz);
+ if (rc == -1) {
+ pc_free(pcp);
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+ }
+
+ if (rc) {
+ /* Yes: only AUTH_MANAGE can be used. */
+ rc = perm_add_enabling(pcp, AUTH_MANAGE);
+ normal = 0;
+ } else {
+ rc = REP_PROTOCOL_SUCCESS;
+ }
+ } else if (np->rn_id.rl_ids[ID_INSTANCE] != 0 &&
+ strcmp(np->rn_name, AUTH_PG_GENERAL_OVR) == 0 &&
+ strcmp(np->rn_type, AUTH_PG_GENERAL_OVR_TYPE) == 0) {
+ rc_node_t *instn;
+
+ rc = tx_only_enabled(cmds, cmds_sz);
+ if (rc == -1) {
+ pc_free(pcp);
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+ }
+
+ if (rc) {
+ rc = rc_node_parent(np, &instn);
+ if (rc != REP_PROTOCOL_SUCCESS) {
+ assert(rc == REP_PROTOCOL_FAIL_DELETED);
+ pc_free(pcp);
+ return (rc);
+ }
+
+ assert(instn->rn_id.rl_type ==
+ REP_PROTOCOL_ENTITY_INSTANCE);
+
+ rc = perm_add_inst_action_auth(pcp, instn);
+ rc_node_rele(instn);
+ switch (rc) {
+ case REP_PROTOCOL_SUCCESS:
+ break;
+
+ case REP_PROTOCOL_FAIL_DELETED:
+ case REP_PROTOCOL_FAIL_NO_RESOURCES:
+ pc_free(pcp);
+ return (rc);
+
+ default:
+ bad_error("perm_add_inst_action_auth",
+ rc);
+ }
+ } else {
+ rc = REP_PROTOCOL_SUCCESS;
+ }
+ }
+
+ if (rc == REP_PROTOCOL_SUCCESS && normal) {
+ rc = perm_add_enabling(pcp, AUTH_MODIFY);
+
+ if (rc == REP_PROTOCOL_SUCCESS) {
+ /* Add pgtype-specific authorization. */
+ const char * const auth =
+ perm_auth_for_pgtype(np->rn_type);
+
+ if (auth != NULL)
+ rc = perm_add_enabling(pcp, auth);
+ }
+
+ /* Add pg-specific modify_authorization auths. */
+ if (rc == REP_PROTOCOL_SUCCESS)
+ rc = perm_add_enabling_values(pcp, np,
+ AUTH_PROP_MODIFY);
+
+ /* If value_authorization values are ok, add them. */
+ if (rc == REP_PROTOCOL_SUCCESS) {
+ rc = tx_allow_value(cmds, cmds_sz, np);
+ if (rc == -1)
+ rc = REP_PROTOCOL_FAIL_BAD_REQUEST;
+ else if (rc)
+ rc = perm_add_enabling_values(pcp, np,
+ AUTH_PROP_VALUE);
+ }
+ }
+
+ if (rc == REP_PROTOCOL_SUCCESS) {
+ granted = perm_granted(pcp);
+ if (granted < 0)
+ rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
+ }
+
+ pc_free(pcp);
+
+ if (rc != REP_PROTOCOL_SUCCESS)
+ return (rc);
+
+ if (!granted)
+ return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
+#endif /* NATIVE_BUILD */
+ }
+
+ nnp = rc_node_alloc();
+ if (nnp == NULL)
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+
+ nnp->rn_id = np->rn_id; /* structure assignment */
+ nnp->rn_hash = np->rn_hash;
+ nnp->rn_name = strdup(np->rn_name);
+ nnp->rn_type = strdup(np->rn_type);
+ nnp->rn_pgflags = np->rn_pgflags;
+
+ nnp->rn_flags = RC_NODE_IN_TX | RC_NODE_USING_PARENT;
+
+ if (nnp->rn_name == NULL || nnp->rn_type == NULL) {
+ rc_node_destroy(nnp);
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+ }
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+ /*
+ * We must have all of the old properties in the cache, or the
+ * database deletions could cause inconsistencies.
+ */
+ if ((rc = rc_node_fill_children(np, REP_PROTOCOL_ENTITY_PROPERTY)) !=
+ REP_PROTOCOL_SUCCESS) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ rc_node_destroy(nnp);
+ return (rc);
+ }
+
+ if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ rc_node_destroy(nnp);
+ return (REP_PROTOCOL_FAIL_DELETED);
+ }
+
+ if (np->rn_flags & RC_NODE_OLD) {
+ rc_node_rele_flag(np, RC_NODE_USING_PARENT);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ rc_node_destroy(nnp);
+ return (REP_PROTOCOL_FAIL_NOT_LATEST);
+ }
+
+ pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
+ if (pp == NULL) {
+ /* our parent is gone, we're going next... */
+ rc_node_destroy(nnp);
+ (void) pthread_mutex_lock(&np->rn_lock);
+ if (np->rn_flags & RC_NODE_OLD) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (REP_PROTOCOL_FAIL_NOT_LATEST);
+ }
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (REP_PROTOCOL_FAIL_DELETED);
+ }
+ (void) pthread_mutex_unlock(&pp->rn_lock);
+
+ /*
+ * prepare for the transaction
+ */
+ (void) pthread_mutex_lock(&np->rn_lock);
+ if (!rc_node_hold_flag(np, RC_NODE_IN_TX)) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ (void) pthread_mutex_lock(&pp->rn_lock);
+ rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
+ (void) pthread_mutex_unlock(&pp->rn_lock);
+ rc_node_destroy(nnp);
+ return (REP_PROTOCOL_FAIL_DELETED);
+ }
+ nnp->rn_gen_id = np->rn_gen_id;
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+ /* Sets nnp->rn_gen_id on success. */
+ rc = object_tx_commit(&np->rn_id, cmds, cmds_sz, &nnp->rn_gen_id);
+
+ (void) pthread_mutex_lock(&np->rn_lock);
+ if (rc != REP_PROTOCOL_SUCCESS) {
+ rc_node_rele_flag(np, RC_NODE_IN_TX);
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ (void) pthread_mutex_lock(&pp->rn_lock);
+ rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
+ (void) pthread_mutex_unlock(&pp->rn_lock);
+ rc_node_destroy(nnp);
+ rc_node_clear(txp, 0);
+ if (rc == REP_PROTOCOL_DONE)
+ rc = REP_PROTOCOL_SUCCESS; /* successful empty tx */
+ return (rc);
+ }
+
+ /*
+ * Notify waiters
+ */
+ (void) pthread_mutex_lock(&rc_pg_notify_lock);
+ while ((pnp = uu_list_first(np->rn_pg_notify_list)) != NULL)
+ rc_pg_notify_fire(pnp);
+ (void) pthread_mutex_unlock(&rc_pg_notify_lock);
+
+ np->rn_flags |= RC_NODE_OLD;
+ (void) pthread_mutex_unlock(&np->rn_lock);
+
+ rc_notify_remove_node(np);
+
+ /*
+ * replace np with nnp
+ */
+ rc_node_relink_child(pp, np, nnp);
+
+ /*
+ * all done -- clear the transaction.
+ */
+ rc_node_clear(txp, 0);
+
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+void
+rc_pg_notify_init(rc_node_pg_notify_t *pnp)
+{
+ uu_list_node_init(pnp, &pnp->rnpn_node, rc_pg_notify_pool);
+ pnp->rnpn_pg = NULL;
+ pnp->rnpn_fd = -1;
+}
+
+int
+rc_pg_notify_setup(rc_node_pg_notify_t *pnp, rc_node_ptr_t *npp, int fd)
+{
+ rc_node_t *np;
+
+ RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
+
+ if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (REP_PROTOCOL_FAIL_BAD_REQUEST);
+ }
+
+ /*
+ * wait for any transaction in progress to complete
+ */
+ if (!rc_node_wait_flag(np, RC_NODE_IN_TX)) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (REP_PROTOCOL_FAIL_DELETED);
+ }
+
+ if (np->rn_flags & RC_NODE_OLD) {
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (REP_PROTOCOL_FAIL_NOT_LATEST);
+ }
+
+ (void) pthread_mutex_lock(&rc_pg_notify_lock);
+ rc_pg_notify_fire(pnp);
+ pnp->rnpn_pg = np;
+ pnp->rnpn_fd = fd;
+ (void) uu_list_insert_after(np->rn_pg_notify_list, NULL, pnp);
+ (void) pthread_mutex_unlock(&rc_pg_notify_lock);
+
+ (void) pthread_mutex_unlock(&np->rn_lock);
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+void
+rc_pg_notify_fini(rc_node_pg_notify_t *pnp)
+{
+ (void) pthread_mutex_lock(&rc_pg_notify_lock);
+ rc_pg_notify_fire(pnp);
+ (void) pthread_mutex_unlock(&rc_pg_notify_lock);
+
+ uu_list_node_fini(pnp, &pnp->rnpn_node, rc_pg_notify_pool);
+}
+
+void
+rc_notify_info_init(rc_notify_info_t *rnip)
+{
+ int i;
+
+ uu_list_node_init(rnip, &rnip->rni_list_node, rc_notify_info_pool);
+ uu_list_node_init(&rnip->rni_notify, &rnip->rni_notify.rcn_list_node,
+ rc_notify_pool);
+
+ rnip->rni_notify.rcn_node = NULL;
+ rnip->rni_notify.rcn_info = rnip;
+
+ bzero(rnip->rni_namelist, sizeof (rnip->rni_namelist));
+ bzero(rnip->rni_typelist, sizeof (rnip->rni_typelist));
+
+ (void) pthread_cond_init(&rnip->rni_cv, NULL);
+
+ for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
+ rnip->rni_namelist[i] = NULL;
+ rnip->rni_typelist[i] = NULL;
+ }
+}
+
+static void
+rc_notify_info_insert_locked(rc_notify_info_t *rnip)
+{
+ assert(MUTEX_HELD(&rc_pg_notify_lock));
+
+ assert(!(rnip->rni_flags & RC_NOTIFY_ACTIVE));
+
+ rnip->rni_flags |= RC_NOTIFY_ACTIVE;
+ (void) uu_list_insert_after(rc_notify_info_list, NULL, rnip);
+ (void) uu_list_insert_before(rc_notify_list, NULL, &rnip->rni_notify);
+}
+
+static void
+rc_notify_info_remove_locked(rc_notify_info_t *rnip)
+{
+ rc_notify_t *me = &rnip->rni_notify;
+ rc_notify_t *np;
+
+ assert(MUTEX_HELD(&rc_pg_notify_lock));
+
+ assert(rnip->rni_flags & RC_NOTIFY_ACTIVE);
+
+ assert(!(rnip->rni_flags & RC_NOTIFY_DRAIN));
+ rnip->rni_flags |= RC_NOTIFY_DRAIN;
+ (void) pthread_cond_broadcast(&rnip->rni_cv);
+
+ (void) uu_list_remove(rc_notify_info_list, rnip);
+
+ /*
+ * clean up any notifications at the beginning of the list
+ */
+ if (uu_list_first(rc_notify_list) == me) {
+ while ((np = uu_list_next(rc_notify_list, me)) != NULL &&
+ np->rcn_info == NULL)
+ rc_notify_remove_locked(np);
+ }
+ (void) uu_list_remove(rc_notify_list, me);
+
+ while (rnip->rni_waiters) {
+ (void) pthread_cond_broadcast(&rc_pg_notify_cv);
+ (void) pthread_cond_broadcast(&rnip->rni_cv);
+ (void) pthread_cond_wait(&rnip->rni_cv, &rc_pg_notify_lock);
+ }
+
+ rnip->rni_flags &= ~(RC_NOTIFY_DRAIN | RC_NOTIFY_ACTIVE);
+}
+
+static int
+rc_notify_info_add_watch(rc_notify_info_t *rnip, const char **arr,
+ const char *name)
+{
+ int i;
+ int rc;
+ char *f;
+
+ rc = rc_check_type_name(REP_PROTOCOL_ENTITY_PROPERTYGRP, name);
+ if (rc != REP_PROTOCOL_SUCCESS)
+ return (rc);
+
+ f = strdup(name);
+ if (f == NULL)
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+
+ (void) pthread_mutex_lock(&rc_pg_notify_lock);
+
+ while (rnip->rni_flags & RC_NOTIFY_EMPTYING)
+ (void) pthread_cond_wait(&rnip->rni_cv, &rc_pg_notify_lock);
+
+ for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++)
+ if (arr[i] == NULL)
+ break;
+
+ if (i == RC_NOTIFY_MAX_NAMES) {
+ (void) pthread_mutex_unlock(&rc_pg_notify_lock);
+ free(f);
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+ }
+
+ arr[i] = f;
+ if (!(rnip->rni_flags & RC_NOTIFY_ACTIVE))
+ rc_notify_info_insert_locked(rnip);
+
+ (void) pthread_mutex_unlock(&rc_pg_notify_lock);
+ return (REP_PROTOCOL_SUCCESS);
+}
+
+int
+rc_notify_info_add_name(rc_notify_info_t *rnip, const char *name)
+{
+ return (rc_notify_info_add_watch(rnip, rnip->rni_namelist, name));
+}
+
+int
+rc_notify_info_add_type(rc_notify_info_t *rnip, const char *type)
+{
+ return (rc_notify_info_add_watch(rnip, rnip->rni_typelist, type));
+}
+
+/*
+ * Wait for and report an event of interest to rnip, a notification client
+ */
+int
+rc_notify_info_wait(rc_notify_info_t *rnip, rc_node_ptr_t *out,
+ char *outp, size_t sz)
+{
+ rc_notify_t *np;
+ rc_notify_t *me = &rnip->rni_notify;
+ rc_node_t *nnp;
+ rc_notify_delete_t *ndp;
+
+ int am_first_info;
+
+ if (sz > 0)
+ outp[0] = 0;
+
+ (void) pthread_mutex_lock(&rc_pg_notify_lock);
+
+ while ((rnip->rni_flags & (RC_NOTIFY_ACTIVE | RC_NOTIFY_DRAIN)) ==
+ RC_NOTIFY_ACTIVE) {
+ /*
+ * If I'm first on the notify list, it is my job to
+ * clean up any notifications I pass by. I can't do that
+ * if someone is blocking the list from removals, so I
+ * have to wait until they have all drained.
+ */
+ am_first_info = (uu_list_first(rc_notify_list) == me);
+ if (am_first_info && rc_notify_in_use) {
+ rnip->rni_waiters++;
+ (void) pthread_cond_wait(&rc_pg_notify_cv,
+ &rc_pg_notify_lock);
+ rnip->rni_waiters--;
+ continue;
+ }
+
+ /*
+ * Search the list for a node of interest.
+ */
+ np = uu_list_next(rc_notify_list, me);
+ while (np != NULL && !rc_notify_info_interested(rnip, np)) {
+ rc_notify_t *next = uu_list_next(rc_notify_list, np);
+
+ if (am_first_info) {
+ if (np->rcn_info) {
+ /*
+ * Passing another client -- stop
+ * cleaning up notifications
+ */
+ am_first_info = 0;
+ } else {
+ rc_notify_remove_locked(np);
+ }
+ }
+ np = next;
+ }
+
+ /*
+ * Nothing of interest -- wait for notification
+ */
+ if (np == NULL) {
+ rnip->rni_waiters++;
+ (void) pthread_cond_wait(&rnip->rni_cv,
+ &rc_pg_notify_lock);
+ rnip->rni_waiters--;
+ continue;
+ }
+
+ /*
+ * found something to report -- move myself after the
+ * notification and process it.
+ */
+ (void) uu_list_remove(rc_notify_list, me);
+ (void) uu_list_insert_after(rc_notify_list, np, me);
+
+ if ((ndp = np->rcn_delete) != NULL) {
+ (void) strlcpy(outp, ndp->rnd_fmri, sz);
+ if (am_first_info)
+ rc_notify_remove_locked(np);
+ (void) pthread_mutex_unlock(&rc_pg_notify_lock);
+ rc_node_clear(out, 0);
+ return (REP_PROTOCOL_SUCCESS);
+ }
+
+ nnp = np->rcn_node;
+ assert(nnp != NULL);
+
+ /*
+ * We can't bump nnp's reference count without grabbing its
+ * lock, and rc_pg_notify_lock is a leaf lock. So we
+ * temporarily block all removals to keep nnp from
+ * disappearing.
+ */
+ rc_notify_in_use++;
+ assert(rc_notify_in_use > 0);
+ (void) pthread_mutex_unlock(&rc_pg_notify_lock);
+
+ rc_node_assign(out, nnp);
+
+ (void) pthread_mutex_lock(&rc_pg_notify_lock);
+ assert(rc_notify_in_use > 0);
+ rc_notify_in_use--;
+ if (am_first_info)
+ rc_notify_remove_locked(np);
+ if (rc_notify_in_use == 0)
+ (void) pthread_cond_broadcast(&rc_pg_notify_cv);
+ (void) pthread_mutex_unlock(&rc_pg_notify_lock);
+
+ return (REP_PROTOCOL_SUCCESS);
+ }
+ /*
+ * If we're the last one out, let people know it's clear.
+ */
+ if (rnip->rni_waiters == 0)
+ (void) pthread_cond_broadcast(&rnip->rni_cv);
+ (void) pthread_mutex_unlock(&rc_pg_notify_lock);
+ return (REP_PROTOCOL_DONE);
+}
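
rc_notify_info_wait() above relies on each client being an element of the
same list as the notifications themselves: consuming an event means unlinking
the client's marker and reinserting it just past that event, and the
front-most client frees events that everyone has already passed. A hedged
sketch of the marker movement on a plain singly linked list (ev_t and
cursor_move_after() are hypothetical, not the libuutil API):

	typedef struct ev {
		struct ev	*next;
		int		is_marker;	/* 1 for a client marker, 0 for an event */
	} ev_t;

	/*
	 * Unlink the client's marker and relink it directly after the
	 * event it has just consumed.
	 */
	static void
	cursor_move_after(ev_t **headp, ev_t *marker, ev_t *evp)
	{
		ev_t **pp;

		for (pp = headp; *pp != NULL; pp = &(*pp)->next) {
			if (*pp == marker) {
				*pp = marker->next;	/* unlink the marker */
				break;
			}
		}
		marker->next = evp->next;	/* relink after the event */
		evp->next = marker;
	}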
+
+static void
+rc_notify_info_reset(rc_notify_info_t *rnip)
+{
+ int i;
+
+ (void) pthread_mutex_lock(&rc_pg_notify_lock);
+ if (rnip->rni_flags & RC_NOTIFY_ACTIVE)
+ rc_notify_info_remove_locked(rnip);
+ assert(!(rnip->rni_flags & (RC_NOTIFY_DRAIN | RC_NOTIFY_EMPTYING)));
+ rnip->rni_flags |= RC_NOTIFY_EMPTYING;
+ (void) pthread_mutex_unlock(&rc_pg_notify_lock);
+
+ for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
+ if (rnip->rni_namelist[i] != NULL) {
+ free((void *)rnip->rni_namelist[i]);
+ rnip->rni_namelist[i] = NULL;
+ }
+ if (rnip->rni_typelist[i] != NULL) {
+ free((void *)rnip->rni_typelist[i]);
+ rnip->rni_typelist[i] = NULL;
+ }
+ }
+
+ (void) pthread_mutex_lock(&rc_pg_notify_lock);
+ rnip->rni_flags &= ~RC_NOTIFY_EMPTYING;
+ (void) pthread_mutex_unlock(&rc_pg_notify_lock);
+}
+
+void
+rc_notify_info_fini(rc_notify_info_t *rnip)
+{
+ rc_notify_info_reset(rnip);
+
+ uu_list_node_fini(rnip, &rnip->rni_list_node, rc_notify_info_pool);
+ uu_list_node_fini(&rnip->rni_notify, &rnip->rni_notify.rcn_list_node,
+ rc_notify_pool);
+}
diff --git a/usr/src/cmd/svc/configd/restore_repository.sh b/usr/src/cmd/svc/configd/restore_repository.sh
new file mode 100644
index 0000000000..08e916b0f9
--- /dev/null
+++ b/usr/src/cmd/svc/configd/restore_repository.sh
@@ -0,0 +1,334 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+
+PATH=/sbin:/usr/bin:/usr/sbin
+
+. /lib/svc/share/smf_include.sh
+. /lib/svc/share/fs_include.sh
+
+echo >&2 "
+Repository Restore utility
+
+See http://sun.com/msg/SMF-8000-MY for more information on the use of
+this script to restore backup copies of the smf(5) repository.
+
+If there are any problems which need human intervention, this script will
+give instructions and then exit back to your shell."
+
+usage()
+{
+ echo "usage: $0 [-r rootdir]" >&2
+ exit 2;
+}
+
+repositorydir=etc/svc
+repository=repository
+
+myroot=/
+while getopts r: opt; do
+ case "$opt" in
+ r) myroot=$OPTARG
+ if [ ! -d $myroot ]; then
+ echo "$myroot: not a directory" >&2
+ exit 1
+ fi
+ # validate directory and make sure it ends in '/'.
+ case "$myroot" in
+ //*) echo "$myroot: must begin with a single /" >&2
+ usage;;
+ /) echo "$myroot: alternate root cannot be /" >&2
+ usage;;
+
+ /*/) ;; # ends with /
+ /*) myroot="$myroot/";; # add final /
+
+ *) echo "$myroot: must be a full path" >&2
+ usage;;
+ esac;;
+ ?) usage;;
+ esac
+done
+
+if [ $OPTIND -le $# ]; then
+ # getopts(1) didn't slurp up everything.
+ usage
+fi
+
+if [ "$myroot" = / ]; then
+ system="system"
+ [ "`/sbin/zonename`" != global ] && system="zone"
+ echo >&2 "
+Note that upon full completion of this script, the $system will be rebooted
+using reboot(1M), which will interrupt any active services.
+"
+fi
+
+# check that the filesystem is as expected
+cd "$myroot" || exit 1
+cd "$myroot$repositorydir" || exit 1
+
+nouser=false
+rootro=false
+
+# check to make sure /usr is mounted
+if [ ! -x /usr/bin/pgrep ]; then
+ nouser=true
+fi
+if [ ! -w "$myroot" ]; then
+ rootro=true
+fi
+
+if [ "$nouser" = true -o "$rootro" = true ]; then
+ if [ "$nouser" = true -a "$rootro" = true ]; then
+ echo "The / filesystem is mounted read-only, and the /usr" >&2
+ echo "filesystem has not yet been mounted." >&2
+ elif [ "$nouser" = true ]; then
+ echo "The /usr filesystem has not yet been mounted." >&2
+ else
+ echo "The / filesystem is mounted read-only." >&2
+ fi
+
+ echo >&2 "
+This must be rectified before $0 can continue.
+
+If / or /usr are on SVM (md(7d)) partitions, first run
+ /lib/svc/method/svc-metainit
+
+To properly mount / and /usr, run:
+ /lib/svc/method/fs-root
+then
+ /lib/svc/method/fs-usr
+
+After those have completed successfully, re-run:
+ $0 $*
+
+to continue.
+"
+ exit 1
+fi
+
+# at this point, we know / is mounted read-write, and /usr is mounted.
+oldreps="`
+ /bin/ls -1rt $repository-*-[0-9]*[0-9] | \
+ /bin/sed -e '/[^A-Za-z0-9_,.-]/d' -e 's/^'$repository'-//'
+`"
+
+if [ -z "$oldreps" ]; then
+ cat >&2 <<EOF
+There are no available backups of $myroot$repositorydir/$repository.db.
+The only available repository is "-seed-". Note that restoring the seed
+will lose all customizations, and XXX other issues?
+
+EOF
+ prompt="Enter -seed- to restore from the seed, or -quit- to exit: \c"
+ default=
+else
+ cat >&2 <<EOF
+The following backups of $myroot$repositorydir/$repository.db exist, from
+oldest to newest:
+
+$oldreps
+
+The backups are named based on their type and the time at which they were taken.
+Backups beginning with "boot" are made before the first change is made to
+the repository after system boot. Backups beginning with "manifest_import"
+are made after svc:/system/manifest-import:default finishes its processing.
+The time of backup is given in YYYYMMDD_HHMMSS format.
+
+Please enter one of:
+ 1) boot, for the most recent post-boot backup
+ 2) manifest_import, for the most recent manifest_import backup.
+ 3) a specific backup repository from the above list
+ 4) -seed-, the initial starting repository. (All customizations
+ will be lost.)
+ 5) -quit-, to cancel.
+
+EOF
+ prompt="Enter response [boot]: \c"
+ default="boot"
+fi
+
+cont=false
+while [ $cont = false ]; do
+ echo "$prompt"
+
+ read x || exit 1
+ [ -z "$x" ] && x="$default"
+
+ case "$x" in
+ -seed-)
+ if [ $myroot != / -o "`/sbin/zonename`" = global ]; then
+ file="$myroot"lib/svc/seed/global.db
+ else
+ file="$myroot"lib/svc/seed/nonglobal.db
+ fi;;
+ -quit-)
+ echo "Exiting."
+ exit 0;;
+ /*)
+ file="$x";;
+ */*)
+ file="$myroot$x";;
+ ?*)
+ file="$myroot$repositorydir/repository-$x";;
+ *) file= ;;
+ esac
+
+ if [ -f $file ]; then
+ if [ -r $file ]; then
+ checkresults="`echo PRAGMA integrity_check\; | \
+			    /lib/svc/bin/sqlite $file 2>&1 | grep -v '^ok$'`"
+
+ if [ -n "$checkresults" ]; then
+ echo "$file: integrity check failed:" >&2
+ echo "$checkresults" >&2
+ echo
+ else
+ cont=true
+ fi
+ else
+ echo "$file: not readable"
+ fi
+ elif [ -n "$file" ]; then
+ echo "$file: not found"
+ fi
+done
+
+errors="$myroot"etc/svc/volatile/db_errors
+repo="$myroot$repositorydir/$repository.db"
+new="$repo"_old_"`date +%Y''%m''%d'_'%H''%M''%S`"
+
+steps=
+if [ "$myroot" = / ]; then
+ steps="$steps
+svc.startd(1M) and svc.configd(1M) will be quiesced, if running."
+fi
+
+if [ -r $repo ]; then
+ steps="$steps
+$repo
+ -- renamed --> $new"
+fi
+if [ -r $errors ]; then
+ steps="$steps
+$errors
+ -- copied --> ${new}_errors"
+fi
+
+cat >&2 <<EOF
+
+After confirmation, the following steps will be taken:
+$steps
+$file
+ -- copied --> $repo
+EOF
+
+if [ "$myroot" = / ]; then
+ echo "and the system will be rebooted with reboot(1M)."
+fi
+
+echo
+cont=false
+while [ $cont = false ]; do
+ echo "Proceed [yes/no]? \c"
+ read x || x=n
+
+ case "$x" in
+ [Yy]|[Yy][Ee][Ss])
+ cont=true;;
+ [Nn]|[Nn][Oo])
+ echo; echo "Exiting..."
+ exit 0;
+ esac;
+done
+
+umask 077 # we want files to be root-readable only.
+
+startd_msg=
+if [ "$myroot" = / ]; then
+ zone="`zonename`"
+ startd="`pgrep -z "$zone" -f svc.startd`"
+
+ echo
+ echo "Quiescing svc.startd(1M) and svc.configd(1M): \c"
+ if [ -n "$startd" ]; then
+ pstop $startd
+ startd_msg=\
+"To start svc.startd(1M) running, do: /usr/bin/prun $startd"
+ fi
+ pkill -z "$zone" -f svc.configd
+
+	sleep 1 # yes, this is a hack
+
+ echo "done."
+fi
+
+if [ -r "$repo" ]; then
+ echo "$repo"
+ echo " -- renamed --> $new"
+ if mv $repo $new; then
+ :
+ else
+ echo "Failed. $startd_msg"
+ exit 1;
+ fi
+fi
+
+if [ -r $errors ]; then
+ echo "$errors"
+ echo " -- copied --> ${new}_errors"
+ if cp -p $errors ${new}_errors; then
+ :
+ else
+ mv -f $new $repo
+ echo "Failed. $startd_msg"
+ exit 1;
+ fi
+fi
+
+echo "$file"
+echo " -- copied --> $repo"
+
+if cp $file $repo.new.$$ && mv $repo.new.$$ $repo; then
+ :
+else
+ rm -f $repo.new.$$ ${new}_errors
+ mv -f $new $repo
+ echo "Failed. $startd_msg"
+ exit 1;
+fi
+
+echo
+echo "The backup repository has been successfully restored."
+echo
+
+if [ "$myroot" = / ]; then
+ echo "Rebooting in 5 seconds."
+ sleep 5
+ reboot
+fi
diff --git a/usr/src/cmd/svc/configd/snapshot.c b/usr/src/cmd/svc/configd/snapshot.c
new file mode 100644
index 0000000000..66e92123fd
--- /dev/null
+++ b/usr/src/cmd/svc/configd/snapshot.c
@@ -0,0 +1,271 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <assert.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <strings.h>
+#include "configd.h"
+#include "repcache_protocol.h"
+
+typedef struct snapshot_bucket {
+ pthread_mutex_t sb_lock;
+ rc_snapshot_t *sb_head;
+
+ char sb_pad[64 - sizeof (pthread_mutex_t) -
+ sizeof (rc_snapshot_t *)];
+} snapshot_bucket_t;
+
+#define SN_HASH_SIZE 64
+#define SN_HASH_MASK (SN_HASH_SIZE - 1)
+
+#pragma align 64(snapshot_hash)
+static snapshot_bucket_t snapshot_hash[SN_HASH_SIZE];
+
+#define SNAPSHOT_BUCKET(h) (&snapshot_hash[(h) & SN_HASH_MASK])
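+
+/*
+ * Each bucket is padded out to 64 bytes (and the array is 64-byte aligned
+ * above), presumably so that concurrently used buckets do not share a
+ * cache line.  Because SN_HASH_SIZE is a power of two, the mask in
+ * SNAPSHOT_BUCKET() is equivalent to (h % SN_HASH_SIZE).
+ */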
+
+static rc_snapshot_t *
+snapshot_alloc(void)
+{
+ rc_snapshot_t *sp;
+ sp = uu_zalloc(sizeof (*sp));
+
+ (void) pthread_mutex_init(&sp->rs_lock, NULL);
+ (void) pthread_cond_init(&sp->rs_cv, NULL);
+
+ sp->rs_refcnt++;
+ return (sp);
+}
+
+static void
+snapshot_free(rc_snapshot_t *sp)
+{
+ rc_snaplevel_t *lvl, *next;
+
+ assert(sp->rs_refcnt == 0 && sp->rs_childref == 0);
+
+ (void) pthread_mutex_destroy(&sp->rs_lock);
+ (void) pthread_cond_destroy(&sp->rs_cv);
+
+ for (lvl = sp->rs_levels; lvl != NULL; lvl = next) {
+ next = lvl->rsl_next;
+
+ assert(lvl->rsl_parent == sp);
+ lvl->rsl_parent = NULL;
+
+ if (lvl->rsl_service)
+ free((char *)lvl->rsl_service);
+ if (lvl->rsl_instance)
+ free((char *)lvl->rsl_instance);
+
+ uu_free(lvl);
+ }
+ uu_free(sp);
+}
+
+static void
+rc_snapshot_hold(rc_snapshot_t *sp)
+{
+ (void) pthread_mutex_lock(&sp->rs_lock);
+ sp->rs_refcnt++;
+ assert(sp->rs_refcnt > 0);
+ (void) pthread_mutex_unlock(&sp->rs_lock);
+}
+
+void
+rc_snapshot_rele(rc_snapshot_t *sp)
+{
+ int done;
+ (void) pthread_mutex_lock(&sp->rs_lock);
+ assert(sp->rs_refcnt > 0);
+ sp->rs_refcnt--;
+ done = ((sp->rs_flags & RC_SNAPSHOT_DEAD) &&
+ sp->rs_refcnt == 0 && sp->rs_childref == 0);
+ (void) pthread_mutex_unlock(&sp->rs_lock);
+
+ if (done)
+ snapshot_free(sp);
+}
+
+void
+rc_snaplevel_hold(rc_snaplevel_t *lvl)
+{
+ rc_snapshot_t *sp = lvl->rsl_parent;
+ (void) pthread_mutex_lock(&sp->rs_lock);
+ sp->rs_childref++;
+ assert(sp->rs_childref > 0);
+ (void) pthread_mutex_unlock(&sp->rs_lock);
+}
+
+void
+rc_snaplevel_rele(rc_snaplevel_t *lvl)
+{
+ int done;
+ rc_snapshot_t *sp = lvl->rsl_parent;
+ (void) pthread_mutex_lock(&sp->rs_lock);
+ assert(sp->rs_childref > 0);
+ sp->rs_childref--;
+ done = ((sp->rs_flags & RC_SNAPSHOT_DEAD) &&
+ sp->rs_refcnt == 0 && sp->rs_childref == 0);
+ (void) pthread_mutex_unlock(&sp->rs_lock);
+
+ if (done)
+ snapshot_free(sp);
+}
+
+static snapshot_bucket_t *
+snapshot_hold_bucket(uint32_t snap_id)
+{
+ snapshot_bucket_t *bp = SNAPSHOT_BUCKET(snap_id);
+ (void) pthread_mutex_lock(&bp->sb_lock);
+ return (bp);
+}
+
+static void
+snapshot_rele_bucket(snapshot_bucket_t *bp)
+{
+ assert(MUTEX_HELD(&bp->sb_lock));
+ (void) pthread_mutex_unlock(&bp->sb_lock);
+}
+
+static rc_snapshot_t *
+snapshot_lookup_unlocked(snapshot_bucket_t *bp, uint32_t snap_id)
+{
+ rc_snapshot_t *sp;
+
+ assert(MUTEX_HELD(&bp->sb_lock));
+ assert(bp == SNAPSHOT_BUCKET(snap_id));
+
+ for (sp = bp->sb_head; sp != NULL; sp = sp->rs_hash_next) {
+ if (sp->rs_snap_id == snap_id) {
+ rc_snapshot_hold(sp);
+ return (sp);
+ }
+ }
+ return (NULL);
+}
+
+static void
+snapshot_insert_unlocked(snapshot_bucket_t *bp, rc_snapshot_t *sp)
+{
+ assert(MUTEX_HELD(&bp->sb_lock));
+ assert(bp == SNAPSHOT_BUCKET(sp->rs_snap_id));
+
+ assert(sp->rs_hash_next == NULL);
+
+ sp->rs_hash_next = bp->sb_head;
+ bp->sb_head = sp;
+}
+
+static void
+snapshot_remove_unlocked(snapshot_bucket_t *bp, rc_snapshot_t *sp)
+{
+ rc_snapshot_t **spp;
+
+ assert(MUTEX_HELD(&bp->sb_lock));
+ assert(bp == SNAPSHOT_BUCKET(sp->rs_snap_id));
+
+ for (spp = &bp->sb_head; *spp != NULL; spp = &(*spp)->rs_hash_next)
+ if (*spp == sp)
+ break;
+
+ assert(*spp == sp);
+ *spp = sp->rs_hash_next;
+ sp->rs_hash_next = NULL;
+}
+
+/*
+ * Look up the snapshot with id snap_id in the hash table, or create it
+ * & populate it with its snaplevels if it's not in the hash table yet.
+ *
+ * Fails with
+ * _NO_RESOURCES
+ */
+int
+rc_snapshot_get(uint32_t snap_id, rc_snapshot_t **snpp)
+{
+ snapshot_bucket_t *bp;
+ rc_snapshot_t *sp;
+ int r;
+
+ bp = snapshot_hold_bucket(snap_id);
+ sp = snapshot_lookup_unlocked(bp, snap_id);
+ if (sp != NULL) {
+ snapshot_rele_bucket(bp);
+ (void) pthread_mutex_lock(&sp->rs_lock);
+ while (sp->rs_flags & RC_SNAPSHOT_FILLING)
+ (void) pthread_cond_wait(&sp->rs_cv, &sp->rs_lock);
+
+ if (sp->rs_flags & RC_SNAPSHOT_DEAD) {
+ (void) pthread_mutex_unlock(&sp->rs_lock);
+ rc_snapshot_rele(sp);
+ return (REP_PROTOCOL_FAIL_NO_RESOURCES);
+ }
+ assert(sp->rs_flags & RC_SNAPSHOT_READY);
+ (void) pthread_mutex_unlock(&sp->rs_lock);
+ *snpp = sp;
+ return (REP_PROTOCOL_SUCCESS);
+ }
+ sp = snapshot_alloc();
+ sp->rs_snap_id = snap_id;
+ sp->rs_flags |= RC_SNAPSHOT_FILLING;
+ snapshot_insert_unlocked(bp, sp);
+ snapshot_rele_bucket(bp);
+
+ /*
+ * Now fill in the snapshot tree
+ */
+ r = object_fill_snapshot(sp);
+ if (r != REP_PROTOCOL_SUCCESS) {
+ assert(r == REP_PROTOCOL_FAIL_NO_RESOURCES);
+
+ /*
+ * failed -- first remove it from the hash table, then kill it
+ */
+ bp = snapshot_hold_bucket(snap_id);
+ snapshot_remove_unlocked(bp, sp);
+ snapshot_rele_bucket(bp);
+
+ (void) pthread_mutex_lock(&sp->rs_lock);
+ sp->rs_flags &= ~RC_SNAPSHOT_FILLING;
+ sp->rs_flags |= RC_SNAPSHOT_DEAD;
+ (void) pthread_cond_broadcast(&sp->rs_cv);
+ (void) pthread_mutex_unlock(&sp->rs_lock);
+ rc_snapshot_rele(sp); /* may free sp */
+ return (r);
+ }
+ (void) pthread_mutex_lock(&sp->rs_lock);
+ sp->rs_flags &= ~RC_SNAPSHOT_FILLING;
+ sp->rs_flags |= RC_SNAPSHOT_READY;
+ (void) pthread_cond_broadcast(&sp->rs_cv);
+ (void) pthread_mutex_unlock(&sp->rs_lock);
+ *snpp = sp;
+ return (REP_PROTOCOL_SUCCESS); /* pass on creation reference */
+}
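+
+/*
+ * Illustrative sketch only (hypothetical; not called from anywhere in
+ * configd): how a caller takes and drops a snapshot reference using the
+ * interfaces above.
+ */
+static int
+example_count_snaplevels(uint32_t snap_id, uint32_t *countp)
+{
+	rc_snapshot_t *sp;
+	rc_snaplevel_t *lvl;
+	uint32_t n = 0;
+	int r;
+
+	r = rc_snapshot_get(snap_id, &sp);	/* takes a reference */
+	if (r != REP_PROTOCOL_SUCCESS)
+		return (r);		/* REP_PROTOCOL_FAIL_NO_RESOURCES */
+
+	for (lvl = sp->rs_levels; lvl != NULL; lvl = lvl->rsl_next)
+		n++;
+
+	*countp = n;
+	rc_snapshot_rele(sp);		/* may free sp if it is dead */
+	return (REP_PROTOCOL_SUCCESS);
+}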
diff --git a/usr/src/cmd/svc/configd/sqlite/Makefile b/usr/src/cmd/svc/configd/sqlite/Makefile
new file mode 100644
index 0000000000..23ea95038e
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/Makefile
@@ -0,0 +1,305 @@
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+#
+
+SQLITE_VERSION = 2.8.15-repcached
+
+LIBRARY = libsqlite.a
+RELOC = $(LIBRARY:%.a=%.o)
+
+VERS = .1
+OBJECTS = \
+ attach.o \
+ auth.o \
+ btree.o \
+ btree_rb.o \
+ build.o \
+ copy.o \
+ date.o \
+ delete.o \
+ encode.o \
+ expr.o \
+ func.o \
+ hash.o \
+ insert.o \
+ main.o \
+ opcodes.o \
+ os.o \
+ pager.o \
+ parse.o \
+ pragma.o \
+ printf.o \
+ random.o \
+ select.o \
+ table.o \
+ tokenize.o \
+ trigger.o \
+ update.o \
+ util.o \
+ vacuum.o \
+ vdbe.o \
+ vdbeaux.o \
+ where.o
+
+include $(SRC)/lib/Makefile.lib
+
+SRCDIR = src
+TOOLDIR = tool
+LIBS = $(RELOC) $(LINTLIB)
+
+$(LINTLIB) := SRCS = $(LINTSRC)
+
+SRCS = \
+ $(SRCDIR)/attach.c \
+ $(SRCDIR)/auth.c \
+ $(SRCDIR)/btree.c \
+ $(SRCDIR)/btree_rb.c \
+ $(SRCDIR)/build.c \
+ $(SRCDIR)/copy.c \
+ $(SRCDIR)/date.c \
+ $(SRCDIR)/delete.c \
+ $(SRCDIR)/encode.c \
+ $(SRCDIR)/expr.c \
+ $(SRCDIR)/func.c \
+ $(SRCDIR)/hash.c \
+ $(SRCDIR)/insert.c \
+ $(SRCDIR)/main.c \
+ opcodes.c \
+ $(SRCDIR)/os.c \
+ $(SRCDIR)/pager.c \
+ parse.c \
+ $(SRCDIR)/pragma.c \
+ $(SRCDIR)/printf.c \
+ $(SRCDIR)/random.c \
+ $(SRCDIR)/select.c \
+ $(SRCDIR)/table.c \
+ $(SRCDIR)/tokenize.c \
+ $(SRCDIR)/update.c \
+ $(SRCDIR)/util.c \
+ $(SRCDIR)/vacuum.c \
+ $(SRCDIR)/vdbe.c \
+ $(SRCDIR)/vdbeaux.c \
+ $(SRCDIR)/where.c \
+ $(SRCDIR)/trigger.c
+
+MYCPPFLAGS = -D_REENTRANT -DTHREADSAFE=1 -DHAVE_USLEEP=1 -I. -I$(SRCDIR)
+CPPFLAGS += $(MYCPPFLAGS)
+
+MAPFILE = mapfile-sqlite
+
+# Header files used by all library source files.
+#
+HDR = \
+ $(SRCDIR)/btree.h \
+ $(SRCDIR)/config.h \
+ $(SRCDIR)/hash.h \
+ opcodes.h \
+ $(SRCDIR)/os.h \
+ parse.h \
+ sqlite.h \
+ $(SRCDIR)/sqliteInt.h \
+ $(SRCDIR)/vdbe.h \
+ $(SRCDIR)/vdbeInt.h
+
+#
+# Sources used for test harness
+#
+TESTSRC = \
+ $(SRCDIR)/tclsqlite.c \
+ $(SRCDIR)/btree.c \
+ $(SRCDIR)/func.c \
+ $(SRCDIR)/os.c \
+ $(SRCDIR)/pager.c \
+ $(SRCDIR)/test1.c \
+ $(SRCDIR)/test2.c \
+ $(SRCDIR)/test3.c \
+ $(SRCDIR)/md5.c
+
+TESTOBJS = $(TESTSRC:$(SRCDIR)/%.c=%.o)
+
+TESTCLEAN = $(TESTOBJS) test.db test.tcl test1.bt test2.db testdb
+
+#
+# Native variants
+#
+NATIVERELOC = $(RELOC:%.o=%-native.o)
+NATIVEPROGS = lemon-build testfixture
+NATIVEOBJS = lemon.o $(OBJS:%.o=%-native.o)
+
+NATIVETARGETS = $(NATIVEPROGS) $(NATIVEOBJS) $(NATIVERELOC)
+
+$(NATIVETARGETS) := CC = $(NATIVECC)
+$(NATIVETARGETS) := LD = $(NATIVELD)
+$(NATIVETARGETS) := CFLAGS = $(NATIVE_CFLAGS)
+$(NATIVETARGETS) := CPPFLAGS = $(MYCPPFLAGS)
+$(NATIVETARGETS) := LDFLAGS =
+$(NATIVETARGETS) := LDLIBS = -lc
+
+$(OBJS) shell.o := CFLAGS += $(CTF_FLAGS)
+$(OBJS) shell.o := CTFCONVERT_POST = $(CTFCONVERT_O)
+
+TCLBASE = /usr/sfw
+TCLVERS = tcl8.3
+
+testfixture := MYCPPFLAGS += -I$(TCLBASE)/include -DTCLSH -DSQLITE_TEST=1
+#
+# work around compiler issues
+#
+testfixture := CFLAGS += \
+ -erroff=E_ARRAY_OF_INCOMPLETE \
+ -erroff=E_ARG_INCOMPATIBLE_WITH_ARG
+
+testfixture := LDLIBS += -R$(TCLBASE)/lib -L$(TCLBASE)/lib -l$(TCLVERS) -lm -ldl
+
+CLEANFILES += \
+ $(RELOC) \
+ $(LINTLIB) \
+ $(NATIVETARGETS) \
+ $(TESTCLEAN) \
+ lemon \
+ lemon.o \
+ lempar.c \
+ opcodes.c \
+ opcodes.h \
+ parse_tmp.c \
+ parse_tmp.h \
+ parse_tmp.out \
+ parse_tmp.y \
+ parse.c \
+ parse.h \
+ shell.o \
+ sqlite \
+ sqlite.h
+
+ENCODING = ISO8859
+
+.PARALLEL: $(OBJS) $(OBJS:%.o=%-native.o)
+.KEEP_STATE:
+
+SQLITE = sqlite
+
+ROOTLIBSVCBIN = $(ROOT)/lib/svc/bin
+ROOTSQLITE = $(ROOTLIBSVCBIN)/$(SQLITE)
+
+# This is the default Makefile target. The objects listed here
+# are what get built when you type just "make" with no arguments.
+#
+all: $(LIBS) $(SQLITE)
+install: all $(ROOTSQLITE)
+
+$(ROOTSQLITE) := FILEMODE = 555
+
+$(ROOTLIBSVCBIN)/%: %
+ $(INS.file)
+
+$(OBJS) $(OBJS:%.o=%-native.o): $(HDR)
+
+native: $(NATIVERELOC)
+
+$(RELOC): objs .WAIT $(OBJS)
+ $(LD) -r $(MAPFILE:%=-M%) -o $(RELOC) $(OBJS)
+ $(CTFMERGE) -t -f -L VERSION -o $(RELOC) $(OBJS)
+
+$(NATIVERELOC): objs .WAIT $(OBJS:%.o=%-native.o)
+ $(LD) -r $(MAPFILE:%=-M%) -o $(NATIVERELOC) $(OBJS:%.o=%-native.o)
+
+#
+# we don't want this output different every time, so we just suppress it
+#
+sqlite.h: $(SRCDIR)/sqlite.h.in
+ @echo "Generating $@"; \
+ sed -e 's"--VERS--"$(SQLITE_VERSION)-$(VERSION)"' \
+ -e s/--ENCODING--/$(ENCODING)/ \
+ $(SRCDIR)/sqlite.h.in > $@
+
+opcodes.h: $(SRCDIR)/vdbe.c
+ @echo "Generating $@"; \
+ $(RM) -f $@ ; \
+ echo '/* Automatically generated file. Do not edit */' > $@ ; \
+ grep '^case OP_' $(SRCDIR)/vdbe.c | \
+ sed -e 's/://' | \
+ awk '{printf "#define %-30s %3d\n", $$2, ++cnt}' >> $@
+
+opcodes.c: $(SRCDIR)/vdbe.c
+ @echo "Generating $@"; \
+ $(RM) -f $@ ; \
+ echo '/* Automatically generated file. Do not edit */' > $@ ; \
+ echo 'char *sqliteOpcodeNames[] = { "???", ' >> $@ ; \
+ grep '^case OP_' $(SRCDIR)/vdbe.c | \
+ sed -e 's/^.*OP_/ "/' -e 's/:.*$$/", /' >> $@ ; \
+ echo '};' >> $@
+
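+# For illustration only: if vdbe.c contained the labels "case OP_Goto:"
+# and "case OP_Halt:", the two rules above would generate roughly
+#
+#	opcodes.h:	#define OP_Goto    1
+#			#define OP_Halt    2
+#	opcodes.c:	char *sqliteOpcodeNames[] = { "???", "Goto", "Halt", };
+#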
+#
+# We use a recursive invocation because otherwise pmake always rebuilds
+# everything, due to multiple expansions of "foo := A += B".
+#
+lemon: FRC
+ $(MAKE) lemon-build
+
+lemon-build: lemon.o $(TOOLDIR)/lempar.c
+ $(LINK.c) -o lemon lemon.o
+ $(RM) lempar.c
+ $(LN) -s $(TOOLDIR)/lempar.c lempar.c
+ $(RM) lemon-build
+ $(CP) lemon lemon-build
+
+shell.o: sqlite.h
+
+NES_MAPFILE= $(SRC)/cmd/mapfile_noexstk $(NX_MAP)
+
+sqlite: shell.o $(RELOC)
+ $(LINK.c) -o sqlite shell.o $(NES_MAPFILE:%=-M%) $(RELOC) -lrt
+ $(CTFMERGE) -t -L VERSION -o $@ shell.o $(RELOC)
+ $(POST_PROCESS)
+
+testfixture: FRC
+ @if [ -f $(TCLBASE)/include/tcl.h ]; then \
+ unset SUNPRO_DEPENDENCIES; \
+ echo $(LINK.c) -o testfixture $(TESTSRC) $(LIBRARY) $(LDLIBS) ;\
+ exec $(LINK.c) -o testfixture $(TESTSRC) $(LIBRARY) $(LDLIBS) ;\
+ else \
+ echo "$(TCLBASE)/include/tcl.h: not found."; \
+ exit 1; \
+ fi
+
+parse_tmp.out: $(SRCDIR)/parse.y lemon
+ $(RM) parse_tmp.y
+ $(CP) $(SRCDIR)/parse.y parse_tmp.y
+ ./lemon parse_tmp.y
+
+parse.h: parse_tmp.out
+ $(CP) parse_tmp.h parse.h
+
+parse.c: parse_tmp.out
+ $(CP) parse_tmp.c parse.c
+
+objs/%-native.o: $(SRCDIR)/%.c
+ $(COMPILE.c) -o $@ $<
+ $(POST_PROCESS_O)
+
+objs/%-native.o: %.c
+ $(COMPILE.c) -o $@ $<
+ $(POST_PROCESS_O)
+
+objs/parse-native.o: parse.c
+ $(COMPILE.c) -o $@ parse.c
+ $(POST_PROCESS_O)
+
+objs/%.o: %.c
+ $(COMPILE.c) -o $@ $<
+ $(POST_PROCESS_O)
+
+%.o: $(SRCDIR)/%.c
+ $(COMPILE.c) -o $@ $<
+ $(POST_PROCESS_O)
+
+%.o: $(TOOLDIR)/%.c
+ $(COMPILE.c) -o $@ $<
+ $(POST_PROCESS_O)
+
+include $(SRC)/lib/Makefile.targ
+
+FRC:
diff --git a/usr/src/cmd/svc/configd/sqlite/inc.flg b/usr/src/cmd/svc/configd/sqlite/inc.flg
new file mode 100644
index 0000000000..7c0ea43c86
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/inc.flg
@@ -0,0 +1,9 @@
+#!/bin/sh
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# pragma ident "%Z%%M% %I% %E% SMI"
+
+echo_file usr/src/lib/Makefile.lib
+echo_file usr/src/lib/Makefile.targ
diff --git a/usr/src/cmd/svc/configd/sqlite/llib-lsqlite b/usr/src/cmd/svc/configd/sqlite/llib-lsqlite
new file mode 100644
index 0000000000..220ae71518
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/llib-lsqlite
@@ -0,0 +1,12 @@
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/* LINTLIBRARY */
+/* PROTOLIB1 */
+
+#include "sqlite.h"
+#include "sqlite-misc.h"
diff --git a/usr/src/cmd/svc/configd/sqlite/main.mk b/usr/src/cmd/svc/configd/sqlite/main.mk
new file mode 100644
index 0000000000..ebcb86b2ee
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/main.mk
@@ -0,0 +1,448 @@
+#ident "%Z%%M% %I% %E% SMI"
+#
+###############################################################################
+# The following macros should be defined before this script is
+# invoked:
+#
+# TOP The toplevel directory of the source tree. This is the
+# directory that contains this "Makefile.in" and the
+# "configure.in" script.
+#
+# BCC C Compiler and options for use in building executables that
+# will run on the platform that is doing the build.
+#
+# USLEEP If the target operating system supports the "usleep()" system
+# call, then define the HAVE_USLEEP macro for all C modules.
+#
+# THREADSAFE If you want the SQLite library to be safe for use within a
+# multi-threaded program, then define the following macro
+# appropriately:
+#
+# THREADLIB Specify any extra linker options needed to make the library
+# thread safe
+#
+# OPTS Extra compiler command-line options.
+#
+# EXE The suffix to add to executable files. ".exe" for windows
+# and "" for Unix.
+#
+# TCC C Compiler and options for use in building executables that
+# will run on the target platform. This is usually the same
+# as BCC, unless you are cross-compiling.
+#
+# AR Tools used to build a static library.
+# RANLIB
+#
+# TCL_FLAGS Extra compiler options needed for programs that use the
+# TCL library.
+#
+# LIBTCL Linker options needed to link against the TCL library.
+#
+# READLINE_FLAGS Compiler options needed for programs that use the
+# readline() library.
+#
+# LIBREADLINE    Linker options that programs using readline() must
+# link against.
+#
+# ENCODING "UTF8" or "ISO8859"
+#
+# Once the macros above are defined, the rest of this make script will
+# build the SQLite library and testing tools.
+################################################################################
+
+# This is how we compile
+#
+TCCX = $(TCC) $(OPTS) $(THREADSAFE) $(USLEEP) -I. -I$(TOP)/src
+
+# Object files for the SQLite library.
+#
+LIBOBJ = attach.o auth.o btree.o btree_rb.o build.o copy.o date.o delete.o \
+ expr.o func.o hash.o insert.o \
+ main.o opcodes.o os.o pager.o parse.o pragma.o printf.o random.o \
+ select.o table.o tokenize.o trigger.o update.o util.o \
+ vacuum.o vdbe.o vdbeaux.o where.o tclsqlite.o
+
+# All of the source code files.
+#
+SRC = \
+ $(TOP)/src/attach.c \
+ $(TOP)/src/auth.c \
+ $(TOP)/src/btree.c \
+ $(TOP)/src/btree.h \
+ $(TOP)/src/btree_rb.c \
+ $(TOP)/src/build.c \
+ $(TOP)/src/copy.c \
+ $(TOP)/src/date.c \
+ $(TOP)/src/delete.c \
+ $(TOP)/src/encode.c \
+ $(TOP)/src/expr.c \
+ $(TOP)/src/func.c \
+ $(TOP)/src/hash.c \
+ $(TOP)/src/hash.h \
+ $(TOP)/src/insert.c \
+ $(TOP)/src/main.c \
+ $(TOP)/src/os.c \
+ $(TOP)/src/pager.c \
+ $(TOP)/src/pager.h \
+ $(TOP)/src/parse.y \
+ $(TOP)/src/pragma.c \
+ $(TOP)/src/printf.c \
+ $(TOP)/src/random.c \
+ $(TOP)/src/select.c \
+ $(TOP)/src/shell.c \
+ $(TOP)/src/sqlite.h.in \
+ $(TOP)/src/sqliteInt.h \
+ $(TOP)/src/table.c \
+ $(TOP)/src/tclsqlite.c \
+ $(TOP)/src/tokenize.c \
+ $(TOP)/src/trigger.c \
+ $(TOP)/src/update.c \
+ $(TOP)/src/util.c \
+ $(TOP)/src/vacuum.c \
+ $(TOP)/src/vdbe.c \
+ $(TOP)/src/vdbe.h \
+ $(TOP)/src/vdbeaux.c \
+ $(TOP)/src/vdbeInt.h \
+ $(TOP)/src/where.c
+
+# Source code to the test files.
+#
+TESTSRC = \
+ $(TOP)/src/btree.c \
+ $(TOP)/src/func.c \
+ $(TOP)/src/os.c \
+ $(TOP)/src/pager.c \
+ $(TOP)/src/test1.c \
+ $(TOP)/src/test2.c \
+ $(TOP)/src/test3.c \
+ $(TOP)/src/test4.c \
+ $(TOP)/src/vdbe.c \
+ $(TOP)/src/md5.c
+
+# Header files used by all library source files.
+#
+HDR = \
+ sqlite.h \
+ $(TOP)/src/btree.h \
+ config.h \
+ $(TOP)/src/hash.h \
+ opcodes.h \
+ $(TOP)/src/os.h \
+ $(TOP)/src/sqliteInt.h \
+ $(TOP)/src/vdbe.h \
+ parse.h
+
+# Header files used by the VDBE submodule
+#
+VDBEHDR = \
+ $(HDR) \
+ $(TOP)/src/vdbeInt.h
+
+# This is the default Makefile target. The objects listed here
+# are what get built when you type just "make" with no arguments.
+#
+all: sqlite.h config.h libsqlite.a sqlite$(EXE)
+
+# Generate the file "last_change" which contains the date of change
+# of the most recently modified source code file
+#
+last_change: $(SRC)
+ cat $(SRC) | grep '$$Id: ' | sort +4 | tail -1 \
+ | awk '{print $$5,$$6}' >last_change
+
+libsqlite.a: $(LIBOBJ)
+ $(AR) libsqlite.a $(LIBOBJ)
+ $(RANLIB) libsqlite.a
+
+sqlite$(EXE): $(TOP)/src/shell.c libsqlite.a sqlite.h
+ $(TCCX) $(READLINE_FLAGS) -o sqlite$(EXE) $(TOP)/src/shell.c \
+ libsqlite.a $(LIBREADLINE) $(THREADLIB)
+
+sqlite_analyzer$(EXE): $(TOP)/src/tclsqlite.c libsqlite.a $(TESTSRC) \
+ $(TOP)/tool/spaceanal.tcl
+ sed \
+ -e '/^#/d' \
+ -e 's,\\,\\\\,g' \
+ -e 's,",\\",g' \
+ -e 's,^,",' \
+ -e 's,$$,\\n",' \
+ $(TOP)/tool/spaceanal.tcl >spaceanal_tcl.h
+ $(TCCX) $(TCL_FLAGS) -DTCLSH=2 -DSQLITE_TEST=1 -static -o \
+ sqlite_analyzer$(EXE) $(TESTSRC) $(TOP)/src/tclsqlite.c \
+ libsqlite.a $(LIBTCL)
+
+
+# This target creates a directory named "tsrc" and fills it with
+# copies of all of the C source code and header files needed to
+# build on the target system. Some of the C source code and header
+# files are automatically generated. This target takes care of
+# all that automatic generation.
+#
+target_source: $(SRC) $(VDBEHDR) opcodes.c
+ rm -rf tsrc
+ mkdir tsrc
+ cp $(SRC) $(VDBEHDR) tsrc
+ rm tsrc/sqlite.h.in tsrc/parse.y
+ cp parse.c opcodes.c tsrc
+
+# Rules to build the LEMON compiler generator
+#
+lemon: $(TOP)/tool/lemon.c $(TOP)/tool/lempar.c
+ $(BCC) -o lemon $(TOP)/tool/lemon.c
+ cp $(TOP)/tool/lempar.c .
+
+btree.o: $(TOP)/src/btree.c $(HDR) $(TOP)/src/pager.h
+ $(TCCX) -c $(TOP)/src/btree.c
+
+btree_rb.o: $(TOP)/src/btree_rb.c $(HDR)
+ $(TCCX) -c $(TOP)/src/btree_rb.c
+
+build.o: $(TOP)/src/build.c $(HDR)
+ $(TCCX) -c $(TOP)/src/build.c
+
+main.o: $(TOP)/src/main.c $(HDR)
+ $(TCCX) -c $(TOP)/src/main.c
+
+pager.o: $(TOP)/src/pager.c $(HDR) $(TOP)/src/pager.h
+ $(TCCX) -c $(TOP)/src/pager.c
+
+opcodes.o: opcodes.c
+ $(TCCX) -c opcodes.c
+
+opcodes.c: $(TOP)/src/vdbe.c
+ echo '/* Automatically generated file. Do not edit */' >opcodes.c
+ echo 'char *sqliteOpcodeNames[] = { "???", ' >>opcodes.c
+ grep '^case OP_' $(TOP)/src/vdbe.c | \
+ sed -e 's/^.*OP_/ "/' -e 's/:.*$$/", /' >>opcodes.c
+ echo '};' >>opcodes.c
+
+opcodes.h: $(TOP)/src/vdbe.h
+ echo '/* Automatically generated file. Do not edit */' >opcodes.h
+ grep '^case OP_' $(TOP)/src/vdbe.c | \
+ sed -e 's/://' | \
+ awk '{printf "#define %-30s %3d\n", $$2, ++cnt}' >>opcodes.h
+
+os.o: $(TOP)/src/os.c $(HDR)
+ $(TCCX) -c $(TOP)/src/os.c
+
+parse.o: parse.c $(HDR)
+ $(TCCX) -c parse.c
+
+parse.h: parse.c
+
+parse.c: $(TOP)/src/parse.y lemon
+ cp $(TOP)/src/parse.y .
+ ./lemon parse.y
+
+# The config.h file will contain a single #define that tells us how
+# many bytes are in a pointer. This only works if a pointer is the
+# same size on the host as it is on the target. If you are cross-compiling
+# to a target with a different pointer size, you'll need to manually
+# configure the config.h file.
+#
+config.h:
+ echo '#include <stdio.h>' >temp.c
+ echo 'int main(){printf(' >>temp.c
+ echo '"#define SQLITE_PTR_SZ %d",sizeof(char*));' >>temp.c
+ echo 'exit(0);}' >>temp.c
+ $(BCC) -o temp temp.c
+ ./temp >config.h
+ echo >>config.h
+ rm -f temp.c temp
+
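+# For illustration only: on a 64-bit build host the generated config.h
+# contains nothing but
+#
+#	#define SQLITE_PTR_SZ 8
+#
+# (SQLITE_PTR_SZ 4 on a 32-bit host), followed by a trailing newline.
+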
+sqlite.h: $(TOP)/src/sqlite.h.in
+ sed -e s/--VERS--/`cat ${TOP}/VERSION`/ \
+ -e s/--ENCODING--/$(ENCODING)/ \
+ $(TOP)/src/sqlite.h.in >sqlite.h
+
+tokenize.o: $(TOP)/src/tokenize.c $(HDR)
+ $(TCCX) -c $(TOP)/src/tokenize.c
+
+trigger.o: $(TOP)/src/trigger.c $(HDR)
+ $(TCCX) -c $(TOP)/src/trigger.c
+
+util.o: $(TOP)/src/util.c $(HDR)
+ $(TCCX) -c $(TOP)/src/util.c
+
+vacuum.o: $(TOP)/src/vacuum.c $(HDR)
+ $(TCCX) -c $(TOP)/src/vacuum.c
+
+vdbe.o: $(TOP)/src/vdbe.c $(VDBEHDR)
+ $(TCCX) -c $(TOP)/src/vdbe.c
+
+vdbeaux.o: $(TOP)/src/vdbeaux.c $(VDBEHDR)
+ $(TCCX) -c $(TOP)/src/vdbeaux.c
+
+where.o: $(TOP)/src/where.c $(HDR)
+ $(TCCX) -c $(TOP)/src/where.c
+
+copy.o: $(TOP)/src/copy.c $(HDR)
+ $(TCCX) -c $(TOP)/src/copy.c
+
+date.o: $(TOP)/src/date.c $(HDR)
+ $(TCCX) -c $(TOP)/src/date.c
+
+delete.o: $(TOP)/src/delete.c $(HDR)
+ $(TCCX) -c $(TOP)/src/delete.c
+
+encode.o: $(TOP)/src/encode.c
+ $(TCCX) -c $(TOP)/src/encode.c
+
+expr.o: $(TOP)/src/expr.c $(HDR)
+ $(TCCX) -c $(TOP)/src/expr.c
+
+func.o: $(TOP)/src/func.c $(HDR)
+ $(TCCX) -c $(TOP)/src/func.c
+
+hash.o: $(TOP)/src/hash.c $(HDR)
+ $(TCCX) -c $(TOP)/src/hash.c
+
+insert.o: $(TOP)/src/insert.c $(HDR)
+ $(TCCX) -c $(TOP)/src/insert.c
+
+random.o: $(TOP)/src/random.c $(HDR)
+ $(TCCX) -c $(TOP)/src/random.c
+
+select.o: $(TOP)/src/select.c $(HDR)
+ $(TCCX) -c $(TOP)/src/select.c
+
+table.o: $(TOP)/src/table.c $(HDR)
+ $(TCCX) -c $(TOP)/src/table.c
+
+update.o: $(TOP)/src/update.c $(HDR)
+ $(TCCX) -c $(TOP)/src/update.c
+
+tclsqlite.o: $(TOP)/src/tclsqlite.c $(HDR)
+ $(TCCX) $(TCL_FLAGS) -c $(TOP)/src/tclsqlite.c
+
+pragma.o: $(TOP)/src/pragma.c $(HDR)
+ $(TCCX) $(TCL_FLAGS) -c $(TOP)/src/pragma.c
+
+printf.o: $(TOP)/src/printf.c $(HDR)
+ $(TCCX) $(TCL_FLAGS) -c $(TOP)/src/printf.c
+
+attach.o: $(TOP)/src/attach.c $(HDR)
+ $(TCCX) -c $(TOP)/src/attach.c
+
+auth.o: $(TOP)/src/auth.c $(HDR)
+ $(TCCX) -c $(TOP)/src/auth.c
+
+tclsqlite: $(TOP)/src/tclsqlite.c libsqlite.a
+ $(TCCX) $(TCL_FLAGS) -DTCLSH=1 -o tclsqlite \
+ $(TOP)/src/tclsqlite.c libsqlite.a $(LIBTCL)
+
+testfixture$(EXE): $(TOP)/src/tclsqlite.c libsqlite.a $(TESTSRC)
+ $(TCCX) $(TCL_FLAGS) -DTCLSH=1 -DSQLITE_TEST=1 -o testfixture$(EXE) \
+ $(TESTSRC) $(TOP)/src/tclsqlite.c \
+ libsqlite.a $(LIBTCL) $(THREADLIB)
+
+fulltest: testfixture$(EXE) sqlite$(EXE)
+ ./testfixture$(EXE) $(TOP)/test/all.test
+
+test: testfixture$(EXE) sqlite$(EXE)
+ ./testfixture$(EXE) $(TOP)/test/quick.test
+
+index.html: $(TOP)/www/index.tcl last_change
+ tclsh $(TOP)/www/index.tcl `cat $(TOP)/VERSION` >index.html
+
+sqlite.html: $(TOP)/www/sqlite.tcl
+ tclsh $(TOP)/www/sqlite.tcl >sqlite.html
+
+c_interface.html: $(TOP)/www/c_interface.tcl
+ tclsh $(TOP)/www/c_interface.tcl >c_interface.html
+
+changes.html: $(TOP)/www/changes.tcl
+ tclsh $(TOP)/www/changes.tcl >changes.html
+
+lang.html: $(TOP)/www/lang.tcl
+ tclsh $(TOP)/www/lang.tcl >lang.html
+
+vdbe.html: $(TOP)/www/vdbe.tcl
+ tclsh $(TOP)/www/vdbe.tcl >vdbe.html
+
+arch.html: $(TOP)/www/arch.tcl
+ tclsh $(TOP)/www/arch.tcl >arch.html
+
+arch.png: $(TOP)/www/arch.png
+ cp $(TOP)/www/arch.png .
+
+opcode.html: $(TOP)/www/opcode.tcl $(TOP)/src/vdbe.c
+ tclsh $(TOP)/www/opcode.tcl $(TOP)/src/vdbe.c >opcode.html
+
+mingw.html: $(TOP)/www/mingw.tcl
+ tclsh $(TOP)/www/mingw.tcl >mingw.html
+
+tclsqlite.html: $(TOP)/www/tclsqlite.tcl
+ tclsh $(TOP)/www/tclsqlite.tcl >tclsqlite.html
+
+speed.html: $(TOP)/www/speed.tcl
+ tclsh $(TOP)/www/speed.tcl >speed.html
+
+faq.html: $(TOP)/www/faq.tcl
+ tclsh $(TOP)/www/faq.tcl >faq.html
+
+formatchng.html: $(TOP)/www/formatchng.tcl
+ tclsh $(TOP)/www/formatchng.tcl >formatchng.html
+
+conflict.html: $(TOP)/www/conflict.tcl
+ tclsh $(TOP)/www/conflict.tcl >conflict.html
+
+download.html: $(TOP)/www/download.tcl
+ tclsh $(TOP)/www/download.tcl >download.html
+
+omitted.html: $(TOP)/www/omitted.tcl
+ tclsh $(TOP)/www/omitted.tcl >omitted.html
+
+datatypes.html: $(TOP)/www/datatypes.tcl
+ tclsh $(TOP)/www/datatypes.tcl >datatypes.html
+
+quickstart.html: $(TOP)/www/quickstart.tcl
+ tclsh $(TOP)/www/quickstart.tcl >quickstart.html
+
+fileformat.html: $(TOP)/www/fileformat.tcl
+ tclsh $(TOP)/www/fileformat.tcl >fileformat.html
+
+nulls.html: $(TOP)/www/nulls.tcl
+ tclsh $(TOP)/www/nulls.tcl >nulls.html
+
+
+# Files to be published on the website.
+#
+DOC = \
+ index.html \
+ sqlite.html \
+ changes.html \
+ lang.html \
+ opcode.html \
+ arch.html \
+ arch.png \
+ vdbe.html \
+ c_interface.html \
+ mingw.html \
+ tclsqlite.html \
+ download.html \
+ speed.html \
+ faq.html \
+ formatchng.html \
+ conflict.html \
+ omitted.html \
+ datatypes.html \
+ quickstart.html \
+ fileformat.html \
+ nulls.html
+
+doc: $(DOC)
+ mkdir -p doc
+ mv $(DOC) doc
+
+install: sqlite libsqlite.a sqlite.h
+ mv sqlite /usr/bin
+ mv libsqlite.a /usr/lib
+ mv sqlite.h /usr/include
+
+clean:
+ rm -f *.o sqlite libsqlite.a sqlite.h opcodes.*
+ rm -f lemon lempar.c parse.* sqlite*.tar.gz
+ rm -f $(PUBLISH)
+ rm -f *.da *.bb *.bbg gmon.out
+ rm -rf tsrc
diff --git a/usr/src/cmd/svc/configd/sqlite/mapfile-sqlite b/usr/src/cmd/svc/configd/sqlite/mapfile-sqlite
new file mode 100644
index 0000000000..43c22f5823
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/mapfile-sqlite
@@ -0,0 +1,64 @@
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+#
+#
+# Defines the public interface to sqlite
+#
+
+{
+ global:
+ # exported functions
+ sqlite_open;
+ sqlite_close;
+ sqlite_exec;
+ sqlite_last_insert_rowid;
+ sqlite_changes;
+ sqlite_last_statement_changes;
+ sqlite_error_string;
+ sqlite_interrupt;
+ sqlite_complete;
+ sqlite_busy_handler;
+ sqlite_busy_timeout;
+ sqlite_get_table;
+ sqlite_free_table;
+ sqlite_exec_printf;
+ sqlite_exec_vprintf;
+ sqlite_get_table_printf;
+ sqlite_get_table_vprintf;
+ sqlite_mprintf;
+ sqlite_vmprintf;
+ sqlite_freemem;
+ sqlite_libversion;
+ sqlite_libencoding;
+ sqlite_create_function;
+ sqlite_create_aggregate;
+ sqlite_function_type;
+ sqlite_set_result_string;
+ sqlite_set_result_int;
+ sqlite_set_result_double;
+ sqlite_set_result_error;
+ sqlite_user_data;
+ sqlite_aggregate_context;
+ sqlite_aggregate_count;
+ sqlite_set_authorizer;
+ sqlite_trace;
+ sqlite_compile;
+ sqlite_step;
+ sqlite_finalize;
+ sqlite_reset;
+ sqlite_bind;
+ sqlite_progress_handler;
+ sqlite_commit_hook;
+ sqlite_encode_binary;
+ sqlite_decode_binary;
+
+ # exported data
+ sqlite_version;
+ sqlite_encoding;
+
+ local:
+ *;
+};
diff --git a/usr/src/cmd/svc/configd/sqlite/sqlite-misc.h b/usr/src/cmd/svc/configd/sqlite/sqlite-misc.h
new file mode 100644
index 0000000000..d2d2745ba0
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/sqlite-misc.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SQLITE_MISC_H
+#define _SQLITE_MISC_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * defines an extra temp directory to try first
+ */
+extern const char *sqlite_temp_directory;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SQLITE_MISC_H */
diff --git a/usr/src/cmd/svc/configd/sqlite/src/attach.c b/usr/src/cmd/svc/configd/sqlite/src/attach.c
new file mode 100644
index 0000000000..c3e9ca3c1c
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/attach.c
@@ -0,0 +1,314 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2003 April 6
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code used to implement the ATTACH and DETACH commands.
+**
+** $Id: attach.c,v 1.10.2.1 2004/05/07 01:46:01 drh Exp $
+*/
+#include "sqliteInt.h"
+
+/*
+** This routine is called by the parser to process an ATTACH statement:
+**
+** ATTACH DATABASE filename AS dbname
+**
+** The pFilename and pDbname arguments are the tokens that define the
+** filename and dbname in the ATTACH statement.
+*/
+void sqliteAttach(Parse *pParse, Token *pFilename, Token *pDbname, Token *pKey){
+ Db *aNew;
+ int rc, i;
+ char *zFile, *zName;
+ sqlite *db;
+ Vdbe *v;
+
+ v = sqliteGetVdbe(pParse);
+ sqliteVdbeAddOp(v, OP_Halt, 0, 0);
+ if( pParse->explain ) return;
+ db = pParse->db;
+ if( db->file_format<4 ){
+ sqliteErrorMsg(pParse, "cannot attach auxiliary databases to an "
+ "older format master database", 0);
+ pParse->rc = SQLITE_ERROR;
+ return;
+ }
+ if( db->nDb>=MAX_ATTACHED+2 ){
+ sqliteErrorMsg(pParse, "too many attached databases - max %d",
+ MAX_ATTACHED);
+ pParse->rc = SQLITE_ERROR;
+ return;
+ }
+
+ zFile = 0;
+ sqliteSetNString(&zFile, pFilename->z, pFilename->n, 0);
+ if( zFile==0 ) return;
+ sqliteDequote(zFile);
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ if( sqliteAuthCheck(pParse, SQLITE_ATTACH, zFile, 0, 0)!=SQLITE_OK ){
+ sqliteFree(zFile);
+ return;
+ }
+#endif /* SQLITE_OMIT_AUTHORIZATION */
+
+ zName = 0;
+ sqliteSetNString(&zName, pDbname->z, pDbname->n, 0);
+ if( zName==0 ) return;
+ sqliteDequote(zName);
+ for(i=0; i<db->nDb; i++){
+ if( db->aDb[i].zName && sqliteStrICmp(db->aDb[i].zName, zName)==0 ){
+ sqliteErrorMsg(pParse, "database %z is already in use", zName);
+ pParse->rc = SQLITE_ERROR;
+ sqliteFree(zFile);
+ return;
+ }
+ }
+
+ if( db->aDb==db->aDbStatic ){
+ aNew = sqliteMalloc( sizeof(db->aDb[0])*3 );
+ if( aNew==0 ) return;
+ memcpy(aNew, db->aDb, sizeof(db->aDb[0])*2);
+ }else{
+ aNew = sqliteRealloc(db->aDb, sizeof(db->aDb[0])*(db->nDb+1) );
+ if( aNew==0 ) return;
+ }
+ db->aDb = aNew;
+ aNew = &db->aDb[db->nDb++];
+ memset(aNew, 0, sizeof(*aNew));
+ sqliteHashInit(&aNew->tblHash, SQLITE_HASH_STRING, 0);
+ sqliteHashInit(&aNew->idxHash, SQLITE_HASH_STRING, 0);
+ sqliteHashInit(&aNew->trigHash, SQLITE_HASH_STRING, 0);
+ sqliteHashInit(&aNew->aFKey, SQLITE_HASH_STRING, 1);
+ aNew->zName = zName;
+ rc = sqliteBtreeFactory(db, zFile, 0, MAX_PAGES, &aNew->pBt);
+ if( rc ){
+ sqliteErrorMsg(pParse, "unable to open database: %s", zFile);
+ }
+#if SQLITE_HAS_CODEC
+ {
+ extern int sqliteCodecAttach(sqlite*, int, void*, int);
+ char *zKey = 0;
+ int nKey;
+ if( pKey && pKey->z && pKey->n ){
+ sqliteSetNString(&zKey, pKey->z, pKey->n, 0);
+ sqliteDequote(zKey);
+ nKey = strlen(zKey);
+ }else{
+ zKey = 0;
+ nKey = 0;
+ }
+ sqliteCodecAttach(db, db->nDb-1, zKey, nKey);
+ }
+#endif
+ sqliteFree(zFile);
+ db->flags &= ~SQLITE_Initialized;
+ if( pParse->nErr ) return;
+ if( rc==SQLITE_OK ){
+ rc = sqliteInit(pParse->db, &pParse->zErrMsg);
+ }
+ if( rc ){
+ int i = db->nDb - 1;
+ assert( i>=2 );
+ if( db->aDb[i].pBt ){
+ sqliteBtreeClose(db->aDb[i].pBt);
+ db->aDb[i].pBt = 0;
+ }
+ sqliteResetInternalSchema(db, 0);
+ pParse->nErr++;
+ pParse->rc = SQLITE_ERROR;
+ }
+}
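+
+/*
+ * Illustrative sketch (hypothetical; not referenced anywhere else): how an
+ * application reaches sqliteAttach() and sqliteDetach() through the public
+ * API.  The file name and alias are made up.
+ */
+static int exampleAttachAux(sqlite *db, char **pzErrMsg){
+  int rc;
+  rc = sqlite_exec(db, "ATTACH DATABASE 'aux.db' AS aux;", 0, 0, pzErrMsg);
+  if( rc!=SQLITE_OK ) return rc;
+  return sqlite_exec(db, "DETACH DATABASE aux;", 0, 0, pzErrMsg);
+}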
+
+/*
+** This routine is called by the parser to process a DETACH statement:
+**
+** DETACH DATABASE dbname
+**
+** The pDbname argument is the name of the database in the DETACH statement.
+*/
+void sqliteDetach(Parse *pParse, Token *pDbname){
+ int i;
+ sqlite *db;
+ Vdbe *v;
+ Db *pDb;
+
+ v = sqliteGetVdbe(pParse);
+ sqliteVdbeAddOp(v, OP_Halt, 0, 0);
+ if( pParse->explain ) return;
+ db = pParse->db;
+ for(i=0; i<db->nDb; i++){
+ pDb = &db->aDb[i];
+ if( pDb->pBt==0 || pDb->zName==0 ) continue;
+ if( strlen(pDb->zName)!=pDbname->n ) continue;
+ if( sqliteStrNICmp(pDb->zName, pDbname->z, pDbname->n)==0 ) break;
+ }
+ if( i>=db->nDb ){
+ sqliteErrorMsg(pParse, "no such database: %T", pDbname);
+ return;
+ }
+ if( i<2 ){
+ sqliteErrorMsg(pParse, "cannot detach database %T", pDbname);
+ return;
+ }
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ if( sqliteAuthCheck(pParse,SQLITE_DETACH,db->aDb[i].zName,0,0)!=SQLITE_OK ){
+ return;
+ }
+#endif /* SQLITE_OMIT_AUTHORIZATION */
+ sqliteBtreeClose(pDb->pBt);
+ pDb->pBt = 0;
+ sqliteFree(pDb->zName);
+ sqliteResetInternalSchema(db, i);
+ if( pDb->pAux && pDb->xFreeAux ) pDb->xFreeAux(pDb->pAux);
+ db->nDb--;
+ if( i<db->nDb ){
+ db->aDb[i] = db->aDb[db->nDb];
+ memset(&db->aDb[db->nDb], 0, sizeof(db->aDb[0]));
+ sqliteResetInternalSchema(db, i);
+ }
+}
+
+/*
+** Initialize a DbFixer structure. This routine must be called prior
+** to passing the structure to one of the sqliteFixAAAA() routines below.
+**
+** The return value indicates whether or not fixation is required. TRUE
+** means we do need to fix the database references, FALSE means we do not.
+*/
+int sqliteFixInit(
+ DbFixer *pFix, /* The fixer to be initialized */
+ Parse *pParse, /* Error messages will be written here */
+  int iDb,            /* This is the database that must be used */
+ const char *zType, /* "view", "trigger", or "index" */
+ const Token *pName /* Name of the view, trigger, or index */
+){
+ sqlite *db;
+
+ if( iDb<0 || iDb==1 ) return 0;
+ db = pParse->db;
+ assert( db->nDb>iDb );
+ pFix->pParse = pParse;
+ pFix->zDb = db->aDb[iDb].zName;
+ pFix->zType = zType;
+ pFix->pName = pName;
+ return 1;
+}
+
+/*
+** The following set of routines walk through the parse tree and assign
+** a specific database to all table references where the database name
+** was left unspecified in the original SQL statement. The pFix structure
+** must have been initialized by a prior call to sqliteFixInit().
+**
+** These routines are used to make sure that an index, trigger, or
+** view in one database does not refer to objects in a different database.
+** (Exception: indices, triggers, and views in the TEMP database are
+** allowed to refer to anything.) If a reference is explicitly made
+** to an object in a different database, an error message is added to
+** pParse->zErrMsg and these routines return non-zero. If everything
+** checks out, these routines return 0.
+*/
+int sqliteFixSrcList(
+ DbFixer *pFix, /* Context of the fixation */
+ SrcList *pList /* The Source list to check and modify */
+){
+ int i;
+ const char *zDb;
+
+ if( pList==0 ) return 0;
+ zDb = pFix->zDb;
+ for(i=0; i<pList->nSrc; i++){
+ if( pList->a[i].zDatabase==0 ){
+ pList->a[i].zDatabase = sqliteStrDup(zDb);
+ }else if( sqliteStrICmp(pList->a[i].zDatabase,zDb)!=0 ){
+ sqliteErrorMsg(pFix->pParse,
+ "%s %z cannot reference objects in database %s",
+ pFix->zType, sqliteStrNDup(pFix->pName->z, pFix->pName->n),
+ pList->a[i].zDatabase);
+ return 1;
+ }
+ if( sqliteFixSelect(pFix, pList->a[i].pSelect) ) return 1;
+ if( sqliteFixExpr(pFix, pList->a[i].pOn) ) return 1;
+ }
+ return 0;
+}
+int sqliteFixSelect(
+ DbFixer *pFix, /* Context of the fixation */
+ Select *pSelect /* The SELECT statement to be fixed to one database */
+){
+ while( pSelect ){
+ if( sqliteFixExprList(pFix, pSelect->pEList) ){
+ return 1;
+ }
+ if( sqliteFixSrcList(pFix, pSelect->pSrc) ){
+ return 1;
+ }
+ if( sqliteFixExpr(pFix, pSelect->pWhere) ){
+ return 1;
+ }
+ if( sqliteFixExpr(pFix, pSelect->pHaving) ){
+ return 1;
+ }
+ pSelect = pSelect->pPrior;
+ }
+ return 0;
+}
+int sqliteFixExpr(
+ DbFixer *pFix, /* Context of the fixation */
+ Expr *pExpr /* The expression to be fixed to one database */
+){
+ while( pExpr ){
+ if( sqliteFixSelect(pFix, pExpr->pSelect) ){
+ return 1;
+ }
+ if( sqliteFixExprList(pFix, pExpr->pList) ){
+ return 1;
+ }
+ if( sqliteFixExpr(pFix, pExpr->pRight) ){
+ return 1;
+ }
+ pExpr = pExpr->pLeft;
+ }
+ return 0;
+}
+int sqliteFixExprList(
+ DbFixer *pFix, /* Context of the fixation */
+ ExprList *pList /* The expression to be fixed to one database */
+){
+ int i;
+ if( pList==0 ) return 0;
+ for(i=0; i<pList->nExpr; i++){
+ if( sqliteFixExpr(pFix, pList->a[i].pExpr) ){
+ return 1;
+ }
+ }
+ return 0;
+}
+int sqliteFixTriggerStep(
+ DbFixer *pFix, /* Context of the fixation */
+ TriggerStep *pStep /* The trigger step be fixed to one database */
+){
+ while( pStep ){
+ if( sqliteFixSelect(pFix, pStep->pSelect) ){
+ return 1;
+ }
+ if( sqliteFixExpr(pFix, pStep->pWhere) ){
+ return 1;
+ }
+ if( sqliteFixExprList(pFix, pStep->pExprList) ){
+ return 1;
+ }
+ pStep = pStep->pNext;
+ }
+ return 0;
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/src/auth.c b/usr/src/cmd/svc/configd/sqlite/src/auth.c
new file mode 100644
index 0000000000..0fb7a47658
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/auth.c
@@ -0,0 +1,222 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2003 January 11
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code used to implement the sqlite_set_authorizer()
+** API. This facility is an optional feature of the library. Embedded
+** systems that do not need this facility may omit it by recompiling
+** the library with -DSQLITE_OMIT_AUTHORIZATION=1
+**
+** $Id: auth.c,v 1.12.2.1 2004/06/14 11:58:37 drh Exp $
+*/
+#include "sqliteInt.h"
+
+/*
+** All of the code in this file may be omitted by defining a single
+** macro.
+*/
+#ifndef SQLITE_OMIT_AUTHORIZATION
+
+/*
+** Set or clear the access authorization function.
+**
+** The access authorization function is called during the compilation
+** phase to verify that the user has read and/or write access permission on
+** various fields of the database. The first argument to the auth function
+** is a copy of the 3rd argument to this routine. The second argument
+** to the auth function is one of these constants:
+**
+** SQLITE_COPY
+** SQLITE_CREATE_INDEX
+** SQLITE_CREATE_TABLE
+** SQLITE_CREATE_TEMP_INDEX
+** SQLITE_CREATE_TEMP_TABLE
+** SQLITE_CREATE_TEMP_TRIGGER
+** SQLITE_CREATE_TEMP_VIEW
+** SQLITE_CREATE_TRIGGER
+** SQLITE_CREATE_VIEW
+** SQLITE_DELETE
+** SQLITE_DROP_INDEX
+** SQLITE_DROP_TABLE
+** SQLITE_DROP_TEMP_INDEX
+** SQLITE_DROP_TEMP_TABLE
+** SQLITE_DROP_TEMP_TRIGGER
+** SQLITE_DROP_TEMP_VIEW
+** SQLITE_DROP_TRIGGER
+** SQLITE_DROP_VIEW
+** SQLITE_INSERT
+** SQLITE_PRAGMA
+** SQLITE_READ
+** SQLITE_SELECT
+** SQLITE_TRANSACTION
+** SQLITE_UPDATE
+**
+** The third and fourth arguments to the auth function are the name of
+** the table and the column that are being accessed. The auth function
+** should return either SQLITE_OK, SQLITE_DENY, or SQLITE_IGNORE. If
+** SQLITE_OK is returned, it means that access is allowed. SQLITE_DENY
+** means that the SQL statement will never run - the sqlite_exec() call
+** will return with an error. SQLITE_IGNORE means that the SQL statement
+** should run but attempts to read the specified column will return NULL
+** and attempts to write the column will be ignored.
+**
+** Setting the auth function to NULL disables this hook. The default
+** setting of the auth function is NULL.
+*/
+int sqlite_set_authorizer(
+ sqlite *db,
+ int (*xAuth)(void*,int,const char*,const char*,const char*,const char*),
+ void *pArg
+){
+ db->xAuth = xAuth;
+ db->pAuthArg = pArg;
+ return SQLITE_OK;
+}
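+
+/*
+ * Illustrative sketch (hypothetical; not referenced anywhere else): an
+ * authorizer callback that refuses DELETE statements and allows everything
+ * else.  It would be installed with the routine above, e.g.
+ * sqlite_set_authorizer(db, exampleNoDeleteAuth, 0).
+ */
+static int exampleNoDeleteAuth(
+  void *pArg,         /* Copy of the 3rd argument to sqlite_set_authorizer() */
+  int code,           /* One of the SQLITE_* action codes listed above */
+  const char *zArg1,  /* Table or other object name, depending on code */
+  const char *zArg2,  /* Column name, when applicable */
+  const char *zArg3,  /* Database name, when applicable */
+  const char *zArg4   /* Authorization context string */
+){
+  return code==SQLITE_DELETE ? SQLITE_DENY : SQLITE_OK;
+}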
+
+/*
+** Write an error message into pParse->zErrMsg that explains that the
+** user-supplied authorization function returned an illegal value.
+*/
+static void sqliteAuthBadReturnCode(Parse *pParse, int rc){
+ sqliteErrorMsg(pParse, "illegal return value (%d) from the "
+ "authorization function - should be SQLITE_OK, SQLITE_IGNORE, "
+ "or SQLITE_DENY", rc);
+ pParse->rc = SQLITE_MISUSE;
+}
+
+/*
+** The pExpr should be a TK_COLUMN expression. The table referred to
+** is in pTabList or else it is the NEW or OLD table of a trigger.
+** Check to see if it is OK to read this particular column.
+**
+** If the auth function returns SQLITE_IGNORE, change the TK_COLUMN
+** instruction into a TK_NULL. If the auth function returns SQLITE_DENY,
+** then generate an error.
+*/
+void sqliteAuthRead(
+ Parse *pParse, /* The parser context */
+ Expr *pExpr, /* The expression to check authorization on */
+  SrcList *pTabList     /* All tables that pExpr might refer to */
+){
+ sqlite *db = pParse->db;
+ int rc;
+ Table *pTab; /* The table being read */
+ const char *zCol; /* Name of the column of the table */
+ int iSrc; /* Index in pTabList->a[] of table being read */
+ const char *zDBase; /* Name of database being accessed */
+
+ if( db->xAuth==0 ) return;
+ assert( pExpr->op==TK_COLUMN );
+ for(iSrc=0; iSrc<pTabList->nSrc; iSrc++){
+ if( pExpr->iTable==pTabList->a[iSrc].iCursor ) break;
+ }
+ if( iSrc>=0 && iSrc<pTabList->nSrc ){
+ pTab = pTabList->a[iSrc].pTab;
+ }else{
+ /* This must be an attempt to read the NEW or OLD pseudo-tables
+ ** of a trigger.
+ */
+ TriggerStack *pStack; /* The stack of current triggers */
+ pStack = pParse->trigStack;
+ assert( pStack!=0 );
+ assert( pExpr->iTable==pStack->newIdx || pExpr->iTable==pStack->oldIdx );
+ pTab = pStack->pTab;
+ }
+ if( pTab==0 ) return;
+ if( pExpr->iColumn>=0 ){
+ assert( pExpr->iColumn<pTab->nCol );
+ zCol = pTab->aCol[pExpr->iColumn].zName;
+ }else if( pTab->iPKey>=0 ){
+ assert( pTab->iPKey<pTab->nCol );
+ zCol = pTab->aCol[pTab->iPKey].zName;
+ }else{
+ zCol = "ROWID";
+ }
+ assert( pExpr->iDb<db->nDb );
+ zDBase = db->aDb[pExpr->iDb].zName;
+ rc = db->xAuth(db->pAuthArg, SQLITE_READ, pTab->zName, zCol, zDBase,
+ pParse->zAuthContext);
+ if( rc==SQLITE_IGNORE ){
+ pExpr->op = TK_NULL;
+ }else if( rc==SQLITE_DENY ){
+ if( db->nDb>2 || pExpr->iDb!=0 ){
+ sqliteErrorMsg(pParse, "access to %s.%s.%s is prohibited",
+ zDBase, pTab->zName, zCol);
+ }else{
+ sqliteErrorMsg(pParse, "access to %s.%s is prohibited", pTab->zName,zCol);
+ }
+ pParse->rc = SQLITE_AUTH;
+ }else if( rc!=SQLITE_OK ){
+ sqliteAuthBadReturnCode(pParse, rc);
+ }
+}
+
+/*
+** Do an authorization check using the code and arguments given. Return
+** either SQLITE_OK (zero) or SQLITE_IGNORE or SQLITE_DENY. If SQLITE_DENY
+** is returned, then the error count and error message in pParse are
+** modified appropriately.
+*/
+int sqliteAuthCheck(
+ Parse *pParse,
+ int code,
+ const char *zArg1,
+ const char *zArg2,
+ const char *zArg3
+){
+ sqlite *db = pParse->db;
+ int rc;
+
+ if( db->init.busy || db->xAuth==0 ){
+ return SQLITE_OK;
+ }
+ rc = db->xAuth(db->pAuthArg, code, zArg1, zArg2, zArg3, pParse->zAuthContext);
+ if( rc==SQLITE_DENY ){
+ sqliteErrorMsg(pParse, "not authorized");
+ pParse->rc = SQLITE_AUTH;
+ }else if( rc!=SQLITE_OK && rc!=SQLITE_IGNORE ){
+ rc = SQLITE_DENY;
+ sqliteAuthBadReturnCode(pParse, rc);
+ }
+ return rc;
+}
+
+/*
+** Push an authorization context. After this routine is called, the
+** zArg3 argument to authorization callbacks will be zContext until
+** popped. Or if pParse==0, this routine is a no-op.
+*/
+void sqliteAuthContextPush(
+ Parse *pParse,
+ AuthContext *pContext,
+ const char *zContext
+){
+ pContext->pParse = pParse;
+ if( pParse ){
+ pContext->zAuthContext = pParse->zAuthContext;
+ pParse->zAuthContext = zContext;
+ }
+}
+
+/*
+** Pop an authorization context that was previously pushed
+** by sqliteAuthContextPush
+*/
+void sqliteAuthContextPop(AuthContext *pContext){
+ if( pContext->pParse ){
+ pContext->pParse->zAuthContext = pContext->zAuthContext;
+ pContext->pParse = 0;
+ }
+}
+
+#endif /* SQLITE_OMIT_AUTHORIZATION */
diff --git a/usr/src/cmd/svc/configd/sqlite/src/btree.c b/usr/src/cmd/svc/configd/sqlite/src/btree.c
new file mode 100644
index 0000000000..f4b47fa6f9
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/btree.c
@@ -0,0 +1,3593 @@
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** $Id: btree.c,v 1.103 2004/03/10 13:42:38 drh Exp $
+**
+** This file implements an external (disk-based) database using BTrees.
+** For a detailed discussion of BTrees, refer to
+**
+** Donald E. Knuth, THE ART OF COMPUTER PROGRAMMING, Volume 3:
+** "Sorting And Searching", pages 473-480. Addison-Wesley
+** Publishing Company, Reading, Massachusetts.
+**
+** The basic idea is that each page of the file contains N database
+** entries and N+1 pointers to subpages.
+**
+** ----------------------------------------------------------------
+** | Ptr(0) | Key(0) | Ptr(1) | Key(1) | ... | Key(N) | Ptr(N+1) |
+** ----------------------------------------------------------------
+**
+** All of the keys on the page that Ptr(0) points to have values less
+** than Key(0). All of the keys on page Ptr(1) and its subpages have
+** values greater than Key(0) and less than Key(1). All of the keys
+** on Ptr(N+1) and its subpages have values greater than Key(N). And
+** so forth.
+**
+** Finding a particular key requires reading O(log(M)) pages from the
+** disk where M is the number of entries in the tree.
+**
+** In this implementation, a single file can hold one or more separate
+** BTrees. Each BTree is identified by the index of its root page. The
+** key and data for any entry are combined to form the "payload". Up to
+** MX_LOCAL_PAYLOAD bytes of payload can be carried directly on the
+** database page. If the payload is larger than MX_LOCAL_PAYLOAD bytes
+** then surplus bytes are stored on overflow pages. The payload for an
+** entry and the preceding pointer are combined to form a "Cell". Each
+** page has a small header which contains the Ptr(N+1) pointer.
+**
+** The first page of the file contains a magic string used to verify that
+** the file really is a valid BTree database, a pointer to a list of unused
+** pages in the file, and some meta information. The root of the first
+** BTree begins on page 2 of the file. (Pages are numbered beginning with
+** 1, not 0.) Thus a minimum database contains 2 pages.
+*/
+#include "sqliteInt.h"
+#include "pager.h"
+#include "btree.h"
+#include <assert.h>
+
+/* Forward declarations */
+static BtOps sqliteBtreeOps;
+static BtCursorOps sqliteBtreeCursorOps;
+
+/*
+** Macros used for byteswapping. B is a pointer to the Btree
+** structure. This is needed to access the Btree.needSwab boolean
+** in order to tell if byte swapping is needed or not.
+** X is an unsigned integer. SWAB16 byte swaps a 16-bit integer.
+** SWAB32 byteswaps a 32-bit integer.
+*/
+#define SWAB16(B,X) ((B)->needSwab? swab16((u16)X) : ((u16)X))
+#define SWAB32(B,X) ((B)->needSwab? swab32(X) : (X))
+#define SWAB_ADD(B,X,A) \
+ if((B)->needSwab){ X=swab32(swab32(X)+A); }else{ X += (A); }
+
+/*
+** The following global variable - available only if SQLITE_TEST is
+** defined - is used to determine whether new databases are created in
+** native byte order or in non-native byte order. Non-native byte order
+** databases are created for testing purposes only. Under normal operation,
+** only native byte-order databases should be created, but we should be
+** able to read or write existing databases regardless of the byteorder.
+*/
+#ifdef SQLITE_TEST
+int btree_native_byte_order = 1;
+#else
+# define btree_native_byte_order 1
+#endif
+
+/*
+** Forward declarations of structures used only in this file.
+*/
+typedef struct PageOne PageOne;
+typedef struct MemPage MemPage;
+typedef struct PageHdr PageHdr;
+typedef struct Cell Cell;
+typedef struct CellHdr CellHdr;
+typedef struct FreeBlk FreeBlk;
+typedef struct OverflowPage OverflowPage;
+typedef struct FreelistInfo FreelistInfo;
+
+/*
+** All structures on a database page are aligned to 4-byte boundaries.
+** This routine rounds up a number of bytes to the next multiple of 4.
+**
+** This might need to change for computer architectures that require
+** an 8-byte alignment boundary for structures.
+*/
+#define ROUNDUP(X) ((X+3) & ~3)
+
+/*
+** This is a magic string that appears at the beginning of every
+** SQLite database in order to identify the file as a real database.
+*/
+static const char zMagicHeader[] =
+ "** This file contains an SQLite 2.1 database **";
+#define MAGIC_SIZE (sizeof(zMagicHeader))
+
+/*
+** This is a magic integer also used to test the integrity of the database
+** file. This integer is used in addition to the string above so that
+** if the file is written on a little-endian architecture and read
+** on a big-endian architecture (or vice versa) we can detect the
+** problem.
+**
+** The number used was obtained at random and has no special
+** significance other than the fact that it represents a different
+** integer on little-endian and big-endian machines.
+*/
+#define MAGIC 0xdae37528
+
+/*
+** The first page of the database file contains a magic header string
+** to identify the file as an SQLite database file. It also contains
+** a pointer to the first free page of the file. Page 2 contains the
+** root of the principal BTree.  The file might contain other BTrees
+** rooted on pages above 2.
+**
+** The first page also contains SQLITE_N_BTREE_META integers that
+** can be used by higher-level routines.
+**
+** Remember that pages are numbered beginning with 1. (See pager.c
+** for additional information.) Page 0 does not exist and a page
+** number of 0 is used to mean "no such page".
+*/
+struct PageOne {
+ char zMagic[MAGIC_SIZE]; /* String that identifies the file as a database */
+ int iMagic; /* Integer to verify correct byte order */
+ Pgno freeList; /* First free page in a list of all free pages */
+ int nFree; /* Number of pages on the free list */
+ int aMeta[SQLITE_N_BTREE_META-1]; /* User defined integers */
+};
+
+/*
+** Each database page has a header that is an instance of this
+** structure.
+**
+** PageHdr.firstFree is 0 if there is no free space on this page.
+** Otherwise, PageHdr.firstFree is the index in MemPage.u.aDisk[] of a
+** FreeBlk structure that describes the first block of free space.
+** All free space is defined by a linked list of FreeBlk structures.
+**
+** Data is stored in a linked list of Cell structures. PageHdr.firstCell
+** is the index into MemPage.u.aDisk[] of the first cell on the page. The
+** Cells are kept in sorted order.
+**
+** A Cell contains all information about a database entry and a pointer
+** to a child page that contains other entries less than itself. In
+** other words, the i-th Cell contains both Ptr(i) and Key(i). The
+** right-most pointer of the page is contained in PageHdr.rightChild.
+*/
+struct PageHdr {
+ Pgno rightChild; /* Child page that comes after all cells on this page */
+ u16 firstCell; /* Index in MemPage.u.aDisk[] of the first cell */
+ u16 firstFree; /* Index in MemPage.u.aDisk[] of the first free block */
+};
+
+/*
+** Entries on a page of the database are called "Cells". Each Cell
+** has a header and data. This structure defines the header. The
+** key and data (collectively the "payload") follow this header on
+** the database page.
+**
+** A definition of the complete Cell structure is given below. The
+** header for the cell must be defined first in order to do some
+** of the sizing #defines that follow.
+*/
+struct CellHdr {
+ Pgno leftChild; /* Child page that comes before this cell */
+ u16 nKey; /* Number of bytes in the key */
+ u16 iNext; /* Index in MemPage.u.aDisk[] of next cell in sorted order */
+ u8 nKeyHi; /* Upper 8 bits of key size for keys larger than 64K bytes */
+ u8 nDataHi; /* Upper 8 bits of data size when the size is more than 64K */
+ u16 nData; /* Number of bytes of data */
+};
+
+/*
+** The key and data size are split into a lower 16-bit segment and an
+** upper 8-bit segment in order to pack them together into a smaller
+** space.  The following macros reassemble a key or data size back
+** into an integer.
+*/
+#define NKEY(b,h) (SWAB16(b,h.nKey) + h.nKeyHi*65536)
+#define NDATA(b,h) (SWAB16(b,h.nData) + h.nDataHi*65536)
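+
+/*
+ * For example (with no byte swapping), a 70196-byte key (0x011234) is
+ * stored as nKeyHi == 0x01 and nKey == 0x1234, and NKEY() rebuilds it
+ * as 0x1234 + 0x01*65536 == 70196.
+ */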
+
+/*
+** The minimum size of a complete Cell. The Cell must contain a header
+** and at least 4 bytes of payload.
+*/
+#define MIN_CELL_SIZE (sizeof(CellHdr)+4)
+
+/*
+** The maximum number of database entries that can be held in a single
+** page of the database.
+*/
+#define MX_CELL ((SQLITE_USABLE_SIZE-sizeof(PageHdr))/MIN_CELL_SIZE)
+
+/*
+** The amount of usable space on a single page of the BTree. This is the
+** page size minus the overhead of the page header.
+*/
+#define USABLE_SPACE (SQLITE_USABLE_SIZE - sizeof(PageHdr))
+
+/*
+** The maximum amount of payload (in bytes) that can be stored locally for
+** a database entry. If the entry contains more data than this, the
+** extra goes onto overflow pages.
+**
+** This number is chosen so that at least 4 cells will fit on every page.
+*/
+#define MX_LOCAL_PAYLOAD ((USABLE_SPACE/4-(sizeof(CellHdr)+sizeof(Pgno)))&~3)
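+
+/*
+ * With the default 1024-byte page (SQLITE_USABLE_SIZE == 1024) and the
+ * structure sizes above (an 8-byte PageHdr, a 12-byte CellHdr, a 4-byte
+ * Pgno, no padding assumed), this works out to ((1016/4) - 16) & ~3 == 236
+ * bytes of local payload, so a maximal cell occupies 12 + 236 + 4 == 252
+ * bytes and four such cells fit in the 1016 usable bytes of a page.
+ */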
+
+/*
+** Data on a database page is stored as a linked list of Cell structures.
+** Both the key and the data are stored in aPayload[]. The key always comes
+** first. The aPayload[] field grows as necessary to hold the key and data,
+** up to a maximum of MX_LOCAL_PAYLOAD bytes. If the size of the key and
+** data combined exceeds MX_LOCAL_PAYLOAD bytes, then Cell.ovfl is the
+** page number of the first overflow page.
+**
+** Though this structure is fixed in size, the Cell on the database
+** page varies in size. Every cell has a CellHdr and at least 4 bytes
+** of payload space. Additional payload bytes (up to the maximum of
+** MX_LOCAL_PAYLOAD) and the Cell.ovfl value are allocated only as
+** needed.
+*/
+struct Cell {
+ CellHdr h; /* The cell header */
+ char aPayload[MX_LOCAL_PAYLOAD]; /* Key and data */
+ Pgno ovfl; /* The first overflow page */
+};
+
+/*
+** Free space on a page is remembered using a linked list of the FreeBlk
+** structures. Space on a database page is allocated in increments of
+** at least 4 bytes and is always aligned to a 4-byte boundary.  The
+** linked list of FreeBlks is always kept in order by address.
+*/
+struct FreeBlk {
+ u16 iSize; /* Number of bytes in this block of free space */
+ u16 iNext; /* Index in MemPage.u.aDisk[] of the next free block */
+};
+
+/*
+** The number of bytes of payload that will fit on a single overflow page.
+*/
+#define OVERFLOW_SIZE (SQLITE_USABLE_SIZE-sizeof(Pgno))
+
+/*
+** When the key and data for a single entry in the BTree will not fit in
+** the MX_LOCAL_PAYLOAD bytes of space available on the database page,
+** then all extra bytes are written to a linked list of overflow pages.
+** Each overflow page is an instance of the following structure.
+**
+** Unused pages in the database are also represented by instances of
+** the OverflowPage structure. The PageOne.freeList field is the
+** page number of the first page in a linked list of unused database
+** pages.
+*/
+struct OverflowPage {
+ Pgno iNext;
+ char aPayload[OVERFLOW_SIZE];
+};
+
+/*
+** The PageOne.freeList field points to a linked list of overflow pages
+** that hold information about free pages.  The aPayload section of each
+** overflow page contains an instance of the following structure. The
+** aFree[] array holds the page number of nFree unused pages in the disk
+** file.
+*/
+struct FreelistInfo {
+ int nFree;
+ Pgno aFree[(OVERFLOW_SIZE-sizeof(int))/sizeof(Pgno)];
+};
+
+/*
+** For every page in the database file, an instance of the following structure
+** is stored in memory. The u.aDisk[] array contains the raw bits read from
+** the disk. The rest is auxiliary information held in memory only. The
+** auxiliary info is only valid for regular database pages - it is not
+** used for overflow pages and pages on the freelist.
+**
+** Of particular interest in the auxiliary info is the apCell[] entry. Each
+** apCell[] entry is a pointer to a Cell structure in u.aDisk[]. The cells are
+** put in this array so that they can be accessed in constant time, rather
+** than in linear time which would be needed if we had to walk the linked
+** list on every access.
+**
+** Note that apCell[] contains enough space to hold up to two more Cells
+** than can possibly fit on one page. In the steady state, every apCell[]
+** points to memory inside u.aDisk[]. But in the middle of an insert
+** operation, some apCell[] entries may temporarily point to data space
+** outside of u.aDisk[]. This is a transient situation that is quickly
+** resolved. But while it is happening, it is possible for a database
+** page to hold as many as two more cells than it might otherwise hold.
+** The extra two entries in apCell[] are an allowance for this situation.
+**
+** The pParent field points back to the parent page. This allows us to
+** walk up the BTree from any leaf to the root. Care must be taken to
+** unref() the parent page pointer when this page is no longer referenced.
+** The pageDestructor() routine handles that chore.
+*/
+struct MemPage {
+ union u_page_data {
+ char aDisk[SQLITE_PAGE_SIZE]; /* Page data stored on disk */
+ PageHdr hdr; /* Overlay page header */
+ } u;
+ u8 isInit; /* True if auxiliary data is initialized */
+ u8 idxShift; /* True if apCell[] indices have changed */
+ u8 isOverfull; /* Some apCell[] points outside u.aDisk[] */
+ MemPage *pParent; /* The parent of this page. NULL for root */
+ int idxParent; /* Index in pParent->apCell[] of this node */
+ int nFree; /* Number of free bytes in u.aDisk[] */
+ int nCell; /* Number of entries on this page */
+  Cell *apCell[MX_CELL+2]; /* All data entries in sorted order */
+};
+
+/*
+** The in-memory image of a disk page has the auxiliary information appended
+** to the end. EXTRA_SIZE is the number of bytes of space needed to hold
+** that extra information.
+*/
+#define EXTRA_SIZE (sizeof(MemPage)-sizeof(union u_page_data))
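+
+/*
+** The pager is told about EXTRA_SIZE when it is opened (see the
+** sqlitepager_open() call in sqliteBtreeOpen() below), so every page
+** image the pager hands back has room for the MemPage auxiliary fields
+** appended after the raw page data.
+*/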
+
+/*
+** Everything we need to know about an open database
+*/
+struct Btree {
+ BtOps *pOps; /* Function table */
+ Pager *pPager; /* The page cache */
+ BtCursor *pCursor; /* A list of all open cursors */
+ PageOne *page1; /* First page of the database */
+ u8 inTrans; /* True if a transaction is in progress */
+ u8 inCkpt; /* True if there is a checkpoint on the transaction */
+ u8 readOnly; /* True if the underlying file is readonly */
+  u8 needSwab; /* True if byte swapping of disk data is needed */
+};
+typedef Btree Bt;
+
+/*
+** A cursor is a pointer to a particular entry in the BTree.
+** The entry is identified by its MemPage and the index in
+** MemPage.apCell[] of the entry.
+*/
+struct BtCursor {
+ BtCursorOps *pOps; /* Function table */
+ Btree *pBt; /* The Btree to which this cursor belongs */
+ BtCursor *pNext, *pPrev; /* Forms a linked list of all cursors */
+ BtCursor *pShared; /* Loop of cursors with the same root page */
+ Pgno pgnoRoot; /* The root page of this tree */
+ MemPage *pPage; /* Page that contains the entry */
+ int idx; /* Index of the entry in pPage->apCell[] */
+ u8 wrFlag; /* True if writable */
+ u8 eSkip; /* Determines if next step operation is a no-op */
+ u8 iMatch; /* compare result from last sqliteBtreeMoveto() */
+};
+
+/*
+** Legal values for BtCursor.eSkip.
+*/
+#define SKIP_NONE 0 /* Always step the cursor */
+#define SKIP_NEXT 1 /* The next sqliteBtreeNext() is a no-op */
+#define SKIP_PREV 2 /* The next sqliteBtreePrevious() is a no-op */
+#define SKIP_INVALID 3 /* Calls to Next() and Previous() are invalid */
+
+/* Forward declarations */
+static int fileBtreeCloseCursor(BtCursor *pCur);
+
+/*
+** Routines for byte swapping.
+*/
+u16 swab16(u16 x){
+ return ((x & 0xff)<<8) | ((x>>8)&0xff);
+}
+u32 swab32(u32 x){
+ return ((x & 0xff)<<24) | ((x & 0xff00)<<8) |
+ ((x>>8) & 0xff00) | ((x>>24)&0xff);
+}
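+
+/*
+** For example, swab16(0x1234) returns 0x3412 and swab32(0x12345678)
+** returns 0x78563412.
+*/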
+
+/*
+** Compute the total number of bytes that a Cell needs on the main
+** database page. The number returned includes the Cell header,
+** local payload storage, and the pointer to overflow pages (if
+** applicable). Additional space allocated on overflow pages
+** is NOT included in the value returned from this routine.
+*/
+static int cellSize(Btree *pBt, Cell *pCell){
+ int n = NKEY(pBt, pCell->h) + NDATA(pBt, pCell->h);
+ if( n>MX_LOCAL_PAYLOAD ){
+ n = MX_LOCAL_PAYLOAD + sizeof(Pgno);
+ }else{
+ n = ROUNDUP(n);
+ }
+ n += sizeof(CellHdr);
+ return n;
+}
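+
+/*
+** For example, with the default 1024-byte page (MX_LOCAL_PAYLOAD==236):
+** a cell holding a 10-byte key and 20 bytes of data needs
+** ROUNDUP(30) + sizeof(CellHdr) = 32 + 12 = 44 bytes, while a cell with
+** 500 bytes of combined payload needs 236 + 4 + 12 = 252 bytes locally,
+** the remaining 264 bytes spilling onto overflow pages.
+*/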
+
+/*
+** Defragment the page given. All Cells are moved to the
+** beginning of the page and all free space is collected
+** into one big FreeBlk at the end of the page.
+*/
+static void defragmentPage(Btree *pBt, MemPage *pPage){
+ int pc, i, n;
+ FreeBlk *pFBlk;
+ char newPage[SQLITE_USABLE_SIZE];
+
+ assert( sqlitepager_iswriteable(pPage) );
+ assert( pPage->isInit );
+ pc = sizeof(PageHdr);
+ pPage->u.hdr.firstCell = SWAB16(pBt, pc);
+ memcpy(newPage, pPage->u.aDisk, pc);
+ for(i=0; i<pPage->nCell; i++){
+ Cell *pCell = pPage->apCell[i];
+
+ /* This routine should never be called on an overfull page. The
+ ** following asserts verify that constraint. */
+ assert( Addr(pCell) > Addr(pPage) );
+ assert( Addr(pCell) < Addr(pPage) + SQLITE_USABLE_SIZE );
+
+ n = cellSize(pBt, pCell);
+ pCell->h.iNext = SWAB16(pBt, pc + n);
+ memcpy(&newPage[pc], pCell, n);
+ pPage->apCell[i] = (Cell*)&pPage->u.aDisk[pc];
+ pc += n;
+ }
+ assert( pPage->nFree==SQLITE_USABLE_SIZE-pc );
+ memcpy(pPage->u.aDisk, newPage, pc);
+ if( pPage->nCell>0 ){
+ pPage->apCell[pPage->nCell-1]->h.iNext = 0;
+ }
+ pFBlk = (FreeBlk*)&pPage->u.aDisk[pc];
+ pFBlk->iSize = SWAB16(pBt, SQLITE_USABLE_SIZE - pc);
+ pFBlk->iNext = 0;
+ pPage->u.hdr.firstFree = SWAB16(pBt, pc);
+ memset(&pFBlk[1], 0, SQLITE_USABLE_SIZE - pc - sizeof(FreeBlk));
+}
+
+/*
+** Allocate nByte bytes of space on a page. nByte must be a
+** multiple of 4.
+**
+** Return the index into pPage->u.aDisk[] of the first byte of
+** the new allocation. Or return 0 if there is not enough free
+** space on the page to satisfy the allocation request.
+**
+** If the page contains nByte bytes of free space but does not contain
+** nByte bytes of contiguous free space, then this routine automatically
+** calls defragmentPage() to consolidate all free space before
+** allocating the new chunk.
+*/
+static int allocateSpace(Btree *pBt, MemPage *pPage, int nByte){
+ FreeBlk *p;
+ u16 *pIdx;
+ int start;
+ int iSize;
+#ifndef NDEBUG
+ int cnt = 0;
+#endif
+
+ assert( sqlitepager_iswriteable(pPage) );
+ assert( nByte==ROUNDUP(nByte) );
+ assert( pPage->isInit );
+ if( pPage->nFree<nByte || pPage->isOverfull ) return 0;
+ pIdx = &pPage->u.hdr.firstFree;
+ p = (FreeBlk*)&pPage->u.aDisk[SWAB16(pBt, *pIdx)];
+ while( (iSize = SWAB16(pBt, p->iSize))<nByte ){
+ assert( cnt++ < SQLITE_USABLE_SIZE/4 );
+ if( p->iNext==0 ){
+ defragmentPage(pBt, pPage);
+ pIdx = &pPage->u.hdr.firstFree;
+ }else{
+ pIdx = &p->iNext;
+ }
+ p = (FreeBlk*)&pPage->u.aDisk[SWAB16(pBt, *pIdx)];
+ }
+ if( iSize==nByte ){
+ start = SWAB16(pBt, *pIdx);
+ *pIdx = p->iNext;
+ }else{
+ FreeBlk *pNew;
+ start = SWAB16(pBt, *pIdx);
+ pNew = (FreeBlk*)&pPage->u.aDisk[start + nByte];
+ pNew->iNext = p->iNext;
+ pNew->iSize = SWAB16(pBt, iSize - nByte);
+ *pIdx = SWAB16(pBt, start + nByte);
+ }
+ pPage->nFree -= nByte;
+ return start;
+}
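+
+/*
+** A typical caller pairs this routine with cellSize(). The hypothetical
+** fragment below shows the pattern; insertCell() further down does
+** exactly this:
+**
+**     int sz = cellSize(pBt, pCell);
+**     int idx = allocateSpace(pBt, pPage, sz);
+**     if( idx==0 ){
+**       pPage->isOverfull = 1;
+**     }else{
+**       memcpy(&pPage->u.aDisk[idx], pCell, sz);
+**     }
+**
+** When allocateSpace() returns 0 the cell does not fit and the page is
+** marked overfull so that a later balance() can redistribute cells.
+*/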
+
+/*
+** Return a section of the MemPage.u.aDisk[] to the freelist.
+** The first byte of the new free block is pPage->u.aDisk[start]
+** and the size of the block is "size" bytes. Size must be
+** a multiple of 4.
+**
+** Most of the effort here is involved in coalescing adjacent
+** free blocks into a single big free block.
+*/
+static void freeSpace(Btree *pBt, MemPage *pPage, int start, int size){
+ int end = start + size;
+ u16 *pIdx, idx;
+ FreeBlk *pFBlk;
+ FreeBlk *pNew;
+ FreeBlk *pNext;
+ int iSize;
+
+ assert( sqlitepager_iswriteable(pPage) );
+ assert( size == ROUNDUP(size) );
+ assert( start == ROUNDUP(start) );
+ assert( pPage->isInit );
+ pIdx = &pPage->u.hdr.firstFree;
+ idx = SWAB16(pBt, *pIdx);
+ while( idx!=0 && idx<start ){
+ pFBlk = (FreeBlk*)&pPage->u.aDisk[idx];
+ iSize = SWAB16(pBt, pFBlk->iSize);
+ if( idx + iSize == start ){
+ pFBlk->iSize = SWAB16(pBt, iSize + size);
+ if( idx + iSize + size == SWAB16(pBt, pFBlk->iNext) ){
+ pNext = (FreeBlk*)&pPage->u.aDisk[idx + iSize + size];
+ if( pBt->needSwab ){
+ pFBlk->iSize = swab16((u16)swab16(pNext->iSize)+iSize+size);
+ }else{
+ pFBlk->iSize += pNext->iSize;
+ }
+ pFBlk->iNext = pNext->iNext;
+ }
+ pPage->nFree += size;
+ return;
+ }
+ pIdx = &pFBlk->iNext;
+ idx = SWAB16(pBt, *pIdx);
+ }
+ pNew = (FreeBlk*)&pPage->u.aDisk[start];
+ if( idx != end ){
+ pNew->iSize = SWAB16(pBt, size);
+ pNew->iNext = SWAB16(pBt, idx);
+ }else{
+ pNext = (FreeBlk*)&pPage->u.aDisk[idx];
+ pNew->iSize = SWAB16(pBt, size + SWAB16(pBt, pNext->iSize));
+ pNew->iNext = pNext->iNext;
+ }
+ *pIdx = SWAB16(pBt, start);
+ pPage->nFree += size;
+}
+
+/*
+** Initialize the auxiliary information for a disk block.
+**
+** The pParent parameter must be a pointer to the MemPage which
+** is the parent of the page being initialized. The root of the
+** BTree (usually page 2) has no parent and so for that page,
+** pParent==NULL.
+**
+** Return SQLITE_OK on success. If we see that the page does
+** not contain a well-formed database page, then return
+** SQLITE_CORRUPT. Note that a return of SQLITE_OK does not
+** guarantee that the page is well-formed. It only shows that
+** we failed to detect any corruption.
+*/
+static int initPage(Bt *pBt, MemPage *pPage, Pgno pgnoThis, MemPage *pParent){
+ int idx; /* An index into pPage->u.aDisk[] */
+ Cell *pCell; /* A pointer to a Cell in pPage->u.aDisk[] */
+ FreeBlk *pFBlk; /* A pointer to a free block in pPage->u.aDisk[] */
+ int sz; /* The size of a Cell in bytes */
+ int freeSpace; /* Amount of free space on the page */
+
+ if( pPage->pParent ){
+ assert( pPage->pParent==pParent );
+ return SQLITE_OK;
+ }
+ if( pParent ){
+ pPage->pParent = pParent;
+ sqlitepager_ref(pParent);
+ }
+ if( pPage->isInit ) return SQLITE_OK;
+ pPage->isInit = 1;
+ pPage->nCell = 0;
+ freeSpace = USABLE_SPACE;
+ idx = SWAB16(pBt, pPage->u.hdr.firstCell);
+ while( idx!=0 ){
+ if( idx>SQLITE_USABLE_SIZE-MIN_CELL_SIZE ) goto page_format_error;
+ if( idx<sizeof(PageHdr) ) goto page_format_error;
+ if( idx!=ROUNDUP(idx) ) goto page_format_error;
+ pCell = (Cell*)&pPage->u.aDisk[idx];
+ sz = cellSize(pBt, pCell);
+ if( idx+sz > SQLITE_USABLE_SIZE ) goto page_format_error;
+ freeSpace -= sz;
+ pPage->apCell[pPage->nCell++] = pCell;
+ idx = SWAB16(pBt, pCell->h.iNext);
+ }
+ pPage->nFree = 0;
+ idx = SWAB16(pBt, pPage->u.hdr.firstFree);
+ while( idx!=0 ){
+ int iNext;
+ if( idx>SQLITE_USABLE_SIZE-sizeof(FreeBlk) ) goto page_format_error;
+ if( idx<sizeof(PageHdr) ) goto page_format_error;
+ pFBlk = (FreeBlk*)&pPage->u.aDisk[idx];
+ pPage->nFree += SWAB16(pBt, pFBlk->iSize);
+ iNext = SWAB16(pBt, pFBlk->iNext);
+ if( iNext>0 && iNext <= idx ) goto page_format_error;
+ idx = iNext;
+ }
+ if( pPage->nCell==0 && pPage->nFree==0 ){
+ /* As a special case, an uninitialized root page appears to be
+ ** an empty database */
+ return SQLITE_OK;
+ }
+ if( pPage->nFree!=freeSpace ) goto page_format_error;
+ return SQLITE_OK;
+
+page_format_error:
+ return SQLITE_CORRUPT;
+}
+
+/*
+** Set up a raw page so that it looks like a database page holding
+** no entries.
+*/
+static void zeroPage(Btree *pBt, MemPage *pPage){
+ PageHdr *pHdr;
+ FreeBlk *pFBlk;
+ assert( sqlitepager_iswriteable(pPage) );
+ memset(pPage, 0, SQLITE_USABLE_SIZE);
+ pHdr = &pPage->u.hdr;
+ pHdr->firstCell = 0;
+ pHdr->firstFree = SWAB16(pBt, sizeof(*pHdr));
+ pFBlk = (FreeBlk*)&pHdr[1];
+ pFBlk->iNext = 0;
+ pPage->nFree = SQLITE_USABLE_SIZE - sizeof(*pHdr);
+ pFBlk->iSize = SWAB16(pBt, pPage->nFree);
+ pPage->nCell = 0;
+ pPage->isOverfull = 0;
+}
+
+/*
+** This routine is called when the reference count for a page
+** reaches zero. We need to unref the pParent pointer when that
+** happens.
+*/
+static void pageDestructor(void *pData){
+ MemPage *pPage = (MemPage*)pData;
+ if( pPage->pParent ){
+ MemPage *pParent = pPage->pParent;
+ pPage->pParent = 0;
+ sqlitepager_unref(pParent);
+ }
+}
+
+/*
+** Open a new database.
+**
+** Actually, this routine just sets up the internal data structures
+** for accessing the database. We do not open the database file
+** until the first page is loaded.
+**
+** zFilename is the name of the database file. If zFilename is NULL
+** a new database with a random name is created. This randomly named
+** database file will be deleted when sqliteBtreeClose() is called.
+*/
+int sqliteBtreeOpen(
+ const char *zFilename, /* Name of the file containing the BTree database */
+ int omitJournal, /* if TRUE then do not journal this file */
+ int nCache, /* How many pages in the page cache */
+ Btree **ppBtree /* Pointer to new Btree object written here */
+){
+ Btree *pBt;
+ int rc;
+
+ /*
+ ** The following asserts make sure that structures used by the btree are
+ ** the right size. This is to guard against size changes that result
+ ** when compiling on a different architecture.
+ */
+ assert( sizeof(u32)==4 );
+ assert( sizeof(u16)==2 );
+ assert( sizeof(Pgno)==4 );
+ assert( sizeof(PageHdr)==8 );
+ assert( sizeof(CellHdr)==12 );
+ assert( sizeof(FreeBlk)==4 );
+ assert( sizeof(OverflowPage)==SQLITE_USABLE_SIZE );
+ assert( sizeof(FreelistInfo)==OVERFLOW_SIZE );
+ assert( sizeof(ptr)==sizeof(char*) );
+ assert( sizeof(uptr)==sizeof(ptr) );
+
+ pBt = sqliteMalloc( sizeof(*pBt) );
+ if( pBt==0 ){
+ *ppBtree = 0;
+ return SQLITE_NOMEM;
+ }
+ if( nCache<10 ) nCache = 10;
+ rc = sqlitepager_open(&pBt->pPager, zFilename, nCache, EXTRA_SIZE,
+ !omitJournal);
+ if( rc!=SQLITE_OK ){
+ if( pBt->pPager ) sqlitepager_close(pBt->pPager);
+ sqliteFree(pBt);
+ *ppBtree = 0;
+ return rc;
+ }
+ sqlitepager_set_destructor(pBt->pPager, pageDestructor);
+ pBt->pCursor = 0;
+ pBt->page1 = 0;
+ pBt->readOnly = sqlitepager_isreadonly(pBt->pPager);
+ pBt->pOps = &sqliteBtreeOps;
+ *ppBtree = pBt;
+ return SQLITE_OK;
+}
+
+/*
+** Close an open database and invalidate all cursors.
+*/
+static int fileBtreeClose(Btree *pBt){
+ while( pBt->pCursor ){
+ fileBtreeCloseCursor(pBt->pCursor);
+ }
+ sqlitepager_close(pBt->pPager);
+ sqliteFree(pBt);
+ return SQLITE_OK;
+}
+
+/*
+** Change the limit on the number of pages allowed in the cache.
+**
+** The maximum number of cache pages is set to the absolute
+** value of mxPage. If mxPage is negative, the pager will
+** operate asynchronously - it will not stop to do fsync()s
+** to ensure data is written to the disk surface before
+** continuing. Transactions still work if synchronous is off,
+** and the database cannot be corrupted if this program
+** crashes. But if the operating system crashes or there is
+** an abrupt power failure when synchronous is off, the database
+** could be left in an inconsistent and unrecoverable state.
+** Synchronous is on by default so database corruption is not
+** normally a worry.
+*/
+static int fileBtreeSetCacheSize(Btree *pBt, int mxPage){
+ sqlitepager_set_cachesize(pBt->pPager, mxPage);
+ return SQLITE_OK;
+}
+
+/*
+** Change the way data is synced to disk in order to increase or decrease
+** how well the database resists damage due to OS crashes and power
+** failures. Level 1 is the same as asynchronous (no syncs occur and
+** there is a high probability of damage). Level 2 is the default. There
+** is a very low but non-zero probability of damage. Level 3 reduces the
+** probability of damage to near zero but with a write performance reduction.
+*/
+static int fileBtreeSetSafetyLevel(Btree *pBt, int level){
+ sqlitepager_set_safety_level(pBt->pPager, level);
+ return SQLITE_OK;
+}
+
+/*
+** Get a reference to page1 of the database file. This will
+** also acquire a readlock on that file.
+**
+** SQLITE_OK is returned on success. If the file is not a
+** well-formed database file, then SQLITE_CORRUPT is returned.
+** SQLITE_BUSY is returned if the database is locked. SQLITE_NOMEM
+** is returned if we run out of memory. SQLITE_PROTOCOL is returned
+** if there is a locking protocol violation.
+*/
+static int lockBtree(Btree *pBt){
+ int rc;
+ if( pBt->page1 ) return SQLITE_OK;
+ rc = sqlitepager_get(pBt->pPager, 1, (void**)&pBt->page1);
+ if( rc!=SQLITE_OK ) return rc;
+
+  /* Do some checking to help ensure the file we opened really is
+ ** a valid database file.
+ */
+ if( sqlitepager_pagecount(pBt->pPager)>0 ){
+ PageOne *pP1 = pBt->page1;
+ if( strcmp(pP1->zMagic,zMagicHeader)!=0 ||
+ (pP1->iMagic!=MAGIC && swab32(pP1->iMagic)!=MAGIC) ){
+ rc = SQLITE_NOTADB;
+ goto page1_init_failed;
+ }
+ pBt->needSwab = pP1->iMagic!=MAGIC;
+ }
+ return rc;
+
+page1_init_failed:
+ sqlitepager_unref(pBt->page1);
+ pBt->page1 = 0;
+ return rc;
+}
+
+/*
+** If there are no outstanding cursors and we are not in the middle
+** of a transaction but there is a read lock on the database, then
+** this routine unrefs the first page of the database file which
+** has the effect of releasing the read lock.
+**
+** If there are any outstanding cursors, this routine is a no-op.
+**
+** If there is a transaction in progress, this routine is a no-op.
+*/
+static void unlockBtreeIfUnused(Btree *pBt){
+ if( pBt->inTrans==0 && pBt->pCursor==0 && pBt->page1!=0 ){
+ sqlitepager_unref(pBt->page1);
+ pBt->page1 = 0;
+ pBt->inTrans = 0;
+ pBt->inCkpt = 0;
+ }
+}
+
+/*
+** Create a new database by initializing the first two pages of the
+** file.
+*/
+static int newDatabase(Btree *pBt){
+ MemPage *pRoot;
+ PageOne *pP1;
+ int rc;
+ if( sqlitepager_pagecount(pBt->pPager)>1 ) return SQLITE_OK;
+ pP1 = pBt->page1;
+ rc = sqlitepager_write(pBt->page1);
+ if( rc ) return rc;
+ rc = sqlitepager_get(pBt->pPager, 2, (void**)&pRoot);
+ if( rc ) return rc;
+ rc = sqlitepager_write(pRoot);
+ if( rc ){
+ sqlitepager_unref(pRoot);
+ return rc;
+ }
+ strcpy(pP1->zMagic, zMagicHeader);
+ if( btree_native_byte_order ){
+ pP1->iMagic = MAGIC;
+ pBt->needSwab = 0;
+ }else{
+ pP1->iMagic = swab32(MAGIC);
+ pBt->needSwab = 1;
+ }
+ zeroPage(pBt, pRoot);
+ sqlitepager_unref(pRoot);
+ return SQLITE_OK;
+}
+
+/*
+** Attempt to start a new transaction.
+**
+** A transaction must be started before attempting any changes
+** to the database. None of the following routines will work
+** unless a transaction is started first:
+**
+** sqliteBtreeCreateTable()
+** sqliteBtreeCreateIndex()
+** sqliteBtreeClearTable()
+** sqliteBtreeDropTable()
+** sqliteBtreeInsert()
+** sqliteBtreeDelete()
+** sqliteBtreeUpdateMeta()
+*/
+static int fileBtreeBeginTrans(Btree *pBt){
+ int rc;
+ if( pBt->inTrans ) return SQLITE_ERROR;
+ if( pBt->readOnly ) return SQLITE_READONLY;
+ if( pBt->page1==0 ){
+ rc = lockBtree(pBt);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ }
+ rc = sqlitepager_begin(pBt->page1);
+ if( rc==SQLITE_OK ){
+ rc = newDatabase(pBt);
+ }
+ if( rc==SQLITE_OK ){
+ pBt->inTrans = 1;
+ pBt->inCkpt = 0;
+ }else{
+ unlockBtreeIfUnused(pBt);
+ }
+ return rc;
+}
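+
+/*
+** As a rough sketch (using this file's static routine names, which in
+** practice are reached through the Btree.pOps function table, and a
+** hypothetical file name), a complete write sequence looks like:
+**
+**     Btree *pBt;
+**     sqliteBtreeOpen("test.db", 0, 100, &pBt);
+**     fileBtreeBeginTrans(pBt);
+**     ... create tables, open cursors, insert or delete entries ...
+**     fileBtreeCommit(pBt);    (or fileBtreeRollback(pBt) on error)
+**     fileBtreeClose(pBt);
+*/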
+
+/*
+** Commit the transaction currently in progress.
+**
+** This will release the write lock on the database file. If there
+** are no active cursors, it also releases the read lock.
+*/
+static int fileBtreeCommit(Btree *pBt){
+ int rc;
+ rc = pBt->readOnly ? SQLITE_OK : sqlitepager_commit(pBt->pPager);
+ pBt->inTrans = 0;
+ pBt->inCkpt = 0;
+ unlockBtreeIfUnused(pBt);
+ return rc;
+}
+
+/*
+** Rollback the transaction in progress. All cursors will be
+** invalidated by this operation. Any attempt to use a cursor
+** that was open at the beginning of this operation will result
+** in an error.
+**
+** This will release the write lock on the database file. If there
+** are no active cursors, it also releases the read lock.
+*/
+static int fileBtreeRollback(Btree *pBt){
+ int rc;
+ BtCursor *pCur;
+ if( pBt->inTrans==0 ) return SQLITE_OK;
+ pBt->inTrans = 0;
+ pBt->inCkpt = 0;
+ rc = pBt->readOnly ? SQLITE_OK : sqlitepager_rollback(pBt->pPager);
+ for(pCur=pBt->pCursor; pCur; pCur=pCur->pNext){
+ if( pCur->pPage && pCur->pPage->isInit==0 ){
+ sqlitepager_unref(pCur->pPage);
+ pCur->pPage = 0;
+ }
+ }
+ unlockBtreeIfUnused(pBt);
+ return rc;
+}
+
+/*
+** Set the checkpoint for the current transaction. The checkpoint serves
+** as a sub-transaction that can be rolled back independently of the
+** main transaction. You must start a transaction before starting a
+** checkpoint. The checkpoint is ended automatically if the transaction
+** commits or rolls back.
+**
+** Only one checkpoint may be active at a time. It is an error to try
+** to start a new checkpoint if another checkpoint is already active.
+*/
+static int fileBtreeBeginCkpt(Btree *pBt){
+ int rc;
+ if( !pBt->inTrans || pBt->inCkpt ){
+ return pBt->readOnly ? SQLITE_READONLY : SQLITE_ERROR;
+ }
+ rc = pBt->readOnly ? SQLITE_OK : sqlitepager_ckpt_begin(pBt->pPager);
+ pBt->inCkpt = 1;
+ return rc;
+}
+
+
+/*
+** Commit a checkpoint within the transaction currently in progress. If no
+** checkpoint is active, this is a no-op.
+*/
+static int fileBtreeCommitCkpt(Btree *pBt){
+ int rc;
+ if( pBt->inCkpt && !pBt->readOnly ){
+ rc = sqlitepager_ckpt_commit(pBt->pPager);
+ }else{
+ rc = SQLITE_OK;
+ }
+ pBt->inCkpt = 0;
+ return rc;
+}
+
+/*
+** Roll back the checkpoint within the current transaction. If there
+** is no active checkpoint or transaction, this routine is a no-op.
+**
+** All cursors will be invalidated by this operation. Any attempt
+** to use a cursor that was open at the beginning of this operation
+** will result in an error.
+*/
+static int fileBtreeRollbackCkpt(Btree *pBt){
+ int rc;
+ BtCursor *pCur;
+ if( pBt->inCkpt==0 || pBt->readOnly ) return SQLITE_OK;
+ rc = sqlitepager_ckpt_rollback(pBt->pPager);
+ for(pCur=pBt->pCursor; pCur; pCur=pCur->pNext){
+ if( pCur->pPage && pCur->pPage->isInit==0 ){
+ sqlitepager_unref(pCur->pPage);
+ pCur->pPage = 0;
+ }
+ }
+ pBt->inCkpt = 0;
+ return rc;
+}
+
+/*
+** Create a new cursor for the BTree whose root is on the page
+** iTable. The act of acquiring a cursor gets a read lock on
+** the database file.
+**
+** If wrFlag==0, then the cursor can only be used for reading.
+** If wrFlag==1, then the cursor can be used for reading or for
+** writing if other conditions for writing are also met. These
+** are the conditions that must be met in order for writing to
+** be allowed:
+**
+** 1: The cursor must have been opened with wrFlag==1
+**
+** 2: No other cursors may be open with wrFlag==0 on the same table
+**
+** 3: The database must be writable (not on read-only media)
+**
+** 4: There must be an active transaction.
+**
+** Condition 2 warrants further discussion. If any cursor is opened
+** on a table with wrFlag==0, that prevents all other cursors from
+** writing to that table. This is a kind of "read-lock". When a cursor
+** is opened with wrFlag==0 it is guaranteed that the table will not
+** change as long as the cursor is open. This allows the cursor to
+** do a sequential scan of the table without having to worry about
+** entries being inserted or deleted during the scan. Cursors should
+** be opened with wrFlag==0 only if this read-lock property is needed.
+** That is to say, cursors should be opened with wrFlag==0 only if they
+** intend to use the sqliteBtreeNext() interface. All other cursors
+** should be opened with wrFlag==1 even if they never really intend
+** to write.
+**
+** No checking is done to make sure that page iTable really is the
+** root page of a b-tree. If it is not, then the cursor acquired
+** will not work correctly.
+*/
+static
+int fileBtreeCursor(Btree *pBt, int iTable, int wrFlag, BtCursor **ppCur){
+ int rc;
+ BtCursor *pCur, *pRing;
+
+ if( pBt->readOnly && wrFlag ){
+ *ppCur = 0;
+ return SQLITE_READONLY;
+ }
+ if( pBt->page1==0 ){
+ rc = lockBtree(pBt);
+ if( rc!=SQLITE_OK ){
+ *ppCur = 0;
+ return rc;
+ }
+ }
+ pCur = sqliteMalloc( sizeof(*pCur) );
+ if( pCur==0 ){
+ rc = SQLITE_NOMEM;
+ goto create_cursor_exception;
+ }
+ pCur->pgnoRoot = (Pgno)iTable;
+ rc = sqlitepager_get(pBt->pPager, pCur->pgnoRoot, (void**)&pCur->pPage);
+ if( rc!=SQLITE_OK ){
+ goto create_cursor_exception;
+ }
+ rc = initPage(pBt, pCur->pPage, pCur->pgnoRoot, 0);
+ if( rc!=SQLITE_OK ){
+ goto create_cursor_exception;
+ }
+ pCur->pOps = &sqliteBtreeCursorOps;
+ pCur->pBt = pBt;
+ pCur->wrFlag = wrFlag;
+ pCur->idx = 0;
+ pCur->eSkip = SKIP_INVALID;
+ pCur->pNext = pBt->pCursor;
+ if( pCur->pNext ){
+ pCur->pNext->pPrev = pCur;
+ }
+ pCur->pPrev = 0;
+ pRing = pBt->pCursor;
+ while( pRing && pRing->pgnoRoot!=pCur->pgnoRoot ){ pRing = pRing->pNext; }
+ if( pRing ){
+ pCur->pShared = pRing->pShared;
+ pRing->pShared = pCur;
+ }else{
+ pCur->pShared = pCur;
+ }
+ pBt->pCursor = pCur;
+ *ppCur = pCur;
+ return SQLITE_OK;
+
+create_cursor_exception:
+ *ppCur = 0;
+ if( pCur ){
+ if( pCur->pPage ) sqlitepager_unref(pCur->pPage);
+ sqliteFree(pCur);
+ }
+ unlockBtreeIfUnused(pBt);
+ return rc;
+}
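+
+/*
+** For illustration, a read-only sequential scan of table iTable might
+** look roughly like this (error handling omitted; the static routines
+** are normally reached through the cursor's pOps function table):
+**
+**     BtCursor *pCur;
+**     int res;
+**     fileBtreeCursor(pBt, iTable, 0, &pCur);
+**     for(fileBtreeFirst(pCur, &res); res==0; fileBtreeNext(pCur, &res)){
+**       ... read the entry using fileBtreeKey() and fileBtreeData() ...
+**     }
+**     fileBtreeCloseCursor(pCur);
+*/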
+
+/*
+** Close a cursor. The read lock on the database file is released
+** when the last cursor is closed.
+*/
+static int fileBtreeCloseCursor(BtCursor *pCur){
+ Btree *pBt = pCur->pBt;
+ if( pCur->pPrev ){
+ pCur->pPrev->pNext = pCur->pNext;
+ }else{
+ pBt->pCursor = pCur->pNext;
+ }
+ if( pCur->pNext ){
+ pCur->pNext->pPrev = pCur->pPrev;
+ }
+ if( pCur->pPage ){
+ sqlitepager_unref(pCur->pPage);
+ }
+ if( pCur->pShared!=pCur ){
+ BtCursor *pRing = pCur->pShared;
+ while( pRing->pShared!=pCur ){ pRing = pRing->pShared; }
+ pRing->pShared = pCur->pShared;
+ }
+ unlockBtreeIfUnused(pBt);
+ sqliteFree(pCur);
+ return SQLITE_OK;
+}
+
+/*
+** Make a temporary cursor by filling in the fields of pTempCur.
+** The temporary cursor is not on the cursor list for the Btree.
+*/
+static void getTempCursor(BtCursor *pCur, BtCursor *pTempCur){
+ memcpy(pTempCur, pCur, sizeof(*pCur));
+ pTempCur->pNext = 0;
+ pTempCur->pPrev = 0;
+ if( pTempCur->pPage ){
+ sqlitepager_ref(pTempCur->pPage);
+ }
+}
+
+/*
+** Delete a temporary cursor such as was made by the getTempCursor()
+** function above.
+*/
+static void releaseTempCursor(BtCursor *pCur){
+ if( pCur->pPage ){
+ sqlitepager_unref(pCur->pPage);
+ }
+}
+
+/*
+** Set *pSize to the number of bytes of key in the entry the
+** cursor currently points to. Always return SQLITE_OK.
+** Failure is not possible. If the cursor is not currently
+** pointing to an entry (which can happen, for example, if
+** the database is empty) then *pSize is set to 0.
+*/
+static int fileBtreeKeySize(BtCursor *pCur, int *pSize){
+ Cell *pCell;
+ MemPage *pPage;
+
+ pPage = pCur->pPage;
+ assert( pPage!=0 );
+ if( pCur->idx >= pPage->nCell ){
+ *pSize = 0;
+ }else{
+ pCell = pPage->apCell[pCur->idx];
+ *pSize = NKEY(pCur->pBt, pCell->h);
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Read payload information from the entry that the pCur cursor is
+** pointing to. Begin reading the payload at "offset" and read
+** a total of "amt" bytes. Put the result in zBuf.
+**
+** This routine does not make a distinction between key and data.
+** It just reads bytes from the payload area.
+*/
+static int getPayload(BtCursor *pCur, int offset, int amt, char *zBuf){
+ char *aPayload;
+ Pgno nextPage;
+ int rc;
+ Btree *pBt = pCur->pBt;
+ assert( pCur!=0 && pCur->pPage!=0 );
+ assert( pCur->idx>=0 && pCur->idx<pCur->pPage->nCell );
+ aPayload = pCur->pPage->apCell[pCur->idx]->aPayload;
+ if( offset<MX_LOCAL_PAYLOAD ){
+ int a = amt;
+ if( a+offset>MX_LOCAL_PAYLOAD ){
+ a = MX_LOCAL_PAYLOAD - offset;
+ }
+ memcpy(zBuf, &aPayload[offset], a);
+ if( a==amt ){
+ return SQLITE_OK;
+ }
+ offset = 0;
+ zBuf += a;
+ amt -= a;
+ }else{
+ offset -= MX_LOCAL_PAYLOAD;
+ }
+ if( amt>0 ){
+ nextPage = SWAB32(pBt, pCur->pPage->apCell[pCur->idx]->ovfl);
+ }
+ while( amt>0 && nextPage ){
+ OverflowPage *pOvfl;
+ rc = sqlitepager_get(pBt->pPager, nextPage, (void**)&pOvfl);
+ if( rc!=0 ){
+ return rc;
+ }
+ nextPage = SWAB32(pBt, pOvfl->iNext);
+ if( offset<OVERFLOW_SIZE ){
+ int a = amt;
+ if( a + offset > OVERFLOW_SIZE ){
+ a = OVERFLOW_SIZE - offset;
+ }
+ memcpy(zBuf, &pOvfl->aPayload[offset], a);
+ offset = 0;
+ amt -= a;
+ zBuf += a;
+ }else{
+ offset -= OVERFLOW_SIZE;
+ }
+ sqlitepager_unref(pOvfl);
+ }
+ if( amt>0 ){
+ return SQLITE_CORRUPT;
+ }
+ return SQLITE_OK;
+}
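+
+/*
+** For example, with the default 1024-byte page (MX_LOCAL_PAYLOAD==236,
+** OVERFLOW_SIZE==1020), a request for 100 bytes starting at offset 200
+** copies the 36 local bytes at offsets 200..235 and then the remaining
+** 64 bytes from the start of the first overflow page.
+*/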
+
+/*
+** Read part of the key associated with cursor pCur. A maximum
+** of "amt" bytes will be transferred into zBuf[]. The transfer
+** begins at "offset". The number of bytes actually read is
+** returned.
+**
+** Change: It used to be that the amount returned would be smaller
+** than the amount requested if there were not enough bytes in the key
+** to satisfy the request. But now, it must be the case that there
+** is enough data available to satisfy the request. If not, an exception
+** is raised. The change was made in an effort to boost performance
+** by eliminating unneeded tests.
+*/
+static int fileBtreeKey(BtCursor *pCur, int offset, int amt, char *zBuf){
+ MemPage *pPage;
+
+ assert( amt>=0 );
+ assert( offset>=0 );
+ assert( pCur->pPage!=0 );
+ pPage = pCur->pPage;
+ if( pCur->idx >= pPage->nCell ){
+ return 0;
+ }
+ assert( amt+offset <= NKEY(pCur->pBt, pPage->apCell[pCur->idx]->h) );
+ getPayload(pCur, offset, amt, zBuf);
+ return amt;
+}
+
+/*
+** Set *pSize to the number of bytes of data in the entry the
+** cursor currently points to. Always return SQLITE_OK.
+** Failure is not possible. If the cursor is not currently
+** pointing to an entry (which can happen, for example, if
+** the database is empty) then *pSize is set to 0.
+*/
+static int fileBtreeDataSize(BtCursor *pCur, int *pSize){
+ Cell *pCell;
+ MemPage *pPage;
+
+ pPage = pCur->pPage;
+ assert( pPage!=0 );
+ if( pCur->idx >= pPage->nCell ){
+ *pSize = 0;
+ }else{
+ pCell = pPage->apCell[pCur->idx];
+ *pSize = NDATA(pCur->pBt, pCell->h);
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Read part of the data associated with cursor pCur. A maximum
+** of "amt" bytes will be transferred into zBuf[]. The transfer
+** begins at "offset". The number of bytes actually read is
+** returned. The amount returned will be smaller than the
+** amount requested if there are not enough bytes in the data
+** to satisfy the request.
+*/
+static int fileBtreeData(BtCursor *pCur, int offset, int amt, char *zBuf){
+ Cell *pCell;
+ MemPage *pPage;
+
+ assert( amt>=0 );
+ assert( offset>=0 );
+ assert( pCur->pPage!=0 );
+ pPage = pCur->pPage;
+ if( pCur->idx >= pPage->nCell ){
+ return 0;
+ }
+ pCell = pPage->apCell[pCur->idx];
+ assert( amt+offset <= NDATA(pCur->pBt, pCell->h) );
+ getPayload(pCur, offset + NKEY(pCur->pBt, pCell->h), amt, zBuf);
+ return amt;
+}
+
+/*
+** Compare an external key against the key on the entry that pCur points to.
+**
+** The external key is pKey and is nKey bytes long. The last nIgnore bytes
+** of the key associated with pCur are ignored, as if they do not exist.
+** (The normal case is for nIgnore to be zero in which case the entire
+** internal key is used in the comparison.)
+**
+** The comparison result is written to *pRes as follows:
+**
+** *pRes<0 This means pCur<pKey
+**
+** *pRes==0 This means pCur==pKey for all nKey bytes
+**
+** *pRes>0 This means pCur>pKey
+**
+** When one key is an exact prefix of the other, the shorter key is
+** considered less than the longer one. In order to be equal the
+** keys must be exactly the same length. (The length of the pCur key
+** is the actual key length minus nIgnore bytes.)
+*/
+static int fileBtreeKeyCompare(
+ BtCursor *pCur, /* Pointer to entry to compare against */
+ const void *pKey, /* Key to compare against entry that pCur points to */
+ int nKey, /* Number of bytes in pKey */
+ int nIgnore, /* Ignore this many bytes at the end of pCur */
+ int *pResult /* Write the result here */
+){
+ Pgno nextPage;
+ int n, c, rc, nLocal;
+ Cell *pCell;
+ Btree *pBt = pCur->pBt;
+ const char *zKey = (const char*)pKey;
+
+ assert( pCur->pPage );
+ assert( pCur->idx>=0 && pCur->idx<pCur->pPage->nCell );
+ pCell = pCur->pPage->apCell[pCur->idx];
+ nLocal = NKEY(pBt, pCell->h) - nIgnore;
+ if( nLocal<0 ) nLocal = 0;
+ n = nKey<nLocal ? nKey : nLocal;
+ if( n>MX_LOCAL_PAYLOAD ){
+ n = MX_LOCAL_PAYLOAD;
+ }
+ c = memcmp(pCell->aPayload, zKey, n);
+ if( c!=0 ){
+ *pResult = c;
+ return SQLITE_OK;
+ }
+ zKey += n;
+ nKey -= n;
+ nLocal -= n;
+ nextPage = SWAB32(pBt, pCell->ovfl);
+ while( nKey>0 && nLocal>0 ){
+ OverflowPage *pOvfl;
+ if( nextPage==0 ){
+ return SQLITE_CORRUPT;
+ }
+ rc = sqlitepager_get(pBt->pPager, nextPage, (void**)&pOvfl);
+ if( rc ){
+ return rc;
+ }
+ nextPage = SWAB32(pBt, pOvfl->iNext);
+ n = nKey<nLocal ? nKey : nLocal;
+ if( n>OVERFLOW_SIZE ){
+ n = OVERFLOW_SIZE;
+ }
+ c = memcmp(pOvfl->aPayload, zKey, n);
+ sqlitepager_unref(pOvfl);
+ if( c!=0 ){
+ *pResult = c;
+ return SQLITE_OK;
+ }
+ nKey -= n;
+ nLocal -= n;
+ zKey += n;
+ }
+ if( c==0 ){
+ c = nLocal - nKey;
+ }
+ *pResult = c;
+ return SQLITE_OK;
+}
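+
+/*
+** For example, comparing an entry whose key is "abcd" against the
+** external key "abc" (nKey==3, nIgnore==0) leaves *pResult positive:
+** the keys match for the first 3 bytes, so the result is
+** nLocal - nKey = 1, meaning the shorter external key sorts first.
+*/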
+
+/*
+** Move the cursor down to a new child page. The newPgno argument is the
+** page number of the child page in the byte order of the disk image.
+*/
+static int moveToChild(BtCursor *pCur, int newPgno){
+ int rc;
+ MemPage *pNewPage;
+ Btree *pBt = pCur->pBt;
+
+ newPgno = SWAB32(pBt, newPgno);
+ rc = sqlitepager_get(pBt->pPager, newPgno, (void**)&pNewPage);
+ if( rc ) return rc;
+ rc = initPage(pBt, pNewPage, newPgno, pCur->pPage);
+ if( rc ) return rc;
+ assert( pCur->idx>=pCur->pPage->nCell
+ || pCur->pPage->apCell[pCur->idx]->h.leftChild==SWAB32(pBt,newPgno) );
+ assert( pCur->idx<pCur->pPage->nCell
+ || pCur->pPage->u.hdr.rightChild==SWAB32(pBt,newPgno) );
+ pNewPage->idxParent = pCur->idx;
+ pCur->pPage->idxShift = 0;
+ sqlitepager_unref(pCur->pPage);
+ pCur->pPage = pNewPage;
+ pCur->idx = 0;
+ if( pNewPage->nCell<1 ){
+ return SQLITE_CORRUPT;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Move the cursor up to the parent page.
+**
+** pCur->idx is set to the cell index that contains the pointer
+** to the page we are coming from. If we are coming from the
+** right-most child page then pCur->idx is set to one more than
+** the largest cell index.
+*/
+static void moveToParent(BtCursor *pCur){
+ Pgno oldPgno;
+ MemPage *pParent;
+ MemPage *pPage;
+ int idxParent;
+ pPage = pCur->pPage;
+ assert( pPage!=0 );
+ pParent = pPage->pParent;
+ assert( pParent!=0 );
+ idxParent = pPage->idxParent;
+ sqlitepager_ref(pParent);
+ sqlitepager_unref(pPage);
+ pCur->pPage = pParent;
+ assert( pParent->idxShift==0 );
+ if( pParent->idxShift==0 ){
+ pCur->idx = idxParent;
+#ifndef NDEBUG
+ /* Verify that pCur->idx is the correct index to point back to the child
+ ** page we just came from
+ */
+ oldPgno = SWAB32(pCur->pBt, sqlitepager_pagenumber(pPage));
+ if( pCur->idx<pParent->nCell ){
+ assert( pParent->apCell[idxParent]->h.leftChild==oldPgno );
+ }else{
+ assert( pParent->u.hdr.rightChild==oldPgno );
+ }
+#endif
+ }else{
+ /* The MemPage.idxShift flag indicates that cell indices might have
+ ** changed since idxParent was set and hence idxParent might be out
+ ** of date. So recompute the parent cell index by scanning all cells
+ ** and locating the one that points to the child we just came from.
+ */
+ int i;
+ pCur->idx = pParent->nCell;
+ oldPgno = SWAB32(pCur->pBt, sqlitepager_pagenumber(pPage));
+ for(i=0; i<pParent->nCell; i++){
+ if( pParent->apCell[i]->h.leftChild==oldPgno ){
+ pCur->idx = i;
+ break;
+ }
+ }
+ }
+}
+
+/*
+** Move the cursor to the root page
+*/
+static int moveToRoot(BtCursor *pCur){
+ MemPage *pNew;
+ int rc;
+ Btree *pBt = pCur->pBt;
+
+ rc = sqlitepager_get(pBt->pPager, pCur->pgnoRoot, (void**)&pNew);
+ if( rc ) return rc;
+ rc = initPage(pBt, pNew, pCur->pgnoRoot, 0);
+ if( rc ) return rc;
+ sqlitepager_unref(pCur->pPage);
+ pCur->pPage = pNew;
+ pCur->idx = 0;
+ return SQLITE_OK;
+}
+
+/*
+** Move the cursor down to the left-most leaf entry beneath the
+** entry to which it is currently pointing.
+*/
+static int moveToLeftmost(BtCursor *pCur){
+ Pgno pgno;
+ int rc;
+
+ while( (pgno = pCur->pPage->apCell[pCur->idx]->h.leftChild)!=0 ){
+ rc = moveToChild(pCur, pgno);
+ if( rc ) return rc;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Move the cursor down to the right-most leaf entry beneath the
+** page to which it is currently pointing. Notice the difference
+** between moveToLeftmost() and moveToRightmost(). moveToLeftmost()
+** finds the left-most entry beneath the *entry* whereas moveToRightmost()
+** finds the right-most entry beneath the *page*.
+*/
+static int moveToRightmost(BtCursor *pCur){
+ Pgno pgno;
+ int rc;
+
+ while( (pgno = pCur->pPage->u.hdr.rightChild)!=0 ){
+ pCur->idx = pCur->pPage->nCell;
+ rc = moveToChild(pCur, pgno);
+ if( rc ) return rc;
+ }
+ pCur->idx = pCur->pPage->nCell - 1;
+ return SQLITE_OK;
+}
+
+/* Move the cursor to the first entry in the table. Return SQLITE_OK
+** on success. Set *pRes to 0 if the cursor actually points to something
+** or set *pRes to 1 if the table is empty.
+*/
+static int fileBtreeFirst(BtCursor *pCur, int *pRes){
+ int rc;
+ if( pCur->pPage==0 ) return SQLITE_ABORT;
+ rc = moveToRoot(pCur);
+ if( rc ) return rc;
+ if( pCur->pPage->nCell==0 ){
+ *pRes = 1;
+ return SQLITE_OK;
+ }
+ *pRes = 0;
+ rc = moveToLeftmost(pCur);
+ pCur->eSkip = SKIP_NONE;
+ return rc;
+}
+
+/* Move the cursor to the last entry in the table. Return SQLITE_OK
+** on success. Set *pRes to 0 if the cursor actually points to something
+** or set *pRes to 1 if the table is empty.
+*/
+static int fileBtreeLast(BtCursor *pCur, int *pRes){
+ int rc;
+ if( pCur->pPage==0 ) return SQLITE_ABORT;
+ rc = moveToRoot(pCur);
+ if( rc ) return rc;
+ assert( pCur->pPage->isInit );
+ if( pCur->pPage->nCell==0 ){
+ *pRes = 1;
+ return SQLITE_OK;
+ }
+ *pRes = 0;
+ rc = moveToRightmost(pCur);
+ pCur->eSkip = SKIP_NONE;
+ return rc;
+}
+
+/* Move the cursor so that it points to an entry near pKey.
+** Return a success code.
+**
+** If an exact match is not found, then the cursor is always
+** left pointing at a leaf page which would hold the entry if it
+** were present. The cursor might point to an entry that comes
+** before or after the key.
+**
+** The result of comparing the key with the entry to which the
+** cursor is left pointing is stored in pCur->iMatch. The same
+** value is also written to *pRes if pRes!=NULL. The meaning of
+** this value is as follows:
+**
+** *pRes<0 The cursor is left pointing at an entry that
+** is smaller than pKey or if the table is empty
+** and the cursor is therefore left pointing to nothing.
+**
+** *pRes==0 The cursor is left pointing at an entry that
+** exactly matches pKey.
+**
+** *pRes>0 The cursor is left pointing at an entry that
+** is larger than pKey.
+*/
+static
+int fileBtreeMoveto(BtCursor *pCur, const void *pKey, int nKey, int *pRes){
+ int rc;
+ if( pCur->pPage==0 ) return SQLITE_ABORT;
+ pCur->eSkip = SKIP_NONE;
+ rc = moveToRoot(pCur);
+ if( rc ) return rc;
+ for(;;){
+ int lwr, upr;
+ Pgno chldPg;
+ MemPage *pPage = pCur->pPage;
+ int c = -1; /* pRes return if table is empty must be -1 */
+ lwr = 0;
+ upr = pPage->nCell-1;
+ while( lwr<=upr ){
+ pCur->idx = (lwr+upr)/2;
+ rc = fileBtreeKeyCompare(pCur, pKey, nKey, 0, &c);
+ if( rc ) return rc;
+ if( c==0 ){
+ pCur->iMatch = c;
+ if( pRes ) *pRes = 0;
+ return SQLITE_OK;
+ }
+ if( c<0 ){
+ lwr = pCur->idx+1;
+ }else{
+ upr = pCur->idx-1;
+ }
+ }
+ assert( lwr==upr+1 );
+ assert( pPage->isInit );
+ if( lwr>=pPage->nCell ){
+ chldPg = pPage->u.hdr.rightChild;
+ }else{
+ chldPg = pPage->apCell[lwr]->h.leftChild;
+ }
+ if( chldPg==0 ){
+ pCur->iMatch = c;
+ if( pRes ) *pRes = c;
+ return SQLITE_OK;
+ }
+ pCur->idx = lwr;
+ rc = moveToChild(pCur, chldPg);
+ if( rc ) return rc;
+ }
+ /* NOT REACHED */
+}
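+
+/*
+** A typical point lookup therefore looks roughly like the hypothetical
+** fragment below:
+**
+**     int res;
+**     fileBtreeMoveto(pCur, zKey, nKey, &res);
+**     if( res==0 ){
+**       ... exact match: read it with fileBtreeData() ...
+**     }else{
+**       ... no exact match: the cursor sits on a nearby leaf entry ...
+**     }
+*/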
+
+/*
+** Advance the cursor to the next entry in the database. If
+** successful then set *pRes=0. If the cursor
+** was already pointing to the last entry in the database before
+** this routine was called, then set *pRes=1.
+*/
+static int fileBtreeNext(BtCursor *pCur, int *pRes){
+ int rc;
+ MemPage *pPage = pCur->pPage;
+ assert( pRes!=0 );
+ if( pPage==0 ){
+ *pRes = 1;
+ return SQLITE_ABORT;
+ }
+ assert( pPage->isInit );
+ assert( pCur->eSkip!=SKIP_INVALID );
+ if( pPage->nCell==0 ){
+ *pRes = 1;
+ return SQLITE_OK;
+ }
+ assert( pCur->idx<pPage->nCell );
+ if( pCur->eSkip==SKIP_NEXT ){
+ pCur->eSkip = SKIP_NONE;
+ *pRes = 0;
+ return SQLITE_OK;
+ }
+ pCur->eSkip = SKIP_NONE;
+ pCur->idx++;
+ if( pCur->idx>=pPage->nCell ){
+ if( pPage->u.hdr.rightChild ){
+ rc = moveToChild(pCur, pPage->u.hdr.rightChild);
+ if( rc ) return rc;
+ rc = moveToLeftmost(pCur);
+ *pRes = 0;
+ return rc;
+ }
+ do{
+ if( pPage->pParent==0 ){
+ *pRes = 1;
+ return SQLITE_OK;
+ }
+ moveToParent(pCur);
+ pPage = pCur->pPage;
+ }while( pCur->idx>=pPage->nCell );
+ *pRes = 0;
+ return SQLITE_OK;
+ }
+ *pRes = 0;
+ if( pPage->u.hdr.rightChild==0 ){
+ return SQLITE_OK;
+ }
+ rc = moveToLeftmost(pCur);
+ return rc;
+}
+
+/*
+** Step the cursor back to the previous entry in the database. If
+** successful then set *pRes=0. If the cursor
+** was already pointing to the first entry in the database before
+** this routine was called, then set *pRes=1.
+*/
+static int fileBtreePrevious(BtCursor *pCur, int *pRes){
+ int rc;
+ Pgno pgno;
+ MemPage *pPage;
+ pPage = pCur->pPage;
+ if( pPage==0 ){
+ *pRes = 1;
+ return SQLITE_ABORT;
+ }
+ assert( pPage->isInit );
+ assert( pCur->eSkip!=SKIP_INVALID );
+ if( pPage->nCell==0 ){
+ *pRes = 1;
+ return SQLITE_OK;
+ }
+ if( pCur->eSkip==SKIP_PREV ){
+ pCur->eSkip = SKIP_NONE;
+ *pRes = 0;
+ return SQLITE_OK;
+ }
+ pCur->eSkip = SKIP_NONE;
+ assert( pCur->idx>=0 );
+ if( (pgno = pPage->apCell[pCur->idx]->h.leftChild)!=0 ){
+ rc = moveToChild(pCur, pgno);
+ if( rc ) return rc;
+ rc = moveToRightmost(pCur);
+ }else{
+ while( pCur->idx==0 ){
+ if( pPage->pParent==0 ){
+ if( pRes ) *pRes = 1;
+ return SQLITE_OK;
+ }
+ moveToParent(pCur);
+ pPage = pCur->pPage;
+ }
+ pCur->idx--;
+ rc = SQLITE_OK;
+ }
+ *pRes = 0;
+ return rc;
+}
+
+/*
+** Allocate a new page from the database file.
+**
+** The new page is marked as dirty. (In other words, sqlitepager_write()
+** has already been called on the new page.) The new page has also
+** been referenced and the calling routine is responsible for calling
+** sqlitepager_unref() on the new page when it is done.
+**
+** SQLITE_OK is returned on success. Any other return value indicates
+** an error. *ppPage and *pPgno are undefined in the event of an error.
+** Do not invoke sqlitepager_unref() on *ppPage if an error is returned.
+**
+** If the "nearby" parameter is not 0, then a (feeble) effort is made to
+** locate a page close to the page number "nearby". This can be used in an
+** attempt to keep related pages close to each other in the database file,
+** which in turn can make database access faster.
+*/
+static int allocatePage(Btree *pBt, MemPage **ppPage, Pgno *pPgno, Pgno nearby){
+ PageOne *pPage1 = pBt->page1;
+ int rc;
+ if( pPage1->freeList ){
+ OverflowPage *pOvfl;
+ FreelistInfo *pInfo;
+
+ rc = sqlitepager_write(pPage1);
+ if( rc ) return rc;
+ SWAB_ADD(pBt, pPage1->nFree, -1);
+ rc = sqlitepager_get(pBt->pPager, SWAB32(pBt, pPage1->freeList),
+ (void**)&pOvfl);
+ if( rc ) return rc;
+ rc = sqlitepager_write(pOvfl);
+ if( rc ){
+ sqlitepager_unref(pOvfl);
+ return rc;
+ }
+ pInfo = (FreelistInfo*)pOvfl->aPayload;
+ if( pInfo->nFree==0 ){
+ *pPgno = SWAB32(pBt, pPage1->freeList);
+ pPage1->freeList = pOvfl->iNext;
+ *ppPage = (MemPage*)pOvfl;
+ }else{
+ int closest, n;
+ n = SWAB32(pBt, pInfo->nFree);
+ if( n>1 && nearby>0 ){
+ int i, dist;
+ closest = 0;
+ dist = SWAB32(pBt, pInfo->aFree[0]) - nearby;
+ if( dist<0 ) dist = -dist;
+ for(i=1; i<n; i++){
+ int d2 = SWAB32(pBt, pInfo->aFree[i]) - nearby;
+ if( d2<0 ) d2 = -d2;
+ if( d2<dist ) closest = i;
+ }
+ }else{
+ closest = 0;
+ }
+ SWAB_ADD(pBt, pInfo->nFree, -1);
+ *pPgno = SWAB32(pBt, pInfo->aFree[closest]);
+ pInfo->aFree[closest] = pInfo->aFree[n-1];
+ rc = sqlitepager_get(pBt->pPager, *pPgno, (void**)ppPage);
+ sqlitepager_unref(pOvfl);
+ if( rc==SQLITE_OK ){
+ sqlitepager_dont_rollback(*ppPage);
+ rc = sqlitepager_write(*ppPage);
+ }
+ }
+ }else{
+ *pPgno = sqlitepager_pagecount(pBt->pPager) + 1;
+ rc = sqlitepager_get(pBt->pPager, *pPgno, (void**)ppPage);
+ if( rc ) return rc;
+ rc = sqlitepager_write(*ppPage);
+ }
+ return rc;
+}
+
+/*
+** Add a page of the database file to the freelist. Either pgno or
+** pPage but not both may be 0.
+**
+** sqlitepager_unref() is NOT called for pPage.
+*/
+static int freePage(Btree *pBt, void *pPage, Pgno pgno){
+ PageOne *pPage1 = pBt->page1;
+ OverflowPage *pOvfl = (OverflowPage*)pPage;
+ int rc;
+ int needUnref = 0;
+ MemPage *pMemPage;
+
+ if( pgno==0 ){
+ assert( pOvfl!=0 );
+ pgno = sqlitepager_pagenumber(pOvfl);
+ }
+ assert( pgno>2 );
+ assert( sqlitepager_pagenumber(pOvfl)==pgno );
+ pMemPage = (MemPage*)pPage;
+ pMemPage->isInit = 0;
+ if( pMemPage->pParent ){
+ sqlitepager_unref(pMemPage->pParent);
+ pMemPage->pParent = 0;
+ }
+ rc = sqlitepager_write(pPage1);
+ if( rc ){
+ return rc;
+ }
+ SWAB_ADD(pBt, pPage1->nFree, 1);
+ if( pPage1->nFree!=0 && pPage1->freeList!=0 ){
+ OverflowPage *pFreeIdx;
+ rc = sqlitepager_get(pBt->pPager, SWAB32(pBt, pPage1->freeList),
+ (void**)&pFreeIdx);
+ if( rc==SQLITE_OK ){
+ FreelistInfo *pInfo = (FreelistInfo*)pFreeIdx->aPayload;
+ int n = SWAB32(pBt, pInfo->nFree);
+ if( n<(sizeof(pInfo->aFree)/sizeof(pInfo->aFree[0])) ){
+ rc = sqlitepager_write(pFreeIdx);
+ if( rc==SQLITE_OK ){
+ pInfo->aFree[n] = SWAB32(pBt, pgno);
+ SWAB_ADD(pBt, pInfo->nFree, 1);
+ sqlitepager_unref(pFreeIdx);
+ sqlitepager_dont_write(pBt->pPager, pgno);
+ return rc;
+ }
+ }
+ sqlitepager_unref(pFreeIdx);
+ }
+ }
+ if( pOvfl==0 ){
+ assert( pgno>0 );
+ rc = sqlitepager_get(pBt->pPager, pgno, (void**)&pOvfl);
+ if( rc ) return rc;
+ needUnref = 1;
+ }
+ rc = sqlitepager_write(pOvfl);
+ if( rc ){
+ if( needUnref ) sqlitepager_unref(pOvfl);
+ return rc;
+ }
+ pOvfl->iNext = pPage1->freeList;
+ pPage1->freeList = SWAB32(pBt, pgno);
+ memset(pOvfl->aPayload, 0, OVERFLOW_SIZE);
+ if( needUnref ) rc = sqlitepager_unref(pOvfl);
+ return rc;
+}
+
+/*
+** Erase all the data out of a cell. This involves returning overflow
+** pages back to the freelist.
+*/
+static int clearCell(Btree *pBt, Cell *pCell){
+ Pager *pPager = pBt->pPager;
+ OverflowPage *pOvfl;
+ Pgno ovfl, nextOvfl;
+ int rc;
+
+ if( NKEY(pBt, pCell->h) + NDATA(pBt, pCell->h) <= MX_LOCAL_PAYLOAD ){
+ return SQLITE_OK;
+ }
+ ovfl = SWAB32(pBt, pCell->ovfl);
+ pCell->ovfl = 0;
+ while( ovfl ){
+ rc = sqlitepager_get(pPager, ovfl, (void**)&pOvfl);
+ if( rc ) return rc;
+ nextOvfl = SWAB32(pBt, pOvfl->iNext);
+ rc = freePage(pBt, pOvfl, ovfl);
+ if( rc ) return rc;
+ sqlitepager_unref(pOvfl);
+ ovfl = nextOvfl;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Create a new cell from key and data. Overflow pages are allocated as
+** necessary and linked to this cell.
+*/
+static int fillInCell(
+ Btree *pBt, /* The whole Btree. Needed to allocate pages */
+ Cell *pCell, /* Populate this Cell structure */
+ const void *pKey, int nKey, /* The key */
+ const void *pData,int nData /* The data */
+){
+ OverflowPage *pOvfl, *pPrior;
+ Pgno *pNext;
+ int spaceLeft;
+ int n, rc;
+ int nPayload;
+ const char *pPayload;
+ char *pSpace;
+ Pgno nearby = 0;
+
+ pCell->h.leftChild = 0;
+ pCell->h.nKey = SWAB16(pBt, nKey & 0xffff);
+ pCell->h.nKeyHi = nKey >> 16;
+ pCell->h.nData = SWAB16(pBt, nData & 0xffff);
+ pCell->h.nDataHi = nData >> 16;
+ pCell->h.iNext = 0;
+
+ pNext = &pCell->ovfl;
+ pSpace = pCell->aPayload;
+ spaceLeft = MX_LOCAL_PAYLOAD;
+ pPayload = pKey;
+ pKey = 0;
+ nPayload = nKey;
+ pPrior = 0;
+ while( nPayload>0 ){
+ if( spaceLeft==0 ){
+ rc = allocatePage(pBt, (MemPage**)&pOvfl, pNext, nearby);
+ if( rc ){
+ *pNext = 0;
+ }else{
+ nearby = *pNext;
+ }
+ if( pPrior ) sqlitepager_unref(pPrior);
+ if( rc ){
+ clearCell(pBt, pCell);
+ return rc;
+ }
+ if( pBt->needSwab ) *pNext = swab32(*pNext);
+ pPrior = pOvfl;
+ spaceLeft = OVERFLOW_SIZE;
+ pSpace = pOvfl->aPayload;
+ pNext = &pOvfl->iNext;
+ }
+ n = nPayload;
+ if( n>spaceLeft ) n = spaceLeft;
+ memcpy(pSpace, pPayload, n);
+ nPayload -= n;
+ if( nPayload==0 && pData ){
+ pPayload = pData;
+ nPayload = nData;
+ pData = 0;
+ }else{
+ pPayload += n;
+ }
+ spaceLeft -= n;
+ pSpace += n;
+ }
+ *pNext = 0;
+ if( pPrior ){
+ sqlitepager_unref(pPrior);
+ }
+ return SQLITE_OK;
+}
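+
+/*
+** For example (default 1024-byte pages, MX_LOCAL_PAYLOAD==236,
+** OVERFLOW_SIZE==1020): a 300-byte key with 400 bytes of data makes a
+** 700-byte payload, of which 236 bytes land in Cell.aPayload[] and the
+** remaining 464 bytes go onto a single overflow page; Cell.ovfl holds
+** that page's number and the overflow page's iNext field is zero.
+*/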
+
+/*
+** Change the MemPage.pParent pointer on the page whose number is
+** given in the second argument so that MemPage.pParent holds the
+** pointer in the third argument.
+*/
+static void reparentPage(Pager *pPager, Pgno pgno, MemPage *pNewParent,int idx){
+ MemPage *pThis;
+
+ if( pgno==0 ) return;
+ assert( pPager!=0 );
+ pThis = sqlitepager_lookup(pPager, pgno);
+ if( pThis && pThis->isInit ){
+ if( pThis->pParent!=pNewParent ){
+ if( pThis->pParent ) sqlitepager_unref(pThis->pParent);
+ pThis->pParent = pNewParent;
+ if( pNewParent ) sqlitepager_ref(pNewParent);
+ }
+ pThis->idxParent = idx;
+ sqlitepager_unref(pThis);
+ }
+}
+
+/*
+** Reparent all children of the given page to be the given page.
+** In other words, for every child of pPage, invoke reparentPage()
+** to make sure that each child knows that pPage is its parent.
+**
+** This routine gets called after you memcpy() one page into
+** another.
+*/
+static void reparentChildPages(Btree *pBt, MemPage *pPage){
+ int i;
+ Pager *pPager = pBt->pPager;
+ for(i=0; i<pPage->nCell; i++){
+ reparentPage(pPager, SWAB32(pBt, pPage->apCell[i]->h.leftChild), pPage, i);
+ }
+ reparentPage(pPager, SWAB32(pBt, pPage->u.hdr.rightChild), pPage, i);
+ pPage->idxShift = 0;
+}
+
+/*
+** Remove the i-th cell from pPage. This routine affects pPage only.
+** The cell content is not freed or deallocated. It is assumed that
+** the cell content has been copied someplace else. This routine just
+** removes the reference to the cell from pPage.
+**
+** "sz" must be the number of bytes in the cell.
+**
+** Do not bother maintaining the integrity of the linked list of Cells.
+** Only the pPage->apCell[] array is important. The relinkCellList()
+** routine will be called soon after this routine in order to rebuild
+** the linked list.
+*/
+static void dropCell(Btree *pBt, MemPage *pPage, int idx, int sz){
+ int j;
+ assert( idx>=0 && idx<pPage->nCell );
+ assert( sz==cellSize(pBt, pPage->apCell[idx]) );
+ assert( sqlitepager_iswriteable(pPage) );
+ freeSpace(pBt, pPage, Addr(pPage->apCell[idx]) - Addr(pPage), sz);
+ for(j=idx; j<pPage->nCell-1; j++){
+ pPage->apCell[j] = pPage->apCell[j+1];
+ }
+ pPage->nCell--;
+ pPage->idxShift = 1;
+}
+
+/*
+** Insert a new cell on pPage at cell index "i". pCell points to the
+** content of the cell.
+**
+** If the cell content will fit on the page, then put it there. If it
+** will not fit, then just make pPage->apCell[i] point to the content
+** and set pPage->isOverfull.
+**
+** Do not bother maintaining the integrity of the linked list of Cells.
+** Only the pPage->apCell[] array is important. The relinkCellList()
+** routine will be called soon after this routine in order to rebuild
+** the linked list.
+*/
+static void insertCell(Btree *pBt, MemPage *pPage, int i, Cell *pCell, int sz){
+ int idx, j;
+ assert( i>=0 && i<=pPage->nCell );
+ assert( sz==cellSize(pBt, pCell) );
+ assert( sqlitepager_iswriteable(pPage) );
+ idx = allocateSpace(pBt, pPage, sz);
+ for(j=pPage->nCell; j>i; j--){
+ pPage->apCell[j] = pPage->apCell[j-1];
+ }
+ pPage->nCell++;
+ if( idx<=0 ){
+ pPage->isOverfull = 1;
+ pPage->apCell[i] = pCell;
+ }else{
+ memcpy(&pPage->u.aDisk[idx], pCell, sz);
+ pPage->apCell[i] = (Cell*)&pPage->u.aDisk[idx];
+ }
+ pPage->idxShift = 1;
+}
+
+/*
+** Rebuild the linked list of cells on a page so that the cells
+** occur in the order specified by the pPage->apCell[] array.
+** Invoke this routine once to repair damage after one or more
+** invocations of either insertCell() or dropCell().
+*/
+static void relinkCellList(Btree *pBt, MemPage *pPage){
+ int i;
+ u16 *pIdx;
+ assert( sqlitepager_iswriteable(pPage) );
+ pIdx = &pPage->u.hdr.firstCell;
+ for(i=0; i<pPage->nCell; i++){
+ int idx = Addr(pPage->apCell[i]) - Addr(pPage);
+ assert( idx>0 && idx<SQLITE_USABLE_SIZE );
+ *pIdx = SWAB16(pBt, idx);
+ pIdx = &pPage->apCell[i]->h.iNext;
+ }
+ *pIdx = 0;
+}
+
+/*
+** Make a copy of the contents of pFrom into pTo. The pFrom->apCell[]
+** pointers that point into pFrom->u.aDisk[] must be adjusted to point
+** into pTo->u.aDisk[] instead. But some pFrom->apCell[] entries might
+** not point to pFrom->u.aDisk[]. Those are unchanged.
+*/
+static void copyPage(MemPage *pTo, MemPage *pFrom){
+ uptr from, to;
+ int i;
+ memcpy(pTo->u.aDisk, pFrom->u.aDisk, SQLITE_USABLE_SIZE);
+ pTo->pParent = 0;
+ pTo->isInit = 1;
+ pTo->nCell = pFrom->nCell;
+ pTo->nFree = pFrom->nFree;
+ pTo->isOverfull = pFrom->isOverfull;
+ to = Addr(pTo);
+ from = Addr(pFrom);
+ for(i=0; i<pTo->nCell; i++){
+ uptr x = Addr(pFrom->apCell[i]);
+ if( x>from && x<from+SQLITE_USABLE_SIZE ){
+ *((uptr*)&pTo->apCell[i]) = x + to - from;
+ }else{
+ pTo->apCell[i] = pFrom->apCell[i];
+ }
+ }
+}
+
+/*
+** The following parameters determine how many adjacent pages get involved
+** in a balancing operation. NN is the number of neighbors on either side
+** of the page that participate in the balancing operation. NB is the
+** total number of pages that participate, including the target page and
+** NN neighbors on either side.
+**
+** The minimum value of NN is 1 (of course). Increasing NN above 1
+** (to 2 or 3) gives a modest improvement in SELECT and DELETE performance
+** in exchange for a larger degradation in INSERT and UPDATE performance.
+** A value of NN==1 appears to give the best results overall.
+*/
+#define NN 1 /* Number of neighbors on either side of pPage */
+#define NB (NN*2+1) /* Total pages involved in the balance */
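+
+/*
+** With NN==1 this makes NB==3: pPage plus one sibling on each side.
+** The apCell[] and szCell[] arrays in balance() below are therefore
+** sized for (MX_CELL+2)*3 entries - enough for three pages that are
+** each temporarily overfull by two cells.
+*/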
+
+/*
+** This routine redistributes Cells on pPage and up to two siblings
+** of pPage so that all pages have about the same amount of free space.
+** Usually one sibling on either side of pPage is used in the balancing,
+** though both siblings might come from one side if pPage is the first
+** or last child of its parent. If pPage has fewer than two siblings
+** (something which can only happen if pPage is the root page or a
+** child of root) then all available siblings participate in the balancing.
+**
+** The number of siblings of pPage might be increased or decreased by
+** one in an effort to keep pages between 66% and 100% full. The root page
+** is special and is allowed to be less than 66% full. If pPage is
+** the root page, then the depth of the tree might be increased
+** or decreased by one, as necessary, to keep the root page from being
+** overfull or empty.
+**
+** This routine calls relinkCellList() on its input page regardless of
+** whether or not it does any real balancing. Client routines will typically
+** invoke insertCell() or dropCell() before calling this routine, so we
+** need to call relinkCellList() to clean up the mess that those other
+** routines left behind.
+**
+** pCur is left pointing to the same cell as when this routine was called
+** even if that cell gets moved to a different page. pCur may be NULL.
+** Set the pCur parameter to NULL if you do not care about keeping track
+** of a cell as that will save this routine the work of keeping track of it.
+**
+** Note that when this routine is called, some of the Cells on pPage
+** might not actually be stored in pPage->u.aDisk[]. This can happen
+** if the page is overfull. Part of the job of this routine is to
+** make sure all Cells for pPage once again fit in pPage->u.aDisk[].
+**
+** In the course of balancing the siblings of pPage, the parent of pPage
+** might become overfull or underfull. If that happens, then this routine
+** is called recursively on the parent.
+**
+** If this routine fails for any reason, it might leave the database
+** in a corrupted state. So if this routine fails, the database should
+** be rolled back.
+*/
+static int balance(Btree *pBt, MemPage *pPage, BtCursor *pCur){
+ MemPage *pParent; /* The parent of pPage */
+ int nCell; /* Number of cells in apCell[] */
+ int nOld; /* Number of pages in apOld[] */
+ int nNew; /* Number of pages in apNew[] */
+ int nDiv; /* Number of cells in apDiv[] */
+ int i, j, k; /* Loop counters */
+ int idx; /* Index of pPage in pParent->apCell[] */
+ int nxDiv; /* Next divider slot in pParent->apCell[] */
+ int rc; /* The return code */
+ int iCur; /* apCell[iCur] is the cell of the cursor */
+ MemPage *pOldCurPage; /* The cursor originally points to this page */
+ int subtotal; /* Subtotal of bytes in cells on one page */
+ MemPage *extraUnref = 0; /* A page that needs to be unref-ed */
+ MemPage *apOld[NB]; /* pPage and up to two siblings */
+ Pgno pgnoOld[NB]; /* Page numbers for each page in apOld[] */
+ MemPage *apNew[NB+1]; /* pPage and up to NB siblings after balancing */
+ Pgno pgnoNew[NB+1]; /* Page numbers for each page in apNew[] */
+ int idxDiv[NB]; /* Indices of divider cells in pParent */
+ Cell *apDiv[NB]; /* Divider cells in pParent */
+ Cell aTemp[NB]; /* Temporary holding area for apDiv[] */
+ int cntNew[NB+1]; /* Index in apCell[] of cell after i-th page */
+  int szNew[NB+1]; /* Combined size of cells placed on the i-th page */
+ MemPage aOld[NB]; /* Temporary copies of pPage and its siblings */
+ Cell *apCell[(MX_CELL+2)*NB]; /* All cells from pages being balanced */
+ int szCell[(MX_CELL+2)*NB]; /* Local size of all cells */
+
+ /*
+ ** Return without doing any work if pPage is neither overfull nor
+ ** underfull.
+ */
+ assert( sqlitepager_iswriteable(pPage) );
+ if( !pPage->isOverfull && pPage->nFree<SQLITE_USABLE_SIZE/2
+ && pPage->nCell>=2){
+ relinkCellList(pBt, pPage);
+ return SQLITE_OK;
+ }
+
+ /*
+  ** Find the parent of the page to be balanced.
+ ** If there is no parent, it means this page is the root page and
+ ** special rules apply.
+ */
+ pParent = pPage->pParent;
+ if( pParent==0 ){
+ Pgno pgnoChild;
+ MemPage *pChild;
+ assert( pPage->isInit );
+ if( pPage->nCell==0 ){
+ if( pPage->u.hdr.rightChild ){
+ /*
+ ** The root page is empty. Copy the one child page
+ ** into the root page and return. This reduces the depth
+ ** of the BTree by one.
+ */
+ pgnoChild = SWAB32(pBt, pPage->u.hdr.rightChild);
+ rc = sqlitepager_get(pBt->pPager, pgnoChild, (void**)&pChild);
+ if( rc ) return rc;
+ memcpy(pPage, pChild, SQLITE_USABLE_SIZE);
+ pPage->isInit = 0;
+ rc = initPage(pBt, pPage, sqlitepager_pagenumber(pPage), 0);
+ assert( rc==SQLITE_OK );
+ reparentChildPages(pBt, pPage);
+ if( pCur && pCur->pPage==pChild ){
+ sqlitepager_unref(pChild);
+ pCur->pPage = pPage;
+ sqlitepager_ref(pPage);
+ }
+ freePage(pBt, pChild, pgnoChild);
+ sqlitepager_unref(pChild);
+ }else{
+ relinkCellList(pBt, pPage);
+ }
+ return SQLITE_OK;
+ }
+ if( !pPage->isOverfull ){
+ /* It is OK for the root page to be less than half full.
+ */
+ relinkCellList(pBt, pPage);
+ return SQLITE_OK;
+ }
+ /*
+ ** If we get to here, it means the root page is overfull.
+  ** When this happens, create a new child page and copy the
+  ** contents of the root into the child.  Then make the root
+  ** page an empty page with rightChild pointing to the new
+  ** child.  Then fall through to the code below, which will cause
+ ** the overfull child page to be split.
+ */
+ rc = sqlitepager_write(pPage);
+ if( rc ) return rc;
+ rc = allocatePage(pBt, &pChild, &pgnoChild, sqlitepager_pagenumber(pPage));
+ if( rc ) return rc;
+ assert( sqlitepager_iswriteable(pChild) );
+ copyPage(pChild, pPage);
+ pChild->pParent = pPage;
+ pChild->idxParent = 0;
+ sqlitepager_ref(pPage);
+ pChild->isOverfull = 1;
+ if( pCur && pCur->pPage==pPage ){
+ sqlitepager_unref(pPage);
+ pCur->pPage = pChild;
+ }else{
+ extraUnref = pChild;
+ }
+ zeroPage(pBt, pPage);
+ pPage->u.hdr.rightChild = SWAB32(pBt, pgnoChild);
+ pParent = pPage;
+ pPage = pChild;
+ }
+ rc = sqlitepager_write(pParent);
+ if( rc ) return rc;
+ assert( pParent->isInit );
+
+ /*
+ ** Find the Cell in the parent page whose h.leftChild points back
+ ** to pPage. The "idx" variable is the index of that cell. If pPage
+ ** is the rightmost child of pParent then set idx to pParent->nCell
+ */
+ if( pParent->idxShift ){
+ Pgno pgno, swabPgno;
+ pgno = sqlitepager_pagenumber(pPage);
+ swabPgno = SWAB32(pBt, pgno);
+ for(idx=0; idx<pParent->nCell; idx++){
+ if( pParent->apCell[idx]->h.leftChild==swabPgno ){
+ break;
+ }
+ }
+ assert( idx<pParent->nCell || pParent->u.hdr.rightChild==swabPgno );
+ }else{
+ idx = pPage->idxParent;
+ }
+
+ /*
+ ** Initialize variables so that it will be safe to jump
+ ** directly to balance_cleanup at any moment.
+ */
+ nOld = nNew = 0;
+ sqlitepager_ref(pParent);
+
+ /*
+ ** Find sibling pages to pPage and the Cells in pParent that divide
+ ** the siblings. An attempt is made to find NN siblings on either
+ ** side of pPage. More siblings are taken from one side, however, if
+  ** there are fewer than NN siblings on the other side of pPage.  If pParent
+ ** has NB or fewer children then all children of pParent are taken.
+ */
+ nxDiv = idx - NN;
+ if( nxDiv + NB > pParent->nCell ){
+ nxDiv = pParent->nCell - NB + 1;
+ }
+ if( nxDiv<0 ){
+ nxDiv = 0;
+ }
+ nDiv = 0;
+ for(i=0, k=nxDiv; i<NB; i++, k++){
+ if( k<pParent->nCell ){
+ idxDiv[i] = k;
+ apDiv[i] = pParent->apCell[k];
+ nDiv++;
+ pgnoOld[i] = SWAB32(pBt, apDiv[i]->h.leftChild);
+ }else if( k==pParent->nCell ){
+ pgnoOld[i] = SWAB32(pBt, pParent->u.hdr.rightChild);
+ }else{
+ break;
+ }
+ rc = sqlitepager_get(pBt->pPager, pgnoOld[i], (void**)&apOld[i]);
+ if( rc ) goto balance_cleanup;
+ rc = initPage(pBt, apOld[i], pgnoOld[i], pParent);
+ if( rc ) goto balance_cleanup;
+ apOld[i]->idxParent = k;
+ nOld++;
+ }
+
+ /*
+ ** Set iCur to be the index in apCell[] of the cell that the cursor
+ ** is pointing to. We will need this later on in order to keep the
+ ** cursor pointing at the same cell. If pCur points to a page that
+ ** has no involvement with this rebalancing, then set iCur to a large
+ ** number so that the iCur==j tests always fail in the main cell
+ ** distribution loop below.
+ */
+ if( pCur ){
+ iCur = 0;
+ for(i=0; i<nOld; i++){
+ if( pCur->pPage==apOld[i] ){
+ iCur += pCur->idx;
+ break;
+ }
+ iCur += apOld[i]->nCell;
+ if( i<nOld-1 && pCur->pPage==pParent && pCur->idx==idxDiv[i] ){
+ break;
+ }
+ iCur++;
+ }
+ pOldCurPage = pCur->pPage;
+ }
+
+ /*
+ ** Make copies of the content of pPage and its siblings into aOld[].
+ ** The rest of this function will use data from the copies rather
+  ** than the original pages since the original pages will be in the
+ ** process of being overwritten.
+ */
+ for(i=0; i<nOld; i++){
+ copyPage(&aOld[i], apOld[i]);
+ }
+
+ /*
+ ** Load pointers to all cells on sibling pages and the divider cells
+ ** into the local apCell[] array. Make copies of the divider cells
+  ** into aTemp[] and remove the divider Cells from pParent.
+ */
+ nCell = 0;
+ for(i=0; i<nOld; i++){
+ MemPage *pOld = &aOld[i];
+ for(j=0; j<pOld->nCell; j++){
+ apCell[nCell] = pOld->apCell[j];
+ szCell[nCell] = cellSize(pBt, apCell[nCell]);
+ nCell++;
+ }
+ if( i<nOld-1 ){
+ szCell[nCell] = cellSize(pBt, apDiv[i]);
+ memcpy(&aTemp[i], apDiv[i], szCell[nCell]);
+ apCell[nCell] = &aTemp[i];
+ dropCell(pBt, pParent, nxDiv, szCell[nCell]);
+ assert( SWAB32(pBt, apCell[nCell]->h.leftChild)==pgnoOld[i] );
+ apCell[nCell]->h.leftChild = pOld->u.hdr.rightChild;
+ nCell++;
+ }
+ }
+
+ /*
+ ** Figure out the number of pages needed to hold all nCell cells.
+ ** Store this number in "k". Also compute szNew[] which is the total
+ ** size of all cells on the i-th page and cntNew[] which is the index
+  ** in apCell[] of the cell that divides page i from page i+1.
+ ** cntNew[k] should equal nCell.
+ **
+ ** This little patch of code is critical for keeping the tree
+ ** balanced.
+ */
+ for(subtotal=k=i=0; i<nCell; i++){
+ subtotal += szCell[i];
+ if( subtotal > USABLE_SPACE ){
+ szNew[k] = subtotal - szCell[i];
+ cntNew[k] = i;
+ subtotal = 0;
+ k++;
+ }
+ }
+ szNew[k] = subtotal;
+ cntNew[k] = nCell;
+ k++;
+ for(i=k-1; i>0; i--){
+ while( szNew[i]<USABLE_SPACE/2 ){
+ cntNew[i-1]--;
+ assert( cntNew[i-1]>0 );
+ szNew[i] += szCell[cntNew[i-1]];
+ szNew[i-1] -= szCell[cntNew[i-1]-1];
+ }
+ }
+ assert( cntNew[0]>0 );
+
+ /*
+ ** Allocate k new pages. Reuse old pages where possible.
+ */
+ for(i=0; i<k; i++){
+ if( i<nOld ){
+ apNew[i] = apOld[i];
+ pgnoNew[i] = pgnoOld[i];
+ apOld[i] = 0;
+ rc = sqlitepager_write(apNew[i]);
+ if( rc ) goto balance_cleanup;
+ }else{
+ rc = allocatePage(pBt, &apNew[i], &pgnoNew[i], pgnoNew[i-1]);
+ if( rc ) goto balance_cleanup;
+ }
+ nNew++;
+ zeroPage(pBt, apNew[i]);
+ apNew[i]->isInit = 1;
+ }
+
+ /* Free any old pages that were not reused as new pages.
+ */
+ while( i<nOld ){
+ rc = freePage(pBt, apOld[i], pgnoOld[i]);
+ if( rc ) goto balance_cleanup;
+ sqlitepager_unref(apOld[i]);
+ apOld[i] = 0;
+ i++;
+ }
+
+ /*
+  ** Put the new pages in ascending order.  This helps to
+ ** keep entries in the disk file in order so that a scan
+ ** of the table is a linear scan through the file. That
+ ** in turn helps the operating system to deliver pages
+ ** from the disk more rapidly.
+ **
+ ** An O(n^2) insertion sort algorithm is used, but since
+ ** n is never more than NB (a small constant), that should
+ ** not be a problem.
+ **
+ ** When NB==3, this one optimization makes the database
+ ** about 25% faster for large insertions and deletions.
+ */
+ for(i=0; i<k-1; i++){
+ int minV = pgnoNew[i];
+ int minI = i;
+ for(j=i+1; j<k; j++){
+ if( pgnoNew[j]<(unsigned)minV ){
+ minI = j;
+ minV = pgnoNew[j];
+ }
+ }
+ if( minI>i ){
+ int t;
+ MemPage *pT;
+ t = pgnoNew[i];
+ pT = apNew[i];
+ pgnoNew[i] = pgnoNew[minI];
+ apNew[i] = apNew[minI];
+ pgnoNew[minI] = t;
+ apNew[minI] = pT;
+ }
+ }
+
+ /*
+ ** Evenly distribute the data in apCell[] across the new pages.
+ ** Insert divider cells into pParent as necessary.
+ */
+ j = 0;
+ for(i=0; i<nNew; i++){
+ MemPage *pNew = apNew[i];
+ while( j<cntNew[i] ){
+ assert( pNew->nFree>=szCell[j] );
+ if( pCur && iCur==j ){ pCur->pPage = pNew; pCur->idx = pNew->nCell; }
+ insertCell(pBt, pNew, pNew->nCell, apCell[j], szCell[j]);
+ j++;
+ }
+ assert( pNew->nCell>0 );
+ assert( !pNew->isOverfull );
+ relinkCellList(pBt, pNew);
+ if( i<nNew-1 && j<nCell ){
+ pNew->u.hdr.rightChild = apCell[j]->h.leftChild;
+ apCell[j]->h.leftChild = SWAB32(pBt, pgnoNew[i]);
+ if( pCur && iCur==j ){ pCur->pPage = pParent; pCur->idx = nxDiv; }
+ insertCell(pBt, pParent, nxDiv, apCell[j], szCell[j]);
+ j++;
+ nxDiv++;
+ }
+ }
+ assert( j==nCell );
+ apNew[nNew-1]->u.hdr.rightChild = aOld[nOld-1].u.hdr.rightChild;
+ if( nxDiv==pParent->nCell ){
+ pParent->u.hdr.rightChild = SWAB32(pBt, pgnoNew[nNew-1]);
+ }else{
+ pParent->apCell[nxDiv]->h.leftChild = SWAB32(pBt, pgnoNew[nNew-1]);
+ }
+ if( pCur ){
+ if( j<=iCur && pCur->pPage==pParent && pCur->idx>idxDiv[nOld-1] ){
+ assert( pCur->pPage==pOldCurPage );
+ pCur->idx += nNew - nOld;
+ }else{
+ assert( pOldCurPage!=0 );
+ sqlitepager_ref(pCur->pPage);
+ sqlitepager_unref(pOldCurPage);
+ }
+ }
+
+ /*
+ ** Reparent children of all cells.
+ */
+ for(i=0; i<nNew; i++){
+ reparentChildPages(pBt, apNew[i]);
+ }
+ reparentChildPages(pBt, pParent);
+
+ /*
+ ** balance the parent page.
+ */
+ rc = balance(pBt, pParent, pCur);
+
+ /*
+ ** Cleanup before returning.
+ */
+balance_cleanup:
+ if( extraUnref ){
+ sqlitepager_unref(extraUnref);
+ }
+ for(i=0; i<nOld; i++){
+ if( apOld[i]!=0 && apOld[i]!=&aOld[i] ) sqlitepager_unref(apOld[i]);
+ }
+ for(i=0; i<nNew; i++){
+ sqlitepager_unref(apNew[i]);
+ }
+ if( pCur && pCur->pPage==0 ){
+ pCur->pPage = pParent;
+ pCur->idx = 0;
+ }else{
+ sqlitepager_unref(pParent);
+ }
+ return rc;
+}
+
+/*
+** This routine checks all cursors that point to the same table
+** as pCur points to. If any of those cursors were opened with
+** wrFlag==0 then this routine returns SQLITE_LOCKED. If all
+** cursors that point to the same table were opened with wrFlag==1
+** then this routine returns SQLITE_OK.
+**
+** In addition to checking for read-locks (where a read-lock
+** means a cursor opened with wrFlag==0) this routine also moves
+** all cursors other than pCur so that they are pointing to the
+** first Cell on root page. This is necessary because an insert
+** or delete might change the number of cells on a page or delete
+** a page entirely and we do not want to leave any cursors
+** pointing to non-existent pages or cells.
+*/
+static int checkReadLocks(BtCursor *pCur){
+ BtCursor *p;
+ assert( pCur->wrFlag );
+ for(p=pCur->pShared; p!=pCur; p=p->pShared){
+ assert( p );
+ assert( p->pgnoRoot==pCur->pgnoRoot );
+ if( p->wrFlag==0 ) return SQLITE_LOCKED;
+ if( sqlitepager_pagenumber(p->pPage)!=p->pgnoRoot ){
+ moveToRoot(p);
+ }
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Insert a new record into the BTree. The key is given by (pKey,nKey)
+** and the data is given by (pData,nData). The cursor is used only to
+** define what database the record should be inserted into. The cursor
+** is left pointing at the new record.
+*/
+static int fileBtreeInsert(
+ BtCursor *pCur, /* Insert data into the table of this cursor */
+ const void *pKey, int nKey, /* The key of the new record */
+ const void *pData, int nData /* The data of the new record */
+){
+ Cell newCell;
+ int rc;
+ int loc;
+ int szNew;
+ MemPage *pPage;
+ Btree *pBt = pCur->pBt;
+
+ if( pCur->pPage==0 ){
+ return SQLITE_ABORT; /* A rollback destroyed this cursor */
+ }
+ if( !pBt->inTrans || nKey+nData==0 ){
+ /* Must start a transaction before doing an insert */
+ return pBt->readOnly ? SQLITE_READONLY : SQLITE_ERROR;
+ }
+ assert( !pBt->readOnly );
+ if( !pCur->wrFlag ){
+ return SQLITE_PERM; /* Cursor not open for writing */
+ }
+ if( checkReadLocks(pCur) ){
+ return SQLITE_LOCKED; /* The table pCur points to has a read lock */
+ }
+ rc = fileBtreeMoveto(pCur, pKey, nKey, &loc);
+ if( rc ) return rc;
+ pPage = pCur->pPage;
+ assert( pPage->isInit );
+ rc = sqlitepager_write(pPage);
+ if( rc ) return rc;
+ rc = fillInCell(pBt, &newCell, pKey, nKey, pData, nData);
+ if( rc ) return rc;
+ szNew = cellSize(pBt, &newCell);
+ if( loc==0 ){
+ newCell.h.leftChild = pPage->apCell[pCur->idx]->h.leftChild;
+ rc = clearCell(pBt, pPage->apCell[pCur->idx]);
+ if( rc ) return rc;
+ dropCell(pBt, pPage, pCur->idx, cellSize(pBt, pPage->apCell[pCur->idx]));
+ }else if( loc<0 && pPage->nCell>0 ){
+ assert( pPage->u.hdr.rightChild==0 ); /* Must be a leaf page */
+ pCur->idx++;
+ }else{
+ assert( pPage->u.hdr.rightChild==0 ); /* Must be a leaf page */
+ }
+ insertCell(pBt, pPage, pCur->idx, &newCell, szNew);
+ rc = balance(pCur->pBt, pPage, pCur);
+ /* sqliteBtreePageDump(pCur->pBt, pCur->pgnoRoot, 1); */
+ /* fflush(stdout); */
+ pCur->eSkip = SKIP_INVALID;
+ return rc;
+}
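In normal operation this insert path is reached through the cursor method table rather than by calling fileBtreeInsert() directly. The sketch below is illustrative only (the mode and cache-size arguments to sqliteBtreeOpen() are placeholders); it shows the calling sequence through the wrapper macros declared in btree.h later in this change.

static int example_insert(const char *zFile){
  /* Illustrative sketch, not part of the original source. */
  Btree *pBt;
  BtCursor *pCur;
  int iTable, rc;

  rc = sqliteBtreeOpen(zFile, 0, 100, &pBt);       /* placeholder mode/cache args */
  if( rc!=SQLITE_OK ) return rc;
  rc = sqliteBtreeBeginTrans(pBt);                 /* inserts require a transaction */
  if( rc==SQLITE_OK ) rc = sqliteBtreeCreateTable(pBt, &iTable);
  if( rc==SQLITE_OK ) rc = sqliteBtreeCursor(pBt, iTable, 1, &pCur);  /* wrFlag==1 */
  if( rc==SQLITE_OK ){
    rc = sqliteBtreeInsert(pCur, "key", 3, "data", 4);
    sqliteBtreeCloseCursor(pCur);
  }
  if( rc==SQLITE_OK ) rc = sqliteBtreeCommit(pBt);
  else sqliteBtreeRollback(pBt);
  sqliteBtreeClose(pBt);
  return rc;
}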
+
+/*
+** Delete the entry that the cursor is pointing to.
+**
+** The cursor is left pointing at either the next or the previous
+** entry. If the cursor is left pointing to the next entry, then
+** the pCur->eSkip flag is set to SKIP_NEXT which forces the next call to
+** sqliteBtreeNext() to be a no-op. That way, you can always call
+** sqliteBtreeNext() after a delete and the cursor will be left
+** pointing to the first entry after the deleted entry. Similarly,
+** pCur->eSkip is set to SKIP_PREV if the cursor is left pointing to
+** the entry prior to the deleted entry so that a subsequent call to
+** sqliteBtreePrevious() will always leave the cursor pointing at the
+** entry immediately before the one that was deleted.
+*/
+static int fileBtreeDelete(BtCursor *pCur){
+ MemPage *pPage = pCur->pPage;
+ Cell *pCell;
+ int rc;
+ Pgno pgnoChild;
+ Btree *pBt = pCur->pBt;
+
+ assert( pPage->isInit );
+ if( pCur->pPage==0 ){
+ return SQLITE_ABORT; /* A rollback destroyed this cursor */
+ }
+ if( !pBt->inTrans ){
+ /* Must start a transaction before doing a delete */
+ return pBt->readOnly ? SQLITE_READONLY : SQLITE_ERROR;
+ }
+ assert( !pBt->readOnly );
+ if( pCur->idx >= pPage->nCell ){
+ return SQLITE_ERROR; /* The cursor is not pointing to anything */
+ }
+ if( !pCur->wrFlag ){
+ return SQLITE_PERM; /* Did not open this cursor for writing */
+ }
+ if( checkReadLocks(pCur) ){
+ return SQLITE_LOCKED; /* The table pCur points to has a read lock */
+ }
+ rc = sqlitepager_write(pPage);
+ if( rc ) return rc;
+ pCell = pPage->apCell[pCur->idx];
+ pgnoChild = SWAB32(pBt, pCell->h.leftChild);
+ rc = clearCell(pBt, pCell);
+ if( rc ) return rc;
+ if( pgnoChild ){
+ /*
+ ** The entry we are about to delete is not a leaf so if we do not
+ ** do something we will leave a hole on an internal page.
+ ** We have to fill the hole by moving in a cell from a leaf. The
+ ** next Cell after the one to be deleted is guaranteed to exist and
+ ** to be a leaf so we can use it.
+ */
+ BtCursor leafCur;
+ Cell *pNext;
+ int szNext;
+ int notUsed;
+ getTempCursor(pCur, &leafCur);
+ rc = fileBtreeNext(&leafCur, &notUsed);
+ if( rc!=SQLITE_OK ){
+ if( rc!=SQLITE_NOMEM ) rc = SQLITE_CORRUPT;
+ return rc;
+ }
+ rc = sqlitepager_write(leafCur.pPage);
+ if( rc ) return rc;
+ dropCell(pBt, pPage, pCur->idx, cellSize(pBt, pCell));
+ pNext = leafCur.pPage->apCell[leafCur.idx];
+ szNext = cellSize(pBt, pNext);
+ pNext->h.leftChild = SWAB32(pBt, pgnoChild);
+ insertCell(pBt, pPage, pCur->idx, pNext, szNext);
+ rc = balance(pBt, pPage, pCur);
+ if( rc ) return rc;
+ pCur->eSkip = SKIP_NEXT;
+ dropCell(pBt, leafCur.pPage, leafCur.idx, szNext);
+ rc = balance(pBt, leafCur.pPage, pCur);
+ releaseTempCursor(&leafCur);
+ }else{
+ dropCell(pBt, pPage, pCur->idx, cellSize(pBt, pCell));
+ if( pCur->idx>=pPage->nCell ){
+ pCur->idx = pPage->nCell-1;
+ if( pCur->idx<0 ){
+ pCur->idx = 0;
+ pCur->eSkip = SKIP_NEXT;
+ }else{
+ pCur->eSkip = SKIP_PREV;
+ }
+ }else{
+ pCur->eSkip = SKIP_NEXT;
+ }
+ rc = balance(pBt, pPage, pCur);
+ }
+ return rc;
+}
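The eSkip handling above is what makes a scan-and-delete loop safe: after a delete, the next sqliteBtreeNext() lands on the entry following the deleted one. A hypothetical sketch of such a loop through the cursor macros (illustrative only; shouldDelete() is an assumed application predicate, and the cursor must have been opened with wrFlag==1):

static int example_purge(BtCursor *pCur){
  /* Illustrative sketch, not part of the original source. */
  int rc, res;
  rc = sqliteBtreeFirst(pCur, &res);      /* res!=0 means the table is empty */
  while( rc==SQLITE_OK && res==0 ){
    if( shouldDelete(pCur) ){             /* hypothetical predicate */
      rc = sqliteBtreeDelete(pCur);
      if( rc ) break;
    }
    /* Because of eSkip, this Next() never skips over the entry that
    ** follows a just-deleted record. */
    rc = sqliteBtreeNext(pCur, &res);     /* res!=0 means past the last entry */
  }
  return rc;
}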
+
+/*
+** Create a new BTree table. Write into *piTable the page
+** number for the root page of the new table.
+**
+** In the current implementation, BTree tables and BTree indices are the
+** same.  In the future, we may change this so that BTree tables
+** are restricted to having a 4-byte integer key and arbitrary data and
+** BTree indices are restricted to having an arbitrary key and no data.
+** But for now, this routine also serves to create indices.
+*/
+static int fileBtreeCreateTable(Btree *pBt, int *piTable){
+ MemPage *pRoot;
+ Pgno pgnoRoot;
+ int rc;
+ if( !pBt->inTrans ){
+ /* Must start a transaction first */
+ return pBt->readOnly ? SQLITE_READONLY : SQLITE_ERROR;
+ }
+ if( pBt->readOnly ){
+ return SQLITE_READONLY;
+ }
+ rc = allocatePage(pBt, &pRoot, &pgnoRoot, 0);
+ if( rc ) return rc;
+ assert( sqlitepager_iswriteable(pRoot) );
+ zeroPage(pBt, pRoot);
+ sqlitepager_unref(pRoot);
+ *piTable = (int)pgnoRoot;
+ return SQLITE_OK;
+}
+
+/*
+** Erase the given database page and all its children. Return
+** the page to the freelist.
+*/
+static int clearDatabasePage(Btree *pBt, Pgno pgno, int freePageFlag){
+ MemPage *pPage;
+ int rc;
+ Cell *pCell;
+ int idx;
+
+ rc = sqlitepager_get(pBt->pPager, pgno, (void**)&pPage);
+ if( rc ) return rc;
+ rc = sqlitepager_write(pPage);
+ if( rc ) return rc;
+ rc = initPage(pBt, pPage, pgno, 0);
+ if( rc ) return rc;
+ idx = SWAB16(pBt, pPage->u.hdr.firstCell);
+ while( idx>0 ){
+ pCell = (Cell*)&pPage->u.aDisk[idx];
+ idx = SWAB16(pBt, pCell->h.iNext);
+ if( pCell->h.leftChild ){
+ rc = clearDatabasePage(pBt, SWAB32(pBt, pCell->h.leftChild), 1);
+ if( rc ) return rc;
+ }
+ rc = clearCell(pBt, pCell);
+ if( rc ) return rc;
+ }
+ if( pPage->u.hdr.rightChild ){
+ rc = clearDatabasePage(pBt, SWAB32(pBt, pPage->u.hdr.rightChild), 1);
+ if( rc ) return rc;
+ }
+ if( freePageFlag ){
+ rc = freePage(pBt, pPage, pgno);
+ }else{
+ zeroPage(pBt, pPage);
+ }
+ sqlitepager_unref(pPage);
+ return rc;
+}
+
+/*
+** Delete all information from a single table in the database.
+*/
+static int fileBtreeClearTable(Btree *pBt, int iTable){
+ int rc;
+ BtCursor *pCur;
+ if( !pBt->inTrans ){
+ return pBt->readOnly ? SQLITE_READONLY : SQLITE_ERROR;
+ }
+ for(pCur=pBt->pCursor; pCur; pCur=pCur->pNext){
+ if( pCur->pgnoRoot==(Pgno)iTable ){
+ if( pCur->wrFlag==0 ) return SQLITE_LOCKED;
+ moveToRoot(pCur);
+ }
+ }
+ rc = clearDatabasePage(pBt, (Pgno)iTable, 0);
+ if( rc ){
+ fileBtreeRollback(pBt);
+ }
+ return rc;
+}
+
+/*
+** Erase all information in a table and add the root of the table to
+** the freelist.  Except, the root of the principal table (the one on
+** page 2) is never added to the freelist.
+*/
+static int fileBtreeDropTable(Btree *pBt, int iTable){
+ int rc;
+ MemPage *pPage;
+ BtCursor *pCur;
+ if( !pBt->inTrans ){
+ return pBt->readOnly ? SQLITE_READONLY : SQLITE_ERROR;
+ }
+ for(pCur=pBt->pCursor; pCur; pCur=pCur->pNext){
+ if( pCur->pgnoRoot==(Pgno)iTable ){
+ return SQLITE_LOCKED; /* Cannot drop a table that has a cursor */
+ }
+ }
+ rc = sqlitepager_get(pBt->pPager, (Pgno)iTable, (void**)&pPage);
+ if( rc ) return rc;
+ rc = fileBtreeClearTable(pBt, iTable);
+ if( rc ) return rc;
+ if( iTable>2 ){
+ rc = freePage(pBt, pPage, iTable);
+ }else{
+ zeroPage(pBt, pPage);
+ }
+ sqlitepager_unref(pPage);
+ return rc;
+}
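Dropping a table follows the same transactional pattern as the other write paths; a hypothetical sketch (illustrative only), noting that the call fails with SQLITE_LOCKED while any cursor is open on the table:

static int example_drop(Btree *pBt, int iTable){
  /* Illustrative sketch, not part of the original source.  iTable would
  ** have come from an earlier sqliteBtreeCreateTable() call. */
  int rc = sqliteBtreeBeginTrans(pBt);
  if( rc==SQLITE_OK ) rc = sqliteBtreeDropTable(pBt, iTable);
  if( rc==SQLITE_OK ) rc = sqliteBtreeCommit(pBt);
  else sqliteBtreeRollback(pBt);
  return rc;
}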
+
+#if 0 /* UNTESTED */
+/*
+** Copy all cell data from one database file into another.
+*/
+static int copyCell(Btree *pBtFrom, Btree *pBtTo, Cell *pCell){
+ Pager *pFromPager = pBtFrom->pPager;
+ OverflowPage *pOvfl;
+ Pgno ovfl, nextOvfl;
+ Pgno *pPrev;
+ int rc = SQLITE_OK;
+ MemPage *pNew, *pPrevPg;
+ Pgno new;
+
+ if( NKEY(pBtTo, pCell->h) + NDATA(pBtTo, pCell->h) <= MX_LOCAL_PAYLOAD ){
+ return SQLITE_OK;
+ }
+ pPrev = &pCell->ovfl;
+ pPrevPg = 0;
+ ovfl = SWAB32(pBtTo, pCell->ovfl);
+ while( ovfl && rc==SQLITE_OK ){
+ rc = sqlitepager_get(pFromPager, ovfl, (void**)&pOvfl);
+ if( rc ) return rc;
+ nextOvfl = SWAB32(pBtFrom, pOvfl->iNext);
+ rc = allocatePage(pBtTo, &pNew, &new, 0);
+ if( rc==SQLITE_OK ){
+ rc = sqlitepager_write(pNew);
+ if( rc==SQLITE_OK ){
+ memcpy(pNew, pOvfl, SQLITE_USABLE_SIZE);
+ *pPrev = SWAB32(pBtTo, new);
+ if( pPrevPg ){
+ sqlitepager_unref(pPrevPg);
+ }
+ pPrev = &pOvfl->iNext;
+ pPrevPg = pNew;
+ }
+ }
+ sqlitepager_unref(pOvfl);
+ ovfl = nextOvfl;
+ }
+ if( pPrevPg ){
+ sqlitepager_unref(pPrevPg);
+ }
+ return rc;
+}
+#endif
+
+
+#if 0 /* UNTESTED */
+/*
+** Copy a page of data from one database over to another.
+*/
+static int copyDatabasePage(
+ Btree *pBtFrom,
+ Pgno pgnoFrom,
+ Btree *pBtTo,
+ Pgno *pTo
+){
+ MemPage *pPageFrom, *pPage;
+ Pgno to;
+ int rc;
+ Cell *pCell;
+ int idx;
+
+  rc = sqlitepager_get(pBtFrom->pPager, pgnoFrom, (void**)&pPageFrom);
+ if( rc ) return rc;
+  rc = allocatePage(pBtTo, &pPage, pTo, 0);
+ if( rc==SQLITE_OK ){
+ rc = sqlitepager_write(pPage);
+ }
+ if( rc==SQLITE_OK ){
+ memcpy(pPage, pPageFrom, SQLITE_USABLE_SIZE);
+    idx = SWAB16(pBtTo, pPage->u.hdr.firstCell);
+ while( idx>0 ){
+ pCell = (Cell*)&pPage->u.aDisk[idx];
+      idx = SWAB16(pBtTo, pCell->h.iNext);
+ if( pCell->h.leftChild ){
+ Pgno newChld;
+ rc = copyDatabasePage(pBtFrom, SWAB32(pBtFrom, pCell->h.leftChild),
+ pBtTo, &newChld);
+ if( rc ) return rc;
+ pCell->h.leftChild = SWAB32(pBtFrom, newChld);
+ }
+ rc = copyCell(pBtFrom, pBtTo, pCell);
+ if( rc ) return rc;
+ }
+ if( pPage->u.hdr.rightChild ){
+ Pgno newChld;
+ rc = copyDatabasePage(pBtFrom, SWAB32(pBtFrom, pPage->u.hdr.rightChild),
+ pBtTo, &newChld);
+ if( rc ) return rc;
+      pPage->u.hdr.rightChild = SWAB32(pBtTo, newChld);
+ }
+ }
+ sqlitepager_unref(pPage);
+ return rc;
+}
+#endif
+
+/*
+** Read the meta-information out of a database file.
+*/
+static int fileBtreeGetMeta(Btree *pBt, int *aMeta){
+ PageOne *pP1;
+ int rc;
+ int i;
+
+ rc = sqlitepager_get(pBt->pPager, 1, (void**)&pP1);
+ if( rc ) return rc;
+ aMeta[0] = SWAB32(pBt, pP1->nFree);
+ for(i=0; i<sizeof(pP1->aMeta)/sizeof(pP1->aMeta[0]); i++){
+ aMeta[i+1] = SWAB32(pBt, pP1->aMeta[i]);
+ }
+ sqlitepager_unref(pP1);
+ return SQLITE_OK;
+}
+
+/*
+** Write meta-information back into the database.
+*/
+static int fileBtreeUpdateMeta(Btree *pBt, int *aMeta){
+ PageOne *pP1;
+ int rc, i;
+ if( !pBt->inTrans ){
+ return pBt->readOnly ? SQLITE_READONLY : SQLITE_ERROR;
+ }
+ pP1 = pBt->page1;
+ rc = sqlitepager_write(pP1);
+ if( rc ) return rc;
+ for(i=0; i<sizeof(pP1->aMeta)/sizeof(pP1->aMeta[0]); i++){
+ pP1->aMeta[i] = SWAB32(pBt, aMeta[i+1]);
+ }
+ return SQLITE_OK;
+}
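A sketch of how a caller might read and rewrite the meta values through the btree.h macros. This is illustrative only: the use of slot 1 is an arbitrary choice here, and aMeta[0] (the free-page count filled in by fileBtreeGetMeta) is read-only.

static int example_bump_meta(Btree *pBt){
  /* Illustrative sketch, not part of the original source.  Assumes a
  ** transaction is already open on pBt; slot 1 is chosen arbitrarily. */
  int aMeta[SQLITE_N_BTREE_META];
  int rc = sqliteBtreeGetMeta(pBt, aMeta);
  if( rc==SQLITE_OK ){
    aMeta[1]++;
    rc = sqliteBtreeUpdateMeta(pBt, aMeta);   /* aMeta[0] is ignored on write */
  }
  return rc;
}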
+
+/******************************************************************************
+** The complete implementation of the BTree subsystem is above this line.
+** All the code that follows is for testing and troubleshooting the BTree
+** subsystem. None of the code that follows is used during normal operation.
+******************************************************************************/
+
+/*
+** Print a disassembly of the given page on standard output. This routine
+** is used for debugging and testing only.
+*/
+#ifdef SQLITE_TEST
+static int fileBtreePageDump(Btree *pBt, int pgno, int recursive){
+ int rc;
+ MemPage *pPage;
+ int i, j;
+ int nFree;
+ u16 idx;
+ char range[20];
+ unsigned char payload[20];
+ rc = sqlitepager_get(pBt->pPager, (Pgno)pgno, (void**)&pPage);
+ if( rc ){
+ return rc;
+ }
+ if( recursive ) printf("PAGE %d:\n", pgno);
+ i = 0;
+ idx = SWAB16(pBt, pPage->u.hdr.firstCell);
+ while( idx>0 && idx<=SQLITE_USABLE_SIZE-MIN_CELL_SIZE ){
+ Cell *pCell = (Cell*)&pPage->u.aDisk[idx];
+ int sz = cellSize(pBt, pCell);
+ sprintf(range,"%d..%d", idx, idx+sz-1);
+ sz = NKEY(pBt, pCell->h) + NDATA(pBt, pCell->h);
+ if( sz>sizeof(payload)-1 ) sz = sizeof(payload)-1;
+ memcpy(payload, pCell->aPayload, sz);
+ for(j=0; j<sz; j++){
+ if( payload[j]<0x20 || payload[j]>0x7f ) payload[j] = '.';
+ }
+ payload[sz] = 0;
+ printf(
+ "cell %2d: i=%-10s chld=%-4d nk=%-4d nd=%-4d payload=%s\n",
+ i, range, (int)pCell->h.leftChild,
+ NKEY(pBt, pCell->h), NDATA(pBt, pCell->h),
+ payload
+ );
+ if( pPage->isInit && pPage->apCell[i]!=pCell ){
+ printf("**** apCell[%d] does not match on prior entry ****\n", i);
+ }
+ i++;
+ idx = SWAB16(pBt, pCell->h.iNext);
+ }
+ if( idx!=0 ){
+ printf("ERROR: next cell index out of range: %d\n", idx);
+ }
+ printf("right_child: %d\n", SWAB32(pBt, pPage->u.hdr.rightChild));
+ nFree = 0;
+ i = 0;
+ idx = SWAB16(pBt, pPage->u.hdr.firstFree);
+ while( idx>0 && idx<SQLITE_USABLE_SIZE ){
+ FreeBlk *p = (FreeBlk*)&pPage->u.aDisk[idx];
+ sprintf(range,"%d..%d", idx, idx+p->iSize-1);
+ nFree += SWAB16(pBt, p->iSize);
+ printf("freeblock %2d: i=%-10s size=%-4d total=%d\n",
+ i, range, SWAB16(pBt, p->iSize), nFree);
+ idx = SWAB16(pBt, p->iNext);
+ i++;
+ }
+ if( idx!=0 ){
+ printf("ERROR: next freeblock index out of range: %d\n", idx);
+ }
+ if( recursive && pPage->u.hdr.rightChild!=0 ){
+ idx = SWAB16(pBt, pPage->u.hdr.firstCell);
+ while( idx>0 && idx<SQLITE_USABLE_SIZE-MIN_CELL_SIZE ){
+ Cell *pCell = (Cell*)&pPage->u.aDisk[idx];
+ fileBtreePageDump(pBt, SWAB32(pBt, pCell->h.leftChild), 1);
+ idx = SWAB16(pBt, pCell->h.iNext);
+ }
+ fileBtreePageDump(pBt, SWAB32(pBt, pPage->u.hdr.rightChild), 1);
+ }
+ sqlitepager_unref(pPage);
+ return SQLITE_OK;
+}
+#endif
+
+#ifdef SQLITE_TEST
+/*
+** Fill aResult[] with information about the entry and page that the
+** cursor is pointing to.
+**
+** aResult[0] = The page number
+** aResult[1] = The entry number
+** aResult[2] = Total number of entries on this page
+** aResult[3] = Size of this entry
+** aResult[4] = Number of free bytes on this page
+** aResult[5] = Number of free blocks on the page
+** aResult[6] = Page number of the left child of this entry
+** aResult[7] = Page number of the right child for the whole page
+**
+** This routine is used for testing and debugging only.
+*/
+static int fileBtreeCursorDump(BtCursor *pCur, int *aResult){
+ int cnt, idx;
+ MemPage *pPage = pCur->pPage;
+ Btree *pBt = pCur->pBt;
+ aResult[0] = sqlitepager_pagenumber(pPage);
+ aResult[1] = pCur->idx;
+ aResult[2] = pPage->nCell;
+ if( pCur->idx>=0 && pCur->idx<pPage->nCell ){
+ aResult[3] = cellSize(pBt, pPage->apCell[pCur->idx]);
+ aResult[6] = SWAB32(pBt, pPage->apCell[pCur->idx]->h.leftChild);
+ }else{
+ aResult[3] = 0;
+ aResult[6] = 0;
+ }
+ aResult[4] = pPage->nFree;
+ cnt = 0;
+ idx = SWAB16(pBt, pPage->u.hdr.firstFree);
+ while( idx>0 && idx<SQLITE_USABLE_SIZE ){
+ cnt++;
+ idx = SWAB16(pBt, ((FreeBlk*)&pPage->u.aDisk[idx])->iNext);
+ }
+ aResult[5] = cnt;
+ aResult[7] = SWAB32(pBt, pPage->u.hdr.rightChild);
+ return SQLITE_OK;
+}
+#endif
+
+/*
+** Return the pager associated with a BTree. This routine is used for
+** testing and debugging only.
+*/
+static Pager *fileBtreePager(Btree *pBt){
+ return pBt->pPager;
+}
+
+/*
+** This structure is passed around through all the sanity checking routines
+** in order to keep track of some global state information.
+*/
+typedef struct IntegrityCk IntegrityCk;
+struct IntegrityCk {
+ Btree *pBt; /* The tree being checked out */
+ Pager *pPager; /* The associated pager. Also accessible by pBt->pPager */
+ int nPage; /* Number of pages in the database */
+ int *anRef; /* Number of times each page is referenced */
+  char *zErrMsg;    /* An error message.  NULL if no errors seen. */
+};
+
+/*
+** Append a message to the error message string.
+*/
+static void checkAppendMsg(IntegrityCk *pCheck, char *zMsg1, char *zMsg2){
+ if( pCheck->zErrMsg ){
+ char *zOld = pCheck->zErrMsg;
+ pCheck->zErrMsg = 0;
+ sqliteSetString(&pCheck->zErrMsg, zOld, "\n", zMsg1, zMsg2, (char*)0);
+ sqliteFree(zOld);
+ }else{
+ sqliteSetString(&pCheck->zErrMsg, zMsg1, zMsg2, (char*)0);
+ }
+}
+
+/*
+** Add 1 to the reference count for page iPage. If this is the second
+** reference to the page, add an error message to pCheck->zErrMsg.
+** Return 1 if there are 2 or more references to the page and 0 if
+** this is the first reference to the page.
+**
+** Also check that the page number is in bounds.
+*/
+static int checkRef(IntegrityCk *pCheck, int iPage, char *zContext){
+ if( iPage==0 ) return 1;
+ if( iPage>pCheck->nPage || iPage<0 ){
+ char zBuf[100];
+ sprintf(zBuf, "invalid page number %d", iPage);
+ checkAppendMsg(pCheck, zContext, zBuf);
+ return 1;
+ }
+ if( pCheck->anRef[iPage]==1 ){
+ char zBuf[100];
+ sprintf(zBuf, "2nd reference to page %d", iPage);
+ checkAppendMsg(pCheck, zContext, zBuf);
+ return 1;
+ }
+ return (pCheck->anRef[iPage]++)>1;
+}
+
+/*
+** Check the integrity of the freelist or of an overflow page list.
+** Verify that the number of pages on the list is N.
+*/
+static void checkList(
+ IntegrityCk *pCheck, /* Integrity checking context */
+ int isFreeList, /* True for a freelist. False for overflow page list */
+ int iPage, /* Page number for first page in the list */
+ int N, /* Expected number of pages in the list */
+ char *zContext /* Context for error messages */
+){
+ int i;
+ char zMsg[100];
+ while( N-- > 0 ){
+ OverflowPage *pOvfl;
+ if( iPage<1 ){
+ sprintf(zMsg, "%d pages missing from overflow list", N+1);
+ checkAppendMsg(pCheck, zContext, zMsg);
+ break;
+ }
+ if( checkRef(pCheck, iPage, zContext) ) break;
+ if( sqlitepager_get(pCheck->pPager, (Pgno)iPage, (void**)&pOvfl) ){
+ sprintf(zMsg, "failed to get page %d", iPage);
+ checkAppendMsg(pCheck, zContext, zMsg);
+ break;
+ }
+ if( isFreeList ){
+ FreelistInfo *pInfo = (FreelistInfo*)pOvfl->aPayload;
+ int n = SWAB32(pCheck->pBt, pInfo->nFree);
+ for(i=0; i<n; i++){
+ checkRef(pCheck, SWAB32(pCheck->pBt, pInfo->aFree[i]), zContext);
+ }
+ N -= n;
+ }
+ iPage = SWAB32(pCheck->pBt, pOvfl->iNext);
+ sqlitepager_unref(pOvfl);
+ }
+}
+
+/*
+** Return negative if zKey1<zKey2.
+** Return zero if zKey1==zKey2.
+** Return positive if zKey1>zKey2.
+*/
+static int keyCompare(
+ const char *zKey1, int nKey1,
+ const char *zKey2, int nKey2
+){
+ int min = nKey1>nKey2 ? nKey2 : nKey1;
+ int c = memcmp(zKey1, zKey2, min);
+ if( c==0 ){
+ c = nKey1 - nKey2;
+ }
+ return c;
+}
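Under this rule a key that is a strict prefix of another key sorts first; for example (illustrative only):

  /* Illustrative only. */
  assert( keyCompare("abc", 3, "abcd", 4) < 0 );   /* strict prefix sorts first */
  assert( keyCompare("abd", 3, "abcd", 4) > 0 );   /* differs before the prefix ends */
  assert( keyCompare("abc", 3, "abc", 3) == 0 );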
+
+/*
+** Do various sanity checks on a single page of a tree. Return
+** the tree depth. Root pages return 0. Parents of root pages
+** return 1, and so forth.
+**
+** These checks are done:
+**
+** 1. Make sure that cells and freeblocks do not overlap
+** but combine to completely cover the page.
+** 2. Make sure cell keys are in order.
+** 3. Make sure no key is less than or equal to zLowerBound.
+** 4. Make sure no key is greater than or equal to zUpperBound.
+** 5. Check the integrity of overflow pages.
+** 6. Recursively call checkTreePage on all children.
+** 7. Verify that the depth of all children is the same.
+** 8. Make sure this page is at least 33% full or else it is
+** the root of the tree.
+*/
+static int checkTreePage(
+ IntegrityCk *pCheck, /* Context for the sanity check */
+ int iPage, /* Page number of the page to check */
+ MemPage *pParent, /* Parent page */
+ char *zParentContext, /* Parent context */
+ char *zLowerBound, /* All keys should be greater than this, if not NULL */
+ int nLower, /* Number of characters in zLowerBound */
+ char *zUpperBound, /* All keys should be less than this, if not NULL */
+ int nUpper /* Number of characters in zUpperBound */
+){
+ MemPage *pPage;
+ int i, rc, depth, d2, pgno;
+ char *zKey1, *zKey2;
+ int nKey1, nKey2;
+ BtCursor cur;
+ Btree *pBt;
+ char zMsg[100];
+ char zContext[100];
+ char hit[SQLITE_USABLE_SIZE];
+
+ /* Check that the page exists
+ */
+ cur.pBt = pBt = pCheck->pBt;
+ if( iPage==0 ) return 0;
+ if( checkRef(pCheck, iPage, zParentContext) ) return 0;
+ sprintf(zContext, "On tree page %d: ", iPage);
+ if( (rc = sqlitepager_get(pCheck->pPager, (Pgno)iPage, (void**)&pPage))!=0 ){
+ sprintf(zMsg, "unable to get the page. error code=%d", rc);
+ checkAppendMsg(pCheck, zContext, zMsg);
+ return 0;
+ }
+ if( (rc = initPage(pBt, pPage, (Pgno)iPage, pParent))!=0 ){
+ sprintf(zMsg, "initPage() returns error code %d", rc);
+ checkAppendMsg(pCheck, zContext, zMsg);
+ sqlitepager_unref(pPage);
+ return 0;
+ }
+
+ /* Check out all the cells.
+ */
+ depth = 0;
+ if( zLowerBound ){
+ zKey1 = sqliteMalloc( nLower+1 );
+ memcpy(zKey1, zLowerBound, nLower);
+ zKey1[nLower] = 0;
+ }else{
+ zKey1 = 0;
+ }
+ nKey1 = nLower;
+ cur.pPage = pPage;
+ for(i=0; i<pPage->nCell; i++){
+ Cell *pCell = pPage->apCell[i];
+ int sz;
+
+ /* Check payload overflow pages
+ */
+ nKey2 = NKEY(pBt, pCell->h);
+ sz = nKey2 + NDATA(pBt, pCell->h);
+ sprintf(zContext, "On page %d cell %d: ", iPage, i);
+ if( sz>MX_LOCAL_PAYLOAD ){
+ int nPage = (sz - MX_LOCAL_PAYLOAD + OVERFLOW_SIZE - 1)/OVERFLOW_SIZE;
+ checkList(pCheck, 0, SWAB32(pBt, pCell->ovfl), nPage, zContext);
+ }
+
+ /* Check that keys are in the right order
+ */
+ cur.idx = i;
+ zKey2 = sqliteMallocRaw( nKey2+1 );
+ getPayload(&cur, 0, nKey2, zKey2);
+ if( zKey1 && keyCompare(zKey1, nKey1, zKey2, nKey2)>=0 ){
+ checkAppendMsg(pCheck, zContext, "Key is out of order");
+ }
+
+ /* Check sanity of left child page.
+ */
+ pgno = SWAB32(pBt, pCell->h.leftChild);
+ d2 = checkTreePage(pCheck, pgno, pPage, zContext, zKey1,nKey1,zKey2,nKey2);
+ if( i>0 && d2!=depth ){
+ checkAppendMsg(pCheck, zContext, "Child page depth differs");
+ }
+ depth = d2;
+ sqliteFree(zKey1);
+ zKey1 = zKey2;
+ nKey1 = nKey2;
+ }
+ pgno = SWAB32(pBt, pPage->u.hdr.rightChild);
+ sprintf(zContext, "On page %d at right child: ", iPage);
+ checkTreePage(pCheck, pgno, pPage, zContext, zKey1,nKey1,zUpperBound,nUpper);
+ sqliteFree(zKey1);
+
+ /* Check for complete coverage of the page
+ */
+ memset(hit, 0, sizeof(hit));
+ memset(hit, 1, sizeof(PageHdr));
+ for(i=SWAB16(pBt, pPage->u.hdr.firstCell); i>0 && i<SQLITE_USABLE_SIZE; ){
+ Cell *pCell = (Cell*)&pPage->u.aDisk[i];
+ int j;
+ for(j=i+cellSize(pBt, pCell)-1; j>=i; j--) hit[j]++;
+ i = SWAB16(pBt, pCell->h.iNext);
+ }
+ for(i=SWAB16(pBt,pPage->u.hdr.firstFree); i>0 && i<SQLITE_USABLE_SIZE; ){
+ FreeBlk *pFBlk = (FreeBlk*)&pPage->u.aDisk[i];
+ int j;
+ for(j=i+SWAB16(pBt,pFBlk->iSize)-1; j>=i; j--) hit[j]++;
+ i = SWAB16(pBt,pFBlk->iNext);
+ }
+ for(i=0; i<SQLITE_USABLE_SIZE; i++){
+ if( hit[i]==0 ){
+ sprintf(zMsg, "Unused space at byte %d of page %d", i, iPage);
+ checkAppendMsg(pCheck, zMsg, 0);
+ break;
+ }else if( hit[i]>1 ){
+ sprintf(zMsg, "Multiple uses for byte %d of page %d", i, iPage);
+ checkAppendMsg(pCheck, zMsg, 0);
+ break;
+ }
+ }
+
+ /* Check that free space is kept to a minimum
+ */
+#if 0
+ if( pParent && pParent->nCell>2 && pPage->nFree>3*SQLITE_USABLE_SIZE/4 ){
+ sprintf(zMsg, "free space (%d) greater than max (%d)", pPage->nFree,
+ SQLITE_USABLE_SIZE/3);
+ checkAppendMsg(pCheck, zContext, zMsg);
+ }
+#endif
+
+ sqlitepager_unref(pPage);
+ return depth;
+}
+
+/*
+** This routine does a complete check of the given BTree file. aRoot[] is
+** an array of page numbers where each page number is the root page of
+** a table. nRoot is the number of entries in aRoot.
+**
+** If everything checks out, this routine returns NULL. If something is
+** amiss, an error message is written into memory obtained from malloc()
+** and a pointer to that error message is returned. The calling function
+** is responsible for freeing the error message when it is done.
+*/
+char *fileBtreeIntegrityCheck(Btree *pBt, int *aRoot, int nRoot){
+ int i;
+ int nRef;
+ IntegrityCk sCheck;
+
+ nRef = *sqlitepager_stats(pBt->pPager);
+ if( lockBtree(pBt)!=SQLITE_OK ){
+ return sqliteStrDup("Unable to acquire a read lock on the database");
+ }
+ sCheck.pBt = pBt;
+ sCheck.pPager = pBt->pPager;
+ sCheck.nPage = sqlitepager_pagecount(sCheck.pPager);
+ if( sCheck.nPage==0 ){
+ unlockBtreeIfUnused(pBt);
+ return 0;
+ }
+ sCheck.anRef = sqliteMallocRaw( (sCheck.nPage+1)*sizeof(sCheck.anRef[0]) );
+ sCheck.anRef[1] = 1;
+ for(i=2; i<=sCheck.nPage; i++){ sCheck.anRef[i] = 0; }
+ sCheck.zErrMsg = 0;
+
+ /* Check the integrity of the freelist
+ */
+ checkList(&sCheck, 1, SWAB32(pBt, pBt->page1->freeList),
+ SWAB32(pBt, pBt->page1->nFree), "Main freelist: ");
+
+ /* Check all the tables.
+ */
+ for(i=0; i<nRoot; i++){
+ if( aRoot[i]==0 ) continue;
+ checkTreePage(&sCheck, aRoot[i], 0, "List of tree roots: ", 0,0,0,0);
+ }
+
+ /* Make sure every page in the file is referenced
+ */
+ for(i=1; i<=sCheck.nPage; i++){
+ if( sCheck.anRef[i]==0 ){
+ char zBuf[100];
+ sprintf(zBuf, "Page %d is never used", i);
+ checkAppendMsg(&sCheck, zBuf, 0);
+ }
+ }
+
+ /* Make sure this analysis did not leave any unref() pages
+ */
+ unlockBtreeIfUnused(pBt);
+ if( nRef != *sqlitepager_stats(pBt->pPager) ){
+ char zBuf[100];
+ sprintf(zBuf,
+ "Outstanding page count goes from %d to %d during this analysis",
+ nRef, *sqlitepager_stats(pBt->pPager)
+ );
+ checkAppendMsg(&sCheck, zBuf, 0);
+ }
+
+ /* Clean up and report errors.
+ */
+ sqliteFree(sCheck.anRef);
+ return sCheck.zErrMsg;
+}
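A hypothetical invocation (illustrative only): the caller frees the returned message, and page 2 is used here because it holds the principal table.

static void example_check(Btree *pBt){
  /* Illustrative sketch, not part of the original source. */
  int aRoot[1] = { 2 };               /* root page of the principal table */
  char *zErr = fileBtreeIntegrityCheck(pBt, aRoot, 1);
  if( zErr ){
    printf("integrity check failed:\n%s\n", zErr);
    sqliteFree(zErr);                 /* message was obtained from sqliteMalloc() */
  }
}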
+
+/*
+** Return the full pathname of the underlying database file.
+*/
+static const char *fileBtreeGetFilename(Btree *pBt){
+ assert( pBt->pPager!=0 );
+ return sqlitepager_filename(pBt->pPager);
+}
+
+/*
+** Copy the complete content of pBtFrom into pBtTo. A transaction
+** must be active for both files.
+**
+** The size of file pBtFrom may be reduced by this operation.
+** If anything goes wrong, the transaction on pBtFrom is rolled back.
+*/
+static int fileBtreeCopyFile(Btree *pBtTo, Btree *pBtFrom){
+ int rc = SQLITE_OK;
+ Pgno i, nPage, nToPage;
+
+ if( !pBtTo->inTrans || !pBtFrom->inTrans ) return SQLITE_ERROR;
+ if( pBtTo->needSwab!=pBtFrom->needSwab ) return SQLITE_ERROR;
+ if( pBtTo->pCursor ) return SQLITE_BUSY;
+ memcpy(pBtTo->page1, pBtFrom->page1, SQLITE_USABLE_SIZE);
+ rc = sqlitepager_overwrite(pBtTo->pPager, 1, pBtFrom->page1);
+ nToPage = sqlitepager_pagecount(pBtTo->pPager);
+ nPage = sqlitepager_pagecount(pBtFrom->pPager);
+ for(i=2; rc==SQLITE_OK && i<=nPage; i++){
+ void *pPage;
+ rc = sqlitepager_get(pBtFrom->pPager, i, &pPage);
+ if( rc ) break;
+ rc = sqlitepager_overwrite(pBtTo->pPager, i, pPage);
+ if( rc ) break;
+ sqlitepager_unref(pPage);
+ }
+ for(i=nPage+1; rc==SQLITE_OK && i<=nToPage; i++){
+ void *pPage;
+ rc = sqlitepager_get(pBtTo->pPager, i, &pPage);
+ if( rc ) break;
+ rc = sqlitepager_write(pPage);
+ sqlitepager_unref(pPage);
+ sqlitepager_dont_write(pBtTo->pPager, i);
+ }
+ if( !rc && nPage<nToPage ){
+ rc = sqlitepager_truncate(pBtTo->pPager, nPage);
+ }
+ if( rc ){
+ fileBtreeRollback(pBtTo);
+ }
+ return rc;
+}
+
+/*
+** The following tables contain pointers to all of the interface
+** routines for this implementation of the B*Tree backend. To
+** substitute a different implementation of the backend, one has merely
+** to provide pointers to alternative functions in similar tables.
+*/
+static BtOps sqliteBtreeOps = {
+ fileBtreeClose,
+ fileBtreeSetCacheSize,
+ fileBtreeSetSafetyLevel,
+ fileBtreeBeginTrans,
+ fileBtreeCommit,
+ fileBtreeRollback,
+ fileBtreeBeginCkpt,
+ fileBtreeCommitCkpt,
+ fileBtreeRollbackCkpt,
+ fileBtreeCreateTable,
+ fileBtreeCreateTable, /* Really sqliteBtreeCreateIndex() */
+ fileBtreeDropTable,
+ fileBtreeClearTable,
+ fileBtreeCursor,
+ fileBtreeGetMeta,
+ fileBtreeUpdateMeta,
+ fileBtreeIntegrityCheck,
+ fileBtreeGetFilename,
+ fileBtreeCopyFile,
+ fileBtreePager,
+#ifdef SQLITE_TEST
+ fileBtreePageDump,
+#endif
+};
+static BtCursorOps sqliteBtreeCursorOps = {
+ fileBtreeMoveto,
+ fileBtreeDelete,
+ fileBtreeInsert,
+ fileBtreeFirst,
+ fileBtreeLast,
+ fileBtreeNext,
+ fileBtreePrevious,
+ fileBtreeKeySize,
+ fileBtreeKey,
+ fileBtreeKeyCompare,
+ fileBtreeDataSize,
+ fileBtreeData,
+ fileBtreeCloseCursor,
+#ifdef SQLITE_TEST
+ fileBtreeCursorDump,
+#endif
+};
diff --git a/usr/src/cmd/svc/configd/sqlite/src/btree.h b/usr/src/cmd/svc/configd/sqlite/src/btree.h
new file mode 100644
index 0000000000..cfb5efc31d
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/btree.h
@@ -0,0 +1,159 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This header file defines the interface to the sqlite B-Tree file
+** subsystem. See comments in the source code for a detailed description
+** of what each interface routine does.
+**
+** @(#) $Id: btree.h,v 1.36 2004/02/10 02:57:59 drh Exp $
+*/
+#ifndef _BTREE_H_
+#define _BTREE_H_
+
+/*
+** Forward declarations of structures
+*/
+typedef struct Btree Btree;
+typedef struct BtCursor BtCursor;
+typedef struct BtOps BtOps;
+typedef struct BtCursorOps BtCursorOps;
+
+
+/*
+** An instance of the following structure contains pointers to all
+** methods against an open BTree. Alternative BTree implementations
+** (examples: file based versus in-memory) can be created by substituting
+** different methods. Users of the BTree cannot tell the difference.
+**
+** In C++ we could do this by defining a virtual base class and then
+** creating subclasses for each different implementation. But this is
+** C not C++ so we have to be a little more explicit.
+*/
+struct BtOps {
+ int (*Close)(Btree*);
+ int (*SetCacheSize)(Btree*, int);
+ int (*SetSafetyLevel)(Btree*, int);
+ int (*BeginTrans)(Btree*);
+ int (*Commit)(Btree*);
+ int (*Rollback)(Btree*);
+ int (*BeginCkpt)(Btree*);
+ int (*CommitCkpt)(Btree*);
+ int (*RollbackCkpt)(Btree*);
+ int (*CreateTable)(Btree*, int*);
+ int (*CreateIndex)(Btree*, int*);
+ int (*DropTable)(Btree*, int);
+ int (*ClearTable)(Btree*, int);
+ int (*Cursor)(Btree*, int iTable, int wrFlag, BtCursor **ppCur);
+ int (*GetMeta)(Btree*, int*);
+ int (*UpdateMeta)(Btree*, int*);
+ char *(*IntegrityCheck)(Btree*, int*, int);
+ const char *(*GetFilename)(Btree*);
+ int (*Copyfile)(Btree*,Btree*);
+ struct Pager *(*Pager)(Btree*);
+#ifdef SQLITE_TEST
+ int (*PageDump)(Btree*, int, int);
+#endif
+};
+
+/*
+** An instance of this structure defines all of the methods that can
+** be executed against a cursor.
+*/
+struct BtCursorOps {
+ int (*Moveto)(BtCursor*, const void *pKey, int nKey, int *pRes);
+ int (*Delete)(BtCursor*);
+ int (*Insert)(BtCursor*, const void *pKey, int nKey,
+ const void *pData, int nData);
+ int (*First)(BtCursor*, int *pRes);
+ int (*Last)(BtCursor*, int *pRes);
+ int (*Next)(BtCursor*, int *pRes);
+ int (*Previous)(BtCursor*, int *pRes);
+ int (*KeySize)(BtCursor*, int *pSize);
+ int (*Key)(BtCursor*, int offset, int amt, char *zBuf);
+ int (*KeyCompare)(BtCursor*, const void *pKey, int nKey,
+ int nIgnore, int *pRes);
+ int (*DataSize)(BtCursor*, int *pSize);
+ int (*Data)(BtCursor*, int offset, int amt, char *zBuf);
+ int (*CloseCursor)(BtCursor*);
+#ifdef SQLITE_TEST
+ int (*CursorDump)(BtCursor*, int*);
+#endif
+};
+
+/*
+** The number of 4-byte "meta" values contained on the first page of each
+** database file.
+*/
+#define SQLITE_N_BTREE_META 10
+
+int sqliteBtreeOpen(const char *zFilename, int mode, int nPg, Btree **ppBtree);
+int sqliteRbtreeOpen(const char *zFilename, int mode, int nPg, Btree **ppBtree);
+
+#define btOps(pBt) (*((BtOps **)(pBt)))
+#define btCOps(pCur) (*((BtCursorOps **)(pCur)))
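These two macros implement the dispatch described above: every backend stores its BtOps (or BtCursorOps) pointer as the first member of its otherwise opaque structure (as Rbtree and RbtCursor do in btree_rb.c below), so an opaque Btree* or BtCursor* can be reinterpreted as a pointer to that method table. A minimal sketch with a hypothetical backend struct:

/* Illustrative sketch, not part of the original source. */
struct MyBtree {               /* hypothetical backend implementation */
  BtOps *pOps;                 /* must be the first member for btOps() to work */
  int myPrivateState;          /* ...implementation-specific fields... */
};
/* For a MyBtree *p cast to Btree*, btOps((Btree*)p) yields p->pOps, so a
** wrapper macro below such as sqliteBtreeCommit((Btree*)p) expands to
** btOps((Btree*)p)->Commit((Btree*)p), i.e. p->pOps->Commit(...). */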
+
+#define sqliteBtreeClose(pBt) (btOps(pBt)->Close(pBt))
+#define sqliteBtreeSetCacheSize(pBt, sz) (btOps(pBt)->SetCacheSize(pBt, sz))
+#define sqliteBtreeSetSafetyLevel(pBt, sl) (btOps(pBt)->SetSafetyLevel(pBt, sl))
+#define sqliteBtreeBeginTrans(pBt) (btOps(pBt)->BeginTrans(pBt))
+#define sqliteBtreeCommit(pBt) (btOps(pBt)->Commit(pBt))
+#define sqliteBtreeRollback(pBt) (btOps(pBt)->Rollback(pBt))
+#define sqliteBtreeBeginCkpt(pBt) (btOps(pBt)->BeginCkpt(pBt))
+#define sqliteBtreeCommitCkpt(pBt) (btOps(pBt)->CommitCkpt(pBt))
+#define sqliteBtreeRollbackCkpt(pBt) (btOps(pBt)->RollbackCkpt(pBt))
+#define sqliteBtreeCreateTable(pBt,piTable)\
+ (btOps(pBt)->CreateTable(pBt,piTable))
+#define sqliteBtreeCreateIndex(pBt, piIndex)\
+ (btOps(pBt)->CreateIndex(pBt, piIndex))
+#define sqliteBtreeDropTable(pBt, iTable) (btOps(pBt)->DropTable(pBt, iTable))
+#define sqliteBtreeClearTable(pBt, iTable)\
+ (btOps(pBt)->ClearTable(pBt, iTable))
+#define sqliteBtreeCursor(pBt, iTable, wrFlag, ppCur)\
+ (btOps(pBt)->Cursor(pBt, iTable, wrFlag, ppCur))
+#define sqliteBtreeMoveto(pCur, pKey, nKey, pRes)\
+ (btCOps(pCur)->Moveto(pCur, pKey, nKey, pRes))
+#define sqliteBtreeDelete(pCur) (btCOps(pCur)->Delete(pCur))
+#define sqliteBtreeInsert(pCur, pKey, nKey, pData, nData) \
+ (btCOps(pCur)->Insert(pCur, pKey, nKey, pData, nData))
+#define sqliteBtreeFirst(pCur, pRes) (btCOps(pCur)->First(pCur, pRes))
+#define sqliteBtreeLast(pCur, pRes) (btCOps(pCur)->Last(pCur, pRes))
+#define sqliteBtreeNext(pCur, pRes) (btCOps(pCur)->Next(pCur, pRes))
+#define sqliteBtreePrevious(pCur, pRes) (btCOps(pCur)->Previous(pCur, pRes))
+#define sqliteBtreeKeySize(pCur, pSize) (btCOps(pCur)->KeySize(pCur, pSize) )
+#define sqliteBtreeKey(pCur, offset, amt, zBuf)\
+ (btCOps(pCur)->Key(pCur, offset, amt, zBuf))
+#define sqliteBtreeKeyCompare(pCur, pKey, nKey, nIgnore, pRes)\
+ (btCOps(pCur)->KeyCompare(pCur, pKey, nKey, nIgnore, pRes))
+#define sqliteBtreeDataSize(pCur, pSize) (btCOps(pCur)->DataSize(pCur, pSize))
+#define sqliteBtreeData(pCur, offset, amt, zBuf)\
+ (btCOps(pCur)->Data(pCur, offset, amt, zBuf))
+#define sqliteBtreeCloseCursor(pCur) (btCOps(pCur)->CloseCursor(pCur))
+#define sqliteBtreeGetMeta(pBt, aMeta) (btOps(pBt)->GetMeta(pBt, aMeta))
+#define sqliteBtreeUpdateMeta(pBt, aMeta) (btOps(pBt)->UpdateMeta(pBt, aMeta))
+#define sqliteBtreeIntegrityCheck(pBt, aRoot, nRoot)\
+ (btOps(pBt)->IntegrityCheck(pBt, aRoot, nRoot))
+#define sqliteBtreeGetFilename(pBt) (btOps(pBt)->GetFilename(pBt))
+#define sqliteBtreeCopyFile(pBt1, pBt2) (btOps(pBt1)->Copyfile(pBt1, pBt2))
+#define sqliteBtreePager(pBt) (btOps(pBt)->Pager(pBt))
+
+#ifdef SQLITE_TEST
+#define sqliteBtreePageDump(pBt, pgno, recursive)\
+ (btOps(pBt)->PageDump(pBt, pgno, recursive))
+#define sqliteBtreeCursorDump(pCur, aResult)\
+ (btCOps(pCur)->CursorDump(pCur, aResult))
+int btree_native_byte_order;
+#endif /* SQLITE_TEST */
+
+
+#endif /* _BTREE_H_ */
diff --git a/usr/src/cmd/svc/configd/sqlite/src/btree_rb.c b/usr/src/cmd/svc/configd/sqlite/src/btree_rb.c
new file mode 100644
index 0000000000..71b1334e2c
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/btree_rb.c
@@ -0,0 +1,1491 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2003 Feb 4
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** $Id: btree_rb.c,v 1.24.2.1 2004/06/26 14:40:05 drh Exp $
+**
+** This file implements an in-core database using Red-Black balanced
+** binary trees.
+**
+** It was contributed to SQLite by anonymous on 2003-Feb-04 23:24:49 UTC.
+*/
+#include "btree.h"
+#include "sqliteInt.h"
+#include <assert.h>
+
+/*
+** Omit this whole file if the SQLITE_OMIT_INMEMORYDB macro is
+** defined. This allows a lot of code to be omitted for installations
+** that do not need it.
+*/
+#ifndef SQLITE_OMIT_INMEMORYDB
+
+
+typedef struct BtRbTree BtRbTree;
+typedef struct BtRbNode BtRbNode;
+typedef struct BtRollbackOp BtRollbackOp;
+typedef struct Rbtree Rbtree;
+typedef struct RbtCursor RbtCursor;
+
+/* Forward declarations */
+static BtOps sqliteRbtreeOps;
+static BtCursorOps sqliteRbtreeCursorOps;
+
+/*
+ * During each transaction (or checkpoint), a linked-list of
+ * "rollback-operations" is accumulated. If the transaction is rolled back,
+ * then the list of operations must be executed (to restore the database to
+ * its state before the transaction started). If the transaction is to be
+ * committed, just delete the list.
+ *
+ * Each operation is represented as follows, depending on the value of eOp:
+ *
+ * ROLLBACK_INSERT -> Need to insert (pKey, pData) into table iTab.
+ *   ROLLBACK_DELETE -> Need to delete the record (pKey) from table iTab.
+ * ROLLBACK_CREATE -> Need to create table iTab.
+ * ROLLBACK_DROP -> Need to drop table iTab.
+ */
+struct BtRollbackOp {
+ u8 eOp;
+ int iTab;
+ int nKey;
+ void *pKey;
+ int nData;
+ void *pData;
+ BtRollbackOp *pNext;
+};
+
+/*
+** Legal values for BtRollbackOp.eOp:
+*/
+#define ROLLBACK_INSERT 1 /* Insert a record */
+#define ROLLBACK_DELETE 2 /* Delete a record */
+#define ROLLBACK_CREATE 3 /* Create a table */
+#define ROLLBACK_DROP 4 /* Drop a table */
+
+struct Rbtree {
+ BtOps *pOps; /* Function table */
+ int aMetaData[SQLITE_N_BTREE_META];
+
+ int next_idx; /* next available table index */
+ Hash tblHash; /* All created tables, by index */
+ u8 isAnonymous; /* True if this Rbtree is to be deleted when closed */
+ u8 eTransState; /* State of this Rbtree wrt transactions */
+
+ BtRollbackOp *pTransRollback;
+ BtRollbackOp *pCheckRollback;
+ BtRollbackOp *pCheckRollbackTail;
+};
+
+/*
+** Legal values for Rbtree.eTransState.
+*/
+#define TRANS_NONE 0 /* No transaction is in progress */
+#define TRANS_INTRANSACTION 1 /* A transaction is in progress */
+#define TRANS_INCHECKPOINT 2 /* A checkpoint is in progress */
+#define TRANS_ROLLBACK 3 /* We are currently rolling back a checkpoint or
+ * transaction. */
+
+struct RbtCursor {
+ BtCursorOps *pOps; /* Function table */
+ Rbtree *pRbtree;
+ BtRbTree *pTree;
+ int iTree; /* Index of pTree in pRbtree */
+ BtRbNode *pNode;
+ RbtCursor *pShared; /* List of all cursors on the same Rbtree */
+ u8 eSkip; /* Determines if next step operation is a no-op */
+ u8 wrFlag; /* True if this cursor is open for writing */
+};
+
+/*
+** Legal values for RbtCursor.eSkip.
+*/
+#define SKIP_NONE 0 /* Always step the cursor */
+#define SKIP_NEXT 1 /* The next sqliteRbtreeNext() is a no-op */
+#define SKIP_PREV 2 /* The next sqliteRbtreePrevious() is a no-op */
+#define SKIP_INVALID 3 /* Calls to Next() and Previous() are invalid */
+
+struct BtRbTree {
+ RbtCursor *pCursors; /* All cursors pointing to this tree */
+ BtRbNode *pHead; /* Head of the tree, or NULL */
+};
+
+struct BtRbNode {
+ int nKey;
+ void *pKey;
+ int nData;
+ void *pData;
+ u8 isBlack; /* true for a black node, 0 for a red node */
+  BtRbNode *pParent;       /* Node's parent node, NULL for the tree head */
+  BtRbNode *pLeft;         /* Node's left child, or NULL */
+  BtRbNode *pRight;        /* Node's right child, or NULL */
+
+ int nBlackHeight; /* Only used during the red-black integrity check */
+};
+
+/* Forward declarations */
+static int memRbtreeMoveto(
+ RbtCursor* pCur,
+ const void *pKey,
+ int nKey,
+ int *pRes
+);
+static int memRbtreeClearTable(Rbtree* tree, int n);
+static int memRbtreeNext(RbtCursor* pCur, int *pRes);
+static int memRbtreeLast(RbtCursor* pCur, int *pRes);
+static int memRbtreePrevious(RbtCursor* pCur, int *pRes);
+
+
+/*
+** This routine checks all cursors that point to the same table
+** as pCur points to. If any of those cursors were opened with
+** wrFlag==0 then this routine returns SQLITE_LOCKED. If all
+** cursors that point to the same table were opened with wrFlag==1
+** then this routine returns SQLITE_OK.
+**
+** In addition to checking for read-locks (where a read-lock
+** means a cursor opened with wrFlag==0) this routine also NULLs
+** out the pNode field of all other cursors.
+** This is necessary because an insert
+** or delete might erase the node out from under
+** another cursor.
+*/
+static int checkReadLocks(RbtCursor *pCur){
+ RbtCursor *p;
+ assert( pCur->wrFlag );
+ for(p=pCur->pTree->pCursors; p; p=p->pShared){
+ if( p!=pCur ){
+ if( p->wrFlag==0 ) return SQLITE_LOCKED;
+ p->pNode = 0;
+ }
+ }
+ return SQLITE_OK;
+}
+
+/*
+ * The key-compare function for the red-black trees. Returns as follows:
+ *
+ * (key1 < key2) -1
+ * (key1 == key2) 0
+ * (key1 > key2) 1
+ *
+ * Keys are compared using memcmp(). If one key is an exact prefix of the
+ * other, then the shorter key is less than the longer key.
+ */
+static int key_compare(void const*pKey1, int nKey1, void const*pKey2, int nKey2)
+{
+ int mcmp = memcmp(pKey1, pKey2, (nKey1 <= nKey2)?nKey1:nKey2);
+ if( mcmp == 0){
+ if( nKey1 == nKey2 ) return 0;
+ return ((nKey1 < nKey2)?-1:1);
+ }
+ return ((mcmp>0)?1:-1);
+}
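Unlike keyCompare() in btree.c, which may return any negative or positive value, this version is normalized to exactly -1, 0, or +1; for example (illustrative only):

  /* Illustrative only. */
  assert( key_compare("abc", 3, "abcd", 4) == -1 );  /* strict prefix sorts first */
  assert( key_compare("b",   1, "a",    1) ==  1 );
  assert( key_compare("abc", 3, "abc",  3) ==  0 );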
+
+/*
+ * Perform the LEFT-rotate transformation on node X of tree pTree. This
+ * transform is part of the red-black balancing code.
+ *
+ * | |
+ * X Y
+ * / \ / \
+ * a Y X c
+ * / \ / \
+ * b c a b
+ *
+ * BEFORE AFTER
+ */
+static void leftRotate(BtRbTree *pTree, BtRbNode *pX)
+{
+ BtRbNode *pY;
+ BtRbNode *pb;
+ pY = pX->pRight;
+ pb = pY->pLeft;
+
+ pY->pParent = pX->pParent;
+ if( pX->pParent ){
+ if( pX->pParent->pLeft == pX ) pX->pParent->pLeft = pY;
+ else pX->pParent->pRight = pY;
+ }
+ pY->pLeft = pX;
+ pX->pParent = pY;
+ pX->pRight = pb;
+ if( pb ) pb->pParent = pX;
+ if( pTree->pHead == pX ) pTree->pHead = pY;
+}
+
+/*
+ * Perform the RIGHT-rotate transformation on node X of tree pTree. This
+ * transform is part of the red-black balancing code.
+ *
+ * | |
+ * X Y
+ * / \ / \
+ * Y c a X
+ * / \ / \
+ * a b b c
+ *
+ * BEFORE AFTER
+ */
+static void rightRotate(BtRbTree *pTree, BtRbNode *pX)
+{
+ BtRbNode *pY;
+ BtRbNode *pb;
+ pY = pX->pLeft;
+ pb = pY->pRight;
+
+ pY->pParent = pX->pParent;
+ if( pX->pParent ){
+ if( pX->pParent->pLeft == pX ) pX->pParent->pLeft = pY;
+ else pX->pParent->pRight = pY;
+ }
+ pY->pRight = pX;
+ pX->pParent = pY;
+ pX->pLeft = pb;
+ if( pb ) pb->pParent = pX;
+ if( pTree->pHead == pX ) pTree->pHead = pY;
+}
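The two rotations are inverses of each other. A small sketch (illustrative only, not part of the original source) builds the three-node shape from the diagrams above and checks that a left rotation followed by a right rotation restores it:

static void example_rotate(void){
  /* Illustrative sketch, not part of the original source. */
  BtRbTree tree = {0, 0};
  BtRbNode a = {0}, b = {0}, c = {0}, x = {0}, y = {0};
  x.pLeft = &a;  x.pRight = &y;  a.pParent = &x;  y.pParent = &x;
  y.pLeft = &b;  y.pRight = &c;  b.pParent = &y;  c.pParent = &y;
  tree.pHead = &x;
  leftRotate(&tree, &x);
  assert( tree.pHead==&y && y.pLeft==&x && x.pRight==&b );
  rightRotate(&tree, &y);
  assert( tree.pHead==&x && x.pRight==&y && y.pLeft==&b );
}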
+
+/*
+ * A string-manipulation helper function for check_redblack_tree(). If (orig ==
+ * NULL) a copy of val is returned. If (orig != NULL) then a copy of the
+ * concatenation of orig and val is returned. The original orig is deleted
+ * (using sqliteFree()).
+ */
+static char *append_val(char * orig, char const * val){
+ char *z;
+ if( !orig ){
+ z = sqliteStrDup( val );
+ } else{
+ z = 0;
+ sqliteSetString(&z, orig, val, (char*)0);
+ sqliteFree( orig );
+ }
+ return z;
+}
+
+/*
+ * Append a string representation of the entire node to orig and return it.
+ * This is used to produce debugging information if check_redblack_tree() finds
+ * a problem with a red-black binary tree.
+ */
+static char *append_node(char * orig, BtRbNode *pNode, int indent)
+{
+ char buf[128];
+ int i;
+
+ for( i=0; i<indent; i++ ){
+ orig = append_val(orig, " ");
+ }
+
+ sprintf(buf, "%p", pNode);
+ orig = append_val(orig, buf);
+
+ if( pNode ){
+ indent += 3;
+ if( pNode->isBlack ){
+ orig = append_val(orig, " B \n");
+ }else{
+ orig = append_val(orig, " R \n");
+ }
+ orig = append_node( orig, pNode->pLeft, indent );
+ orig = append_node( orig, pNode->pRight, indent );
+ }else{
+ orig = append_val(orig, "\n");
+ }
+ return orig;
+}
+
+/*
+ * Print a representation of a node to stdout. This function is only included
+ * so you can call it from within a debugger if things get really bad. It
+ * is not called from anyplace in the code.
+ */
+static void print_node(BtRbNode *pNode)
+{
+ char * str = append_node(0, pNode, 0);
+ printf("%s", str);
+
+ /* Suppress a warning message about print_node() being unused */
+ (void)print_node;
+}
+
+/*
+ * Check the following properties of the red-black tree:
+ * (1) - If a node is red, both of its children are black
+ * (2) - Each path from a given node to a leaf (NULL) node passes through the
+ * same number of black nodes
+ *
+ * If there is a problem, append a description (using append_val() ) to *msg.
+ */
+static void check_redblack_tree(BtRbTree * tree, char ** msg)
+{
+ BtRbNode *pNode;
+
+ /* 0 -> came from parent
+ * 1 -> came from left
+ * 2 -> came from right */
+ int prev_step = 0;
+
+ pNode = tree->pHead;
+ while( pNode ){
+ switch( prev_step ){
+ case 0:
+ if( pNode->pLeft ){
+ pNode = pNode->pLeft;
+ }else{
+ prev_step = 1;
+ }
+ break;
+ case 1:
+ if( pNode->pRight ){
+ pNode = pNode->pRight;
+ prev_step = 0;
+ }else{
+ prev_step = 2;
+ }
+ break;
+ case 2:
+ /* Check red-black property (1) */
+ if( !pNode->isBlack &&
+ ( (pNode->pLeft && !pNode->pLeft->isBlack) ||
+ (pNode->pRight && !pNode->pRight->isBlack) )
+ ){
+ char buf[128];
+ sprintf(buf, "Red node with red child at %p\n", pNode);
+ *msg = append_val(*msg, buf);
+ *msg = append_node(*msg, tree->pHead, 0);
+ *msg = append_val(*msg, "\n");
+ }
+
+ /* Check red-black property (2) */
+ {
+ int leftHeight = 0;
+ int rightHeight = 0;
+ if( pNode->pLeft ){
+ leftHeight += pNode->pLeft->nBlackHeight;
+ leftHeight += (pNode->pLeft->isBlack?1:0);
+ }
+ if( pNode->pRight ){
+ rightHeight += pNode->pRight->nBlackHeight;
+ rightHeight += (pNode->pRight->isBlack?1:0);
+ }
+ if( leftHeight != rightHeight ){
+ char buf[128];
+ sprintf(buf, "Different black-heights at %p\n", pNode);
+ *msg = append_val(*msg, buf);
+ *msg = append_node(*msg, tree->pHead, 0);
+ *msg = append_val(*msg, "\n");
+ }
+ pNode->nBlackHeight = leftHeight;
+ }
+
+ if( pNode->pParent ){
+ if( pNode == pNode->pParent->pLeft ) prev_step = 1;
+ else prev_step = 2;
+ }
+ pNode = pNode->pParent;
+ break;
+ default: assert(0);
+ }
+ }
+}
+
+/*
+ * Node pX has just been inserted into pTree (by code in sqliteRbtreeInsert()).
+ * It is possible that pX is a red node with a red parent, which is a violation
+ * of the red-black tree properties. This function performs rotations and
+ * color changes to rebalance the tree
+ */
+static void do_insert_balancing(BtRbTree *pTree, BtRbNode *pX)
+{
+ /* In the first iteration of this loop, pX points to the red node just
+ * inserted in the tree. If the parent of pX exists (pX is not the root
+ * node) and is red, then the properties of the red-black tree are
+ * violated.
+ *
+ * At the start of any subsequent iterations, pX points to a red node
+ * with a red parent. In all other respects the tree is a legal red-black
+ * binary tree. */
+ while( pX != pTree->pHead && !pX->pParent->isBlack ){
+ BtRbNode *pUncle;
+ BtRbNode *pGrandparent;
+
+ /* Grandparent of pX must exist and must be black. */
+ pGrandparent = pX->pParent->pParent;
+ assert( pGrandparent );
+ assert( pGrandparent->isBlack );
+
+ /* Uncle of pX may or may not exist. */
+ if( pX->pParent == pGrandparent->pLeft )
+ pUncle = pGrandparent->pRight;
+ else
+ pUncle = pGrandparent->pLeft;
+
+ /* If the uncle of pX exists and is red, we do the following:
+ * | |
+ * G(b) G(r)
+ * / \ / \
+ * U(r) P(r) U(b) P(b)
+ * \ \
+ * X(r) X(r)
+ *
+ * BEFORE AFTER
+ * pX is then set to G. If the parent of G is red, then the while loop
+ * will run again. */
+ if( pUncle && !pUncle->isBlack ){
+ pGrandparent->isBlack = 0;
+ pUncle->isBlack = 1;
+ pX->pParent->isBlack = 1;
+ pX = pGrandparent;
+ }else{
+
+ if( pX->pParent == pGrandparent->pLeft ){
+ if( pX == pX->pParent->pRight ){
+ /* If pX is a right-child, do the following transform, essentially
+ * to change pX into a left-child:
+ * | |
+ * G(b) G(b)
+ * / \ / \
+ * P(r) U(b) X(r) U(b)
+ * \ /
+ * X(r) P(r) <-- new X
+ *
+ * BEFORE AFTER
+ */
+ pX = pX->pParent;
+ leftRotate(pTree, pX);
+ }
+
+ /* Do the following transform, which balances the tree :)
+ * | |
+ * G(b) P(b)
+ * / \ / \
+ * P(r) U(b) X(r) G(r)
+ * / \
+ * X(r) U(b)
+ *
+ * BEFORE AFTER
+ */
+ assert( pGrandparent == pX->pParent->pParent );
+ pGrandparent->isBlack = 0;
+ pX->pParent->isBlack = 1;
+ rightRotate( pTree, pGrandparent );
+
+ }else{
+ /* This code is symmetric to the illustrated case above. */
+ if( pX == pX->pParent->pLeft ){
+ pX = pX->pParent;
+ rightRotate(pTree, pX);
+ }
+ assert( pGrandparent == pX->pParent->pParent );
+ pGrandparent->isBlack = 0;
+ pX->pParent->isBlack = 1;
+ leftRotate( pTree, pGrandparent );
+ }
+ }
+ }
+ pTree->pHead->isBlack = 1;
+}
+
+/*
+ * A child of pParent, which in turn had child pX, has just been removed from
+ * pTree (the figure below depicts the operation, Z is being removed). pParent
+ * or pX, or both may be NULL.
+ * | |
+ * P P
+ * / \ / \
+ * Z X
+ * / \
+ * X nil
+ *
+ * This function is only called if Z was black. In this case the red-black tree
+ * properties have been violated, and pX has an "extra black". This function
+ * performs rotations and color-changes to re-balance the tree.
+ */
+static
+void do_delete_balancing(BtRbTree *pTree, BtRbNode *pX, BtRbNode *pParent)
+{
+ BtRbNode *pSib;
+
+ /* pX carries an "extra black" that must be disposed of before the loop
+  * exits. On each iteration, examine the sibling pSib of pX:
+  *
+  *   - If pSib is red, recolor and rotate about pParent so that the new
+  *     sibling is black, then continue with that new sibling.
+  *   - If pSib and both of its children are black, paint pSib red and move
+  *     the extra black up to pParent.
+  *   - Otherwise, rotate and recolor so that the extra black is absorbed,
+  *     and terminate the loop by setting pX to the tree root.
+  *
+  * The else-branch below is the mirror image of the if-branch. */
+ while( pX != pTree->pHead && (!pX || pX->isBlack) ){
+ if( pX == pParent->pLeft ){
+ pSib = pParent->pRight;
+ if( pSib && !(pSib->isBlack) ){
+ pSib->isBlack = 1;
+ pParent->isBlack = 0;
+ leftRotate(pTree, pParent);
+ pSib = pParent->pRight;
+ }
+ if( !pSib ){
+ pX = pParent;
+ }else if(
+ (!pSib->pLeft || pSib->pLeft->isBlack) &&
+ (!pSib->pRight || pSib->pRight->isBlack) ) {
+ pSib->isBlack = 0;
+ pX = pParent;
+ }else{
+ if( (!pSib->pRight || pSib->pRight->isBlack) ){
+ if( pSib->pLeft ) pSib->pLeft->isBlack = 1;
+ pSib->isBlack = 0;
+ rightRotate( pTree, pSib );
+ pSib = pParent->pRight;
+ }
+ pSib->isBlack = pParent->isBlack;
+ pParent->isBlack = 1;
+ if( pSib->pRight ) pSib->pRight->isBlack = 1;
+ leftRotate(pTree, pParent);
+ pX = pTree->pHead;
+ }
+ }else{
+ pSib = pParent->pLeft;
+ if( pSib && !(pSib->isBlack) ){
+ pSib->isBlack = 1;
+ pParent->isBlack = 0;
+ rightRotate(pTree, pParent);
+ pSib = pParent->pLeft;
+ }
+ if( !pSib ){
+ pX = pParent;
+ }else if(
+ (!pSib->pLeft || pSib->pLeft->isBlack) &&
+ (!pSib->pRight || pSib->pRight->isBlack) ){
+ pSib->isBlack = 0;
+ pX = pParent;
+ }else{
+ if( (!pSib->pLeft || pSib->pLeft->isBlack) ){
+ if( pSib->pRight ) pSib->pRight->isBlack = 1;
+ pSib->isBlack = 0;
+ leftRotate( pTree, pSib );
+ pSib = pParent->pLeft;
+ }
+ pSib->isBlack = pParent->isBlack;
+ pParent->isBlack = 1;
+ if( pSib->pLeft ) pSib->pLeft->isBlack = 1;
+ rightRotate(pTree, pParent);
+ pX = pTree->pHead;
+ }
+ }
+ pParent = pX->pParent;
+ }
+ if( pX ) pX->isBlack = 1;
+}
+
+/*
+ * Create table n in tree pRbtree. Table n must not exist.
+ */
+static void btreeCreateTable(Rbtree* pRbtree, int n)
+{
+ BtRbTree *pNewTbl = sqliteMalloc(sizeof(BtRbTree));
+ sqliteHashInsert(&pRbtree->tblHash, 0, n, pNewTbl);
+}
+
+/*
+ * Log a single "rollback-op" for the given Rbtree. See comments for struct
+ * BtRollbackOp.
+ */
+static void btreeLogRollbackOp(Rbtree* pRbtree, BtRollbackOp *pRollbackOp)
+{
+ assert( pRbtree->eTransState == TRANS_INCHECKPOINT ||
+ pRbtree->eTransState == TRANS_INTRANSACTION );
+ if( pRbtree->eTransState == TRANS_INTRANSACTION ){
+ pRollbackOp->pNext = pRbtree->pTransRollback;
+ pRbtree->pTransRollback = pRollbackOp;
+ }
+ if( pRbtree->eTransState == TRANS_INCHECKPOINT ){
+ if( !pRbtree->pCheckRollback ){
+ pRbtree->pCheckRollbackTail = pRollbackOp;
+ }
+ pRollbackOp->pNext = pRbtree->pCheckRollback;
+ pRbtree->pCheckRollback = pRollbackOp;
+ }
+}
+
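+/*
+ * Open a new in-memory Rbtree "database". The zFilename, mode and nPg
+ * arguments are ignored; *ppBtree is set to point at the new Rbtree.
+ */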
+int sqliteRbtreeOpen(
+ const char *zFilename,
+ int mode,
+ int nPg,
+ Btree **ppBtree
+){
+ Rbtree **ppRbtree = (Rbtree**)ppBtree;
+ *ppRbtree = (Rbtree *)sqliteMalloc(sizeof(Rbtree));
+ if( sqlite_malloc_failed ) goto open_no_mem;
+ sqliteHashInit(&(*ppRbtree)->tblHash, SQLITE_HASH_INT, 0);
+
+ /* Create a binary tree for the SQLITE_MASTER table at location 2 */
+ btreeCreateTable(*ppRbtree, 2);
+ if( sqlite_malloc_failed ) goto open_no_mem;
+ (*ppRbtree)->next_idx = 3;
+ (*ppRbtree)->pOps = &sqliteRbtreeOps;
+ /* Set file type to 4; this is so that "attach ':memory:' as ...." does not
+ ** think that the database is uninitialised and refuse to attach
+ */
+ (*ppRbtree)->aMetaData[2] = 4;
+
+ return SQLITE_OK;
+
+open_no_mem:
+ *ppBtree = 0;
+ return SQLITE_NOMEM;
+}
+
+/*
+ * Create a new table in the supplied Rbtree. Set *n to the new table number.
+ * Return SQLITE_OK if the operation is a success.
+ */
+static int memRbtreeCreateTable(Rbtree* tree, int* n)
+{
+ assert( tree->eTransState != TRANS_NONE );
+
+ *n = tree->next_idx++;
+ btreeCreateTable(tree, *n);
+ if( sqlite_malloc_failed ) return SQLITE_NOMEM;
+
+ /* Set up the rollback structure (if we are not doing this as part of a
+ * rollback) */
+ if( tree->eTransState != TRANS_ROLLBACK ){
+ BtRollbackOp *pRollbackOp = sqliteMalloc(sizeof(BtRollbackOp));
+ if( pRollbackOp==0 ) return SQLITE_NOMEM;
+ pRollbackOp->eOp = ROLLBACK_DROP;
+ pRollbackOp->iTab = *n;
+ btreeLogRollbackOp(tree, pRollbackOp);
+ }
+
+ return SQLITE_OK;
+}
+
+/*
+ * Delete table n from the supplied Rbtree.
+ */
+static int memRbtreeDropTable(Rbtree* tree, int n)
+{
+ BtRbTree *pTree;
+ assert( tree->eTransState != TRANS_NONE );
+
+ memRbtreeClearTable(tree, n);
+ pTree = sqliteHashInsert(&tree->tblHash, 0, n, 0);
+ assert(pTree);
+ assert( pTree->pCursors==0 );
+ sqliteFree(pTree);
+
+ if( tree->eTransState != TRANS_ROLLBACK ){
+ BtRollbackOp *pRollbackOp = sqliteMalloc(sizeof(BtRollbackOp));
+ if( pRollbackOp==0 ) return SQLITE_NOMEM;
+ pRollbackOp->eOp = ROLLBACK_CREATE;
+ pRollbackOp->iTab = n;
+ btreeLogRollbackOp(tree, pRollbackOp);
+ }
+
+ return SQLITE_OK;
+}
+
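+/*
+ * Compare the key that cursor pCur points to, ignoring the final nIgnore
+ * bytes of that key, against the key (pKey, nKey). Set *pRes negative,
+ * zero or positive if the cursor's key is less than, equal to, or greater
+ * than the supplied key. If the cursor points to nothing, *pRes is -1.
+ */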
+static int memRbtreeKeyCompare(RbtCursor* pCur, const void *pKey, int nKey,
+ int nIgnore, int *pRes)
+{
+ assert(pCur);
+
+ if( !pCur->pNode ) {
+ *pRes = -1;
+ } else {
+ if( (pCur->pNode->nKey - nIgnore) < 0 ){
+ *pRes = -1;
+ }else{
+ *pRes = key_compare(pCur->pNode->pKey, pCur->pNode->nKey-nIgnore,
+ pKey, nKey);
+ }
+ }
+ return SQLITE_OK;
+}
+
+/*
+ * Get a new cursor for table iTable of the supplied Rbtree. The wrFlag
+ * parameter indicates that the cursor is open for writing.
+ *
+ * Note that RbtCursor.eSkip and RbtCursor.pNode both initialize to 0.
+ */
+static int memRbtreeCursor(
+ Rbtree* tree,
+ int iTable,
+ int wrFlag,
+ RbtCursor **ppCur
+){
+ RbtCursor *pCur;
+ assert(tree);
+ pCur = *ppCur = sqliteMalloc(sizeof(RbtCursor));
+ if( sqlite_malloc_failed ) return SQLITE_NOMEM;
+ pCur->pTree = sqliteHashFind(&tree->tblHash, 0, iTable);
+ assert( pCur->pTree );
+ pCur->pRbtree = tree;
+ pCur->iTree = iTable;
+ pCur->pOps = &sqliteRbtreeCursorOps;
+ pCur->wrFlag = wrFlag;
+ pCur->pShared = pCur->pTree->pCursors;
+ pCur->pTree->pCursors = pCur;
+
+ assert( (*ppCur)->pTree );
+ return SQLITE_OK;
+}
+
+/*
+ * Insert a new record into the Rbtree. The key is given by (pKey,nKey)
+ * and the data is given by (pData,nData). The cursor is used only to
+ * define what database the record should be inserted into. The cursor
+ * is left pointing at the new record.
+ *
+ * If the key exists already in the tree, just replace the data.
+ */
+static int memRbtreeInsert(
+ RbtCursor* pCur,
+ const void *pKey,
+ int nKey,
+ const void *pDataInput,
+ int nData
+){
+ void * pData;
+ int match;
+
+ /* It is illegal to call sqliteRbtreeInsert() if we are
+ ** not in a transaction */
+ assert( pCur->pRbtree->eTransState != TRANS_NONE );
+
+ /* Make sure some other cursor isn't trying to read this same table */
+ if( checkReadLocks(pCur) ){
+ return SQLITE_LOCKED; /* The table pCur points to has a read lock */
+ }
+
+ /* Take a copy of the input data now, in case we need it for the
+ * replace case */
+ pData = sqliteMallocRaw(nData);
+ if( sqlite_malloc_failed ) return SQLITE_NOMEM;
+ memcpy(pData, pDataInput, nData);
+
+ /* Move the cursor to a node near the key to be inserted. If the key already
+ * exists in the table, then (match == 0). In this case we can just replace
+ * the data associated with the entry; we don't need to manipulate the tree.
+ *
+ * If there is no exact match, then the cursor points at what would be either
+ * the predecessor (match == -1) or successor (match == 1) of the
+ * searched-for key, were it to be inserted. The new node becomes a child of
+ * this node.
+ *
+ * The new node is initially red.
+ */
+ memRbtreeMoveto( pCur, pKey, nKey, &match);
+ if( match ){
+ BtRbNode *pNode = sqliteMalloc(sizeof(BtRbNode));
+ if( pNode==0 ) return SQLITE_NOMEM;
+ pNode->nKey = nKey;
+ pNode->pKey = sqliteMallocRaw(nKey);
+ if( sqlite_malloc_failed ) return SQLITE_NOMEM;
+ memcpy(pNode->pKey, pKey, nKey);
+ pNode->nData = nData;
+ pNode->pData = pData;
+ if( pCur->pNode ){
+ switch( match ){
+ case -1:
+ assert( !pCur->pNode->pRight );
+ pNode->pParent = pCur->pNode;
+ pCur->pNode->pRight = pNode;
+ break;
+ case 1:
+ assert( !pCur->pNode->pLeft );
+ pNode->pParent = pCur->pNode;
+ pCur->pNode->pLeft = pNode;
+ break;
+ default:
+ assert(0);
+ }
+ }else{
+ pCur->pTree->pHead = pNode;
+ }
+
+ /* Point the cursor at the node just inserted, as per SQLite requirements */
+ pCur->pNode = pNode;
+
+ /* A new node has just been inserted, so run the balancing code */
+ do_insert_balancing(pCur->pTree, pNode);
+
+ /* Set up a rollback-op in case we have to roll this operation back */
+ if( pCur->pRbtree->eTransState != TRANS_ROLLBACK ){
+ BtRollbackOp *pOp = sqliteMalloc( sizeof(BtRollbackOp) );
+ if( pOp==0 ) return SQLITE_NOMEM;
+ pOp->eOp = ROLLBACK_DELETE;
+ pOp->iTab = pCur->iTree;
+ pOp->nKey = pNode->nKey;
+ pOp->pKey = sqliteMallocRaw( pOp->nKey );
+ if( sqlite_malloc_failed ) return SQLITE_NOMEM;
+ memcpy( pOp->pKey, pNode->pKey, pOp->nKey );
+ btreeLogRollbackOp(pCur->pRbtree, pOp);
+ }
+
+ }else{
+ /* No need to insert a new node in the tree, as the key already exists.
+ * Just clobber the current node's data. */
+
+ /* Set up a rollback-op in case we have to roll this operation back */
+ if( pCur->pRbtree->eTransState != TRANS_ROLLBACK ){
+ BtRollbackOp *pOp = sqliteMalloc( sizeof(BtRollbackOp) );
+ if( pOp==0 ) return SQLITE_NOMEM;
+ pOp->iTab = pCur->iTree;
+ pOp->nKey = pCur->pNode->nKey;
+ pOp->pKey = sqliteMallocRaw( pOp->nKey );
+ if( sqlite_malloc_failed ) return SQLITE_NOMEM;
+ memcpy( pOp->pKey, pCur->pNode->pKey, pOp->nKey );
+ pOp->nData = pCur->pNode->nData;
+ pOp->pData = pCur->pNode->pData;
+ pOp->eOp = ROLLBACK_INSERT;
+ btreeLogRollbackOp(pCur->pRbtree, pOp);
+ }else{
+ sqliteFree( pCur->pNode->pData );
+ }
+
+ /* Actually clobber the node's data */
+ pCur->pNode->pData = pData;
+ pCur->pNode->nData = nData;
+ }
+
+ return SQLITE_OK;
+}
+
+/* Move the cursor so that it points to an entry near pKey.
+** Return a success code.
+**
+** *pRes<0 The cursor is left pointing at an entry that
+** is smaller than pKey or if the table is empty
+** and the cursor is therefore left pointing to nothing.
+**
+** *pRes==0 The cursor is left pointing at an entry that
+** exactly matches pKey.
+**
+** *pRes>0 The cursor is left pointing at an entry that
+** is larger than pKey.
+*/
+static int memRbtreeMoveto(
+ RbtCursor* pCur,
+ const void *pKey,
+ int nKey,
+ int *pRes
+){
+ BtRbNode *pTmp = 0;
+
+ pCur->pNode = pCur->pTree->pHead;
+ *pRes = -1;
+ while( pCur->pNode && *pRes ) {
+ *pRes = key_compare(pCur->pNode->pKey, pCur->pNode->nKey, pKey, nKey);
+ pTmp = pCur->pNode;
+ switch( *pRes ){
+ case 1: /* cursor > key */
+ pCur->pNode = pCur->pNode->pLeft;
+ break;
+ case -1: /* cursor < key */
+ pCur->pNode = pCur->pNode->pRight;
+ break;
+ }
+ }
+
+ /* If (pCur->pNode == NULL), then we have failed to find a match. Set
+ * pCur->pNode to pTmp, which is either NULL (if the tree is empty) or the
+ * last node traversed in the search. In either case the relationship
+ * between pTmp and the searched for key is already stored in *pRes. pTmp is
+ * either the successor or predecessor of the key we tried to move to. */
+ if( !pCur->pNode ) pCur->pNode = pTmp;
+ pCur->eSkip = SKIP_NONE;
+
+ return SQLITE_OK;
+}
+
+
+/*
+** Delete the entry that the cursor is pointing to.
+**
+** The cursor is left pointing at either the next or the previous
+** entry. If the cursor is left pointing to the next entry, then
+** the pCur->eSkip flag is set to SKIP_NEXT which forces the next call to
+** sqliteRbtreeNext() to be a no-op. That way, you can always call
+** sqliteRbtreeNext() after a delete and the cursor will be left
+** pointing to the first entry after the deleted entry. Similarly,
+** pCur->eSkip is set to SKIP_PREV if the cursor is left pointing to
+** the entry prior to the deleted entry so that a subsequent call to
+** sqliteRbtreePrevious() will always leave the cursor pointing at the
+** entry immediately before the one that was deleted.
+*/
+static int memRbtreeDelete(RbtCursor* pCur)
+{
+ BtRbNode *pZ; /* The one being deleted */
+ BtRbNode *pChild; /* The child of the spliced out node */
+
+ /* It is illegal to call sqliteRbtreeDelete() if we are
+ ** not in a transaction */
+ assert( pCur->pRbtree->eTransState != TRANS_NONE );
+
+ /* Make sure some other cursor isn't trying to read this same table */
+ if( checkReadLocks(pCur) ){
+ return SQLITE_LOCKED; /* The table pCur points to has a read lock */
+ }
+
+ pZ = pCur->pNode;
+ if( !pZ ){
+ return SQLITE_OK;
+ }
+
+ /* If we are not currently doing a rollback, set up a rollback op for this
+ * deletion */
+ if( pCur->pRbtree->eTransState != TRANS_ROLLBACK ){
+ BtRollbackOp *pOp = sqliteMalloc( sizeof(BtRollbackOp) );
+ if( pOp==0 ) return SQLITE_NOMEM;
+ pOp->iTab = pCur->iTree;
+ pOp->nKey = pZ->nKey;
+ pOp->pKey = pZ->pKey;
+ pOp->nData = pZ->nData;
+ pOp->pData = pZ->pData;
+ pOp->eOp = ROLLBACK_INSERT;
+ btreeLogRollbackOp(pCur->pRbtree, pOp);
+ }
+
+ /* First do a standard binary-tree delete (node pZ is to be deleted). How
+ * to do this depends on how many children pZ has:
+ *
+ * If pZ has no children or one child, then splice out pZ. If pZ has two
+ * children, splice out the successor of pZ and replace the key and data of
+ * pZ with the key and data of the spliced out successor. */
+ if( pZ->pLeft && pZ->pRight ){
+ BtRbNode *pTmp;
+ int dummy;
+ pCur->eSkip = SKIP_NONE;
+ memRbtreeNext(pCur, &dummy);
+ assert( dummy == 0 );
+ if( pCur->pRbtree->eTransState == TRANS_ROLLBACK ){
+ sqliteFree(pZ->pKey);
+ sqliteFree(pZ->pData);
+ }
+ pZ->pData = pCur->pNode->pData;
+ pZ->nData = pCur->pNode->nData;
+ pZ->pKey = pCur->pNode->pKey;
+ pZ->nKey = pCur->pNode->nKey;
+ pTmp = pZ;
+ pZ = pCur->pNode;
+ pCur->pNode = pTmp;
+ pCur->eSkip = SKIP_NEXT;
+ }else{
+ int res;
+ pCur->eSkip = SKIP_NONE;
+ memRbtreeNext(pCur, &res);
+ pCur->eSkip = SKIP_NEXT;
+ if( res ){
+ memRbtreeLast(pCur, &res);
+ memRbtreePrevious(pCur, &res);
+ pCur->eSkip = SKIP_PREV;
+ }
+ if( pCur->pRbtree->eTransState == TRANS_ROLLBACK ){
+ sqliteFree(pZ->pKey);
+ sqliteFree(pZ->pData);
+ }
+ }
+
+ /* pZ now points at the node to be spliced out. This block does the
+ * splicing. */
+ {
+ BtRbNode **ppParentSlot = 0;
+ assert( !pZ->pLeft || !pZ->pRight ); /* pZ has at most one child */
+ pChild = ((pZ->pLeft)?pZ->pLeft:pZ->pRight);
+ if( pZ->pParent ){
+ assert( pZ == pZ->pParent->pLeft || pZ == pZ->pParent->pRight );
+ ppParentSlot = ((pZ == pZ->pParent->pLeft)
+ ?&pZ->pParent->pLeft:&pZ->pParent->pRight);
+ *ppParentSlot = pChild;
+ }else{
+ pCur->pTree->pHead = pChild;
+ }
+ if( pChild ) pChild->pParent = pZ->pParent;
+ }
+
+ /* pZ now points at the spliced out node. pChild is the only child of pZ, or
+ * NULL if pZ has no children. If pZ is black, and not the tree root, then we
+ * will have violated the "same number of black nodes in every path to a
+ * leaf" property of the red-black tree. The code in do_delete_balancing()
+ * repairs this. */
+ if( pZ->isBlack ){
+ do_delete_balancing(pCur->pTree, pChild, pZ->pParent);
+ }
+
+ sqliteFree(pZ);
+ return SQLITE_OK;
+}
+
+/*
+ * Empty table n of the Rbtree.
+ */
+static int memRbtreeClearTable(Rbtree* tree, int n)
+{
+ BtRbTree *pTree;
+ BtRbNode *pNode;
+
+ pTree = sqliteHashFind(&tree->tblHash, 0, n);
+ assert(pTree);
+
+ pNode = pTree->pHead;
+ while( pNode ){
+ if( pNode->pLeft ){
+ pNode = pNode->pLeft;
+ }
+ else if( pNode->pRight ){
+ pNode = pNode->pRight;
+ }
+ else {
+ BtRbNode *pTmp = pNode->pParent;
+ if( tree->eTransState == TRANS_ROLLBACK ){
+ sqliteFree( pNode->pKey );
+ sqliteFree( pNode->pData );
+ }else{
+ BtRollbackOp *pRollbackOp = sqliteMallocRaw(sizeof(BtRollbackOp));
+ if( pRollbackOp==0 ) return SQLITE_NOMEM;
+ pRollbackOp->eOp = ROLLBACK_INSERT;
+ pRollbackOp->iTab = n;
+ pRollbackOp->nKey = pNode->nKey;
+ pRollbackOp->pKey = pNode->pKey;
+ pRollbackOp->nData = pNode->nData;
+ pRollbackOp->pData = pNode->pData;
+ btreeLogRollbackOp(tree, pRollbackOp);
+ }
+ sqliteFree( pNode );
+ if( pTmp ){
+ if( pTmp->pLeft == pNode ) pTmp->pLeft = 0;
+ else if( pTmp->pRight == pNode ) pTmp->pRight = 0;
+ }
+ pNode = pTmp;
+ }
+ }
+
+ pTree->pHead = 0;
+ return SQLITE_OK;
+}
+
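+/*
+ * Move cursor pCur to the first (smallest-key) entry in its table. Set
+ * *pRes to 0 if an entry is found, or to 1 otherwise.
+ */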
+static int memRbtreeFirst(RbtCursor* pCur, int *pRes)
+{
+ if( pCur->pTree->pHead ){
+ pCur->pNode = pCur->pTree->pHead;
+ while( pCur->pNode->pLeft ){
+ pCur->pNode = pCur->pNode->pLeft;
+ }
+ }
+ if( pCur->pNode ){
+ *pRes = 0;
+ }else{
+ *pRes = 1;
+ }
+ pCur->eSkip = SKIP_NONE;
+ return SQLITE_OK;
+}
+
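+/*
+ * Move cursor pCur to the last (largest-key) entry in its table. Set *pRes
+ * to 0 if an entry is found, or to 1 otherwise.
+ */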
+static int memRbtreeLast(RbtCursor* pCur, int *pRes)
+{
+ if( pCur->pTree->pHead ){
+ pCur->pNode = pCur->pTree->pHead;
+ while( pCur->pNode->pRight ){
+ pCur->pNode = pCur->pNode->pRight;
+ }
+ }
+ if( pCur->pNode ){
+ *pRes = 0;
+ }else{
+ *pRes = 1;
+ }
+ pCur->eSkip = SKIP_NONE;
+ return SQLITE_OK;
+}
+
+/*
+** Advance the cursor to the next entry in the database. If
+** successful then set *pRes=0. If the cursor
+** was already pointing to the last entry in the database before
+** this routine was called, then set *pRes=1.
+*/
+static int memRbtreeNext(RbtCursor* pCur, int *pRes)
+{
+ if( pCur->pNode && pCur->eSkip != SKIP_NEXT ){
+ if( pCur->pNode->pRight ){
+ pCur->pNode = pCur->pNode->pRight;
+ while( pCur->pNode->pLeft )
+ pCur->pNode = pCur->pNode->pLeft;
+ }else{
+ BtRbNode * pX = pCur->pNode;
+ pCur->pNode = pX->pParent;
+ while( pCur->pNode && (pCur->pNode->pRight == pX) ){
+ pX = pCur->pNode;
+ pCur->pNode = pX->pParent;
+ }
+ }
+ }
+ pCur->eSkip = SKIP_NONE;
+
+ if( !pCur->pNode ){
+ *pRes = 1;
+ }else{
+ *pRes = 0;
+ }
+
+ return SQLITE_OK;
+}
+
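+/*
+ * Step the cursor back to the previous entry in the database. This is the
+ * mirror image of memRbtreeNext(): *pRes is set to 0 on success, or to 1
+ * if the cursor was already on the first entry.
+ */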
+static int memRbtreePrevious(RbtCursor* pCur, int *pRes)
+{
+ if( pCur->pNode && pCur->eSkip != SKIP_PREV ){
+ if( pCur->pNode->pLeft ){
+ pCur->pNode = pCur->pNode->pLeft;
+ while( pCur->pNode->pRight )
+ pCur->pNode = pCur->pNode->pRight;
+ }else{
+ BtRbNode * pX = pCur->pNode;
+ pCur->pNode = pX->pParent;
+ while( pCur->pNode && (pCur->pNode->pLeft == pX) ){
+ pX = pCur->pNode;
+ pCur->pNode = pX->pParent;
+ }
+ }
+ }
+ pCur->eSkip = SKIP_NONE;
+
+ if( !pCur->pNode ){
+ *pRes = 1;
+ }else{
+ *pRes = 0;
+ }
+
+ return SQLITE_OK;
+}
+
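+/*
+ * Set *pSize to the number of bytes in the key of the entry the cursor
+ * points to, or to 0 if the cursor points to nothing.
+ */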
+static int memRbtreeKeySize(RbtCursor* pCur, int *pSize)
+{
+ if( pCur->pNode ){
+ *pSize = pCur->pNode->nKey;
+ }else{
+ *pSize = 0;
+ }
+ return SQLITE_OK;
+}
+
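+/*
+ * Copy amt bytes of the current entry's key, starting at offset, into zBuf.
+ * If fewer bytes are available the copy is truncated. Returns the number
+ * of bytes actually copied, or 0 if the cursor points to nothing.
+ */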
+static int memRbtreeKey(RbtCursor* pCur, int offset, int amt, char *zBuf)
+{
+ if( !pCur->pNode ) return 0;
+ if( !pCur->pNode->pKey || ((amt + offset) <= pCur->pNode->nKey) ){
+ memcpy(zBuf, ((char*)pCur->pNode->pKey)+offset, amt);
+ }else{
+ memcpy(zBuf, ((char*)pCur->pNode->pKey)+offset, pCur->pNode->nKey-offset);
+ amt = pCur->pNode->nKey-offset;
+ }
+ return amt;
+}
+
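+/*
+ * Set *pSize to the number of bytes of data in the entry the cursor points
+ * to, or to 0 if the cursor points to nothing.
+ */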
+static int memRbtreeDataSize(RbtCursor* pCur, int *pSize)
+{
+ if( pCur->pNode ){
+ *pSize = pCur->pNode->nData;
+ }else{
+ *pSize = 0;
+ }
+ return SQLITE_OK;
+}
+
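+/*
+ * Copy amt bytes of the current entry's data, starting at offset, into
+ * zBuf. If fewer bytes are available the copy is truncated. Returns the
+ * number of bytes actually copied, or 0 if the cursor points to nothing.
+ */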
+static int memRbtreeData(RbtCursor *pCur, int offset, int amt, char *zBuf)
+{
+ if( !pCur->pNode ) return 0;
+ if( (amt + offset) <= pCur->pNode->nData ){
+ memcpy(zBuf, ((char*)pCur->pNode->pData)+offset, amt);
+ }else{
+ memcpy(zBuf, ((char*)pCur->pNode->pData)+offset ,pCur->pNode->nData-offset);
+ amt = pCur->pNode->nData-offset;
+ }
+ return amt;
+}
+
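+/*
+ * Close the given cursor: unlink it from its table's list of open cursors
+ * and free its memory.
+ */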
+static int memRbtreeCloseCursor(RbtCursor* pCur)
+{
+ if( pCur->pTree->pCursors==pCur ){
+ pCur->pTree->pCursors = pCur->pShared;
+ }else{
+ RbtCursor *p = pCur->pTree->pCursors;
+ while( p && p->pShared!=pCur ){ p = p->pShared; }
+ assert( p!=0 );
+ if( p ){
+ p->pShared = pCur->pShared;
+ }
+ }
+ sqliteFree(pCur);
+ return SQLITE_OK;
+}
+
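+/*
+ * The next two routines read and write the array of SQLITE_N_BTREE_META
+ * meta-data integers stored with the Rbtree.
+ */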
+static int memRbtreeGetMeta(Rbtree* tree, int* aMeta)
+{
+ memcpy( aMeta, tree->aMetaData, sizeof(int) * SQLITE_N_BTREE_META );
+ return SQLITE_OK;
+}
+
+static int memRbtreeUpdateMeta(Rbtree* tree, int* aMeta)
+{
+ memcpy( tree->aMetaData, aMeta, sizeof(int) * SQLITE_N_BTREE_META );
+ return SQLITE_OK;
+}
+
+/*
+ * Check that each table in the Rbtree meets the requirements for a red-black
+ * binary tree. If an error is found, return an explanation of the problem in
+ * memory obtained from sqliteMalloc(). Parameters aRoot and nRoot are ignored.
+ */
+static char *memRbtreeIntegrityCheck(Rbtree* tree, int* aRoot, int nRoot)
+{
+ char * msg = 0;
+ HashElem *p;
+
+ for(p=sqliteHashFirst(&tree->tblHash); p; p=sqliteHashNext(p)){
+ BtRbTree *pTree = sqliteHashData(p);
+ check_redblack_tree(pTree, &msg);
+ }
+
+ return msg;
+}
+
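+/*
+ * The page-cache size and safety level have no meaning for an in-memory
+ * database, so the next two routines are no-ops.
+ */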
+static int memRbtreeSetCacheSize(Rbtree* tree, int sz)
+{
+ return SQLITE_OK;
+}
+
+static int memRbtreeSetSafetyLevel(Rbtree *pBt, int level){
+ return SQLITE_OK;
+}
+
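+/*
+ * Begin a transaction. Returns SQLITE_ERROR if a transaction is already
+ * in progress.
+ */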
+static int memRbtreeBeginTrans(Rbtree* tree)
+{
+ if( tree->eTransState != TRANS_NONE )
+ return SQLITE_ERROR;
+
+ assert( tree->pTransRollback == 0 );
+ tree->eTransState = TRANS_INTRANSACTION;
+ return SQLITE_OK;
+}
+
+/*
+** Delete a linked list of BtRollbackOp structures.
+*/
+static void deleteRollbackList(BtRollbackOp *pOp){
+ while( pOp ){
+ BtRollbackOp *pTmp = pOp->pNext;
+ sqliteFree(pOp->pData);
+ sqliteFree(pOp->pKey);
+ sqliteFree(pOp);
+ pOp = pTmp;
+ }
+}
+
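+/*
+ * Commit the current transaction. The accumulated rollback information is
+ * simply discarded and the tree returns to the TRANS_NONE state.
+ */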
+static int memRbtreeCommit(Rbtree* tree){
+ /* Just delete pTransRollback and pCheckRollback */
+ deleteRollbackList(tree->pCheckRollback);
+ deleteRollbackList(tree->pTransRollback);
+ tree->pTransRollback = 0;
+ tree->pCheckRollback = 0;
+ tree->pCheckRollbackTail = 0;
+ tree->eTransState = TRANS_NONE;
+ return SQLITE_OK;
+}
+
+/*
+ * Close the supplied Rbtree. Delete everything associated with it.
+ */
+static int memRbtreeClose(Rbtree* tree)
+{
+ HashElem *p;
+ memRbtreeCommit(tree);
+ while( (p=sqliteHashFirst(&tree->tblHash))!=0 ){
+ tree->eTransState = TRANS_ROLLBACK;
+ memRbtreeDropTable(tree, sqliteHashKeysize(p));
+ }
+ sqliteHashClear(&tree->tblHash);
+ sqliteFree(tree);
+ return SQLITE_OK;
+}
+
+/*
+ * Execute and delete the supplied rollback-list on pRbtree.
+ */
+static void execute_rollback_list(Rbtree *pRbtree, BtRollbackOp *pList)
+{
+ BtRollbackOp *pTmp;
+ RbtCursor cur;
+ int res;
+
+ cur.pRbtree = pRbtree;
+ cur.wrFlag = 1;
+ while( pList ){
+ switch( pList->eOp ){
+ case ROLLBACK_INSERT:
+ cur.pTree = sqliteHashFind( &pRbtree->tblHash, 0, pList->iTab );
+ assert(cur.pTree);
+ cur.iTree = pList->iTab;
+ cur.eSkip = SKIP_NONE;
+ memRbtreeInsert( &cur, pList->pKey,
+ pList->nKey, pList->pData, pList->nData );
+ break;
+ case ROLLBACK_DELETE:
+ cur.pTree = sqliteHashFind( &pRbtree->tblHash, 0, pList->iTab );
+ assert(cur.pTree);
+ cur.iTree = pList->iTab;
+ cur.eSkip = SKIP_NONE;
+ memRbtreeMoveto(&cur, pList->pKey, pList->nKey, &res);
+ assert(res == 0);
+ memRbtreeDelete( &cur );
+ break;
+ case ROLLBACK_CREATE:
+ btreeCreateTable(pRbtree, pList->iTab);
+ break;
+ case ROLLBACK_DROP:
+ memRbtreeDropTable(pRbtree, pList->iTab);
+ break;
+ default:
+ assert(0);
+ }
+ sqliteFree(pList->pKey);
+ sqliteFree(pList->pData);
+ pTmp = pList->pNext;
+ sqliteFree(pList);
+ pList = pTmp;
+ }
+}
+
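+/*
+ * Roll back the current transaction by executing the checkpoint and
+ * transaction rollback lists, then discard them.
+ */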
+static int memRbtreeRollback(Rbtree* tree)
+{
+ tree->eTransState = TRANS_ROLLBACK;
+ execute_rollback_list(tree, tree->pCheckRollback);
+ execute_rollback_list(tree, tree->pTransRollback);
+ tree->pTransRollback = 0;
+ tree->pCheckRollback = 0;
+ tree->pCheckRollbackTail = 0;
+ tree->eTransState = TRANS_NONE;
+ return SQLITE_OK;
+}
+
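+/*
+ * The next three routines begin, commit and roll back a checkpoint
+ * (statement-level) sub-transaction nested inside the main transaction.
+ */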
+static int memRbtreeBeginCkpt(Rbtree* tree)
+{
+ if( tree->eTransState != TRANS_INTRANSACTION )
+ return SQLITE_ERROR;
+
+ assert( tree->pCheckRollback == 0 );
+ assert( tree->pCheckRollbackTail == 0 );
+ tree->eTransState = TRANS_INCHECKPOINT;
+ return SQLITE_OK;
+}
+
+static int memRbtreeCommitCkpt(Rbtree* tree)
+{
+ if( tree->eTransState == TRANS_INCHECKPOINT ){
+ if( tree->pCheckRollback ){
+ tree->pCheckRollbackTail->pNext = tree->pTransRollback;
+ tree->pTransRollback = tree->pCheckRollback;
+ tree->pCheckRollback = 0;
+ tree->pCheckRollbackTail = 0;
+ }
+ tree->eTransState = TRANS_INTRANSACTION;
+ }
+ return SQLITE_OK;
+}
+
+static int memRbtreeRollbackCkpt(Rbtree* tree)
+{
+ if( tree->eTransState != TRANS_INCHECKPOINT ) return SQLITE_OK;
+ tree->eTransState = TRANS_ROLLBACK;
+ execute_rollback_list(tree, tree->pCheckRollback);
+ tree->pCheckRollback = 0;
+ tree->pCheckRollbackTail = 0;
+ tree->eTransState = TRANS_INTRANSACTION;
+ return SQLITE_OK;
+}
+
+#ifdef SQLITE_TEST
+static int memRbtreePageDump(Rbtree* tree, int pgno, int rec)
+{
+ assert(!"Cannot call sqliteRbtreePageDump");
+ return SQLITE_OK;
+}
+
+static int memRbtreeCursorDump(RbtCursor* pCur, int* aRes)
+{
+ assert(!"Cannot call sqliteRbtreeCursorDump");
+ return SQLITE_OK;
+}
+#endif
+
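+/*
+ * There is no underlying pager for an in-memory database.
+ */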
+static struct Pager *memRbtreePager(Rbtree* tree)
+{
+ return 0;
+}
+
+/*
+** Return the full pathname of the underlying database file.
+*/
+static const char *memRbtreeGetFilename(Rbtree *pBt){
+ return 0; /* A NULL return indicates there is no underlying file */
+}
+
+/*
+** The copy file function is not implemented for the in-memory database
+*/
+static int memRbtreeCopyFile(Rbtree *pBt, Rbtree *pBt2){
+ return SQLITE_INTERNAL; /* Not implemented */
+}
+
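+/*
+ * The following two structures map the generic Btree and BtCursor operation
+ * vectors onto the in-memory Rbtree implementations above.
+ */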
+static BtOps sqliteRbtreeOps = {
+ (int(*)(Btree*)) memRbtreeClose,
+ (int(*)(Btree*,int)) memRbtreeSetCacheSize,
+ (int(*)(Btree*,int)) memRbtreeSetSafetyLevel,
+ (int(*)(Btree*)) memRbtreeBeginTrans,
+ (int(*)(Btree*)) memRbtreeCommit,
+ (int(*)(Btree*)) memRbtreeRollback,
+ (int(*)(Btree*)) memRbtreeBeginCkpt,
+ (int(*)(Btree*)) memRbtreeCommitCkpt,
+ (int(*)(Btree*)) memRbtreeRollbackCkpt,
+ (int(*)(Btree*,int*)) memRbtreeCreateTable,
+ (int(*)(Btree*,int*)) memRbtreeCreateTable,
+ (int(*)(Btree*,int)) memRbtreeDropTable,
+ (int(*)(Btree*,int)) memRbtreeClearTable,
+ (int(*)(Btree*,int,int,BtCursor**)) memRbtreeCursor,
+ (int(*)(Btree*,int*)) memRbtreeGetMeta,
+ (int(*)(Btree*,int*)) memRbtreeUpdateMeta,
+ (char*(*)(Btree*,int*,int)) memRbtreeIntegrityCheck,
+ (const char*(*)(Btree*)) memRbtreeGetFilename,
+ (int(*)(Btree*,Btree*)) memRbtreeCopyFile,
+ (struct Pager*(*)(Btree*)) memRbtreePager,
+#ifdef SQLITE_TEST
+ (int(*)(Btree*,int,int)) memRbtreePageDump,
+#endif
+};
+
+static BtCursorOps sqliteRbtreeCursorOps = {
+ (int(*)(BtCursor*,const void*,int,int*)) memRbtreeMoveto,
+ (int(*)(BtCursor*)) memRbtreeDelete,
+ (int(*)(BtCursor*,const void*,int,const void*,int)) memRbtreeInsert,
+ (int(*)(BtCursor*,int*)) memRbtreeFirst,
+ (int(*)(BtCursor*,int*)) memRbtreeLast,
+ (int(*)(BtCursor*,int*)) memRbtreeNext,
+ (int(*)(BtCursor*,int*)) memRbtreePrevious,
+ (int(*)(BtCursor*,int*)) memRbtreeKeySize,
+ (int(*)(BtCursor*,int,int,char*)) memRbtreeKey,
+ (int(*)(BtCursor*,const void*,int,int,int*)) memRbtreeKeyCompare,
+ (int(*)(BtCursor*,int*)) memRbtreeDataSize,
+ (int(*)(BtCursor*,int,int,char*)) memRbtreeData,
+ (int(*)(BtCursor*)) memRbtreeCloseCursor,
+#ifdef SQLITE_TEST
+ (int(*)(BtCursor*,int*)) memRbtreeCursorDump,
+#endif
+
+};
+
+#endif /* SQLITE_OMIT_INMEMORYDB */
diff --git a/usr/src/cmd/svc/configd/sqlite/src/build.c b/usr/src/cmd/svc/configd/sqlite/src/build.c
new file mode 100644
index 0000000000..4de92ef15b
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/build.c
@@ -0,0 +1,2159 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains C code routines that are called by the SQLite parser
+** when syntax rules are reduced. The routines in this file handle the
+** following kinds of SQL syntax:
+**
+** CREATE TABLE
+** DROP TABLE
+** CREATE INDEX
+** DROP INDEX
+** creating ID lists
+** BEGIN TRANSACTION
+** COMMIT
+** ROLLBACK
+** PRAGMA
+**
+** $Id: build.c,v 1.176.2.2 2004/07/20 00:50:30 drh Exp $
+*/
+#include "sqliteInt.h"
+#include <ctype.h>
+
+/*
+** This routine is called when a new SQL statement is beginning to
+** be parsed. Check to see if the schema for the database needs
+** to be read from the SQLITE_MASTER and SQLITE_TEMP_MASTER tables.
+** If it does, then read it.
+*/
+void sqliteBeginParse(Parse *pParse, int explainFlag){
+ sqlite *db = pParse->db;
+ int i;
+ pParse->explain = explainFlag;
+ if((db->flags & SQLITE_Initialized)==0 && db->init.busy==0 ){
+ int rc = sqliteInit(db, &pParse->zErrMsg);
+ if( rc!=SQLITE_OK ){
+ pParse->rc = rc;
+ pParse->nErr++;
+ }
+ }
+ for(i=0; i<db->nDb; i++){
+ DbClearProperty(db, i, DB_Locked);
+ if( !db->aDb[i].inTrans ){
+ DbClearProperty(db, i, DB_Cookie);
+ }
+ }
+ pParse->nVar = 0;
+}
+
+/*
+** This routine is called after a single SQL statement has been
+** parsed and we want to execute the VDBE code to implement
+** that statement. Prior action routines should have already
+** constructed VDBE code to do the work of the SQL statement.
+** This routine just has to execute the VDBE code.
+**
+** Note that if an error occurred, it might be the case that
+** no VDBE code was generated.
+*/
+void sqliteExec(Parse *pParse){
+ sqlite *db = pParse->db;
+ Vdbe *v = pParse->pVdbe;
+
+ if( v==0 && (v = sqliteGetVdbe(pParse))!=0 ){
+ sqliteVdbeAddOp(v, OP_Halt, 0, 0);
+ }
+ if( sqlite_malloc_failed ) return;
+ if( v && pParse->nErr==0 ){
+ FILE *trace = (db->flags & SQLITE_VdbeTrace)!=0 ? stdout : 0;
+ sqliteVdbeTrace(v, trace);
+ sqliteVdbeMakeReady(v, pParse->nVar, pParse->explain);
+ pParse->rc = pParse->nErr ? SQLITE_ERROR : SQLITE_DONE;
+ pParse->colNamesSet = 0;
+ }else if( pParse->rc==SQLITE_OK ){
+ pParse->rc = SQLITE_ERROR;
+ }
+ pParse->nTab = 0;
+ pParse->nMem = 0;
+ pParse->nSet = 0;
+ pParse->nAgg = 0;
+ pParse->nVar = 0;
+}
+
+/*
+** Locate the in-memory structure that describes
+** a particular database table given the name
+** of that table and (optionally) the name of the database
+** containing the table. Return NULL if not found.
+**
+** If zDatabase is 0, all databases are searched for the
+** table and the first matching table is returned. (No checking
+** for duplicate table names is done.) The search order is
+** TEMP first, then MAIN, then any auxiliary databases added
+** using the ATTACH command.
+**
+** See also sqliteLocateTable().
+*/
+Table *sqliteFindTable(sqlite *db, const char *zName, const char *zDatabase){
+ Table *p = 0;
+ int i;
+ for(i=0; i<db->nDb; i++){
+ int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */
+ if( zDatabase!=0 && sqliteStrICmp(zDatabase, db->aDb[j].zName) ) continue;
+ p = sqliteHashFind(&db->aDb[j].tblHash, zName, strlen(zName)+1);
+ if( p ) break;
+ }
+ return p;
+}
+
+/*
+** Locate the in-memory structure that describes
+** a particular database table given the name
+** of that table and (optionally) the name of the database
+** containing the table. Return NULL if not found.
+** Also leave an error message in pParse->zErrMsg.
+**
+** The difference between this routine and sqliteFindTable()
+** is that this routine leaves an error message in pParse->zErrMsg
+** where sqliteFindTable() does not.
+*/
+Table *sqliteLocateTable(Parse *pParse, const char *zName, const char *zDbase){
+ Table *p;
+
+ p = sqliteFindTable(pParse->db, zName, zDbase);
+ if( p==0 ){
+ if( zDbase ){
+ sqliteErrorMsg(pParse, "no such table: %s.%s", zDbase, zName);
+ }else if( sqliteFindTable(pParse->db, zName, 0)!=0 ){
+ sqliteErrorMsg(pParse, "table \"%s\" is not in database \"%s\"",
+ zName, zDbase);
+ }else{
+ sqliteErrorMsg(pParse, "no such table: %s", zName);
+ }
+ }
+ return p;
+}
+
+/*
+** Locate the in-memory structure that describes
+** a particular index given the name of that index
+** and the name of the database that contains the index.
+** Return NULL if not found.
+**
+** If zDatabase is 0, all databases are searched for the
+** index and the first matching index is returned. (No checking
+** for duplicate index names is done.) The search order is
+** TEMP first, then MAIN, then any auxiliary databases added
+** using the ATTACH command.
+*/
+Index *sqliteFindIndex(sqlite *db, const char *zName, const char *zDb){
+ Index *p = 0;
+ int i;
+ for(i=0; i<db->nDb; i++){
+ int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */
+ if( zDb && sqliteStrICmp(zDb, db->aDb[j].zName) ) continue;
+ p = sqliteHashFind(&db->aDb[j].idxHash, zName, strlen(zName)+1);
+ if( p ) break;
+ }
+ return p;
+}
+
+/*
+** Remove the given index from the index hash table, and free
+** its memory structures.
+**
+** The index is removed from the database hash tables but
+** it is not unlinked from the Table that it indexes.
+** Unlinking from the Table must be done by the calling function.
+*/
+static void sqliteDeleteIndex(sqlite *db, Index *p){
+ Index *pOld;
+
+ assert( db!=0 && p->zName!=0 );
+ pOld = sqliteHashInsert(&db->aDb[p->iDb].idxHash, p->zName,
+ strlen(p->zName)+1, 0);
+ if( pOld!=0 && pOld!=p ){
+ sqliteHashInsert(&db->aDb[p->iDb].idxHash, pOld->zName,
+ strlen(pOld->zName)+1, pOld);
+ }
+ sqliteFree(p);
+}
+
+/*
+** Unlink the given index from its table, then remove
+** the index from the index hash table and free its memory
+** structures.
+*/
+void sqliteUnlinkAndDeleteIndex(sqlite *db, Index *pIndex){
+ if( pIndex->pTable->pIndex==pIndex ){
+ pIndex->pTable->pIndex = pIndex->pNext;
+ }else{
+ Index *p;
+ for(p=pIndex->pTable->pIndex; p && p->pNext!=pIndex; p=p->pNext){}
+ if( p && p->pNext==pIndex ){
+ p->pNext = pIndex->pNext;
+ }
+ }
+ sqliteDeleteIndex(db, pIndex);
+}
+
+/*
+** Erase all schema information from the in-memory hash tables of
+** database connection. This routine is called to reclaim memory
+** before the connection closes. It is also called during a rollback
+** if there were schema changes during the transaction.
+**
+** If iDb==0 then reset the internal schema tables for all database
+** files. If iDb>0 then reset the internal schema for only the
+** single file indicated.
+*/
+void sqliteResetInternalSchema(sqlite *db, int iDb){
+ HashElem *pElem;
+ Hash temp1;
+ Hash temp2;
+ int i, j;
+
+ assert( iDb>=0 && iDb<db->nDb );
+ db->flags &= ~SQLITE_Initialized;
+ for(i=iDb; i<db->nDb; i++){
+ Db *pDb = &db->aDb[i];
+ temp1 = pDb->tblHash;
+ temp2 = pDb->trigHash;
+ sqliteHashInit(&pDb->trigHash, SQLITE_HASH_STRING, 0);
+ sqliteHashClear(&pDb->aFKey);
+ sqliteHashClear(&pDb->idxHash);
+ for(pElem=sqliteHashFirst(&temp2); pElem; pElem=sqliteHashNext(pElem)){
+ Trigger *pTrigger = sqliteHashData(pElem);
+ sqliteDeleteTrigger(pTrigger);
+ }
+ sqliteHashClear(&temp2);
+ sqliteHashInit(&pDb->tblHash, SQLITE_HASH_STRING, 0);
+ for(pElem=sqliteHashFirst(&temp1); pElem; pElem=sqliteHashNext(pElem)){
+ Table *pTab = sqliteHashData(pElem);
+ sqliteDeleteTable(db, pTab);
+ }
+ sqliteHashClear(&temp1);
+ DbClearProperty(db, i, DB_SchemaLoaded);
+ if( iDb>0 ) return;
+ }
+ assert( iDb==0 );
+ db->flags &= ~SQLITE_InternChanges;
+
+ /* If one or more of the auxiliary database files has been closed,
+ ** then remove them from the auxiliary database list. We take the
+ ** opportunity to do this here since we have just deleted all of the
+ ** schema hash tables and therefore do not have to make any changes
+ ** to any of those tables.
+ */
+ for(i=0; i<db->nDb; i++){
+ struct Db *pDb = &db->aDb[i];
+ if( pDb->pBt==0 ){
+ if( pDb->pAux && pDb->xFreeAux ) pDb->xFreeAux(pDb->pAux);
+ pDb->pAux = 0;
+ }
+ }
+ for(i=j=2; i<db->nDb; i++){
+ struct Db *pDb = &db->aDb[i];
+ if( pDb->pBt==0 ){
+ sqliteFree(pDb->zName);
+ pDb->zName = 0;
+ continue;
+ }
+ if( j<i ){
+ db->aDb[j] = db->aDb[i];
+ }
+ j++;
+ }
+ memset(&db->aDb[j], 0, (db->nDb-j)*sizeof(db->aDb[j]));
+ db->nDb = j;
+ if( db->nDb<=2 && db->aDb!=db->aDbStatic ){
+ memcpy(db->aDbStatic, db->aDb, 2*sizeof(db->aDb[0]));
+ sqliteFree(db->aDb);
+ db->aDb = db->aDbStatic;
+ }
+}
+
+/*
+** This routine is called whenever a rollback occurs. If there were
+** schema changes during the transaction, then we have to reset the
+** internal hash tables and reload them from disk.
+*/
+void sqliteRollbackInternalChanges(sqlite *db){
+ if( db->flags & SQLITE_InternChanges ){
+ sqliteResetInternalSchema(db, 0);
+ }
+}
+
+/*
+** This routine is called when a commit occurs.
+*/
+void sqliteCommitInternalChanges(sqlite *db){
+ db->aDb[0].schema_cookie = db->next_cookie;
+ db->flags &= ~SQLITE_InternChanges;
+}
+
+/*
+** Remove the memory data structures associated with the given
+** Table. No changes are made to disk by this routine.
+**
+** This routine just deletes the data structure. It does not unlink
+** the table data structure from the hash table. Nor does it remove
+** foreign keys from the sqlite.aFKey hash table. But it does destroy
+** memory structures of the indices and foreign keys associated with
+** the table.
+**
+** Indices associated with the table are unlinked from the "db"
+** data structure if db!=NULL. If db==NULL, indices attached to
+** the table are deleted, but it is assumed they have already been
+** unlinked.
+*/
+void sqliteDeleteTable(sqlite *db, Table *pTable){
+ int i;
+ Index *pIndex, *pNext;
+ FKey *pFKey, *pNextFKey;
+
+ if( pTable==0 ) return;
+
+ /* Delete all indices associated with this table
+ */
+ for(pIndex = pTable->pIndex; pIndex; pIndex=pNext){
+ pNext = pIndex->pNext;
+ assert( pIndex->iDb==pTable->iDb || (pTable->iDb==0 && pIndex->iDb==1) );
+ sqliteDeleteIndex(db, pIndex);
+ }
+
+ /* Delete all foreign keys associated with this table. The keys
+ ** should have already been unlinked from the db->aFKey hash table
+ */
+ for(pFKey=pTable->pFKey; pFKey; pFKey=pNextFKey){
+ pNextFKey = pFKey->pNextFrom;
+ assert( pTable->iDb<db->nDb );
+ assert( sqliteHashFind(&db->aDb[pTable->iDb].aFKey,
+ pFKey->zTo, strlen(pFKey->zTo)+1)!=pFKey );
+ sqliteFree(pFKey);
+ }
+
+ /* Delete the Table structure itself.
+ */
+ for(i=0; i<pTable->nCol; i++){
+ sqliteFree(pTable->aCol[i].zName);
+ sqliteFree(pTable->aCol[i].zDflt);
+ sqliteFree(pTable->aCol[i].zType);
+ }
+ sqliteFree(pTable->zName);
+ sqliteFree(pTable->aCol);
+ sqliteSelectDelete(pTable->pSelect);
+ sqliteFree(pTable);
+}
+
+/*
+** Unlink the given table from the hash tables and then delete the
+** table structure with all its indices and foreign keys.
+*/
+static void sqliteUnlinkAndDeleteTable(sqlite *db, Table *p){
+ Table *pOld;
+ FKey *pF1, *pF2;
+ int i = p->iDb;
+ assert( db!=0 );
+ pOld = sqliteHashInsert(&db->aDb[i].tblHash, p->zName, strlen(p->zName)+1, 0);
+ assert( pOld==0 || pOld==p );
+ for(pF1=p->pFKey; pF1; pF1=pF1->pNextFrom){
+ int nTo = strlen(pF1->zTo) + 1;
+ pF2 = sqliteHashFind(&db->aDb[i].aFKey, pF1->zTo, nTo);
+ if( pF2==pF1 ){
+ sqliteHashInsert(&db->aDb[i].aFKey, pF1->zTo, nTo, pF1->pNextTo);
+ }else{
+ while( pF2 && pF2->pNextTo!=pF1 ){ pF2=pF2->pNextTo; }
+ if( pF2 ){
+ pF2->pNextTo = pF1->pNextTo;
+ }
+ }
+ }
+ sqliteDeleteTable(db, p);
+}
+
+/*
+** Construct the name of a user table or index from a token.
+**
+** Space to hold the name is obtained from sqliteMalloc() and must
+** be freed by the calling function.
+*/
+char *sqliteTableNameFromToken(Token *pName){
+ char *zName = sqliteStrNDup(pName->z, pName->n);
+ sqliteDequote(zName);
+ return zName;
+}
+
+/*
+** Generate code to open the appropriate master table. The table
+** opened will be SQLITE_MASTER for persistent tables and
+** SQLITE_TEMP_MASTER for temporary tables. The table is opened
+** on cursor 0.
+*/
+void sqliteOpenMasterTable(Vdbe *v, int isTemp){
+ sqliteVdbeAddOp(v, OP_Integer, isTemp, 0);
+ sqliteVdbeAddOp(v, OP_OpenWrite, 0, 2);
+}
+
+/*
+** Begin constructing a new table representation in memory. This is
+** the first of several action routines that get called in response
+** to a CREATE TABLE statement. In particular, this routine is called
+** after seeing tokens "CREATE" and "TABLE" and the table name. The
+** pStart token is the CREATE and pName is the table name. The isTemp
+** flag is true if the table should be stored in the auxiliary database
+** file instead of in the main database file. This is normally the case
+** when the "TEMP" or "TEMPORARY" keyword occurs in between
+** CREATE and TABLE.
+**
+** The new table record is initialized and put in pParse->pNewTable.
+** As more of the CREATE TABLE statement is parsed, additional action
+** routines will be called to add more information to this record.
+** At the end of the CREATE TABLE statement, the sqliteEndTable() routine
+** is called to complete the construction of the new table record.
+*/
+void sqliteStartTable(
+ Parse *pParse, /* Parser context */
+ Token *pStart, /* The "CREATE" token */
+ Token *pName, /* Name of table or view to create */
+ int isTemp, /* True if this is a TEMP table */
+ int isView /* True if this is a VIEW */
+){
+ Table *pTable;
+ Index *pIdx;
+ char *zName;
+ sqlite *db = pParse->db;
+ Vdbe *v;
+ int iDb;
+
+ pParse->sFirstToken = *pStart;
+ zName = sqliteTableNameFromToken(pName);
+ if( zName==0 ) return;
+ if( db->init.iDb==1 ) isTemp = 1;
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ assert( (isTemp & 1)==isTemp );
+ {
+ int code;
+ char *zDb = isTemp ? "temp" : "main";
+ if( sqliteAuthCheck(pParse, SQLITE_INSERT, SCHEMA_TABLE(isTemp), 0, zDb) ){
+ sqliteFree(zName);
+ return;
+ }
+ if( isView ){
+ if( isTemp ){
+ code = SQLITE_CREATE_TEMP_VIEW;
+ }else{
+ code = SQLITE_CREATE_VIEW;
+ }
+ }else{
+ if( isTemp ){
+ code = SQLITE_CREATE_TEMP_TABLE;
+ }else{
+ code = SQLITE_CREATE_TABLE;
+ }
+ }
+ if( sqliteAuthCheck(pParse, code, zName, 0, zDb) ){
+ sqliteFree(zName);
+ return;
+ }
+ }
+#endif
+
+
+ /* Before trying to create a temporary table, make sure the Btree for
+ ** holding temporary tables is open.
+ */
+ if( isTemp && db->aDb[1].pBt==0 && !pParse->explain ){
+ int rc = sqliteBtreeFactory(db, 0, 0, MAX_PAGES, &db->aDb[1].pBt);
+ if( rc!=SQLITE_OK ){
+ sqliteErrorMsg(pParse, "unable to open a temporary database "
+ "file for storing temporary tables");
+ pParse->nErr++;
+ return;
+ }
+ if( db->flags & SQLITE_InTrans ){
+ rc = sqliteBtreeBeginTrans(db->aDb[1].pBt);
+ if( rc!=SQLITE_OK ){
+ sqliteErrorMsg(pParse, "unable to get a write lock on "
+ "the temporary database file");
+ return;
+ }
+ }
+ }
+
+ /* Make sure the new table name does not collide with an existing
+ ** index or table name. Issue an error message if it does.
+ **
+ ** If we are re-reading the sqlite_master table because of a schema
+ ** change and a new permanent table is found whose name collides with
+ ** an existing temporary table, that is not an error.
+ */
+ pTable = sqliteFindTable(db, zName, 0);
+ iDb = isTemp ? 1 : db->init.iDb;
+ if( pTable!=0 && (pTable->iDb==iDb || !db->init.busy) ){
+ sqliteErrorMsg(pParse, "table %T already exists", pName);
+ sqliteFree(zName);
+ return;
+ }
+ if( (pIdx = sqliteFindIndex(db, zName, 0))!=0 &&
+ (pIdx->iDb==0 || !db->init.busy) ){
+ sqliteErrorMsg(pParse, "there is already an index named %s", zName);
+ sqliteFree(zName);
+ return;
+ }
+ pTable = sqliteMalloc( sizeof(Table) );
+ if( pTable==0 ){
+ sqliteFree(zName);
+ return;
+ }
+ pTable->zName = zName;
+ pTable->nCol = 0;
+ pTable->aCol = 0;
+ pTable->iPKey = -1;
+ pTable->pIndex = 0;
+ pTable->iDb = iDb;
+ if( pParse->pNewTable ) sqliteDeleteTable(db, pParse->pNewTable);
+ pParse->pNewTable = pTable;
+
+ /* Begin generating the code that will insert the table record into
+ ** the SQLITE_MASTER table. Note in particular that we must go ahead
+ ** and allocate the record number for the table entry now, before any
+ ** PRIMARY KEY or UNIQUE keywords are parsed. Those keywords will cause
+ ** indices to be created and the table record must come before the
+ ** indices. Hence, the record number for the table must be allocated
+ ** now.
+ */
+ if( !db->init.busy && (v = sqliteGetVdbe(pParse))!=0 ){
+ sqliteBeginWriteOperation(pParse, 0, isTemp);
+ if( !isTemp ){
+ sqliteVdbeAddOp(v, OP_Integer, db->file_format, 0);
+ sqliteVdbeAddOp(v, OP_SetCookie, 0, 1);
+ }
+ sqliteOpenMasterTable(v, isTemp);
+ sqliteVdbeAddOp(v, OP_NewRecno, 0, 0);
+ sqliteVdbeAddOp(v, OP_Dup, 0, 0);
+ sqliteVdbeAddOp(v, OP_String, 0, 0);
+ sqliteVdbeAddOp(v, OP_PutIntKey, 0, 0);
+ }
+}
+
+/*
+** Add a new column to the table currently being constructed.
+**
+** The parser calls this routine once for each column declaration
+** in a CREATE TABLE statement. sqliteStartTable() gets called
+** first to get things going. Then this routine is called for each
+** column.
+*/
+void sqliteAddColumn(Parse *pParse, Token *pName){
+ Table *p;
+ int i;
+ char *z = 0;
+ Column *pCol;
+ if( (p = pParse->pNewTable)==0 ) return;
+ sqliteSetNString(&z, pName->z, pName->n, 0);
+ if( z==0 ) return;
+ sqliteDequote(z);
+ for(i=0; i<p->nCol; i++){
+ if( sqliteStrICmp(z, p->aCol[i].zName)==0 ){
+ sqliteErrorMsg(pParse, "duplicate column name: %s", z);
+ sqliteFree(z);
+ return;
+ }
+ }
+ if( (p->nCol & 0x7)==0 ){
+ Column *aNew;
+ aNew = sqliteRealloc( p->aCol, (p->nCol+8)*sizeof(p->aCol[0]));
+ if( aNew==0 ) return;
+ p->aCol = aNew;
+ }
+ pCol = &p->aCol[p->nCol];
+ memset(pCol, 0, sizeof(p->aCol[0]));
+ pCol->zName = z;
+ pCol->sortOrder = SQLITE_SO_NUM;
+ p->nCol++;
+}
+
+/*
+** This routine is called by the parser while in the middle of
+** parsing a CREATE TABLE statement. A "NOT NULL" constraint has
+** been seen on a column. This routine sets the notNull flag on
+** the column currently under construction.
+*/
+void sqliteAddNotNull(Parse *pParse, int onError){
+ Table *p;
+ int i;
+ if( (p = pParse->pNewTable)==0 ) return;
+ i = p->nCol-1;
+ if( i>=0 ) p->aCol[i].notNull = onError;
+}
+
+/*
+** This routine is called by the parser while in the middle of
+** parsing a CREATE TABLE statement. The pFirst token is the first
+** token in the sequence of tokens that describe the type of the
+** column currently under construction. pLast is the last token
+** in the sequence. Use this information to construct a string
+** that contains the typename of the column and store that string
+** in zType.
+*/
+void sqliteAddColumnType(Parse *pParse, Token *pFirst, Token *pLast){
+ Table *p;
+ int i, j;
+ int n;
+ char *z, **pz;
+ Column *pCol;
+ if( (p = pParse->pNewTable)==0 ) return;
+ i = p->nCol-1;
+ if( i<0 ) return;
+ pCol = &p->aCol[i];
+ pz = &pCol->zType;
+ n = pLast->n + Addr(pLast->z) - Addr(pFirst->z);
+ sqliteSetNString(pz, pFirst->z, n, 0);
+ z = *pz;
+ if( z==0 ) return;
+ for(i=j=0; z[i]; i++){
+ int c = z[i];
+ if( isspace(c) ) continue;
+ z[j++] = c;
+ }
+ z[j] = 0;
+ if( pParse->db->file_format>=4 ){
+ pCol->sortOrder = sqliteCollateType(z, n);
+ }else{
+ pCol->sortOrder = SQLITE_SO_NUM;
+ }
+}
+
+/*
+** The given token is the default value for the last column added to
+** the table currently under construction. If "minusFlag" is true, it
+** means the value token was preceded by a minus sign.
+**
+** This routine is called by the parser while in the middle of
+** parsing a CREATE TABLE statement.
+*/
+void sqliteAddDefaultValue(Parse *pParse, Token *pVal, int minusFlag){
+ Table *p;
+ int i;
+ char **pz;
+ if( (p = pParse->pNewTable)==0 ) return;
+ i = p->nCol-1;
+ if( i<0 ) return;
+ pz = &p->aCol[i].zDflt;
+ if( minusFlag ){
+ sqliteSetNString(pz, "-", 1, pVal->z, pVal->n, 0);
+ }else{
+ sqliteSetNString(pz, pVal->z, pVal->n, 0);
+ }
+ sqliteDequote(*pz);
+}
+
+/*
+** Designate the PRIMARY KEY for the table. pList is a list of names
+** of columns that form the primary key. If pList is NULL, then the
+** most recently added column of the table is the primary key.
+**
+** A table can have at most one primary key. If the table already has
+** a primary key (and this is the second primary key) then create an
+** error.
+**
+** If the PRIMARY KEY is on a single column whose datatype is INTEGER,
+** then we will try to use that column as the row id. (Exception:
+** For backwards compatibility with older databases, do not do this
+** if the file format version number is less than 1.) Set the Table.iPKey
+** field of the table under construction to be the index of the
+** INTEGER PRIMARY KEY column. Table.iPKey is set to -1 if there is
+** no INTEGER PRIMARY KEY.
+**
+** If the key is not an INTEGER PRIMARY KEY, then create a unique
+** index for the key. No index is created for INTEGER PRIMARY KEYs.
+*/
+void sqliteAddPrimaryKey(Parse *pParse, IdList *pList, int onError){
+ Table *pTab = pParse->pNewTable;
+ char *zType = 0;
+ int iCol = -1, i;
+ if( pTab==0 ) goto primary_key_exit;
+ if( pTab->hasPrimKey ){
+ sqliteErrorMsg(pParse,
+ "table \"%s\" has more than one primary key", pTab->zName);
+ goto primary_key_exit;
+ }
+ pTab->hasPrimKey = 1;
+ if( pList==0 ){
+ iCol = pTab->nCol - 1;
+ pTab->aCol[iCol].isPrimKey = 1;
+ }else{
+ for(i=0; i<pList->nId; i++){
+ for(iCol=0; iCol<pTab->nCol; iCol++){
+ if( sqliteStrICmp(pList->a[i].zName, pTab->aCol[iCol].zName)==0 ) break;
+ }
+ if( iCol<pTab->nCol ) pTab->aCol[iCol].isPrimKey = 1;
+ }
+ if( pList->nId>1 ) iCol = -1;
+ }
+ if( iCol>=0 && iCol<pTab->nCol ){
+ zType = pTab->aCol[iCol].zType;
+ }
+ if( pParse->db->file_format>=1 &&
+ zType && sqliteStrICmp(zType, "INTEGER")==0 ){
+ pTab->iPKey = iCol;
+ pTab->keyConf = onError;
+ }else{
+ sqliteCreateIndex(pParse, 0, 0, pList, onError, 0, 0);
+ pList = 0;
+ }
+
+primary_key_exit:
+ sqliteIdListDelete(pList);
+ return;
+}
+
+/*
+** Return the appropriate collating type given a type name.
+**
+** The collation type is text (SQLITE_SO_TEXT) if the type
+** name contains the substring "text", "blob", "clob" or
+** "char". Any other type name is collated as numeric
+** (SQLITE_SO_NUM).
+*/
+int sqliteCollateType(const char *zType, int nType){
+ int i;
+ for(i=0; i<nType-3; i++){
+ int c = *(zType++) | 0x60;
+ if( (c=='b' || c=='c') && sqliteStrNICmp(zType, "lob", 3)==0 ){
+ return SQLITE_SO_TEXT;
+ }
+ if( c=='c' && sqliteStrNICmp(zType, "har", 3)==0 ){
+ return SQLITE_SO_TEXT;
+ }
+ if( c=='t' && sqliteStrNICmp(zType, "ext", 3)==0 ){
+ return SQLITE_SO_TEXT;
+ }
+ }
+ return SQLITE_SO_NUM;
+}
+
+/*
+** This routine is called by the parser while in the middle of
+** parsing a CREATE TABLE statement. A "COLLATE" clause has
+** been seen on a column. This routine sets the Column.sortOrder on
+** the column currently under construction.
+*/
+void sqliteAddCollateType(Parse *pParse, int collType){
+ Table *p;
+ int i;
+ if( (p = pParse->pNewTable)==0 ) return;
+ i = p->nCol-1;
+ if( i>=0 ) p->aCol[i].sortOrder = collType;
+}
+
+/*
+** Come up with a new random value for the schema cookie. Make sure
+** the new value is different from the old.
+**
+** The schema cookie is used to determine when the schema for the
+** database changes. After each schema change, the cookie value
+** changes. When a process first reads the schema it records the
+** cookie. Thereafter, whenever it goes to access the database,
+** it checks the cookie to make sure the schema has not changed
+** since it was last read.
+**
+** This plan is not completely bullet-proof. It is possible for
+** the schema to change multiple times and for the cookie to be
+** set back to a prior value. But schema changes are infrequent
+** and the probability of hitting the same cookie value is only
+** 1 chance in 2^32. So we're safe enough.
+*/
+void sqliteChangeCookie(sqlite *db, Vdbe *v){
+ if( db->next_cookie==db->aDb[0].schema_cookie ){
+ unsigned char r;
+ sqliteRandomness(1, &r);
+ db->next_cookie = db->aDb[0].schema_cookie + r + 1;
+ db->flags |= SQLITE_InternChanges;
+ sqliteVdbeAddOp(v, OP_Integer, db->next_cookie, 0);
+ sqliteVdbeAddOp(v, OP_SetCookie, 0, 0);
+ }
+}
+
+/*
+** Measure the number of characters needed to output the given
+** identifier. The number returned includes any quotes used
+** but does not include the null terminator.
+*/
+static int identLength(const char *z){
+ int n;
+ int needQuote = 0;
+ for(n=0; *z; n++, z++){
+ if( *z=='\'' ){ n++; needQuote=1; }
+ }
+ return n + needQuote*2;
+}
+
+/*
+** Write an identifier onto the end of the given string. Add
+** quote characters as needed.
+*/
+static void identPut(char *z, int *pIdx, char *zIdent){
+ int i, j, needQuote;
+ i = *pIdx;
+ for(j=0; zIdent[j]; j++){
+ if( !isalnum(zIdent[j]) && zIdent[j]!='_' ) break;
+ }
+ needQuote = zIdent[j]!=0 || isdigit(zIdent[0])
+ || sqliteKeywordCode(zIdent, j)!=TK_ID;
+ if( needQuote ) z[i++] = '\'';
+ for(j=0; zIdent[j]; j++){
+ z[i++] = zIdent[j];
+ if( zIdent[j]=='\'' ) z[i++] = '\'';
+ }
+ if( needQuote ) z[i++] = '\'';
+ z[i] = 0;
+ *pIdx = i;
+}
+
+/*
+** Generate a CREATE TABLE statement appropriate for the given
+** table. Memory to hold the text of the statement is obtained
+** from sqliteMalloc() and must be freed by the calling function.
+*/
+static char *createTableStmt(Table *p){
+ int i, k, n;
+ char *zStmt;
+ char *zSep, *zSep2, *zEnd;
+ n = 0;
+ for(i=0; i<p->nCol; i++){
+ n += identLength(p->aCol[i].zName);
+ }
+ n += identLength(p->zName);
+ if( n<40 ){
+ zSep = "";
+ zSep2 = ",";
+ zEnd = ")";
+ }else{
+ zSep = "\n ";
+ zSep2 = ",\n ";
+ zEnd = "\n)";
+ }
+ n += 35 + 6*p->nCol;
+ zStmt = sqliteMallocRaw( n );
+ if( zStmt==0 ) return 0;
+ strcpy(zStmt, p->iDb==1 ? "CREATE TEMP TABLE " : "CREATE TABLE ");
+ k = strlen(zStmt);
+ identPut(zStmt, &k, p->zName);
+ zStmt[k++] = '(';
+ for(i=0; i<p->nCol; i++){
+ strcpy(&zStmt[k], zSep);
+ k += strlen(&zStmt[k]);
+ zSep = zSep2;
+ identPut(zStmt, &k, p->aCol[i].zName);
+ }
+ strcpy(&zStmt[k], zEnd);
+ return zStmt;
+}
+
+/*
+** This routine is called to report the final ")" that terminates
+** a CREATE TABLE statement.
+**
+** The table structure that other action routines have been building
+** is added to the internal hash tables, assuming no errors have
+** occurred.
+**
+** An entry for the table is made in the master table on disk, unless
+** this is a temporary table or db->init.busy==1. When db->init.busy==1
+** it means we are reading the sqlite_master table because we just
+** connected to the database or because the sqlite_master table has
+** recently changed, so the entry for this table already exists in
+** the sqlite_master table. We do not want to create it again.
+**
+** If the pSelect argument is not NULL, it means that this routine
+** was called to create a table generated from a
+** "CREATE TABLE ... AS SELECT ..." statement. The column names of
+** the new table will match the result set of the SELECT.
+*/
+void sqliteEndTable(Parse *pParse, Token *pEnd, Select *pSelect){
+ Table *p;
+ sqlite *db = pParse->db;
+
+ if( (pEnd==0 && pSelect==0) || pParse->nErr || sqlite_malloc_failed ) return;
+ p = pParse->pNewTable;
+ if( p==0 ) return;
+
+ /* If the table is generated from a SELECT, then construct the
+ ** list of columns and the text of the table.
+ */
+ if( pSelect ){
+ Table *pSelTab = sqliteResultSetOfSelect(pParse, 0, pSelect);
+ if( pSelTab==0 ) return;
+ assert( p->aCol==0 );
+ p->nCol = pSelTab->nCol;
+ p->aCol = pSelTab->aCol;
+ pSelTab->nCol = 0;
+ pSelTab->aCol = 0;
+ sqliteDeleteTable(0, pSelTab);
+ }
+
+  /* If db->init.busy is 1, it means we are reading the SQL off the
+ ** "sqlite_master" or "sqlite_temp_master" table on the disk.
+ ** So do not write to the disk again. Extract the root page number
+ ** for the table from the db->init.newTnum field. (The page number
+ ** should have been put there by the sqliteOpenCb routine.)
+ */
+ if( db->init.busy ){
+ p->tnum = db->init.newTnum;
+ }
+
+ /* If not initializing, then create a record for the new table
+ ** in the SQLITE_MASTER table of the database. The record number
+ ** for the new table entry should already be on the stack.
+ **
+ ** If this is a TEMPORARY table, write the entry into the auxiliary
+ ** file instead of into the main database file.
+ */
+ if( !db->init.busy ){
+ int n;
+ Vdbe *v;
+
+ v = sqliteGetVdbe(pParse);
+ if( v==0 ) return;
+ if( p->pSelect==0 ){
+ /* A regular table */
+ sqliteVdbeOp3(v, OP_CreateTable, 0, p->iDb, (char*)&p->tnum, P3_POINTER);
+ }else{
+ /* A view */
+ sqliteVdbeAddOp(v, OP_Integer, 0, 0);
+ }
+ p->tnum = 0;
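+    /* Assemble the five columns of the new SQLITE_MASTER row (type, name,
+    ** tbl_name, rootpage, sql), build a record from them, and insert it
+    ** using the record number already on the stack. */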
+ sqliteVdbeAddOp(v, OP_Pull, 1, 0);
+ sqliteVdbeOp3(v, OP_String, 0, 0, p->pSelect==0?"table":"view", P3_STATIC);
+ sqliteVdbeOp3(v, OP_String, 0, 0, p->zName, 0);
+ sqliteVdbeOp3(v, OP_String, 0, 0, p->zName, 0);
+ sqliteVdbeAddOp(v, OP_Dup, 4, 0);
+ sqliteVdbeAddOp(v, OP_String, 0, 0);
+ if( pSelect ){
+ char *z = createTableStmt(p);
+ n = z ? strlen(z) : 0;
+ sqliteVdbeChangeP3(v, -1, z, n);
+ sqliteFree(z);
+ }else{
+ assert( pEnd!=0 );
+ n = Addr(pEnd->z) - Addr(pParse->sFirstToken.z) + 1;
+ sqliteVdbeChangeP3(v, -1, pParse->sFirstToken.z, n);
+ }
+ sqliteVdbeAddOp(v, OP_MakeRecord, 5, 0);
+ sqliteVdbeAddOp(v, OP_PutIntKey, 0, 0);
+ if( !p->iDb ){
+ sqliteChangeCookie(db, v);
+ }
+ sqliteVdbeAddOp(v, OP_Close, 0, 0);
+ if( pSelect ){
+ sqliteVdbeAddOp(v, OP_Integer, p->iDb, 0);
+ sqliteVdbeAddOp(v, OP_OpenWrite, 1, 0);
+ pParse->nTab = 2;
+ sqliteSelect(pParse, pSelect, SRT_Table, 1, 0, 0, 0);
+ }
+ sqliteEndWriteOperation(pParse);
+ }
+
+ /* Add the table to the in-memory representation of the database.
+ */
+ if( pParse->explain==0 && pParse->nErr==0 ){
+ Table *pOld;
+ FKey *pFKey;
+ pOld = sqliteHashInsert(&db->aDb[p->iDb].tblHash,
+ p->zName, strlen(p->zName)+1, p);
+ if( pOld ){
+ assert( p==pOld ); /* Malloc must have failed inside HashInsert() */
+ return;
+ }
+ for(pFKey=p->pFKey; pFKey; pFKey=pFKey->pNextFrom){
+ int nTo = strlen(pFKey->zTo) + 1;
+ pFKey->pNextTo = sqliteHashFind(&db->aDb[p->iDb].aFKey, pFKey->zTo, nTo);
+ sqliteHashInsert(&db->aDb[p->iDb].aFKey, pFKey->zTo, nTo, pFKey);
+ }
+ pParse->pNewTable = 0;
+ db->nTable++;
+ db->flags |= SQLITE_InternChanges;
+ }
+}
+
+/*
+** The parser calls this routine in order to create a new VIEW
+*/
+void sqliteCreateView(
+ Parse *pParse, /* The parsing context */
+ Token *pBegin, /* The CREATE token that begins the statement */
+ Token *pName, /* The token that holds the name of the view */
+ Select *pSelect, /* A SELECT statement that will become the new view */
+ int isTemp /* TRUE for a TEMPORARY view */
+){
+ Table *p;
+ int n;
+ const char *z;
+ Token sEnd;
+ DbFixer sFix;
+
+ sqliteStartTable(pParse, pBegin, pName, isTemp, 1);
+ p = pParse->pNewTable;
+ if( p==0 || pParse->nErr ){
+ sqliteSelectDelete(pSelect);
+ return;
+ }
+ if( sqliteFixInit(&sFix, pParse, p->iDb, "view", pName)
+ && sqliteFixSelect(&sFix, pSelect)
+ ){
+ sqliteSelectDelete(pSelect);
+ return;
+ }
+
+ /* Make a copy of the entire SELECT statement that defines the view.
+ ** This will force all the Expr.token.z values to be dynamically
+ ** allocated rather than point to the input string - which means that
+ ** they will persist after the current sqlite_exec() call returns.
+ */
+ p->pSelect = sqliteSelectDup(pSelect);
+ sqliteSelectDelete(pSelect);
+ if( !pParse->db->init.busy ){
+ sqliteViewGetColumnNames(pParse, p);
+ }
+
+ /* Locate the end of the CREATE VIEW statement. Make sEnd point to
+ ** the end.
+ */
+ sEnd = pParse->sLastToken;
+ if( sEnd.z[0]!=0 && sEnd.z[0]!=';' ){
+ sEnd.z += sEnd.n;
+ }
+ sEnd.n = 0;
+ n = sEnd.z - pBegin->z;
+ z = pBegin->z;
+ while( n>0 && (z[n-1]==';' || isspace(z[n-1])) ){ n--; }
+ sEnd.z = &z[n-1];
+ sEnd.n = 1;
+
+ /* Use sqliteEndTable() to add the view to the SQLITE_MASTER table */
+ sqliteEndTable(pParse, &sEnd, 0);
+ return;
+}
+
+/*
+** The Table structure pTable is really a VIEW. Fill in the names of
+** the columns of the view in the pTable structure. Return the number
+** of errors. If an error is seen leave an error message in pParse->zErrMsg.
+*/
+int sqliteViewGetColumnNames(Parse *pParse, Table *pTable){
+ ExprList *pEList;
+ Select *pSel;
+ Table *pSelTab;
+ int nErr = 0;
+
+ assert( pTable );
+
+  /* A positive nCol means the column names for this view are
+ ** already known.
+ */
+ if( pTable->nCol>0 ) return 0;
+
+ /* A negative nCol is a special marker meaning that we are currently
+ ** trying to compute the column names. If we enter this routine with
+ ** a negative nCol, it means two or more views form a loop, like this:
+ **
+ ** CREATE VIEW one AS SELECT * FROM two;
+ ** CREATE VIEW two AS SELECT * FROM one;
+ **
+  ** Actually, this error is caught earlier, so the following test
+  ** should never be true. But we will leave it in place just to be safe.
+ */
+ if( pTable->nCol<0 ){
+ sqliteErrorMsg(pParse, "view %s is circularly defined", pTable->zName);
+ return 1;
+ }
+
+  /* If we get this far, it means we need to compute the column names.
+ */
+ assert( pTable->pSelect ); /* If nCol==0, then pTable must be a VIEW */
+ pSel = pTable->pSelect;
+
+ /* Note that the call to sqliteResultSetOfSelect() will expand any
+ ** "*" elements in this list. But we will need to restore the list
+ ** back to its original configuration afterwards, so we save a copy of
+ ** the original in pEList.
+ */
+ pEList = pSel->pEList;
+ pSel->pEList = sqliteExprListDup(pEList);
+ if( pSel->pEList==0 ){
+ pSel->pEList = pEList;
+ return 1; /* Malloc failed */
+ }
+ pTable->nCol = -1;
+ pSelTab = sqliteResultSetOfSelect(pParse, 0, pSel);
+ if( pSelTab ){
+ assert( pTable->aCol==0 );
+ pTable->nCol = pSelTab->nCol;
+ pTable->aCol = pSelTab->aCol;
+ pSelTab->nCol = 0;
+ pSelTab->aCol = 0;
+ sqliteDeleteTable(0, pSelTab);
+ DbSetProperty(pParse->db, pTable->iDb, DB_UnresetViews);
+ }else{
+ pTable->nCol = 0;
+ nErr++;
+ }
+ sqliteSelectUnbind(pSel);
+ sqliteExprListDelete(pSel->pEList);
+ pSel->pEList = pEList;
+ return nErr;
+}
+
+/*
+** Clear the column names from the VIEW pTable.
+**
+** This routine is called whenever any other table or view is modified.
+** The view passed into this routine might depend directly or indirectly
+** on the modified or deleted table, so we need to clear the old column
+** names so that they will be recomputed.
+*/
+static void sqliteViewResetColumnNames(Table *pTable){
+ int i;
+ Column *pCol;
+ assert( pTable!=0 && pTable->pSelect!=0 );
+ for(i=0, pCol=pTable->aCol; i<pTable->nCol; i++, pCol++){
+ sqliteFree(pCol->zName);
+ sqliteFree(pCol->zDflt);
+ sqliteFree(pCol->zType);
+ }
+ sqliteFree(pTable->aCol);
+ pTable->aCol = 0;
+ pTable->nCol = 0;
+}
+
+/*
+** Clear the column names from every VIEW in database idx.
+*/
+static void sqliteViewResetAll(sqlite *db, int idx){
+ HashElem *i;
+ if( !DbHasProperty(db, idx, DB_UnresetViews) ) return;
+ for(i=sqliteHashFirst(&db->aDb[idx].tblHash); i; i=sqliteHashNext(i)){
+ Table *pTab = sqliteHashData(i);
+ if( pTab->pSelect ){
+ sqliteViewResetColumnNames(pTab);
+ }
+ }
+ DbClearProperty(db, idx, DB_UnresetViews);
+}
+
+/*
+** Given a token, look up a table with that name. If the table is not
+** found, leave an error message for the parser and return NULL.
+*/
+Table *sqliteTableFromToken(Parse *pParse, Token *pTok){
+ char *zName;
+ Table *pTab;
+ zName = sqliteTableNameFromToken(pTok);
+ if( zName==0 ) return 0;
+ pTab = sqliteFindTable(pParse->db, zName, 0);
+ sqliteFree(zName);
+ if( pTab==0 ){
+ sqliteErrorMsg(pParse, "no such table: %T", pTok);
+ }
+ return pTab;
+}
+
+/*
+** This routine is called to do the work of a DROP TABLE statement.
+** pName is the name of the table to be dropped.
+*/
+void sqliteDropTable(Parse *pParse, Token *pName, int isView){
+ Table *pTable;
+ Vdbe *v;
+ int base;
+ sqlite *db = pParse->db;
+ int iDb;
+
+ if( pParse->nErr || sqlite_malloc_failed ) return;
+ pTable = sqliteTableFromToken(pParse, pName);
+ if( pTable==0 ) return;
+ iDb = pTable->iDb;
+ assert( iDb>=0 && iDb<db->nDb );
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ {
+ int code;
+ const char *zTab = SCHEMA_TABLE(pTable->iDb);
+ const char *zDb = db->aDb[pTable->iDb].zName;
+ if( sqliteAuthCheck(pParse, SQLITE_DELETE, zTab, 0, zDb)){
+ return;
+ }
+ if( isView ){
+ if( iDb==1 ){
+ code = SQLITE_DROP_TEMP_VIEW;
+ }else{
+ code = SQLITE_DROP_VIEW;
+ }
+ }else{
+ if( iDb==1 ){
+ code = SQLITE_DROP_TEMP_TABLE;
+ }else{
+ code = SQLITE_DROP_TABLE;
+ }
+ }
+ if( sqliteAuthCheck(pParse, code, pTable->zName, 0, zDb) ){
+ return;
+ }
+ if( sqliteAuthCheck(pParse, SQLITE_DELETE, pTable->zName, 0, zDb) ){
+ return;
+ }
+ }
+#endif
+ if( pTable->readOnly ){
+ sqliteErrorMsg(pParse, "table %s may not be dropped", pTable->zName);
+ pParse->nErr++;
+ return;
+ }
+ if( isView && pTable->pSelect==0 ){
+ sqliteErrorMsg(pParse, "use DROP TABLE to delete table %s", pTable->zName);
+ return;
+ }
+ if( !isView && pTable->pSelect ){
+ sqliteErrorMsg(pParse, "use DROP VIEW to delete view %s", pTable->zName);
+ return;
+ }
+
+ /* Generate code to remove the table from the master table
+ ** on disk.
+ */
+ v = sqliteGetVdbe(pParse);
+ if( v ){
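+    /* The opcode list below scans the master table and deletes every row
+    ** whose tbl_name column (column 2) matches the table name that gets
+    ** filled into the OP_String at offset 1. */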
+ static VdbeOpList dropTable[] = {
+ { OP_Rewind, 0, ADDR(8), 0},
+ { OP_String, 0, 0, 0}, /* 1 */
+ { OP_MemStore, 1, 1, 0},
+ { OP_MemLoad, 1, 0, 0}, /* 3 */
+ { OP_Column, 0, 2, 0},
+ { OP_Ne, 0, ADDR(7), 0},
+ { OP_Delete, 0, 0, 0},
+ { OP_Next, 0, ADDR(3), 0}, /* 7 */
+ };
+ Index *pIdx;
+ Trigger *pTrigger;
+ sqliteBeginWriteOperation(pParse, 0, pTable->iDb);
+
+ /* Drop all triggers associated with the table being dropped */
+ pTrigger = pTable->pTrigger;
+ while( pTrigger ){
+ assert( pTrigger->iDb==pTable->iDb || pTrigger->iDb==1 );
+ sqliteDropTriggerPtr(pParse, pTrigger, 1);
+ if( pParse->explain ){
+ pTrigger = pTrigger->pNext;
+ }else{
+ pTrigger = pTable->pTrigger;
+ }
+ }
+
+ /* Drop all SQLITE_MASTER entries that refer to the table */
+ sqliteOpenMasterTable(v, pTable->iDb);
+ base = sqliteVdbeAddOpList(v, ArraySize(dropTable), dropTable);
+ sqliteVdbeChangeP3(v, base+1, pTable->zName, 0);
+
+ /* Drop all SQLITE_TEMP_MASTER entries that refer to the table */
+ if( pTable->iDb!=1 ){
+ sqliteOpenMasterTable(v, 1);
+ base = sqliteVdbeAddOpList(v, ArraySize(dropTable), dropTable);
+ sqliteVdbeChangeP3(v, base+1, pTable->zName, 0);
+ }
+
+ if( pTable->iDb==0 ){
+ sqliteChangeCookie(db, v);
+ }
+ sqliteVdbeAddOp(v, OP_Close, 0, 0);
+ if( !isView ){
+ sqliteVdbeAddOp(v, OP_Destroy, pTable->tnum, pTable->iDb);
+ for(pIdx=pTable->pIndex; pIdx; pIdx=pIdx->pNext){
+ sqliteVdbeAddOp(v, OP_Destroy, pIdx->tnum, pIdx->iDb);
+ }
+ }
+ sqliteEndWriteOperation(pParse);
+ }
+
+ /* Delete the in-memory description of the table.
+ **
+ ** Exception: if the SQL statement began with the EXPLAIN keyword,
+ ** then no changes should be made.
+ */
+ if( !pParse->explain ){
+ sqliteUnlinkAndDeleteTable(db, pTable);
+ db->flags |= SQLITE_InternChanges;
+ }
+ sqliteViewResetAll(db, iDb);
+}
+
+/*
+** This routine constructs a P3 string suitable for an OP_MakeIdxKey
+** opcode and adds that P3 string to the most recently inserted instruction
+** in the virtual machine. The P3 string consists of a single character
+** for each column in the index pIdx of table pTab. If the column uses
+** a numeric sort order, then the P3 string character corresponding to
+** that column is 'n'. If the column uses a text sort order, then the
+** P3 string character is 't'. See the OP_MakeIdxKey opcode documentation for
+** additional information. See also the sqliteAddKeyType() routine.
+*/
+void sqliteAddIdxKeyType(Vdbe *v, Index *pIdx){
+ char *zType;
+ Table *pTab;
+ int i, n;
+ assert( pIdx!=0 && pIdx->pTable!=0 );
+ pTab = pIdx->pTable;
+ n = pIdx->nColumn;
+ zType = sqliteMallocRaw( n+1 );
+ if( zType==0 ) return;
+ for(i=0; i<n; i++){
+ int iCol = pIdx->aiColumn[i];
+ assert( iCol>=0 && iCol<pTab->nCol );
+ if( (pTab->aCol[iCol].sortOrder & SQLITE_SO_TYPEMASK)==SQLITE_SO_TEXT ){
+ zType[i] = 't';
+ }else{
+ zType[i] = 'n';
+ }
+ }
+ zType[n] = 0;
+ sqliteVdbeChangeP3(v, -1, zType, n);
+ sqliteFree(zType);
+}
+
+/*
+** This routine is called to create a new foreign key on the table
+** currently under construction. pFromCol determines which columns
+** in the current table point to the foreign key. If pFromCol==0 then
+** connect the key to the last column inserted. pTo is the name of
+** the table referred to. pToCol is a list of columns in the other
+** pTo table that the foreign key points to. flags contains all
+** information about the conflict resolution algorithms specified
+** in the ON DELETE, ON UPDATE and ON INSERT clauses.
+**
+** An FKey structure is created and added to the table currently
+** under construction in the pParse->pNewTable field. The new FKey
+** is not linked into db->aFKey at this point - that does not happen
+** until sqliteEndTable().
+**
+** The foreign key is set for IMMEDIATE processing. A subsequent call
+** to sqliteDeferForeignKey() might change this to DEFERRED.
+*/
+void sqliteCreateForeignKey(
+ Parse *pParse, /* Parsing context */
+ IdList *pFromCol, /* Columns in this table that point to other table */
+ Token *pTo, /* Name of the other table */
+ IdList *pToCol, /* Columns in the other table */
+ int flags /* Conflict resolution algorithms. */
+){
+ Table *p = pParse->pNewTable;
+ int nByte;
+ int i;
+ int nCol;
+ char *z;
+ FKey *pFKey = 0;
+
+ assert( pTo!=0 );
+ if( p==0 || pParse->nErr ) goto fk_end;
+ if( pFromCol==0 ){
+ int iCol = p->nCol-1;
+ if( iCol<0 ) goto fk_end;
+ if( pToCol && pToCol->nId!=1 ){
+ sqliteErrorMsg(pParse, "foreign key on %s"
+ " should reference only one column of table %T",
+ p->aCol[iCol].zName, pTo);
+ goto fk_end;
+ }
+ nCol = 1;
+ }else if( pToCol && pToCol->nId!=pFromCol->nId ){
+ sqliteErrorMsg(pParse,
+ "number of columns in foreign key does not match the number of "
+ "columns in the referenced table");
+ goto fk_end;
+ }else{
+ nCol = pFromCol->nId;
+ }
+ nByte = sizeof(*pFKey) + nCol*sizeof(pFKey->aCol[0]) + pTo->n + 1;
+ if( pToCol ){
+ for(i=0; i<pToCol->nId; i++){
+ nByte += strlen(pToCol->a[i].zName) + 1;
+ }
+ }
+ pFKey = sqliteMalloc( nByte );
+ if( pFKey==0 ) goto fk_end;
+ pFKey->pFrom = p;
+ pFKey->pNextFrom = p->pFKey;
+ z = (char*)&pFKey[1];
+ pFKey->aCol = (struct sColMap*)z;
+ z += sizeof(struct sColMap)*nCol;
+ pFKey->zTo = z;
+ memcpy(z, pTo->z, pTo->n);
+ z[pTo->n] = 0;
+ z += pTo->n+1;
+ pFKey->pNextTo = 0;
+ pFKey->nCol = nCol;
+ if( pFromCol==0 ){
+ pFKey->aCol[0].iFrom = p->nCol-1;
+ }else{
+ for(i=0; i<nCol; i++){
+ int j;
+ for(j=0; j<p->nCol; j++){
+ if( sqliteStrICmp(p->aCol[j].zName, pFromCol->a[i].zName)==0 ){
+ pFKey->aCol[i].iFrom = j;
+ break;
+ }
+ }
+ if( j>=p->nCol ){
+ sqliteErrorMsg(pParse,
+ "unknown column \"%s\" in foreign key definition",
+ pFromCol->a[i].zName);
+ goto fk_end;
+ }
+ }
+ }
+ if( pToCol ){
+ for(i=0; i<nCol; i++){
+ int n = strlen(pToCol->a[i].zName);
+ pFKey->aCol[i].zCol = z;
+ memcpy(z, pToCol->a[i].zName, n);
+ z[n] = 0;
+ z += n+1;
+ }
+ }
+ pFKey->isDeferred = 0;
+ pFKey->deleteConf = flags & 0xff;
+ pFKey->updateConf = (flags >> 8 ) & 0xff;
+ pFKey->insertConf = (flags >> 16 ) & 0xff;
+
+ /* Link the foreign key to the table as the last step.
+ */
+ p->pFKey = pFKey;
+ pFKey = 0;
+
+fk_end:
+ sqliteFree(pFKey);
+ sqliteIdListDelete(pFromCol);
+ sqliteIdListDelete(pToCol);
+}
+
+/*
+** This routine is called when an INITIALLY IMMEDIATE or INITIALLY DEFERRED
+** clause is seen as part of a foreign key definition. The isDeferred
+** parameter is 1 for INITIALLY DEFERRED and 0 for INITIALLY IMMEDIATE.
+** The behavior of the most recently created foreign key is adjusted
+** accordingly.
+*/
+void sqliteDeferForeignKey(Parse *pParse, int isDeferred){
+ Table *pTab;
+ FKey *pFKey;
+ if( (pTab = pParse->pNewTable)==0 || (pFKey = pTab->pFKey)==0 ) return;
+ pFKey->isDeferred = isDeferred;
+}
+
+/*
+** Create a new index for an SQL table. pIndex is the name of the index
+** and pTable is the name of the table that is to be indexed. Both will
+** be NULL for a primary key or an index that is created to satisfy a
+** UNIQUE constraint. If pTable and pIndex are NULL, use pParse->pNewTable
+** as the table to be indexed. pParse->pNewTable is a table that is
+** currently being constructed by a CREATE TABLE statement.
+**
+** pList is a list of columns to be indexed. pList will be NULL if this
+** is a primary key or unique-constraint on the most recent column added
+** to the table currently under construction.
+*/
+void sqliteCreateIndex(
+ Parse *pParse, /* All information about this parse */
+ Token *pName, /* Name of the index. May be NULL */
+ SrcList *pTable, /* Name of the table to index. Use pParse->pNewTable if 0 */
+ IdList *pList, /* A list of columns to be indexed */
+ int onError, /* OE_Abort, OE_Ignore, OE_Replace, or OE_None */
+  Token *pStart,   /* The CREATE token that begins this CREATE INDEX statement */
+ Token *pEnd /* The ")" that closes the CREATE INDEX statement */
+){
+ Table *pTab; /* Table to be indexed */
+ Index *pIndex; /* The index to be created */
+ char *zName = 0;
+ int i, j;
+ Token nullId; /* Fake token for an empty ID list */
+ DbFixer sFix; /* For assigning database names to pTable */
+ int isTemp; /* True for a temporary index */
+ sqlite *db = pParse->db;
+
+ if( pParse->nErr || sqlite_malloc_failed ) goto exit_create_index;
+ if( db->init.busy
+ && sqliteFixInit(&sFix, pParse, db->init.iDb, "index", pName)
+ && sqliteFixSrcList(&sFix, pTable)
+ ){
+ goto exit_create_index;
+ }
+
+ /*
+ ** Find the table that is to be indexed. Return early if not found.
+ */
+ if( pTable!=0 ){
+ assert( pName!=0 );
+ assert( pTable->nSrc==1 );
+ pTab = sqliteSrcListLookup(pParse, pTable);
+ }else{
+ assert( pName==0 );
+ pTab = pParse->pNewTable;
+ }
+ if( pTab==0 || pParse->nErr ) goto exit_create_index;
+ if( pTab->readOnly ){
+ sqliteErrorMsg(pParse, "table %s may not be indexed", pTab->zName);
+ goto exit_create_index;
+ }
+ if( pTab->iDb>=2 && db->init.busy==0 ){
+ sqliteErrorMsg(pParse, "table %s may not have indices added", pTab->zName);
+ goto exit_create_index;
+ }
+ if( pTab->pSelect ){
+ sqliteErrorMsg(pParse, "views may not be indexed");
+ goto exit_create_index;
+ }
+ isTemp = pTab->iDb==1;
+
+ /*
+ ** Find the name of the index. Make sure there is not already another
+ ** index or table with the same name.
+ **
+ ** Exception: If we are reading the names of permanent indices from the
+ ** sqlite_master table (because some other process changed the schema) and
+ ** one of the index names collides with the name of a temporary table or
+ ** index, then we will continue to process this index.
+ **
+ ** If pName==0 it means that we are
+ ** dealing with a primary key or UNIQUE constraint. We have to invent our
+ ** own name.
+ */
+ if( pName && !db->init.busy ){
+ Index *pISameName; /* Another index with the same name */
+ Table *pTSameName; /* A table with same name as the index */
+ zName = sqliteTableNameFromToken(pName);
+ if( zName==0 ) goto exit_create_index;
+ if( (pISameName = sqliteFindIndex(db, zName, 0))!=0 ){
+ sqliteErrorMsg(pParse, "index %s already exists", zName);
+ goto exit_create_index;
+ }
+ if( (pTSameName = sqliteFindTable(db, zName, 0))!=0 ){
+ sqliteErrorMsg(pParse, "there is already a table named %s", zName);
+ goto exit_create_index;
+ }
+ }else if( pName==0 ){
+ char zBuf[30];
+ int n;
+ Index *pLoop;
+ for(pLoop=pTab->pIndex, n=1; pLoop; pLoop=pLoop->pNext, n++){}
+ sprintf(zBuf,"%d)",n);
+ zName = 0;
+ sqliteSetString(&zName, "(", pTab->zName, " autoindex ", zBuf, (char*)0);
+ if( zName==0 ) goto exit_create_index;
+ }else{
+ zName = sqliteStrNDup(pName->z, pName->n);
+ }
+
+ /* Check for authorization to create an index.
+ */
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ {
+ const char *zDb = db->aDb[pTab->iDb].zName;
+
+ assert( pTab->iDb==db->init.iDb || isTemp );
+ if( sqliteAuthCheck(pParse, SQLITE_INSERT, SCHEMA_TABLE(isTemp), 0, zDb) ){
+ goto exit_create_index;
+ }
+ i = SQLITE_CREATE_INDEX;
+ if( isTemp ) i = SQLITE_CREATE_TEMP_INDEX;
+ if( sqliteAuthCheck(pParse, i, zName, pTab->zName, zDb) ){
+ goto exit_create_index;
+ }
+ }
+#endif
+
+ /* If pList==0, it means this routine was called to make a primary
+ ** key out of the last column added to the table under construction.
+ ** So create a fake list to simulate this.
+ */
+ if( pList==0 ){
+ nullId.z = pTab->aCol[pTab->nCol-1].zName;
+ nullId.n = strlen(nullId.z);
+ pList = sqliteIdListAppend(0, &nullId);
+ if( pList==0 ) goto exit_create_index;
+ }
+
+ /*
+ ** Allocate the index structure.
+ */
+ pIndex = sqliteMalloc( sizeof(Index) + strlen(zName) + 1 +
+ sizeof(int)*pList->nId );
+ if( pIndex==0 ) goto exit_create_index;
+ pIndex->aiColumn = (int*)&pIndex[1];
+ pIndex->zName = (char*)&pIndex->aiColumn[pList->nId];
+ strcpy(pIndex->zName, zName);
+ pIndex->pTable = pTab;
+ pIndex->nColumn = pList->nId;
+ pIndex->onError = onError;
+ pIndex->autoIndex = pName==0;
+ pIndex->iDb = isTemp ? 1 : db->init.iDb;
+
+ /* Scan the names of the columns of the table to be indexed and
+ ** load the column indices into the Index structure. Report an error
+ ** if any column is not found.
+ */
+ for(i=0; i<pList->nId; i++){
+ for(j=0; j<pTab->nCol; j++){
+ if( sqliteStrICmp(pList->a[i].zName, pTab->aCol[j].zName)==0 ) break;
+ }
+ if( j>=pTab->nCol ){
+ sqliteErrorMsg(pParse, "table %s has no column named %s",
+ pTab->zName, pList->a[i].zName);
+ sqliteFree(pIndex);
+ goto exit_create_index;
+ }
+ pIndex->aiColumn[i] = j;
+ }
+
+ /* Link the new Index structure to its table and to the other
+ ** in-memory database structures.
+ */
+ if( !pParse->explain ){
+ Index *p;
+ p = sqliteHashInsert(&db->aDb[pIndex->iDb].idxHash,
+ pIndex->zName, strlen(pIndex->zName)+1, pIndex);
+ if( p ){
+ assert( p==pIndex ); /* Malloc must have failed */
+ sqliteFree(pIndex);
+ goto exit_create_index;
+ }
+ db->flags |= SQLITE_InternChanges;
+ }
+
+ /* When adding an index to the list of indices for a table, make
+ ** sure all indices labeled OE_Replace come after all those labeled
+ ** OE_Ignore. This is necessary for the correct operation of UPDATE
+ ** and INSERT.
+ */
+ if( onError!=OE_Replace || pTab->pIndex==0
+ || pTab->pIndex->onError==OE_Replace){
+ pIndex->pNext = pTab->pIndex;
+ pTab->pIndex = pIndex;
+ }else{
+ Index *pOther = pTab->pIndex;
+ while( pOther->pNext && pOther->pNext->onError!=OE_Replace ){
+ pOther = pOther->pNext;
+ }
+ pIndex->pNext = pOther->pNext;
+ pOther->pNext = pIndex;
+ }
+
+  /* If db->init.busy is 1, it means we are reading the SQL off the
+  ** "sqlite_master" table on the disk, so do not write to the disk
+  ** again. Extract the index's root page number from db->init.newTnum.
+ */
+ if( db->init.busy && pTable!=0 ){
+ pIndex->tnum = db->init.newTnum;
+ }
+
+  /* If db->init.busy is 0 then create the index on disk. This
+ ** involves writing the index into the master table and filling in the
+ ** index with the current table contents.
+ **
+  ** db->init.busy is 0 when the user first enters a CREATE INDEX
+  ** command, and 1 when a database is opened and
+ ** CREATE INDEX statements are read out of the master table. In
+ ** the latter case the index already exists on disk, which is why
+ ** we don't want to recreate it.
+ **
+ ** If pTable==0 it means this index is generated as a primary key
+ ** or UNIQUE constraint of a CREATE TABLE statement. Since the table
+ ** has just been created, it contains no data and the index initialization
+ ** step can be skipped.
+ */
+ else if( db->init.busy==0 ){
+ int n;
+ Vdbe *v;
+ int lbl1, lbl2;
+ int i;
+ int addr;
+
+ v = sqliteGetVdbe(pParse);
+ if( v==0 ) goto exit_create_index;
+ if( pTable!=0 ){
+ sqliteBeginWriteOperation(pParse, 0, isTemp);
+ sqliteOpenMasterTable(v, isTemp);
+ }
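+    /* Write the index's row into the master table: type "index", the
+    ** index name, the table name, the root page created by OP_CreateIndex,
+    ** and (when available) the text of the CREATE INDEX statement. */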
+ sqliteVdbeAddOp(v, OP_NewRecno, 0, 0);
+ sqliteVdbeOp3(v, OP_String, 0, 0, "index", P3_STATIC);
+ sqliteVdbeOp3(v, OP_String, 0, 0, pIndex->zName, 0);
+ sqliteVdbeOp3(v, OP_String, 0, 0, pTab->zName, 0);
+ sqliteVdbeOp3(v, OP_CreateIndex, 0, isTemp,(char*)&pIndex->tnum,P3_POINTER);
+ pIndex->tnum = 0;
+ if( pTable ){
+ sqliteVdbeCode(v,
+ OP_Dup, 0, 0,
+ OP_Integer, isTemp, 0,
+ OP_OpenWrite, 1, 0,
+ 0);
+ }
+ addr = sqliteVdbeAddOp(v, OP_String, 0, 0);
+ if( pStart && pEnd ){
+ n = Addr(pEnd->z) - Addr(pStart->z) + 1;
+ sqliteVdbeChangeP3(v, addr, pStart->z, n);
+ }
+ sqliteVdbeAddOp(v, OP_MakeRecord, 5, 0);
+ sqliteVdbeAddOp(v, OP_PutIntKey, 0, 0);
+ if( pTable ){
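+      /* Populate the new index: scan every row of the table, build an
+      ** index key from the indexed columns and the record number, and
+      ** insert that key into the index. */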
+ sqliteVdbeAddOp(v, OP_Integer, pTab->iDb, 0);
+ sqliteVdbeOp3(v, OP_OpenRead, 2, pTab->tnum, pTab->zName, 0);
+ lbl2 = sqliteVdbeMakeLabel(v);
+ sqliteVdbeAddOp(v, OP_Rewind, 2, lbl2);
+ lbl1 = sqliteVdbeAddOp(v, OP_Recno, 2, 0);
+ for(i=0; i<pIndex->nColumn; i++){
+ int iCol = pIndex->aiColumn[i];
+ if( pTab->iPKey==iCol ){
+ sqliteVdbeAddOp(v, OP_Dup, i, 0);
+ }else{
+ sqliteVdbeAddOp(v, OP_Column, 2, iCol);
+ }
+ }
+ sqliteVdbeAddOp(v, OP_MakeIdxKey, pIndex->nColumn, 0);
+ if( db->file_format>=4 ) sqliteAddIdxKeyType(v, pIndex);
+ sqliteVdbeOp3(v, OP_IdxPut, 1, pIndex->onError!=OE_None,
+ "indexed columns are not unique", P3_STATIC);
+ sqliteVdbeAddOp(v, OP_Next, 2, lbl1);
+ sqliteVdbeResolveLabel(v, lbl2);
+ sqliteVdbeAddOp(v, OP_Close, 2, 0);
+ sqliteVdbeAddOp(v, OP_Close, 1, 0);
+ }
+ if( pTable!=0 ){
+ if( !isTemp ){
+ sqliteChangeCookie(db, v);
+ }
+ sqliteVdbeAddOp(v, OP_Close, 0, 0);
+ sqliteEndWriteOperation(pParse);
+ }
+ }
+
+ /* Clean up before exiting */
+exit_create_index:
+ sqliteIdListDelete(pList);
+ sqliteSrcListDelete(pTable);
+ sqliteFree(zName);
+ return;
+}
+
+/*
+** This routine drops an existing named index. It implements the
+** DROP INDEX statement.
+*/
+void sqliteDropIndex(Parse *pParse, SrcList *pName){
+ Index *pIndex;
+ Vdbe *v;
+ sqlite *db = pParse->db;
+
+ if( pParse->nErr || sqlite_malloc_failed ) return;
+ assert( pName->nSrc==1 );
+ pIndex = sqliteFindIndex(db, pName->a[0].zName, pName->a[0].zDatabase);
+ if( pIndex==0 ){
+ sqliteErrorMsg(pParse, "no such index: %S", pName, 0);
+ goto exit_drop_index;
+ }
+ if( pIndex->autoIndex ){
+ sqliteErrorMsg(pParse, "index associated with UNIQUE "
+ "or PRIMARY KEY constraint cannot be dropped", 0);
+ goto exit_drop_index;
+ }
+ if( pIndex->iDb>1 ){
+ sqliteErrorMsg(pParse, "cannot alter schema of attached "
+ "databases", 0);
+ goto exit_drop_index;
+ }
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ {
+ int code = SQLITE_DROP_INDEX;
+ Table *pTab = pIndex->pTable;
+ const char *zDb = db->aDb[pIndex->iDb].zName;
+ const char *zTab = SCHEMA_TABLE(pIndex->iDb);
+ if( sqliteAuthCheck(pParse, SQLITE_DELETE, zTab, 0, zDb) ){
+ goto exit_drop_index;
+ }
+ if( pIndex->iDb ) code = SQLITE_DROP_TEMP_INDEX;
+ if( sqliteAuthCheck(pParse, code, pIndex->zName, pTab->zName, zDb) ){
+ goto exit_drop_index;
+ }
+ }
+#endif
+
+  /* Generate code to remove the index entry from the master table */
+ v = sqliteGetVdbe(pParse);
+ if( v ){
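+    /* The opcode list below scans the master table for the row whose name
+    ** column (column 1) matches the index name filled into the OP_String
+    ** at offset 1, and deletes that row. */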
+ static VdbeOpList dropIndex[] = {
+ { OP_Rewind, 0, ADDR(9), 0},
+ { OP_String, 0, 0, 0}, /* 1 */
+ { OP_MemStore, 1, 1, 0},
+ { OP_MemLoad, 1, 0, 0}, /* 3 */
+ { OP_Column, 0, 1, 0},
+ { OP_Eq, 0, ADDR(8), 0},
+ { OP_Next, 0, ADDR(3), 0},
+ { OP_Goto, 0, ADDR(9), 0},
+ { OP_Delete, 0, 0, 0}, /* 8 */
+ };
+ int base;
+
+ sqliteBeginWriteOperation(pParse, 0, pIndex->iDb);
+ sqliteOpenMasterTable(v, pIndex->iDb);
+ base = sqliteVdbeAddOpList(v, ArraySize(dropIndex), dropIndex);
+ sqliteVdbeChangeP3(v, base+1, pIndex->zName, 0);
+ if( pIndex->iDb==0 ){
+ sqliteChangeCookie(db, v);
+ }
+ sqliteVdbeAddOp(v, OP_Close, 0, 0);
+ sqliteVdbeAddOp(v, OP_Destroy, pIndex->tnum, pIndex->iDb);
+ sqliteEndWriteOperation(pParse);
+ }
+
+ /* Delete the in-memory description of this index.
+ */
+ if( !pParse->explain ){
+ sqliteUnlinkAndDeleteIndex(db, pIndex);
+ db->flags |= SQLITE_InternChanges;
+ }
+
+exit_drop_index:
+ sqliteSrcListDelete(pName);
+}
+
+/*
+** Append a new element to the given IdList. Create a new IdList if
+** need be.
+**
+** A new IdList is returned, or NULL if malloc() fails.
+*/
+IdList *sqliteIdListAppend(IdList *pList, Token *pToken){
+ if( pList==0 ){
+ pList = sqliteMalloc( sizeof(IdList) );
+ if( pList==0 ) return 0;
+ pList->nAlloc = 0;
+ }
+ if( pList->nId>=pList->nAlloc ){
+ struct IdList_item *a;
+ pList->nAlloc = pList->nAlloc*2 + 5;
+ a = sqliteRealloc(pList->a, pList->nAlloc*sizeof(pList->a[0]) );
+ if( a==0 ){
+ sqliteIdListDelete(pList);
+ return 0;
+ }
+ pList->a = a;
+ }
+ memset(&pList->a[pList->nId], 0, sizeof(pList->a[0]));
+ if( pToken ){
+ char **pz = &pList->a[pList->nId].zName;
+ sqliteSetNString(pz, pToken->z, pToken->n, 0);
+ if( *pz==0 ){
+ sqliteIdListDelete(pList);
+ return 0;
+ }else{
+ sqliteDequote(*pz);
+ }
+ }
+ pList->nId++;
+ return pList;
+}
+
+/*
+** Append a new table name to the given SrcList. Create a new SrcList if
+** need be. A new entry is created in the SrcList even if pTable is NULL.
+**
+** A new SrcList is returned, or NULL if malloc() fails.
+**
+** If pDatabase is not null, it means that the table has an optional
+** database name prefix. Like this: "database.table". The pDatabase
+** points to the table name and the pTable points to the database name.
+** The SrcList.a[].zName field is filled with the table name which might
+** come from pTable (if pDatabase is NULL) or from pDatabase.
+** SrcList.a[].zDatabase is filled with the database name from pTable,
+** or with NULL if no database is specified.
+**
+** In other words, if called like this:
+**
+** sqliteSrcListAppend(A,B,0);
+**
+** Then B is a table name and the database name is unspecified. If called
+** like this:
+**
+** sqliteSrcListAppend(A,B,C);
+**
+** Then C is the table name and B is the database name.
+*/
+SrcList *sqliteSrcListAppend(SrcList *pList, Token *pTable, Token *pDatabase){
+ if( pList==0 ){
+ pList = sqliteMalloc( sizeof(SrcList) );
+ if( pList==0 ) return 0;
+ pList->nAlloc = 1;
+ }
+ if( pList->nSrc>=pList->nAlloc ){
+ SrcList *pNew;
+ pList->nAlloc *= 2;
+ pNew = sqliteRealloc(pList,
+ sizeof(*pList) + (pList->nAlloc-1)*sizeof(pList->a[0]) );
+ if( pNew==0 ){
+ sqliteSrcListDelete(pList);
+ return 0;
+ }
+ pList = pNew;
+ }
+ memset(&pList->a[pList->nSrc], 0, sizeof(pList->a[0]));
+ if( pDatabase && pDatabase->z==0 ){
+ pDatabase = 0;
+ }
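+  /* As described above, when both tokens are supplied the caller passes
+  ** them in reverse order, so swap them here. */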
+ if( pDatabase && pTable ){
+ Token *pTemp = pDatabase;
+ pDatabase = pTable;
+ pTable = pTemp;
+ }
+ if( pTable ){
+ char **pz = &pList->a[pList->nSrc].zName;
+ sqliteSetNString(pz, pTable->z, pTable->n, 0);
+ if( *pz==0 ){
+ sqliteSrcListDelete(pList);
+ return 0;
+ }else{
+ sqliteDequote(*pz);
+ }
+ }
+ if( pDatabase ){
+ char **pz = &pList->a[pList->nSrc].zDatabase;
+ sqliteSetNString(pz, pDatabase->z, pDatabase->n, 0);
+ if( *pz==0 ){
+ sqliteSrcListDelete(pList);
+ return 0;
+ }else{
+ sqliteDequote(*pz);
+ }
+ }
+ pList->a[pList->nSrc].iCursor = -1;
+ pList->nSrc++;
+ return pList;
+}
+
+/*
+** Assign cursors to all tables in a SrcList
+*/
+void sqliteSrcListAssignCursors(Parse *pParse, SrcList *pList){
+ int i;
+ for(i=0; i<pList->nSrc; i++){
+ if( pList->a[i].iCursor<0 ){
+ pList->a[i].iCursor = pParse->nTab++;
+ }
+ }
+}
+
+/*
+** Add an alias to the last identifier on the given identifier list.
+*/
+void sqliteSrcListAddAlias(SrcList *pList, Token *pToken){
+ if( pList && pList->nSrc>0 ){
+ int i = pList->nSrc - 1;
+ sqliteSetNString(&pList->a[i].zAlias, pToken->z, pToken->n, 0);
+ sqliteDequote(pList->a[i].zAlias);
+ }
+}
+
+/*
+** Delete an IdList.
+*/
+void sqliteIdListDelete(IdList *pList){
+ int i;
+ if( pList==0 ) return;
+ for(i=0; i<pList->nId; i++){
+ sqliteFree(pList->a[i].zName);
+ }
+ sqliteFree(pList->a);
+ sqliteFree(pList);
+}
+
+/*
+** Return the index in pList of the identifier named zName. Return -1
+** if not found.
+*/
+int sqliteIdListIndex(IdList *pList, const char *zName){
+ int i;
+ if( pList==0 ) return -1;
+ for(i=0; i<pList->nId; i++){
+ if( sqliteStrICmp(pList->a[i].zName, zName)==0 ) return i;
+ }
+ return -1;
+}
+
+/*
+** Delete an entire SrcList including all its substructure.
+*/
+void sqliteSrcListDelete(SrcList *pList){
+ int i;
+ if( pList==0 ) return;
+ for(i=0; i<pList->nSrc; i++){
+ sqliteFree(pList->a[i].zDatabase);
+ sqliteFree(pList->a[i].zName);
+ sqliteFree(pList->a[i].zAlias);
+ if( pList->a[i].pTab && pList->a[i].pTab->isTransient ){
+ sqliteDeleteTable(0, pList->a[i].pTab);
+ }
+ sqliteSelectDelete(pList->a[i].pSelect);
+ sqliteExprDelete(pList->a[i].pOn);
+ sqliteIdListDelete(pList->a[i].pUsing);
+ }
+ sqliteFree(pList);
+}
+
+/*
+** Begin a transaction
+*/
+void sqliteBeginTransaction(Parse *pParse, int onError){
+ sqlite *db;
+
+ if( pParse==0 || (db=pParse->db)==0 || db->aDb[0].pBt==0 ) return;
+ if( pParse->nErr || sqlite_malloc_failed ) return;
+ if( sqliteAuthCheck(pParse, SQLITE_TRANSACTION, "BEGIN", 0, 0) ) return;
+ if( db->flags & SQLITE_InTrans ){
+ sqliteErrorMsg(pParse, "cannot start a transaction within a transaction");
+ return;
+ }
+ sqliteBeginWriteOperation(pParse, 0, 0);
+ if( !pParse->explain ){
+ db->flags |= SQLITE_InTrans;
+ db->onError = onError;
+ }
+}
+
+/*
+** Commit a transaction
+*/
+void sqliteCommitTransaction(Parse *pParse){
+ sqlite *db;
+
+ if( pParse==0 || (db=pParse->db)==0 || db->aDb[0].pBt==0 ) return;
+ if( pParse->nErr || sqlite_malloc_failed ) return;
+ if( sqliteAuthCheck(pParse, SQLITE_TRANSACTION, "COMMIT", 0, 0) ) return;
+ if( (db->flags & SQLITE_InTrans)==0 ){
+ sqliteErrorMsg(pParse, "cannot commit - no transaction is active");
+ return;
+ }
+ if( !pParse->explain ){
+ db->flags &= ~SQLITE_InTrans;
+ }
+ sqliteEndWriteOperation(pParse);
+ if( !pParse->explain ){
+ db->onError = OE_Default;
+ }
+}
+
+/*
+** Rollback a transaction
+*/
+void sqliteRollbackTransaction(Parse *pParse){
+ sqlite *db;
+ Vdbe *v;
+
+ if( pParse==0 || (db=pParse->db)==0 || db->aDb[0].pBt==0 ) return;
+ if( pParse->nErr || sqlite_malloc_failed ) return;
+ if( sqliteAuthCheck(pParse, SQLITE_TRANSACTION, "ROLLBACK", 0, 0) ) return;
+ if( (db->flags & SQLITE_InTrans)==0 ){
+ sqliteErrorMsg(pParse, "cannot rollback - no transaction is active");
+ return;
+ }
+ v = sqliteGetVdbe(pParse);
+ if( v ){
+ sqliteVdbeAddOp(v, OP_Rollback, 0, 0);
+ }
+ if( !pParse->explain ){
+ db->flags &= ~SQLITE_InTrans;
+ db->onError = OE_Default;
+ }
+}
+
+/*
+** Generate VDBE code that will verify the schema cookie for
+** database iDb.
+*/
+void sqliteCodeVerifySchema(Parse *pParse, int iDb){
+ sqlite *db = pParse->db;
+ Vdbe *v = sqliteGetVdbe(pParse);
+ assert( iDb>=0 && iDb<db->nDb );
+ assert( db->aDb[iDb].pBt!=0 );
+ if( iDb!=1 && !DbHasProperty(db, iDb, DB_Cookie) ){
+ sqliteVdbeAddOp(v, OP_VerifyCookie, iDb, db->aDb[iDb].schema_cookie);
+ DbSetProperty(db, iDb, DB_Cookie);
+ }
+}
+
+/*
+** Generate VDBE code that prepares for doing an operation that
+** might change the database.
+**
+** This routine starts a new transaction if we are not already within
+** a transaction. If we are already within a transaction, then a checkpoint
+** is set if the setCheckpoint parameter is true. A checkpoint should
+** be set for operations that might fail (due to a constraint) part of
+** the way through and which will need to undo some writes without having to
+** roll back the whole transaction. For operations where all constraints
+** can be checked before any changes are made to the database, it is never
+** necessary to undo a write and the checkpoint should not be set.
+**
+** Only database iDb and the temp database are made writable by this call.
+** If iDb==0, then the main and temp databases are made writable. If
+** iDb==1 then only the temp database is made writable. If iDb>1 then the
+** specified auxiliary database and the temp database are made writable.
+*/
+void sqliteBeginWriteOperation(Parse *pParse, int setCheckpoint, int iDb){
+ Vdbe *v;
+ sqlite *db = pParse->db;
+ if( DbHasProperty(db, iDb, DB_Locked) ) return;
+ v = sqliteGetVdbe(pParse);
+ if( v==0 ) return;
+ if( !db->aDb[iDb].inTrans ){
+ sqliteVdbeAddOp(v, OP_Transaction, iDb, 0);
+ DbSetProperty(db, iDb, DB_Locked);
+ sqliteCodeVerifySchema(pParse, iDb);
+ if( iDb!=1 ){
+ sqliteBeginWriteOperation(pParse, setCheckpoint, 1);
+ }
+ }else if( setCheckpoint ){
+ sqliteVdbeAddOp(v, OP_Checkpoint, iDb, 0);
+ DbSetProperty(db, iDb, DB_Locked);
+ }
+}
+
+/*
+** Generate code that concludes an operation that may have changed
+** the database. If a statement transaction was started, then emit
+** an OP_Commit that will cause the changes to be committed to disk.
+**
+** Note that checkpoints are automatically committed at the end of
+** a statement. Note also that there can be multiple calls to
+** sqliteBeginWriteOperation() but there should only be a single
+** call to sqliteEndWriteOperation() at the conclusion of the statement.
+*/
+void sqliteEndWriteOperation(Parse *pParse){
+ Vdbe *v;
+ sqlite *db = pParse->db;
+ if( pParse->trigStack ) return; /* if this is in a trigger */
+ v = sqliteGetVdbe(pParse);
+ if( v==0 ) return;
+ if( db->flags & SQLITE_InTrans ){
+ /* A BEGIN has executed. Do not commit until we see an explicit
+ ** COMMIT statement. */
+ }else{
+ sqliteVdbeAddOp(v, OP_Commit, 0, 0);
+ }
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/src/config.h b/usr/src/cmd/svc/configd/sqlite/src/config.h
new file mode 100644
index 0000000000..9310f9188e
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/config.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef SQLITE_CONFIG_H
+#define SQLITE_CONFIG_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef _LP64
+#define SQLITE_PTR_SZ 8
+#else
+#define SQLITE_PTR_SZ 4
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* SQLITE_CONFIG_H */
diff --git a/usr/src/cmd/svc/configd/sqlite/src/copy.c b/usr/src/cmd/svc/configd/sqlite/src/copy.c
new file mode 100644
index 0000000000..811946b5a3
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/copy.c
@@ -0,0 +1,113 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2003 April 6
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code used to implement the COPY command.
+**
+** $Id: copy.c,v 1.9 2004/02/25 13:47:31 drh Exp $
+*/
+#include "sqliteInt.h"
+
+/*
+** The COPY command is for compatibility with PostgreSQL and specifically
+** for the ability to read the output of pg_dump. The format is as
+** follows:
+**
+** COPY table FROM file [USING DELIMITERS string]
+**
+** "table" is an existing table name. We will read lines of code from
+** file to fill this table with data. File might be "stdin". The optional
+** delimiter string identifies the field separators. The default is a tab.
+*/
+void sqliteCopy(
+ Parse *pParse, /* The parser context */
+ SrcList *pTableName, /* The name of the table into which we will insert */
+ Token *pFilename, /* The file from which to obtain information */
+ Token *pDelimiter, /* Use this as the field delimiter */
+ int onError /* What to do if a constraint fails */
+){
+ Table *pTab;
+ int i;
+ Vdbe *v;
+ int addr, end;
+ char *zFile = 0;
+ const char *zDb;
+ sqlite *db = pParse->db;
+
+
+ if( sqlite_malloc_failed ) goto copy_cleanup;
+ assert( pTableName->nSrc==1 );
+ pTab = sqliteSrcListLookup(pParse, pTableName);
+ if( pTab==0 || sqliteIsReadOnly(pParse, pTab, 0) ) goto copy_cleanup;
+ zFile = sqliteStrNDup(pFilename->z, pFilename->n);
+ sqliteDequote(zFile);
+ assert( pTab->iDb<db->nDb );
+ zDb = db->aDb[pTab->iDb].zName;
+ if( sqliteAuthCheck(pParse, SQLITE_INSERT, pTab->zName, 0, zDb)
+ || sqliteAuthCheck(pParse, SQLITE_COPY, pTab->zName, zFile, zDb) ){
+ goto copy_cleanup;
+ }
+ v = sqliteGetVdbe(pParse);
+ if( v ){
+ sqliteBeginWriteOperation(pParse, 1, pTab->iDb);
+ addr = sqliteVdbeOp3(v, OP_FileOpen, 0, 0, pFilename->z, pFilename->n);
+ sqliteVdbeDequoteP3(v, addr);
+ sqliteOpenTableAndIndices(pParse, pTab, 0);
+ if( db->flags & SQLITE_CountRows ){
+ sqliteVdbeAddOp(v, OP_Integer, 0, 0); /* Initialize the row count */
+ }
+ end = sqliteVdbeMakeLabel(v);
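+    /* Top of the per-line loop: read the next line from the input file,
+    ** split it into pTab->nCol fields, and jump to "end" at end-of-file. */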
+ addr = sqliteVdbeAddOp(v, OP_FileRead, pTab->nCol, end);
+ if( pDelimiter ){
+ sqliteVdbeChangeP3(v, addr, pDelimiter->z, pDelimiter->n);
+ sqliteVdbeDequoteP3(v, addr);
+ }else{
+ sqliteVdbeChangeP3(v, addr, "\t", 1);
+ }
+ if( pTab->iPKey>=0 ){
+ sqliteVdbeAddOp(v, OP_FileColumn, pTab->iPKey, 0);
+ sqliteVdbeAddOp(v, OP_MustBeInt, 0, 0);
+ }else{
+ sqliteVdbeAddOp(v, OP_NewRecno, 0, 0);
+ }
+ for(i=0; i<pTab->nCol; i++){
+ if( i==pTab->iPKey ){
+ /* The integer primary key column is filled with NULL since its
+ ** value is always pulled from the record number */
+ sqliteVdbeAddOp(v, OP_String, 0, 0);
+ }else{
+ sqliteVdbeAddOp(v, OP_FileColumn, i, 0);
+ }
+ }
+ sqliteGenerateConstraintChecks(pParse, pTab, 0, 0, pTab->iPKey>=0,
+ 0, onError, addr);
+ sqliteCompleteInsertion(pParse, pTab, 0, 0, 0, 0, -1);
+ if( (db->flags & SQLITE_CountRows)!=0 ){
+ sqliteVdbeAddOp(v, OP_AddImm, 1, 0); /* Increment row count */
+ }
+ sqliteVdbeAddOp(v, OP_Goto, 0, addr);
+ sqliteVdbeResolveLabel(v, end);
+ sqliteVdbeAddOp(v, OP_Noop, 0, 0);
+ sqliteEndWriteOperation(pParse);
+ if( db->flags & SQLITE_CountRows ){
+ sqliteVdbeAddOp(v, OP_ColumnName, 0, 1);
+ sqliteVdbeChangeP3(v, -1, "rows inserted", P3_STATIC);
+ sqliteVdbeAddOp(v, OP_Callback, 1, 0);
+ }
+ }
+
+copy_cleanup:
+ sqliteSrcListDelete(pTableName);
+ sqliteFree(zFile);
+ return;
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/src/date.c b/usr/src/cmd/svc/configd/sqlite/src/date.c
new file mode 100644
index 0000000000..4c7db47449
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/date.c
@@ -0,0 +1,878 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2003 October 31
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains the C functions that implement date and time
+** functions for SQLite.
+**
+** There is only one exported symbol in this file - the function
+** sqliteRegisterDateTimeFunctions() found at the bottom of the file.
+** All other code has file scope.
+**
+** $Id: date.c,v 1.16.2.2 2004/07/20 00:40:01 drh Exp $
+**
+** NOTES:
+**
+** SQLite processes all times and dates as Julian Day numbers. The
+** dates and times are stored as the number of days since noon
+** in Greenwich on November 24, 4714 B.C. according to the Gregorian
+** calendar system.
+**
+** 1970-01-01 00:00:00 is JD 2440587.5
+** 2000-01-01 00:00:00 is JD 2451544.5
+**
+** This implementation requires years to be expressed as a 4-digit number
+** which means that only dates between 0000-01-01 and 9999-12-31 can
+** be represented, even though julian day numbers allow a much wider
+** range of dates.
+**
+** The Gregorian calendar system is used for all dates and times,
+** even those that predate the Gregorian calendar. Historians usually
+** use the Julian calendar for dates prior to 1582-10-15 and for some
+** dates afterwards, depending on locale. Beware of this difference.
+**
+** The conversion algorithms are implemented based on descriptions
+** in the following text:
+**
+** Jean Meeus
+** Astronomical Algorithms, 2nd Edition, 1998
+**      ISBN 0-943396-61-1
+** Willmann-Bell, Inc
+** Richmond, Virginia (USA)
+*/
+#include "os.h"
+#include "sqliteInt.h"
+#include <ctype.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <time.h>
+
+#ifndef SQLITE_OMIT_DATETIME_FUNCS
+
+/*
+** A structure for holding a single date and time.
+*/
+typedef struct DateTime DateTime;
+struct DateTime {
+ double rJD; /* The julian day number */
+ int Y, M, D; /* Year, month, and day */
+ int h, m; /* Hour and minutes */
+ int tz; /* Timezone offset in minutes */
+ double s; /* Seconds */
+ char validYMD; /* True if Y,M,D are valid */
+ char validHMS; /* True if h,m,s are valid */
+ char validJD; /* True if rJD is valid */
+ char validTZ; /* True if tz is valid */
+};
+
+
+/*
+** Convert zDate into one or more integers. Additional arguments
+** come in groups of 5 as follows:
+**
+** N number of digits in the integer
+** min minimum allowed value of the integer
+** max maximum allowed value of the integer
+** nextC first character after the integer
+** pVal where to write the integer's value.
+**
+** Conversions continue until one with nextC==0 is encountered.
+** The function returns the number of successful conversions.
+*/
+static int getDigits(const char *zDate, ...){
+ va_list ap;
+ int val;
+ int N;
+ int min;
+ int max;
+ int nextC;
+ int *pVal;
+ int cnt = 0;
+ va_start(ap, zDate);
+ do{
+ N = va_arg(ap, int);
+ min = va_arg(ap, int);
+ max = va_arg(ap, int);
+ nextC = va_arg(ap, int);
+ pVal = va_arg(ap, int*);
+ val = 0;
+ while( N-- ){
+ if( !isdigit(*zDate) ){
+ return cnt;
+ }
+ val = val*10 + *zDate - '0';
+ zDate++;
+ }
+ if( val<min || val>max || (nextC!=0 && nextC!=*zDate) ){
+ return cnt;
+ }
+ *pVal = val;
+ zDate++;
+ cnt++;
+ }while( nextC );
+ return cnt;
+}
+
+/*
+** Read text from z[] and convert into a floating point number. Return
+** the number of characters converted.
+*/
+static int getValue(const char *z, double *pR){
+ const char *zEnd;
+ *pR = sqliteAtoF(z, &zEnd);
+ return zEnd - z;
+}
+
+/*
+** Parse a timezone extension on the end of a date-time.
+** The extension is of the form:
+**
+** (+/-)HH:MM
+**
+** If the parse is successful, write the number of minutes
+** of offset into p->tz and return 0. If a parse error occurs,
+** return non-zero.
+**
+** A missing specifier is not considered an error.
+*/
+static int parseTimezone(const char *zDate, DateTime *p){
+ int sgn = 0;
+ int nHr, nMn;
+ while( isspace(*zDate) ){ zDate++; }
+ p->tz = 0;
+ if( *zDate=='-' ){
+ sgn = -1;
+ }else if( *zDate=='+' ){
+ sgn = +1;
+ }else{
+ return *zDate!=0;
+ }
+ zDate++;
+ if( getDigits(zDate, 2, 0, 14, ':', &nHr, 2, 0, 59, 0, &nMn)!=2 ){
+ return 1;
+ }
+ zDate += 5;
+ p->tz = sgn*(nMn + nHr*60);
+ while( isspace(*zDate) ){ zDate++; }
+ return *zDate!=0;
+}
+
+/*
+** Parse times of the form HH:MM or HH:MM:SS or HH:MM:SS.FFFF.
+** The HH, MM, and SS must each be exactly 2 digits. The
+** fractional seconds FFFF can be one or more digits.
+**
+** Return 1 if there is a parsing error and 0 on success.
+*/
+static int parseHhMmSs(const char *zDate, DateTime *p){
+ int h, m, s;
+ double ms = 0.0;
+ if( getDigits(zDate, 2, 0, 24, ':', &h, 2, 0, 59, 0, &m)!=2 ){
+ return 1;
+ }
+ zDate += 5;
+ if( *zDate==':' ){
+ zDate++;
+ if( getDigits(zDate, 2, 0, 59, 0, &s)!=1 ){
+ return 1;
+ }
+ zDate += 2;
+ if( *zDate=='.' && isdigit(zDate[1]) ){
+ double rScale = 1.0;
+ zDate++;
+ while( isdigit(*zDate) ){
+ ms = ms*10.0 + *zDate - '0';
+ rScale *= 10.0;
+ zDate++;
+ }
+ ms /= rScale;
+ }
+ }else{
+ s = 0;
+ }
+ p->validJD = 0;
+ p->validHMS = 1;
+ p->h = h;
+ p->m = m;
+ p->s = s + ms;
+ if( parseTimezone(zDate, p) ) return 1;
+ p->validTZ = p->tz!=0;
+ return 0;
+}
+
+/*
+** Convert from YYYY-MM-DD HH:MM:SS to julian day. We always assume
+** that the YYYY-MM-DD is according to the Gregorian calendar.
+**
+** Reference: Meeus page 61
+*/
+static void computeJD(DateTime *p){
+ int Y, M, D, A, B, X1, X2;
+
+ if( p->validJD ) return;
+ if( p->validYMD ){
+ Y = p->Y;
+ M = p->M;
+ D = p->D;
+ }else{
+ Y = 2000; /* If no YMD specified, assume 2000-Jan-01 */
+ M = 1;
+ D = 1;
+ }
+ if( M<=2 ){
+ Y--;
+ M += 12;
+ }
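+  /* Apply the Gregorian calendar correction (A, B) and compute the day
+  ** counts contributed by whole years (X1) and months (X2), following
+  ** the formula in Meeus (see the reference in the file header). */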
+ A = Y/100;
+ B = 2 - A + (A/4);
+ X1 = 365.25*(Y+4716);
+ X2 = 30.6001*(M+1);
+ p->rJD = X1 + X2 + D + B - 1524.5;
+ p->validJD = 1;
+ p->validYMD = 0;
+ if( p->validHMS ){
+ p->rJD += (p->h*3600.0 + p->m*60.0 + p->s)/86400.0;
+ if( p->validTZ ){
+ p->rJD += p->tz*60/86400.0;
+ p->validHMS = 0;
+ p->validTZ = 0;
+ }
+ }
+}
+
+/*
+** Parse dates of the form
+**
+** YYYY-MM-DD HH:MM:SS.FFF
+** YYYY-MM-DD HH:MM:SS
+** YYYY-MM-DD HH:MM
+** YYYY-MM-DD
+**
+** Write the result into the DateTime structure and return 0
+** on success and 1 if the input string is not a well-formed
+** date.
+*/
+static int parseYyyyMmDd(const char *zDate, DateTime *p){
+ int Y, M, D, neg;
+
+ if( zDate[0]=='-' ){
+ zDate++;
+ neg = 1;
+ }else{
+ neg = 0;
+ }
+ if( getDigits(zDate,4,0,9999,'-',&Y,2,1,12,'-',&M,2,1,31,0,&D)!=3 ){
+ return 1;
+ }
+ zDate += 10;
+ while( isspace(*zDate) ){ zDate++; }
+ if( parseHhMmSs(zDate, p)==0 ){
+ /* We got the time */
+ }else if( *zDate==0 ){
+ p->validHMS = 0;
+ }else{
+ return 1;
+ }
+ p->validJD = 0;
+ p->validYMD = 1;
+ p->Y = neg ? -Y : Y;
+ p->M = M;
+ p->D = D;
+ if( p->validTZ ){
+ computeJD(p);
+ }
+ return 0;
+}
+
+/*
+** Attempt to parse the given string into a Julian Day Number. Return
+** the number of errors.
+**
+** The following are acceptable forms for the input string:
+**
+** YYYY-MM-DD HH:MM:SS.FFF +/-HH:MM
+** DDDD.DD
+** now
+**
+** In the first form, the +/-HH:MM is always optional. The fractional
+** seconds extension (the ".FFF") is optional. The seconds portion
+** (":SS.FFF") is option. The year and date can be omitted as long
+** as there is a time string. The time string can be omitted as long
+** as there is a year and date.
+*/
+static int parseDateOrTime(const char *zDate, DateTime *p){
+ memset(p, 0, sizeof(*p));
+ if( parseYyyyMmDd(zDate,p)==0 ){
+ return 0;
+ }else if( parseHhMmSs(zDate, p)==0 ){
+ return 0;
+ }else if( sqliteStrICmp(zDate,"now")==0){
+ double r;
+ if( sqliteOsCurrentTime(&r)==0 ){
+ p->rJD = r;
+ p->validJD = 1;
+ return 0;
+ }
+ return 1;
+ }else if( sqliteIsNumber(zDate) ){
+ p->rJD = sqliteAtoF(zDate, 0);
+ p->validJD = 1;
+ return 0;
+ }
+ return 1;
+}
+
+/*
+** Compute the Year, Month, and Day from the julian day number.
+*/
+static void computeYMD(DateTime *p){
+ int Z, A, B, C, D, E, X1;
+ if( p->validYMD ) return;
+ if( !p->validJD ){
+ p->Y = 2000;
+ p->M = 1;
+ p->D = 1;
+ }else{
+ Z = p->rJD + 0.5;
+ A = (Z - 1867216.25)/36524.25;
+ A = Z + 1 + A - (A/4);
+ B = A + 1524;
+ C = (B - 122.1)/365.25;
+ D = 365.25*C;
+ E = (B-D)/30.6001;
+ X1 = 30.6001*E;
+ p->D = B - D - X1;
+ p->M = E<14 ? E-1 : E-13;
+ p->Y = p->M>2 ? C - 4716 : C - 4715;
+ }
+ p->validYMD = 1;
+}
+
+/*
+** Compute the Hour, Minute, and Seconds from the julian day number.
+*/
+static void computeHMS(DateTime *p){
+ int Z, s;
+ if( p->validHMS ) return;
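+  /* Split the fractional part of the Julian day into hours, minutes, and
+  ** seconds, computing through milliseconds to preserve the fraction. */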
+ Z = p->rJD + 0.5;
+ s = (p->rJD + 0.5 - Z)*86400000.0 + 0.5;
+ p->s = 0.001*s;
+ s = p->s;
+ p->s -= s;
+ p->h = s/3600;
+ s -= p->h*3600;
+ p->m = s/60;
+ p->s += s - p->m*60;
+ p->validHMS = 1;
+}
+
+/*
+** Compute both YMD and HMS
+*/
+static void computeYMD_HMS(DateTime *p){
+ computeYMD(p);
+ computeHMS(p);
+}
+
+/*
+** Clear the YMD and HMS and the TZ
+*/
+static void clearYMD_HMS_TZ(DateTime *p){
+ p->validYMD = 0;
+ p->validHMS = 0;
+ p->validTZ = 0;
+}
+
+/*
+** Compute the difference (in days) between localtime and UTC (a.k.a. GMT)
+** for the time value p where p is in UTC.
+*/
+static double localtimeOffset(DateTime *p){
+ DateTime x, y;
+ time_t t;
+ struct tm *pTm;
+ x = *p;
+ computeYMD_HMS(&x);
+ if( x.Y<1971 || x.Y>=2038 ){
+ x.Y = 2000;
+ x.M = 1;
+ x.D = 1;
+ x.h = 0;
+ x.m = 0;
+ x.s = 0.0;
+ } else {
+ int s = x.s + 0.5;
+ x.s = s;
+ }
+ x.tz = 0;
+ x.validJD = 0;
+ computeJD(&x);
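+  /* Convert the Julian day into seconds since the Unix epoch (JD 2440587.5
+  ** is 1970-01-01 00:00:00 UTC) so that localtime() can be applied. */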
+ t = (x.rJD-2440587.5)*86400.0 + 0.5;
+ sqliteOsEnterMutex();
+ pTm = localtime(&t);
+ y.Y = pTm->tm_year + 1900;
+ y.M = pTm->tm_mon + 1;
+ y.D = pTm->tm_mday;
+ y.h = pTm->tm_hour;
+ y.m = pTm->tm_min;
+ y.s = pTm->tm_sec;
+ sqliteOsLeaveMutex();
+ y.validYMD = 1;
+ y.validHMS = 1;
+ y.validJD = 0;
+ y.validTZ = 0;
+ computeJD(&y);
+ return y.rJD - x.rJD;
+}
+
+/*
+** Process a modifier to a date-time stamp. The modifiers are
+** as follows:
+**
+** NNN days
+** NNN hours
+** NNN minutes
+** NNN.NNNN seconds
+** NNN months
+** NNN years
+** start of month
+** start of year
+** start of week
+** start of day
+** weekday N
+** unixepoch
+** localtime
+** utc
+**
+** Return 0 on success and 1 if there is any kind of error.
+*/
+static int parseModifier(const char *zMod, DateTime *p){
+ int rc = 1;
+ int n;
+ double r;
+ char *z, zBuf[30];
+ z = zBuf;
+ for(n=0; n<sizeof(zBuf)-1 && zMod[n]; n++){
+ z[n] = tolower(zMod[n]);
+ }
+ z[n] = 0;
+ switch( z[0] ){
+ case 'l': {
+ /* localtime
+ **
+ ** Assuming the current time value is UTC (a.k.a. GMT), shift it to
+ ** show local time.
+ */
+ if( strcmp(z, "localtime")==0 ){
+ computeJD(p);
+ p->rJD += localtimeOffset(p);
+ clearYMD_HMS_TZ(p);
+ rc = 0;
+ }
+ break;
+ }
+ case 'u': {
+ /*
+ ** unixepoch
+ **
+ ** Treat the current value of p->rJD as the number of
+ ** seconds since 1970. Convert to a real julian day number.
+ */
+ if( strcmp(z, "unixepoch")==0 && p->validJD ){
+ p->rJD = p->rJD/86400.0 + 2440587.5;
+ clearYMD_HMS_TZ(p);
+ rc = 0;
+ }else if( strcmp(z, "utc")==0 ){
+ double c1;
+ computeJD(p);
+ c1 = localtimeOffset(p);
+ p->rJD -= c1;
+ clearYMD_HMS_TZ(p);
+ p->rJD += c1 - localtimeOffset(p);
+ rc = 0;
+ }
+ break;
+ }
+ case 'w': {
+ /*
+ ** weekday N
+ **
+    ** Move the date to the same time on the next occurrence of
+ ** weekday N where 0==Sunday, 1==Monday, and so forth. If the
+ ** date is already on the appropriate weekday, this is a no-op.
+ */
+ if( strncmp(z, "weekday ", 8)==0 && getValue(&z[8],&r)>0
+ && (n=r)==r && n>=0 && r<7 ){
+ int Z;
+ computeYMD_HMS(p);
+ p->validTZ = 0;
+ p->validJD = 0;
+ computeJD(p);
+ Z = p->rJD + 1.5;
+ Z %= 7;
+ if( Z>n ) Z -= 7;
+ p->rJD += n - Z;
+ clearYMD_HMS_TZ(p);
+ rc = 0;
+ }
+ break;
+ }
+ case 's': {
+ /*
+ ** start of TTTTT
+ **
+ ** Move the date backwards to the beginning of the current day,
+    ** month, or year.
+ */
+ if( strncmp(z, "start of ", 9)!=0 ) break;
+ z += 9;
+ computeYMD(p);
+ p->validHMS = 1;
+ p->h = p->m = 0;
+ p->s = 0.0;
+ p->validTZ = 0;
+ p->validJD = 0;
+ if( strcmp(z,"month")==0 ){
+ p->D = 1;
+ rc = 0;
+ }else if( strcmp(z,"year")==0 ){
+ computeYMD(p);
+ p->M = 1;
+ p->D = 1;
+ rc = 0;
+ }else if( strcmp(z,"day")==0 ){
+ rc = 0;
+ }
+ break;
+ }
+ case '+':
+ case '-':
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9': {
+ n = getValue(z, &r);
+ if( n<=0 ) break;
+ if( z[n]==':' ){
+ /* A modifier of the form (+|-)HH:MM:SS.FFF adds (or subtracts) the
+ ** specified number of hours, minutes, seconds, and fractional seconds
+ ** to the time. The ".FFF" may be omitted. The ":SS.FFF" may be
+ ** omitted.
+ */
+ const char *z2 = z;
+ DateTime tx;
+ int day;
+ if( !isdigit(*z2) ) z2++;
+ memset(&tx, 0, sizeof(tx));
+ if( parseHhMmSs(z2, &tx) ) break;
+ computeJD(&tx);
+ tx.rJD -= 0.5;
+ day = (int)tx.rJD;
+ tx.rJD -= day;
+ if( z[0]=='-' ) tx.rJD = -tx.rJD;
+ computeJD(p);
+ clearYMD_HMS_TZ(p);
+ p->rJD += tx.rJD;
+ rc = 0;
+ break;
+ }
+ z += n;
+ while( isspace(z[0]) ) z++;
+ n = strlen(z);
+ if( n>10 || n<3 ) break;
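+      /* Strip a trailing 's' so that plural forms like "days" and "hours"
+      ** match the singular keywords tested below. */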
+ if( z[n-1]=='s' ){ z[n-1] = 0; n--; }
+ computeJD(p);
+ rc = 0;
+ if( n==3 && strcmp(z,"day")==0 ){
+ p->rJD += r;
+ }else if( n==4 && strcmp(z,"hour")==0 ){
+ p->rJD += r/24.0;
+ }else if( n==6 && strcmp(z,"minute")==0 ){
+ p->rJD += r/(24.0*60.0);
+ }else if( n==6 && strcmp(z,"second")==0 ){
+ p->rJD += r/(24.0*60.0*60.0);
+ }else if( n==5 && strcmp(z,"month")==0 ){
+ int x, y;
+ computeYMD_HMS(p);
+ p->M += r;
+ x = p->M>0 ? (p->M-1)/12 : (p->M-12)/12;
+ p->Y += x;
+ p->M -= x*12;
+ p->validJD = 0;
+ computeJD(p);
+ y = r;
+ if( y!=r ){
+ p->rJD += (r - y)*30.0;
+ }
+ }else if( n==4 && strcmp(z,"year")==0 ){
+ computeYMD_HMS(p);
+ p->Y += r;
+ p->validJD = 0;
+ computeJD(p);
+ }else{
+ rc = 1;
+ }
+ clearYMD_HMS_TZ(p);
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+ return rc;
+}
+
+/*
+** Process time function arguments. argv[0] is a date-time stamp.
+** argv[1] and following are modifiers. Parse them all and write
+** the resulting time into the DateTime structure p. Return 0
+** on success and 1 if there are any errors.
+*/
+static int isDate(int argc, const char **argv, DateTime *p){
+ int i;
+ if( argc==0 ) return 1;
+ if( argv[0]==0 || parseDateOrTime(argv[0], p) ) return 1;
+ for(i=1; i<argc; i++){
+ if( argv[i]==0 || parseModifier(argv[i], p) ) return 1;
+ }
+ return 0;
+}
+
+
+/*
+** The following routines implement the various date and time functions
+** of SQLite.
+*/
+
+/*
+** julianday( TIMESTRING, MOD, MOD, ...)
+**
+** Return the julian day number of the date specified in the arguments
+*/
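+/*
+** For example (illustrative only): julianday('2000-01-01 12:00:00')
+** evaluates to 2451545.0, the J2000.0 epoch, and
+** julianday('1970-01-02') - julianday('1970-01-01') is exactly 1.0.
+*/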
+static void juliandayFunc(sqlite_func *context, int argc, const char **argv){
+ DateTime x;
+ if( isDate(argc, argv, &x)==0 ){
+ computeJD(&x);
+ sqlite_set_result_double(context, x.rJD);
+ }
+}
+
+/*
+** datetime( TIMESTRING, MOD, MOD, ...)
+**
+** Return YYYY-MM-DD HH:MM:SS
+*/
+static void datetimeFunc(sqlite_func *context, int argc, const char **argv){
+ DateTime x;
+ if( isDate(argc, argv, &x)==0 ){
+ char zBuf[100];
+ computeYMD_HMS(&x);
+ sprintf(zBuf, "%04d-%02d-%02d %02d:%02d:%02d",x.Y, x.M, x.D, x.h, x.m,
+ (int)(x.s));
+ sqlite_set_result_string(context, zBuf, -1);
+ }
+}
+
+/*
+** time( TIMESTRING, MOD, MOD, ...)
+**
+** Return HH:MM:SS
+*/
+static void timeFunc(sqlite_func *context, int argc, const char **argv){
+ DateTime x;
+ if( isDate(argc, argv, &x)==0 ){
+ char zBuf[100];
+ computeHMS(&x);
+ sprintf(zBuf, "%02d:%02d:%02d", x.h, x.m, (int)x.s);
+ sqlite_set_result_string(context, zBuf, -1);
+ }
+}
+
+/*
+** date( TIMESTRING, MOD, MOD, ...)
+**
+** Return YYYY-MM-DD
+*/
+static void dateFunc(sqlite_func *context, int argc, const char **argv){
+ DateTime x;
+ if( isDate(argc, argv, &x)==0 ){
+ char zBuf[100];
+ computeYMD(&x);
+ sprintf(zBuf, "%04d-%02d-%02d", x.Y, x.M, x.D);
+ sqlite_set_result_string(context, zBuf, -1);
+ }
+}
+
+/*
+** strftime( FORMAT, TIMESTRING, MOD, MOD, ...)
+**
+** Return a string described by FORMAT. Conversions as follows:
+**
+** %d day of month
+** %f ** fractional seconds SS.SSS
+**   %H  hour 00-23
+**   %j  day of year 001-366
+** %J ** Julian day number
+** %m month 01-12
+** %M minute 00-59
+** %s seconds since 1970-01-01
+** %S seconds 00-59
+** %w day of week 0-6 sunday==0
+** %W week of year 00-53
+** %Y year 0000-9999
+** %% %
+*/
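+/*
+** For example (illustrative only):
+**
+**      strftime('%d/%m/%Y', '2004-02-29')   ->  '29/02/2004'
+**      strftime('%s', '1970-01-02')         ->  '86400'
+*/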
+static void strftimeFunc(sqlite_func *context, int argc, const char **argv){
+ DateTime x;
+ int n, i, j;
+ char *z;
+ const char *zFmt = argv[0];
+ char zBuf[100];
+ if( argv[0]==0 || isDate(argc-1, argv+1, &x) ) return;
+ for(i=0, n=1; zFmt[i]; i++, n++){
+ if( zFmt[i]=='%' ){
+ switch( zFmt[i+1] ){
+ case 'd':
+ case 'H':
+ case 'm':
+ case 'M':
+ case 'S':
+ case 'W':
+ n++;
+ /* fall thru */
+ case 'w':
+ case '%':
+ break;
+ case 'f':
+ n += 8;
+ break;
+ case 'j':
+ n += 3;
+ break;
+ case 'Y':
+ n += 8;
+ break;
+ case 's':
+ case 'J':
+ n += 50;
+ break;
+ default:
+ return; /* ERROR. return a NULL */
+ }
+ i++;
+ }
+ }
+ if( n<sizeof(zBuf) ){
+ z = zBuf;
+ }else{
+ z = sqliteMalloc( n );
+ if( z==0 ) return;
+ }
+ computeJD(&x);
+ computeYMD_HMS(&x);
+ for(i=j=0; zFmt[i]; i++){
+ if( zFmt[i]!='%' ){
+ z[j++] = zFmt[i];
+ }else{
+ i++;
+ switch( zFmt[i] ){
+ case 'd': sprintf(&z[j],"%02d",x.D); j+=2; break;
+ case 'f': {
+ int s = x.s;
+ int ms = (x.s - s)*1000.0;
+ sprintf(&z[j],"%02d.%03d",s,ms);
+ j += strlen(&z[j]);
+ break;
+ }
+ case 'H': sprintf(&z[j],"%02d",x.h); j+=2; break;
+ case 'W': /* Fall thru */
+ case 'j': {
+ int n; /* Number of days since 1st day of year */
+ DateTime y = x;
+ y.validJD = 0;
+ y.M = 1;
+ y.D = 1;
+ computeJD(&y);
+ n = x.rJD - y.rJD;
+ if( zFmt[i]=='W' ){
+ int wd; /* 0=Monday, 1=Tuesday, ... 6=Sunday */
+ wd = ((int)(x.rJD+0.5)) % 7;
+ sprintf(&z[j],"%02d",(n+7-wd)/7);
+ j += 2;
+ }else{
+ sprintf(&z[j],"%03d",n+1);
+ j += 3;
+ }
+ break;
+ }
+ case 'J': sprintf(&z[j],"%.16g",x.rJD); j+=strlen(&z[j]); break;
+ case 'm': sprintf(&z[j],"%02d",x.M); j+=2; break;
+ case 'M': sprintf(&z[j],"%02d",x.m); j+=2; break;
+ case 's': {
+ sprintf(&z[j],"%d",(int)((x.rJD-2440587.5)*86400.0 + 0.5));
+ j += strlen(&z[j]);
+ break;
+ }
+ case 'S': sprintf(&z[j],"%02d",(int)(x.s+0.5)); j+=2; break;
+ case 'w': z[j++] = (((int)(x.rJD+1.5)) % 7) + '0'; break;
+ case 'Y': sprintf(&z[j],"%04d",x.Y); j+=strlen(&z[j]); break;
+ case '%': z[j++] = '%'; break;
+ }
+ }
+ }
+ z[j] = 0;
+ sqlite_set_result_string(context, z, -1);
+ if( z!=zBuf ){
+ sqliteFree(z);
+ }
+}
+
+
+#endif /* !defined(SQLITE_OMIT_DATETIME_FUNCS) */
+
+/*
+** This function registers all of the above C functions as SQL
+** functions. This should be the only routine in this file with
+** external linkage.
+*/
+void sqliteRegisterDateTimeFunctions(sqlite *db){
+#ifndef SQLITE_OMIT_DATETIME_FUNCS
+ static struct {
+ char *zName;
+ int nArg;
+ int dataType;
+ void (*xFunc)(sqlite_func*,int,const char**);
+ } aFuncs[] = {
+ { "julianday", -1, SQLITE_NUMERIC, juliandayFunc },
+ { "date", -1, SQLITE_TEXT, dateFunc },
+ { "time", -1, SQLITE_TEXT, timeFunc },
+ { "datetime", -1, SQLITE_TEXT, datetimeFunc },
+ { "strftime", -1, SQLITE_TEXT, strftimeFunc },
+ };
+ int i;
+
+ for(i=0; i<sizeof(aFuncs)/sizeof(aFuncs[0]); i++){
+ sqlite_create_function(db, aFuncs[i].zName,
+ aFuncs[i].nArg, aFuncs[i].xFunc, 0);
+ if( aFuncs[i].xFunc ){
+ sqlite_function_type(db, aFuncs[i].zName, aFuncs[i].dataType);
+ }
+ }
+#endif
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/src/delete.c b/usr/src/cmd/svc/configd/sqlite/src/delete.c
new file mode 100644
index 0000000000..949e4c2baa
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/delete.c
@@ -0,0 +1,396 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains C code routines that are called by the parser
+** to handle DELETE FROM statements.
+**
+** $Id: delete.c,v 1.61 2004/02/24 01:05:32 drh Exp $
+*/
+#include "sqliteInt.h"
+
+/*
+** Look up every table that is named in pSrc. If any table is not found,
+** add an error message to pParse->zErrMsg and return NULL. If all tables
+** are found, return a pointer to the last table.
+*/
+Table *sqliteSrcListLookup(Parse *pParse, SrcList *pSrc){
+ Table *pTab = 0;
+ int i;
+ for(i=0; i<pSrc->nSrc; i++){
+ const char *zTab = pSrc->a[i].zName;
+ const char *zDb = pSrc->a[i].zDatabase;
+ pTab = sqliteLocateTable(pParse, zTab, zDb);
+ pSrc->a[i].pTab = pTab;
+ }
+ return pTab;
+}
+
+/*
+** Check to make sure the given table is writable. If it is not
+** writable, generate an error message and return 1. If it is
+** writable, return 0.
+*/
+int sqliteIsReadOnly(Parse *pParse, Table *pTab, int viewOk){
+ if( pTab->readOnly ){
+ sqliteErrorMsg(pParse, "table %s may not be modified", pTab->zName);
+ return 1;
+ }
+ if( !viewOk && pTab->pSelect ){
+ sqliteErrorMsg(pParse, "cannot modify %s because it is a view",pTab->zName);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+** Process a DELETE FROM statement.
+*/
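+/*
+** Roughly (an illustrative summary of the code below): "DELETE FROM t"
+** with no WHERE clause and no row triggers compiles to a single OP_Clear
+** of the table and of each of its indices, whereas
+** "DELETE FROM t WHERE a=5" scans the table, records the keys of the
+** matching rows with OP_ListWrite, and then deletes them one at a time
+** in a second OP_ListRead loop.
+*/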
+void sqliteDeleteFrom(
+ Parse *pParse, /* The parser context */
+ SrcList *pTabList, /* The table from which we should delete things */
+ Expr *pWhere /* The WHERE clause. May be null */
+){
+ Vdbe *v; /* The virtual database engine */
+ Table *pTab; /* The table from which records will be deleted */
+ const char *zDb; /* Name of database holding pTab */
+ int end, addr; /* A couple addresses of generated code */
+ int i; /* Loop counter */
+ WhereInfo *pWInfo; /* Information about the WHERE clause */
+ Index *pIdx; /* For looping over indices of the table */
+ int iCur; /* VDBE Cursor number for pTab */
+ sqlite *db; /* Main database structure */
+ int isView; /* True if attempting to delete from a view */
+ AuthContext sContext; /* Authorization context */
+
+ int row_triggers_exist = 0; /* True if any triggers exist */
+ int before_triggers; /* True if there are BEFORE triggers */
+ int after_triggers; /* True if there are AFTER triggers */
+ int oldIdx = -1; /* Cursor for the OLD table of AFTER triggers */
+
+ sContext.pParse = 0;
+ if( pParse->nErr || sqlite_malloc_failed ){
+ pTabList = 0;
+ goto delete_from_cleanup;
+ }
+ db = pParse->db;
+ assert( pTabList->nSrc==1 );
+
+ /* Locate the table which we want to delete. This table has to be
+ ** put in an SrcList structure because some of the subroutines we
+ ** will be calling are designed to work with multiple tables and expect
+ ** an SrcList* parameter instead of just a Table* parameter.
+ */
+ pTab = sqliteSrcListLookup(pParse, pTabList);
+ if( pTab==0 ) goto delete_from_cleanup;
+ before_triggers = sqliteTriggersExist(pParse, pTab->pTrigger,
+ TK_DELETE, TK_BEFORE, TK_ROW, 0);
+ after_triggers = sqliteTriggersExist(pParse, pTab->pTrigger,
+ TK_DELETE, TK_AFTER, TK_ROW, 0);
+ row_triggers_exist = before_triggers || after_triggers;
+ isView = pTab->pSelect!=0;
+ if( sqliteIsReadOnly(pParse, pTab, before_triggers) ){
+ goto delete_from_cleanup;
+ }
+ assert( pTab->iDb<db->nDb );
+ zDb = db->aDb[pTab->iDb].zName;
+ if( sqliteAuthCheck(pParse, SQLITE_DELETE, pTab->zName, 0, zDb) ){
+ goto delete_from_cleanup;
+ }
+
+ /* If pTab is really a view, make sure it has been initialized.
+ */
+ if( isView && sqliteViewGetColumnNames(pParse, pTab) ){
+ goto delete_from_cleanup;
+ }
+
+ /* Allocate a cursor used to store the old.* data for a trigger.
+ */
+ if( row_triggers_exist ){
+ oldIdx = pParse->nTab++;
+ }
+
+ /* Resolve the column names in all the expressions.
+ */
+ assert( pTabList->nSrc==1 );
+ iCur = pTabList->a[0].iCursor = pParse->nTab++;
+ if( pWhere ){
+ if( sqliteExprResolveIds(pParse, pTabList, 0, pWhere) ){
+ goto delete_from_cleanup;
+ }
+ if( sqliteExprCheck(pParse, pWhere, 0, 0) ){
+ goto delete_from_cleanup;
+ }
+ }
+
+ /* Start the view context
+ */
+ if( isView ){
+ sqliteAuthContextPush(pParse, &sContext, pTab->zName);
+ }
+
+ /* Begin generating code.
+ */
+ v = sqliteGetVdbe(pParse);
+ if( v==0 ){
+ goto delete_from_cleanup;
+ }
+ sqliteBeginWriteOperation(pParse, row_triggers_exist, pTab->iDb);
+
+ /* If we are trying to delete from a view, construct that view into
+ ** a temporary table.
+ */
+ if( isView ){
+ Select *pView = sqliteSelectDup(pTab->pSelect);
+ sqliteSelect(pParse, pView, SRT_TempTable, iCur, 0, 0, 0);
+ sqliteSelectDelete(pView);
+ }
+
+ /* Initialize the counter of the number of rows deleted, if
+ ** we are counting rows.
+ */
+ if( db->flags & SQLITE_CountRows ){
+ sqliteVdbeAddOp(v, OP_Integer, 0, 0);
+ }
+
+ /* Special case: A DELETE without a WHERE clause deletes everything.
+ ** It is easier just to erase the whole table. Note, however, that
+ ** this means that the row change count will be incorrect.
+ */
+ if( pWhere==0 && !row_triggers_exist ){
+ if( db->flags & SQLITE_CountRows ){
+ /* If counting rows deleted, just count the total number of
+ ** entries in the table. */
+ int endOfLoop = sqliteVdbeMakeLabel(v);
+ int addr;
+ if( !isView ){
+ sqliteVdbeAddOp(v, OP_Integer, pTab->iDb, 0);
+ sqliteVdbeAddOp(v, OP_OpenRead, iCur, pTab->tnum);
+ }
+ sqliteVdbeAddOp(v, OP_Rewind, iCur, sqliteVdbeCurrentAddr(v)+2);
+ addr = sqliteVdbeAddOp(v, OP_AddImm, 1, 0);
+ sqliteVdbeAddOp(v, OP_Next, iCur, addr);
+ sqliteVdbeResolveLabel(v, endOfLoop);
+ sqliteVdbeAddOp(v, OP_Close, iCur, 0);
+ }
+ if( !isView ){
+ sqliteVdbeAddOp(v, OP_Clear, pTab->tnum, pTab->iDb);
+ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+ sqliteVdbeAddOp(v, OP_Clear, pIdx->tnum, pIdx->iDb);
+ }
+ }
+ }
+
+ /* The usual case: There is a WHERE clause so we have to scan through
+ ** the table and pick which records to delete.
+ */
+ else{
+ /* Begin the database scan
+ */
+ pWInfo = sqliteWhereBegin(pParse, pTabList, pWhere, 1, 0);
+ if( pWInfo==0 ) goto delete_from_cleanup;
+
+ /* Remember the key of every item to be deleted.
+ */
+ sqliteVdbeAddOp(v, OP_ListWrite, 0, 0);
+ if( db->flags & SQLITE_CountRows ){
+ sqliteVdbeAddOp(v, OP_AddImm, 1, 0);
+ }
+
+ /* End the database scan loop.
+ */
+ sqliteWhereEnd(pWInfo);
+
+ /* Open the pseudo-table used to store OLD if there are triggers.
+ */
+ if( row_triggers_exist ){
+ sqliteVdbeAddOp(v, OP_OpenPseudo, oldIdx, 0);
+ }
+
+ /* Delete every item whose key was written to the list during the
+ ** database scan. We have to delete items after the scan is complete
+ ** because deleting an item can change the scan order.
+ */
+ sqliteVdbeAddOp(v, OP_ListRewind, 0, 0);
+ end = sqliteVdbeMakeLabel(v);
+
+ /* This is the beginning of the delete loop when there are
+ ** row triggers.
+ */
+ if( row_triggers_exist ){
+ addr = sqliteVdbeAddOp(v, OP_ListRead, 0, end);
+ sqliteVdbeAddOp(v, OP_Dup, 0, 0);
+ if( !isView ){
+ sqliteVdbeAddOp(v, OP_Integer, pTab->iDb, 0);
+ sqliteVdbeAddOp(v, OP_OpenRead, iCur, pTab->tnum);
+ }
+ sqliteVdbeAddOp(v, OP_MoveTo, iCur, 0);
+
+ sqliteVdbeAddOp(v, OP_Recno, iCur, 0);
+ sqliteVdbeAddOp(v, OP_RowData, iCur, 0);
+ sqliteVdbeAddOp(v, OP_PutIntKey, oldIdx, 0);
+ if( !isView ){
+ sqliteVdbeAddOp(v, OP_Close, iCur, 0);
+ }
+
+ sqliteCodeRowTrigger(pParse, TK_DELETE, 0, TK_BEFORE, pTab, -1,
+ oldIdx, (pParse->trigStack)?pParse->trigStack->orconf:OE_Default,
+ addr);
+ }
+
+ if( !isView ){
+ /* Open cursors for the table we are deleting from and all its
+ ** indices. If there are row triggers, this happens inside the
+    ** OP_ListRead loop because the cursors all have to be closed
+    ** before the trigger fires.  If there are no row triggers, the
+    ** cursors are opened only once, outside the loop.
+ */
+ pParse->nTab = iCur + 1;
+ sqliteOpenTableAndIndices(pParse, pTab, iCur);
+
+ /* This is the beginning of the delete loop when there are no
+ ** row triggers */
+ if( !row_triggers_exist ){
+ addr = sqliteVdbeAddOp(v, OP_ListRead, 0, end);
+ }
+
+ /* Delete the row */
+ sqliteGenerateRowDelete(db, v, pTab, iCur, pParse->trigStack==0);
+ }
+
+ /* If there are row triggers, close all cursors then invoke
+ ** the AFTER triggers
+ */
+ if( row_triggers_exist ){
+ if( !isView ){
+ for(i=1, pIdx=pTab->pIndex; pIdx; i++, pIdx=pIdx->pNext){
+ sqliteVdbeAddOp(v, OP_Close, iCur + i, pIdx->tnum);
+ }
+ sqliteVdbeAddOp(v, OP_Close, iCur, 0);
+ }
+ sqliteCodeRowTrigger(pParse, TK_DELETE, 0, TK_AFTER, pTab, -1,
+ oldIdx, (pParse->trigStack)?pParse->trigStack->orconf:OE_Default,
+ addr);
+ }
+
+ /* End of the delete loop */
+ sqliteVdbeAddOp(v, OP_Goto, 0, addr);
+ sqliteVdbeResolveLabel(v, end);
+ sqliteVdbeAddOp(v, OP_ListReset, 0, 0);
+
+ /* Close the cursors after the loop if there are no row triggers */
+ if( !row_triggers_exist ){
+ for(i=1, pIdx=pTab->pIndex; pIdx; i++, pIdx=pIdx->pNext){
+ sqliteVdbeAddOp(v, OP_Close, iCur + i, pIdx->tnum);
+ }
+ sqliteVdbeAddOp(v, OP_Close, iCur, 0);
+ pParse->nTab = iCur;
+ }
+ }
+ sqliteVdbeAddOp(v, OP_SetCounts, 0, 0);
+ sqliteEndWriteOperation(pParse);
+
+ /*
+ ** Return the number of rows that were deleted.
+ */
+ if( db->flags & SQLITE_CountRows ){
+ sqliteVdbeAddOp(v, OP_ColumnName, 0, 1);
+ sqliteVdbeChangeP3(v, -1, "rows deleted", P3_STATIC);
+ sqliteVdbeAddOp(v, OP_Callback, 1, 0);
+ }
+
+delete_from_cleanup:
+ sqliteAuthContextPop(&sContext);
+ sqliteSrcListDelete(pTabList);
+ sqliteExprDelete(pWhere);
+ return;
+}
+
+/*
+** This routine generates VDBE code that causes a single row of a
+** single table to be deleted.
+**
+** The VDBE must be in a particular state when this routine is called.
+** These are the requirements:
+**
+** 1. A read/write cursor pointing to pTab, the table containing the row
+** to be deleted, must be opened as cursor number "base".
+**
+** 2. Read/write cursors for all indices of pTab must be open as
+** cursor number base+i for the i-th index.
+**
+** 3. The record number of the row to be deleted must be on the top
+** of the stack.
+**
+** This routine pops the top of the stack to remove the record number
+** and then generates code to remove both the table record and all index
+** entries that point to that record.
+*/
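+/*
+** As a rough sketch (assuming a table with one non-PRIMARY-KEY index and
+** the change counter enabled), the fragment generated here looks like:
+**
+**      NotExists  iCur, end      -- pop the recno; skip ahead if no such row
+**      Recno      iCur           -- rebuild the index key for the row ...
+**      Column     iCur, <col>
+**      MakeIdxKey 1
+**      IdxDelete  iCur+1         -- ... and remove the index entry
+**      Delete     iCur           -- remove the table record itself
+**   end:
+*/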
+void sqliteGenerateRowDelete(
+ sqlite *db, /* The database containing the index */
+ Vdbe *v, /* Generate code into this VDBE */
+ Table *pTab, /* Table containing the row to be deleted */
+ int iCur, /* Cursor number for the table */
+ int count /* Increment the row change counter */
+){
+ int addr;
+ addr = sqliteVdbeAddOp(v, OP_NotExists, iCur, 0);
+ sqliteGenerateRowIndexDelete(db, v, pTab, iCur, 0);
+ sqliteVdbeAddOp(v, OP_Delete, iCur,
+ (count?OPFLAG_NCHANGE:0) | OPFLAG_CSCHANGE);
+ sqliteVdbeChangeP2(v, addr, sqliteVdbeCurrentAddr(v));
+}
+
+/*
+** This routine generates VDBE code that causes the deletion of all
+** index entries associated with a single row of a single table.
+**
+** The VDBE must be in a particular state when this routine is called.
+** These are the requirements:
+**
+** 1. A read/write cursor pointing to pTab, the table containing the row
+** to be deleted, must be opened as cursor number "iCur".
+**
+** 2. Read/write cursors for all indices of pTab must be open as
+** cursor number iCur+i for the i-th index.
+**
+** 3. The "iCur" cursor must be pointing to the row that is to be
+** deleted.
+*/
+void sqliteGenerateRowIndexDelete(
+ sqlite *db, /* The database containing the index */
+ Vdbe *v, /* Generate code into this VDBE */
+ Table *pTab, /* Table containing the row to be deleted */
+ int iCur, /* Cursor number for the table */
+ char *aIdxUsed /* Only delete if aIdxUsed!=0 && aIdxUsed[i]!=0 */
+){
+ int i;
+ Index *pIdx;
+
+ for(i=1, pIdx=pTab->pIndex; pIdx; i++, pIdx=pIdx->pNext){
+ int j;
+ if( aIdxUsed!=0 && aIdxUsed[i-1]==0 ) continue;
+ sqliteVdbeAddOp(v, OP_Recno, iCur, 0);
+ for(j=0; j<pIdx->nColumn; j++){
+ int idx = pIdx->aiColumn[j];
+ if( idx==pTab->iPKey ){
+ sqliteVdbeAddOp(v, OP_Dup, j, 0);
+ }else{
+ sqliteVdbeAddOp(v, OP_Column, iCur, idx);
+ }
+ }
+ sqliteVdbeAddOp(v, OP_MakeIdxKey, pIdx->nColumn, 0);
+ if( db->file_format>=4 ) sqliteAddIdxKeyType(v, pIdx);
+ sqliteVdbeAddOp(v, OP_IdxDelete, iCur+i, 0);
+ }
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/src/encode.c b/usr/src/cmd/svc/configd/sqlite/src/encode.c
new file mode 100644
index 0000000000..9d48ec9e42
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/encode.c
@@ -0,0 +1,257 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2002 April 25
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains helper routines used to translate binary data into
+** a null-terminated string (suitable for use in SQLite) and back again.
+** These are convenience routines for use by people who want to store binary
+** data in an SQLite database. The code in this file is not used by any other
+** part of the SQLite library.
+**
+** $Id: encode.c,v 1.12 2004/03/17 18:44:46 drh Exp $
+*/
+#include <string.h>
+#include <assert.h>
+
+/*
+** How This Encoder Works
+**
+** The output is allowed to contain any character except 0x27 (') and
+** 0x00. This is accomplished by using an escape character to encode
+** 0x27 and 0x00 as a two-byte sequence. The escape character is always
+** 0x01. An 0x00 is encoded as the two byte sequence 0x01 0x01. The
+** 0x27 character is encoded as the two byte sequence 0x01 0x28. Finally,
+** the escape character itself is encoded as the two-character sequence
+** 0x01 0x02.
+**
+** To summarize, the encoder works by using escape sequences as follows:
+**
+** 0x00 -> 0x01 0x01
+** 0x01 -> 0x01 0x02
+** 0x27 -> 0x01 0x28
+**
+** If that were all the encoder did, it would work, but in certain cases
+** it could double the size of the encoded string. For example, to
+** encode a string of 100 0x27 characters would require 100 instances of
+** the 0x01 0x28 escape sequence resulting in a 200-character output.
+** We would prefer to keep the size of the encoded string smaller than
+** this.
+**
+** To minimize the encoding size, we first add a fixed offset value to each
+** byte in the sequence. The addition is modulo 256. (That is to say, if
+** the sum of the original character value and the offset exceeds 256, then
+** the higher order bits are truncated.) The offset is chosen to minimize
+** the number of characters in the string that need to be escaped. For
+** example, in the case above where the string was composed of 100 0x27
+** characters, the offset might be 0x01. Each of the 0x27 characters would
+** then be converted into an 0x28 character which would not need to be
+** escaped at all and so the 100 character input string would be converted
+** into just 100 characters of output. Actually 101 characters of output -
+** we have to record the offset used as the first byte in the sequence so
+** that the string can be decoded. Since the offset value is stored as
+** part of the output string and the output string is not allowed to contain
+** characters 0x00 or 0x27, the offset cannot be 0x00 or 0x27.
+**
+** Here, then, are the encoding steps:
+**
+** (1) Choose an offset value and make it the first character of
+** output.
+**
+** (2) Copy each input character into the output buffer, one by
+** one, adding the offset value as you copy.
+**
+** (3) If the value of an input character plus offset is 0x00, replace
+** that one character by the two-character sequence 0x01 0x01.
+** If the sum is 0x01, replace it with 0x01 0x02. If the sum
+**        is 0x27, replace it with 0x01 0x28.
+**
+** (4) Put a 0x00 terminator at the end of the output.
+**
+** Decoding is obvious:
+**
+** (5) Copy encoded characters except the first into the decode
+** buffer. Set the first encoded character aside for use as
+** the offset in step 7 below.
+**
+** (6) Convert each 0x01 0x01 sequence into a single character 0x00.
+** Convert 0x01 0x02 into 0x01. Convert 0x01 0x28 into 0x27.
+**
+** (7) Subtract the offset value that was the first character of
+** the encoded buffer from all characters in the output buffer.
+**
+** The only tricky part is step (1) - how to compute an offset value to
+** minimize the size of the output buffer. This is accomplished by testing
+** all offset values and picking the one that results in the fewest number
+** of escapes. To do that, we first scan the entire input and count the
+** number of occurances of each character value in the input. Suppose
+** the number of 0x00 characters is N(0), the number of occurances of 0x01
+** is N(1), and so forth up to the number of occurances of 0xff is N(255).
+** An offset of 0 is not allowed so we don't have to test it. The number
+** of escapes required for an offset of 1 is N(1)+N(2)+N(40). The number
+** of escapes required for an offset of 2 is N(2)+N(3)+N(41). And so forth.
+** In this way we find the offset that gives the minimum number of escapes,
+** and thus minimizes the length of the output string.
+*/
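+/*
+** A tiny worked example (illustrative only): encoding the three bytes
+** 0x00 0x27 0x41.  The scan below settles on the first offset with zero
+** escapes, e=0x01, since N(0x01)+N(0x02)+N(0x28)==0.  Subtracting the
+** offset gives 0xff 0x26 0x40, none of which need escaping, so the
+** encoded output is
+**
+**      0x01 0xff 0x26 0x40 0x00
+**
+** (offset byte, three shifted data bytes, terminator).  The decoder
+** strips the leading 0x01 and adds it back to each remaining byte
+** modulo 256, recovering 0x00 0x27 0x41.
+*/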
+
+/*
+** Encode a binary buffer "in" of size n bytes so that it contains
+** no instances of characters '\'' or '\000'. The output is
+** null-terminated and can be used as a string value in an INSERT
+** or UPDATE statement. Use sqlite_decode_binary() to convert the
+** string back into its original binary.
+**
+** The result is written into a preallocated output buffer "out".
+** "out" must be able to hold at least 2 +(257*n)/254 bytes.
+** In other words, the output will be expanded by as much as 3
+** bytes for every 254 bytes of input plus 2 bytes of fixed overhead.
+** (This is approximately 2 + 1.0118*n or about a 1.2% size increase.)
+**
+** The return value is the number of characters in the encoded
+** string, excluding the "\000" terminator.
+**
+** If out==NULL then no output is generated but the routine still returns
+** the number of characters that would have been generated if out had
+** not been NULL.
+*/
+int sqlite_encode_binary(const unsigned char *in, int n, unsigned char *out){
+ int i, j, e, m;
+ unsigned char x;
+ int cnt[256];
+ if( n<=0 ){
+ if( out ){
+ out[0] = 'x';
+ out[1] = 0;
+ }
+ return 1;
+ }
+ memset(cnt, 0, sizeof(cnt));
+ for(i=n-1; i>=0; i--){ cnt[in[i]]++; }
+ m = n;
+ for(i=1; i<256; i++){
+ int sum;
+ if( i=='\'' ) continue;
+ sum = cnt[i] + cnt[(i+1)&0xff] + cnt[(i+'\'')&0xff];
+ if( sum<m ){
+ m = sum;
+ e = i;
+ if( m==0 ) break;
+ }
+ }
+ if( out==0 ){
+ return n+m+1;
+ }
+ out[0] = e;
+ j = 1;
+ for(i=0; i<n; i++){
+ x = in[i] - e;
+ if( x==0 || x==1 || x=='\''){
+ out[j++] = 1;
+ x++;
+ }
+ out[j++] = x;
+ }
+ out[j] = 0;
+ assert( j==n+m+1 );
+ return j;
+}
+
+/*
+** Decode the string "in" into binary data and write it into "out".
+** This routine reverses the encoding created by sqlite_encode_binary().
+** The output will always be a few bytes less than the input. The number
+** of bytes of output is returned. If the input is not a well-formed
+** encoding, -1 is returned.
+**
+** The "in" and "out" parameters may point to the same buffer in order
+** to decode a string in place.
+*/
+int sqlite_decode_binary(const unsigned char *in, unsigned char *out){
+ int i, e;
+ unsigned char c;
+ e = *(in++);
+ i = 0;
+ while( (c = *(in++))!=0 ){
+ if( c==1 ){
+ c = *(in++) - 1;
+ }
+ out[i++] = c + e;
+ }
+ return i;
+}
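+
+/*
+** A minimal round-trip sketch (not part of the library; the buffer size
+** follows the 2 + (257*n)/254 bound documented above):
+**
+**      unsigned char raw[3] = { 0x00, 0x27, 0x41 };
+**      unsigned char enc[2 + (257*sizeof(raw))/254 + 1];
+**      unsigned char dec[sizeof(raw)];
+**      int nEnc = sqlite_encode_binary(raw, sizeof(raw), enc);
+**      int nDec = sqlite_decode_binary(enc, dec);
+**      assert( nDec==sizeof(raw) && memcmp(raw, dec, nDec)==0 );
+**
+** Because the encoded string contains neither 0x00 nor '\'', it can be
+** embedded directly inside single quotes in an INSERT or UPDATE statement.
+*/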
+
+#ifdef ENCODER_TEST
+#include <stdio.h>
+#include <stdlib.h>     /* rand(), exit() */
+/*
+** The subroutines above are not tested by the usual test suite. To test
+** these routines, compile just this one file with a -DENCODER_TEST=1 option
+** and run the result.
+*/
+int main(int argc, char **argv){
+ int i, j, n, m, nOut, nByteIn, nByteOut;
+ unsigned char in[30000];
+ unsigned char out[33000];
+
+ nByteIn = nByteOut = 0;
+ for(i=0; i<sizeof(in); i++){
+ printf("Test %d: ", i+1);
+ n = rand() % (i+1);
+ if( i%100==0 ){
+ int k;
+ for(j=k=0; j<n; j++){
+ /* if( k==0 || k=='\'' ) k++; */
+ in[j] = k;
+ k = (k+1)&0xff;
+ }
+ }else{
+ for(j=0; j<n; j++) in[j] = rand() & 0xff;
+ }
+ nByteIn += n;
+ nOut = sqlite_encode_binary(in, n, out);
+ nByteOut += nOut;
+ if( nOut!=strlen(out) ){
+ printf(" ERROR return value is %d instead of %d\n", nOut, strlen(out));
+ exit(1);
+ }
+ if( nOut!=sqlite_encode_binary(in, n, 0) ){
+ printf(" ERROR actual output size disagrees with predicted size\n");
+ exit(1);
+ }
+ m = (256*n + 1262)/253;
+ printf("size %d->%d (max %d)", n, strlen(out)+1, m);
+ if( strlen(out)+1>m ){
+ printf(" ERROR output too big\n");
+ exit(1);
+ }
+ for(j=0; out[j]; j++){
+ if( out[j]=='\'' ){
+ printf(" ERROR contains (')\n");
+ exit(1);
+ }
+ }
+ j = sqlite_decode_binary(out, out);
+ if( j!=n ){
+ printf(" ERROR decode size %d\n", j);
+ exit(1);
+ }
+ if( memcmp(in, out, n)!=0 ){
+ printf(" ERROR decode mismatch\n");
+ exit(1);
+ }
+ printf(" OK\n");
+ }
+ fprintf(stderr,"Finished. Total encoding: %d->%d bytes\n",
+ nByteIn, nByteOut);
+ fprintf(stderr,"Avg size increase: %.3f%%\n",
+ (nByteOut-nByteIn)*100.0/(double)nByteIn);
+}
+#endif /* ENCODER_TEST */
diff --git a/usr/src/cmd/svc/configd/sqlite/src/expr.c b/usr/src/cmd/svc/configd/sqlite/src/expr.c
new file mode 100644
index 0000000000..1155d045f6
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/expr.c
@@ -0,0 +1,1665 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains routines used for analyzing expressions and
+** for generating VDBE code that evaluates expressions in SQLite.
+**
+** $Id: expr.c,v 1.114.2.3 2004/07/22 17:10:10 drh Exp $
+*/
+#include "sqliteInt.h"
+#include <ctype.h>
+
+/*
+** Construct a new expression node and return a pointer to it. Memory
+** for this node is obtained from sqliteMalloc(). The calling function
+** is responsible for making sure the node eventually gets freed.
+*/
+Expr *sqliteExpr(int op, Expr *pLeft, Expr *pRight, Token *pToken){
+ Expr *pNew;
+ pNew = sqliteMalloc( sizeof(Expr) );
+ if( pNew==0 ){
+ /* When malloc fails, we leak memory from pLeft and pRight */
+ return 0;
+ }
+ pNew->op = op;
+ pNew->pLeft = pLeft;
+ pNew->pRight = pRight;
+ if( pToken ){
+ assert( pToken->dyn==0 );
+ pNew->token = *pToken;
+ pNew->span = *pToken;
+ }else{
+ assert( pNew->token.dyn==0 );
+ assert( pNew->token.z==0 );
+ assert( pNew->token.n==0 );
+ if( pLeft && pRight ){
+ sqliteExprSpan(pNew, &pLeft->span, &pRight->span);
+ }else{
+ pNew->span = pNew->token;
+ }
+ }
+ return pNew;
+}
+
+/*
+** Set the Expr.span field of the given expression to span all
+** text between the two given tokens.
+*/
+void sqliteExprSpan(Expr *pExpr, Token *pLeft, Token *pRight){
+ assert( pRight!=0 );
+ assert( pLeft!=0 );
+ /* Note: pExpr might be NULL due to a prior malloc failure */
+ if( pExpr && pRight->z && pLeft->z ){
+ if( pLeft->dyn==0 && pRight->dyn==0 ){
+ pExpr->span.z = pLeft->z;
+ pExpr->span.n = pRight->n + Addr(pRight->z) - Addr(pLeft->z);
+ }else{
+ pExpr->span.z = 0;
+ }
+ }
+}
+
+/*
+** Construct a new expression node for a function with multiple
+** arguments.
+*/
+Expr *sqliteExprFunction(ExprList *pList, Token *pToken){
+ Expr *pNew;
+ pNew = sqliteMalloc( sizeof(Expr) );
+ if( pNew==0 ){
+ /* sqliteExprListDelete(pList); // Leak pList when malloc fails */
+ return 0;
+ }
+ pNew->op = TK_FUNCTION;
+ pNew->pList = pList;
+ if( pToken ){
+ assert( pToken->dyn==0 );
+ pNew->token = *pToken;
+ }else{
+ pNew->token.z = 0;
+ }
+ pNew->span = pNew->token;
+ return pNew;
+}
+
+/*
+** Recursively delete an expression tree.
+*/
+void sqliteExprDelete(Expr *p){
+ if( p==0 ) return;
+ if( p->span.dyn ) sqliteFree((char*)p->span.z);
+ if( p->token.dyn ) sqliteFree((char*)p->token.z);
+ sqliteExprDelete(p->pLeft);
+ sqliteExprDelete(p->pRight);
+ sqliteExprListDelete(p->pList);
+ sqliteSelectDelete(p->pSelect);
+ sqliteFree(p);
+}
+
+
+/*
+** The following group of routines make deep copies of expressions,
+** expression lists, ID lists, and select statements. The copies can
+** be deleted (by being passed to their respective ...Delete() routines)
+** without affecting the originals.
+**
+** The expression list, ID, and source lists returned by sqliteExprListDup(),
+** sqliteIdListDup(), and sqliteSrcListDup() cannot be further expanded
+** by subsequent calls to sqlite*ListAppend() routines.
+**
+** Any tables that the SrcList might point to are not duplicated.
+*/
+Expr *sqliteExprDup(Expr *p){
+ Expr *pNew;
+ if( p==0 ) return 0;
+ pNew = sqliteMallocRaw( sizeof(*p) );
+ if( pNew==0 ) return 0;
+ memcpy(pNew, p, sizeof(*pNew));
+ if( p->token.z!=0 ){
+ pNew->token.z = sqliteStrDup(p->token.z);
+ pNew->token.dyn = 1;
+ }else{
+ assert( pNew->token.z==0 );
+ }
+ pNew->span.z = 0;
+ pNew->pLeft = sqliteExprDup(p->pLeft);
+ pNew->pRight = sqliteExprDup(p->pRight);
+ pNew->pList = sqliteExprListDup(p->pList);
+ pNew->pSelect = sqliteSelectDup(p->pSelect);
+ return pNew;
+}
+void sqliteTokenCopy(Token *pTo, Token *pFrom){
+ if( pTo->dyn ) sqliteFree((char*)pTo->z);
+ if( pFrom->z ){
+ pTo->n = pFrom->n;
+ pTo->z = sqliteStrNDup(pFrom->z, pFrom->n);
+ pTo->dyn = 1;
+ }else{
+ pTo->z = 0;
+ }
+}
+ExprList *sqliteExprListDup(ExprList *p){
+ ExprList *pNew;
+ struct ExprList_item *pItem;
+ int i;
+ if( p==0 ) return 0;
+ pNew = sqliteMalloc( sizeof(*pNew) );
+ if( pNew==0 ) return 0;
+ pNew->nExpr = pNew->nAlloc = p->nExpr;
+ pNew->a = pItem = sqliteMalloc( p->nExpr*sizeof(p->a[0]) );
+ if( pItem==0 ){
+ sqliteFree(pNew);
+ return 0;
+ }
+ for(i=0; i<p->nExpr; i++, pItem++){
+ Expr *pNewExpr, *pOldExpr;
+ pItem->pExpr = pNewExpr = sqliteExprDup(pOldExpr = p->a[i].pExpr);
+ if( pOldExpr->span.z!=0 && pNewExpr ){
+ /* Always make a copy of the span for top-level expressions in the
+ ** expression list. The logic in SELECT processing that determines
+ ** the names of columns in the result set needs this information */
+ sqliteTokenCopy(&pNewExpr->span, &pOldExpr->span);
+ }
+ assert( pNewExpr==0 || pNewExpr->span.z!=0
+ || pOldExpr->span.z==0 || sqlite_malloc_failed );
+ pItem->zName = sqliteStrDup(p->a[i].zName);
+ pItem->sortOrder = p->a[i].sortOrder;
+ pItem->isAgg = p->a[i].isAgg;
+ pItem->done = 0;
+ }
+ return pNew;
+}
+SrcList *sqliteSrcListDup(SrcList *p){
+ SrcList *pNew;
+ int i;
+ int nByte;
+ if( p==0 ) return 0;
+ nByte = sizeof(*p) + (p->nSrc>0 ? sizeof(p->a[0]) * (p->nSrc-1) : 0);
+ pNew = sqliteMallocRaw( nByte );
+ if( pNew==0 ) return 0;
+ pNew->nSrc = pNew->nAlloc = p->nSrc;
+ for(i=0; i<p->nSrc; i++){
+ struct SrcList_item *pNewItem = &pNew->a[i];
+ struct SrcList_item *pOldItem = &p->a[i];
+ pNewItem->zDatabase = sqliteStrDup(pOldItem->zDatabase);
+ pNewItem->zName = sqliteStrDup(pOldItem->zName);
+ pNewItem->zAlias = sqliteStrDup(pOldItem->zAlias);
+ pNewItem->jointype = pOldItem->jointype;
+ pNewItem->iCursor = pOldItem->iCursor;
+ pNewItem->pTab = 0;
+ pNewItem->pSelect = sqliteSelectDup(pOldItem->pSelect);
+ pNewItem->pOn = sqliteExprDup(pOldItem->pOn);
+ pNewItem->pUsing = sqliteIdListDup(pOldItem->pUsing);
+ }
+ return pNew;
+}
+IdList *sqliteIdListDup(IdList *p){
+ IdList *pNew;
+ int i;
+ if( p==0 ) return 0;
+ pNew = sqliteMallocRaw( sizeof(*pNew) );
+ if( pNew==0 ) return 0;
+ pNew->nId = pNew->nAlloc = p->nId;
+ pNew->a = sqliteMallocRaw( p->nId*sizeof(p->a[0]) );
+ if( pNew->a==0 ) return 0;
+ for(i=0; i<p->nId; i++){
+ struct IdList_item *pNewItem = &pNew->a[i];
+ struct IdList_item *pOldItem = &p->a[i];
+ pNewItem->zName = sqliteStrDup(pOldItem->zName);
+ pNewItem->idx = pOldItem->idx;
+ }
+ return pNew;
+}
+Select *sqliteSelectDup(Select *p){
+ Select *pNew;
+ if( p==0 ) return 0;
+ pNew = sqliteMallocRaw( sizeof(*p) );
+ if( pNew==0 ) return 0;
+ pNew->isDistinct = p->isDistinct;
+ pNew->pEList = sqliteExprListDup(p->pEList);
+ pNew->pSrc = sqliteSrcListDup(p->pSrc);
+ pNew->pWhere = sqliteExprDup(p->pWhere);
+ pNew->pGroupBy = sqliteExprListDup(p->pGroupBy);
+ pNew->pHaving = sqliteExprDup(p->pHaving);
+ pNew->pOrderBy = sqliteExprListDup(p->pOrderBy);
+ pNew->op = p->op;
+ pNew->pPrior = sqliteSelectDup(p->pPrior);
+ pNew->nLimit = p->nLimit;
+ pNew->nOffset = p->nOffset;
+ pNew->zSelect = 0;
+ pNew->iLimit = -1;
+ pNew->iOffset = -1;
+ return pNew;
+}
+
+
+/*
+** Add a new element to the end of an expression list. If pList is
+** initially NULL, then create a new expression list.
+*/
+ExprList *sqliteExprListAppend(ExprList *pList, Expr *pExpr, Token *pName){
+ if( pList==0 ){
+ pList = sqliteMalloc( sizeof(ExprList) );
+ if( pList==0 ){
+ /* sqliteExprDelete(pExpr); // Leak memory if malloc fails */
+ return 0;
+ }
+ assert( pList->nAlloc==0 );
+ }
+ if( pList->nAlloc<=pList->nExpr ){
+ pList->nAlloc = pList->nAlloc*2 + 4;
+ pList->a = sqliteRealloc(pList->a, pList->nAlloc*sizeof(pList->a[0]));
+ if( pList->a==0 ){
+ /* sqliteExprDelete(pExpr); // Leak memory if malloc fails */
+ pList->nExpr = pList->nAlloc = 0;
+ return pList;
+ }
+ }
+ assert( pList->a!=0 );
+ if( pExpr || pName ){
+ struct ExprList_item *pItem = &pList->a[pList->nExpr++];
+ memset(pItem, 0, sizeof(*pItem));
+ pItem->pExpr = pExpr;
+ if( pName ){
+ sqliteSetNString(&pItem->zName, pName->z, pName->n, 0);
+ sqliteDequote(pItem->zName);
+ }
+ }
+ return pList;
+}
+
+/*
+** Delete an entire expression list.
+*/
+void sqliteExprListDelete(ExprList *pList){
+ int i;
+ if( pList==0 ) return;
+ assert( pList->a!=0 || (pList->nExpr==0 && pList->nAlloc==0) );
+ assert( pList->nExpr<=pList->nAlloc );
+ for(i=0; i<pList->nExpr; i++){
+ sqliteExprDelete(pList->a[i].pExpr);
+ sqliteFree(pList->a[i].zName);
+ }
+ sqliteFree(pList->a);
+ sqliteFree(pList);
+}
+
+/*
+** Walk an expression tree. Return 1 if the expression is constant
+** and 0 if it involves variables.
+**
+** For the purposes of this function, a double-quoted string (ex: "abc")
+** is considered a variable but a single-quoted string (ex: 'abc') is
+** a constant.
+*/
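+/*
+** For example (illustrative only): 1+2, 'abc', and NULL count as constant
+** here, while x+1 and max(1,2) do not (column references and function
+** calls are conservatively treated as variable).
+*/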
+int sqliteExprIsConstant(Expr *p){
+ switch( p->op ){
+ case TK_ID:
+ case TK_COLUMN:
+ case TK_DOT:
+ case TK_FUNCTION:
+ return 0;
+ case TK_NULL:
+ case TK_STRING:
+ case TK_INTEGER:
+ case TK_FLOAT:
+ case TK_VARIABLE:
+ return 1;
+ default: {
+ if( p->pLeft && !sqliteExprIsConstant(p->pLeft) ) return 0;
+ if( p->pRight && !sqliteExprIsConstant(p->pRight) ) return 0;
+ if( p->pList ){
+ int i;
+ for(i=0; i<p->pList->nExpr; i++){
+ if( !sqliteExprIsConstant(p->pList->a[i].pExpr) ) return 0;
+ }
+ }
+ return p->pLeft!=0 || p->pRight!=0 || (p->pList && p->pList->nExpr>0);
+ }
+ }
+ return 0;
+}
+
+/*
+** If the given expression codes a constant integer that is small enough
+** to fit in a 32-bit integer, return 1 and put the value of the integer
+** in *pValue. If the expression is not an integer or if it is too big
+** to fit in a signed 32-bit integer, return 0 and leave *pValue unchanged.
+*/
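+/*
+** For example (illustrative only): the literals 42, +42, and -42 all
+** produce an integer here, while 1e3, 'abc', and 2147483648 (too large
+** for a signed 32-bit value) do not.
+*/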
+int sqliteExprIsInteger(Expr *p, int *pValue){
+ switch( p->op ){
+ case TK_INTEGER: {
+ if( sqliteFitsIn32Bits(p->token.z) ){
+ *pValue = atoi(p->token.z);
+ return 1;
+ }
+ break;
+ }
+ case TK_STRING: {
+ const char *z = p->token.z;
+ int n = p->token.n;
+ if( n>0 && z[0]=='-' ){ z++; n--; }
+ while( n>0 && *z && isdigit(*z) ){ z++; n--; }
+ if( n==0 && sqliteFitsIn32Bits(p->token.z) ){
+ *pValue = atoi(p->token.z);
+ return 1;
+ }
+ break;
+ }
+ case TK_UPLUS: {
+ return sqliteExprIsInteger(p->pLeft, pValue);
+ }
+ case TK_UMINUS: {
+ int v;
+ if( sqliteExprIsInteger(p->pLeft, &v) ){
+ *pValue = -v;
+ return 1;
+ }
+ break;
+ }
+ default: break;
+ }
+ return 0;
+}
+
+/*
+** Return TRUE if the given string is a row-id column name.
+*/
+int sqliteIsRowid(const char *z){
+ if( sqliteStrICmp(z, "_ROWID_")==0 ) return 1;
+ if( sqliteStrICmp(z, "ROWID")==0 ) return 1;
+ if( sqliteStrICmp(z, "OID")==0 ) return 1;
+ return 0;
+}
+
+/*
+** Given the name of a column of the form X.Y.Z or Y.Z or just Z, look up
+** that name in the set of source tables in pSrcList and make the pExpr
+** expression node refer back to that source column. The following changes
+** are made to pExpr:
+**
+** pExpr->iDb Set the index in db->aDb[] of the database holding
+** the table.
+** pExpr->iTable Set to the cursor number for the table obtained
+** from pSrcList.
+** pExpr->iColumn Set to the column number within the table.
+** pExpr->dataType Set to the appropriate data type for the column.
+** pExpr->op Set to TK_COLUMN.
+** pExpr->pLeft Any expression this points to is deleted
+** pExpr->pRight Any expression this points to is deleted.
+**
+** The pDbToken is the name of the database (the "X"). This value may be
+** NULL, meaning that the name is of the form Y.Z or Z.  Any available database
+** can be used. The pTableToken is the name of the table (the "Y"). This
+** value can be NULL if pDbToken is also NULL. If pTableToken is NULL it
+** means that the form of the name is Z and that columns from any table
+** can be used.
+**
+** If the name cannot be resolved unambiguously, leave an error message
+** in pParse and return non-zero. Return zero on success.
+*/
+static int lookupName(
+ Parse *pParse, /* The parsing context */
+ Token *pDbToken, /* Name of the database containing table, or NULL */
+ Token *pTableToken, /* Name of table containing column, or NULL */
+ Token *pColumnToken, /* Name of the column. */
+ SrcList *pSrcList, /* List of tables used to resolve column names */
+ ExprList *pEList, /* List of expressions used to resolve "AS" */
+ Expr *pExpr /* Make this EXPR node point to the selected column */
+){
+ char *zDb = 0; /* Name of the database. The "X" in X.Y.Z */
+ char *zTab = 0; /* Name of the table. The "Y" in X.Y.Z or Y.Z */
+ char *zCol = 0; /* Name of the column. The "Z" */
+ int i, j; /* Loop counters */
+ int cnt = 0; /* Number of matching column names */
+ int cntTab = 0; /* Number of matching table names */
+ sqlite *db = pParse->db; /* The database */
+
+ assert( pColumnToken && pColumnToken->z ); /* The Z in X.Y.Z cannot be NULL */
+ if( pDbToken && pDbToken->z ){
+ zDb = sqliteStrNDup(pDbToken->z, pDbToken->n);
+ sqliteDequote(zDb);
+ }else{
+ zDb = 0;
+ }
+ if( pTableToken && pTableToken->z ){
+ zTab = sqliteStrNDup(pTableToken->z, pTableToken->n);
+ sqliteDequote(zTab);
+ }else{
+ assert( zDb==0 );
+ zTab = 0;
+ }
+ zCol = sqliteStrNDup(pColumnToken->z, pColumnToken->n);
+ sqliteDequote(zCol);
+ if( sqlite_malloc_failed ){
+ return 1; /* Leak memory (zDb and zTab) if malloc fails */
+ }
+ assert( zTab==0 || pEList==0 );
+
+ pExpr->iTable = -1;
+ for(i=0; i<pSrcList->nSrc; i++){
+ struct SrcList_item *pItem = &pSrcList->a[i];
+ Table *pTab = pItem->pTab;
+ Column *pCol;
+
+ if( pTab==0 ) continue;
+ assert( pTab->nCol>0 );
+ if( zTab ){
+ if( pItem->zAlias ){
+ char *zTabName = pItem->zAlias;
+ if( sqliteStrICmp(zTabName, zTab)!=0 ) continue;
+ }else{
+ char *zTabName = pTab->zName;
+ if( zTabName==0 || sqliteStrICmp(zTabName, zTab)!=0 ) continue;
+ if( zDb!=0 && sqliteStrICmp(db->aDb[pTab->iDb].zName, zDb)!=0 ){
+ continue;
+ }
+ }
+ }
+ if( 0==(cntTab++) ){
+ pExpr->iTable = pItem->iCursor;
+ pExpr->iDb = pTab->iDb;
+ }
+ for(j=0, pCol=pTab->aCol; j<pTab->nCol; j++, pCol++){
+ if( sqliteStrICmp(pCol->zName, zCol)==0 ){
+ cnt++;
+ pExpr->iTable = pItem->iCursor;
+ pExpr->iDb = pTab->iDb;
+ /* Substitute the rowid (column -1) for the INTEGER PRIMARY KEY */
+ pExpr->iColumn = j==pTab->iPKey ? -1 : j;
+ pExpr->dataType = pCol->sortOrder & SQLITE_SO_TYPEMASK;
+ break;
+ }
+ }
+ }
+
+ /* If we have not already resolved the name, then maybe
+ ** it is a new.* or old.* trigger argument reference
+ */
+ if( zDb==0 && zTab!=0 && cnt==0 && pParse->trigStack!=0 ){
+ TriggerStack *pTriggerStack = pParse->trigStack;
+ Table *pTab = 0;
+ if( pTriggerStack->newIdx != -1 && sqliteStrICmp("new", zTab) == 0 ){
+ pExpr->iTable = pTriggerStack->newIdx;
+ assert( pTriggerStack->pTab );
+ pTab = pTriggerStack->pTab;
+ }else if( pTriggerStack->oldIdx != -1 && sqliteStrICmp("old", zTab) == 0 ){
+ pExpr->iTable = pTriggerStack->oldIdx;
+ assert( pTriggerStack->pTab );
+ pTab = pTriggerStack->pTab;
+ }
+
+ if( pTab ){
+ int j;
+ Column *pCol = pTab->aCol;
+
+ pExpr->iDb = pTab->iDb;
+ cntTab++;
+ for(j=0; j < pTab->nCol; j++, pCol++) {
+ if( sqliteStrICmp(pCol->zName, zCol)==0 ){
+ cnt++;
+ pExpr->iColumn = j==pTab->iPKey ? -1 : j;
+ pExpr->dataType = pCol->sortOrder & SQLITE_SO_TYPEMASK;
+ break;
+ }
+ }
+ }
+ }
+
+ /*
+ ** Perhaps the name is a reference to the ROWID
+ */
+ if( cnt==0 && cntTab==1 && sqliteIsRowid(zCol) ){
+ cnt = 1;
+ pExpr->iColumn = -1;
+ pExpr->dataType = SQLITE_SO_NUM;
+ }
+
+ /*
+ ** If the input is of the form Z (not Y.Z or X.Y.Z) then the name Z
+  ** might refer to a result-set alias.  This happens, for example, when
+ ** we are resolving names in the WHERE clause of the following command:
+ **
+ ** SELECT a+b AS x FROM table WHERE x<10;
+ **
+ ** In cases like this, replace pExpr with a copy of the expression that
+ ** forms the result set entry ("a+b" in the example) and return immediately.
+ ** Note that the expression in the result set should have already been
+ ** resolved by the time the WHERE clause is resolved.
+ */
+ if( cnt==0 && pEList!=0 ){
+ for(j=0; j<pEList->nExpr; j++){
+ char *zAs = pEList->a[j].zName;
+ if( zAs!=0 && sqliteStrICmp(zAs, zCol)==0 ){
+ assert( pExpr->pLeft==0 && pExpr->pRight==0 );
+ pExpr->op = TK_AS;
+ pExpr->iColumn = j;
+ pExpr->pLeft = sqliteExprDup(pEList->a[j].pExpr);
+ sqliteFree(zCol);
+ assert( zTab==0 && zDb==0 );
+ return 0;
+ }
+ }
+ }
+
+ /*
+ ** If X and Y are NULL (in other words if only the column name Z is
+ ** supplied) and the value of Z is enclosed in double-quotes, then
+ ** Z is a string literal if it doesn't match any column names. In that
+ ** case, we need to return right away and not make any changes to
+ ** pExpr.
+ */
+ if( cnt==0 && zTab==0 && pColumnToken->z[0]=='"' ){
+ sqliteFree(zCol);
+ return 0;
+ }
+
+ /*
+  ** cnt==0 means there was no match.  cnt>1 means there were two or
+ ** more matches. Either way, we have an error.
+ */
+ if( cnt!=1 ){
+ char *z = 0;
+ char *zErr;
+ zErr = cnt==0 ? "no such column: %s" : "ambiguous column name: %s";
+ if( zDb ){
+ sqliteSetString(&z, zDb, ".", zTab, ".", zCol, 0);
+ }else if( zTab ){
+ sqliteSetString(&z, zTab, ".", zCol, 0);
+ }else{
+ z = sqliteStrDup(zCol);
+ }
+ sqliteErrorMsg(pParse, zErr, z);
+ sqliteFree(z);
+ }
+
+ /* Clean up and return
+ */
+ sqliteFree(zDb);
+ sqliteFree(zTab);
+ sqliteFree(zCol);
+ sqliteExprDelete(pExpr->pLeft);
+ pExpr->pLeft = 0;
+ sqliteExprDelete(pExpr->pRight);
+ pExpr->pRight = 0;
+ pExpr->op = TK_COLUMN;
+ sqliteAuthRead(pParse, pExpr, pSrcList);
+ return cnt!=1;
+}
+
+/*
+** This routine walks an expression tree and resolves references to
+** table columns. Nodes of the form ID.ID or ID resolve into an
+** index to the table in the table list and a column offset. The
+** Expr.opcode for such nodes is changed to TK_COLUMN. The Expr.iTable
+** value is changed to the index of the referenced table in pTabList
+** plus the "base" value. The base value will ultimately become the
+** VDBE cursor number for a cursor that is pointing into the referenced
+** table. The Expr.iColumn value is changed to the index of the column
+** of the referenced table. The Expr.iColumn value for the special
+** ROWID column is -1. Any INTEGER PRIMARY KEY column is tried as an
+** alias for ROWID.
+**
+** We also check for instances of the IN operator. IN comes in two
+** forms:
+**
+** expr IN (exprlist)
+** and
+** expr IN (SELECT ...)
+**
+** The first form is handled by creating a set holding the list
+** of allowed values. The second form causes the SELECT to generate
+** a temporary table.
+**
+** This routine also looks for scalar SELECTs that are part of an expression.
+** If it finds any, it generates code to write the value of that select
+** into a memory cell.
+**
+** Unknown columns or tables provoke an error. The function returns
+** the number of errors seen and leaves an error message on pParse->zErrMsg.
+*/
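+/*
+** For example (illustrative only): for "x IN (1,2,3)" the three constants
+** are loaded into an in-core Set whose id is stored in Expr.iTable, while
+** for "x IN (SELECT y FROM t)" the subquery is compiled into a temporary
+** table and its cursor number is stored in Expr.iTable.
+*/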
+int sqliteExprResolveIds(
+ Parse *pParse, /* The parser context */
+ SrcList *pSrcList, /* List of tables used to resolve column names */
+ ExprList *pEList, /* List of expressions used to resolve "AS" */
+ Expr *pExpr /* The expression to be analyzed. */
+){
+ int i;
+
+ if( pExpr==0 || pSrcList==0 ) return 0;
+ for(i=0; i<pSrcList->nSrc; i++){
+ assert( pSrcList->a[i].iCursor>=0 && pSrcList->a[i].iCursor<pParse->nTab );
+ }
+ switch( pExpr->op ){
+ /* Double-quoted strings (ex: "abc") are used as identifiers if
+ ** possible. Otherwise they remain as strings. Single-quoted
+ ** strings (ex: 'abc') are always string literals.
+ */
+ case TK_STRING: {
+ if( pExpr->token.z[0]=='\'' ) break;
+ /* Fall thru into the TK_ID case if this is a double-quoted string */
+ }
+    /* A lone identifier is the name of a column.
+ */
+ case TK_ID: {
+ if( lookupName(pParse, 0, 0, &pExpr->token, pSrcList, pEList, pExpr) ){
+ return 1;
+ }
+ break;
+ }
+
+ /* A table name and column name: ID.ID
+ ** Or a database, table and column: ID.ID.ID
+ */
+ case TK_DOT: {
+ Token *pColumn;
+ Token *pTable;
+ Token *pDb;
+ Expr *pRight;
+
+ pRight = pExpr->pRight;
+ if( pRight->op==TK_ID ){
+ pDb = 0;
+ pTable = &pExpr->pLeft->token;
+ pColumn = &pRight->token;
+ }else{
+ assert( pRight->op==TK_DOT );
+ pDb = &pExpr->pLeft->token;
+ pTable = &pRight->pLeft->token;
+ pColumn = &pRight->pRight->token;
+ }
+ if( lookupName(pParse, pDb, pTable, pColumn, pSrcList, 0, pExpr) ){
+ return 1;
+ }
+ break;
+ }
+
+ case TK_IN: {
+ Vdbe *v = sqliteGetVdbe(pParse);
+ if( v==0 ) return 1;
+ if( sqliteExprResolveIds(pParse, pSrcList, pEList, pExpr->pLeft) ){
+ return 1;
+ }
+ if( pExpr->pSelect ){
+ /* Case 1: expr IN (SELECT ...)
+ **
+ ** Generate code to write the results of the select into a temporary
+ ** table. The cursor number of the temporary table has already
+ ** been put in iTable by sqliteExprResolveInSelect().
+ */
+ pExpr->iTable = pParse->nTab++;
+ sqliteVdbeAddOp(v, OP_OpenTemp, pExpr->iTable, 1);
+ sqliteSelect(pParse, pExpr->pSelect, SRT_Set, pExpr->iTable, 0,0,0);
+ }else if( pExpr->pList ){
+ /* Case 2: expr IN (exprlist)
+ **
+ ** Create a set to put the exprlist values in. The Set id is stored
+ ** in iTable.
+ */
+ int i, iSet;
+ for(i=0; i<pExpr->pList->nExpr; i++){
+ Expr *pE2 = pExpr->pList->a[i].pExpr;
+ if( !sqliteExprIsConstant(pE2) ){
+ sqliteErrorMsg(pParse,
+ "right-hand side of IN operator must be constant");
+ return 1;
+ }
+ if( sqliteExprCheck(pParse, pE2, 0, 0) ){
+ return 1;
+ }
+ }
+ iSet = pExpr->iTable = pParse->nSet++;
+ for(i=0; i<pExpr->pList->nExpr; i++){
+ Expr *pE2 = pExpr->pList->a[i].pExpr;
+ switch( pE2->op ){
+ case TK_FLOAT:
+ case TK_INTEGER:
+ case TK_STRING: {
+ int addr;
+ assert( pE2->token.z );
+ addr = sqliteVdbeOp3(v, OP_SetInsert, iSet, 0,
+ pE2->token.z, pE2->token.n);
+ sqliteVdbeDequoteP3(v, addr);
+ break;
+ }
+ default: {
+ sqliteExprCode(pParse, pE2);
+ sqliteVdbeAddOp(v, OP_SetInsert, iSet, 0);
+ break;
+ }
+ }
+ }
+ }
+ break;
+ }
+
+ case TK_SELECT: {
+ /* This has to be a scalar SELECT. Generate code to put the
+ ** value of this select in a memory cell and record the number
+ ** of the memory cell in iColumn.
+ */
+ pExpr->iColumn = pParse->nMem++;
+ if( sqliteSelect(pParse, pExpr->pSelect, SRT_Mem, pExpr->iColumn,0,0,0) ){
+ return 1;
+ }
+ break;
+ }
+
+ /* For all else, just recursively walk the tree */
+ default: {
+ if( pExpr->pLeft
+ && sqliteExprResolveIds(pParse, pSrcList, pEList, pExpr->pLeft) ){
+ return 1;
+ }
+ if( pExpr->pRight
+ && sqliteExprResolveIds(pParse, pSrcList, pEList, pExpr->pRight) ){
+ return 1;
+ }
+ if( pExpr->pList ){
+ int i;
+ ExprList *pList = pExpr->pList;
+ for(i=0; i<pList->nExpr; i++){
+ Expr *pArg = pList->a[i].pExpr;
+ if( sqliteExprResolveIds(pParse, pSrcList, pEList, pArg) ){
+ return 1;
+ }
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+** pExpr is a node that defines a function of some kind. It might
+** be a syntactic function like "count(x)" or it might be a function
+** that implements an operator, like "a LIKE b".
+**
+** This routine makes *pzName point to the name of the function and
+** *pnName hold the number of characters in the function name.
+*/
+static void getFunctionName(Expr *pExpr, const char **pzName, int *pnName){
+ switch( pExpr->op ){
+ case TK_FUNCTION: {
+ *pzName = pExpr->token.z;
+ *pnName = pExpr->token.n;
+ break;
+ }
+ case TK_LIKE: {
+ *pzName = "like";
+ *pnName = 4;
+ break;
+ }
+ case TK_GLOB: {
+ *pzName = "glob";
+ *pnName = 4;
+ break;
+ }
+ default: {
+ *pzName = "can't happen";
+ *pnName = 12;
+ break;
+ }
+ }
+}
+
+/*
+** Error check the functions in an expression. Make sure all
+** function names are recognized and all functions have the correct
+** number of arguments. Leave an error message in pParse->zErrMsg
+** if anything is amiss. Return the number of errors.
+**
+** if pIsAgg is not null and this expression is an aggregate function
+** (like count(*) or max(value)) then write a 1 into *pIsAgg.
+*/
+int sqliteExprCheck(Parse *pParse, Expr *pExpr, int allowAgg, int *pIsAgg){
+ int nErr = 0;
+ if( pExpr==0 ) return 0;
+ switch( pExpr->op ){
+ case TK_GLOB:
+ case TK_LIKE:
+ case TK_FUNCTION: {
+ int n = pExpr->pList ? pExpr->pList->nExpr : 0; /* Number of arguments */
+ int no_such_func = 0; /* True if no such function exists */
+ int wrong_num_args = 0; /* True if wrong number of arguments */
+ int is_agg = 0; /* True if is an aggregate function */
+ int i;
+ int nId; /* Number of characters in function name */
+ const char *zId; /* The function name. */
+ FuncDef *pDef;
+
+ getFunctionName(pExpr, &zId, &nId);
+ pDef = sqliteFindFunction(pParse->db, zId, nId, n, 0);
+ if( pDef==0 ){
+ pDef = sqliteFindFunction(pParse->db, zId, nId, -1, 0);
+ if( pDef==0 ){
+ no_such_func = 1;
+ }else{
+ wrong_num_args = 1;
+ }
+ }else{
+ is_agg = pDef->xFunc==0;
+ }
+ if( is_agg && !allowAgg ){
+ sqliteErrorMsg(pParse, "misuse of aggregate function %.*s()", nId, zId);
+ nErr++;
+ is_agg = 0;
+ }else if( no_such_func ){
+ sqliteErrorMsg(pParse, "no such function: %.*s", nId, zId);
+ nErr++;
+ }else if( wrong_num_args ){
+ sqliteErrorMsg(pParse,"wrong number of arguments to function %.*s()",
+ nId, zId);
+ nErr++;
+ }
+ if( is_agg ){
+ pExpr->op = TK_AGG_FUNCTION;
+ if( pIsAgg ) *pIsAgg = 1;
+ }
+ for(i=0; nErr==0 && i<n; i++){
+ nErr = sqliteExprCheck(pParse, pExpr->pList->a[i].pExpr,
+ allowAgg && !is_agg, pIsAgg);
+ }
+ if( pDef==0 ){
+ /* Already reported an error */
+ }else if( pDef->dataType>=0 ){
+ if( pDef->dataType<n ){
+ pExpr->dataType =
+ sqliteExprType(pExpr->pList->a[pDef->dataType].pExpr);
+ }else{
+ pExpr->dataType = SQLITE_SO_NUM;
+ }
+ }else if( pDef->dataType==SQLITE_ARGS ){
+ pDef->dataType = SQLITE_SO_TEXT;
+ for(i=0; i<n; i++){
+ if( sqliteExprType(pExpr->pList->a[i].pExpr)==SQLITE_SO_NUM ){
+ pExpr->dataType = SQLITE_SO_NUM;
+ break;
+ }
+ }
+ }else if( pDef->dataType==SQLITE_NUMERIC ){
+ pExpr->dataType = SQLITE_SO_NUM;
+ }else{
+ pExpr->dataType = SQLITE_SO_TEXT;
+ }
+ }
+ default: {
+ if( pExpr->pLeft ){
+ nErr = sqliteExprCheck(pParse, pExpr->pLeft, allowAgg, pIsAgg);
+ }
+ if( nErr==0 && pExpr->pRight ){
+ nErr = sqliteExprCheck(pParse, pExpr->pRight, allowAgg, pIsAgg);
+ }
+ if( nErr==0 && pExpr->pList ){
+ int n = pExpr->pList->nExpr;
+ int i;
+ for(i=0; nErr==0 && i<n; i++){
+ Expr *pE2 = pExpr->pList->a[i].pExpr;
+ nErr = sqliteExprCheck(pParse, pE2, allowAgg, pIsAgg);
+ }
+ }
+ break;
+ }
+ }
+ return nErr;
+}
+
+/*
+** Return either SQLITE_SO_NUM or SQLITE_SO_TEXT to indicate whether the
+** given expression should sort as numeric values or as text.
+**
+** The sqliteExprResolveIds() and sqliteExprCheck() routines must have
+** both been called on the expression before it is passed to this routine.
+*/
+int sqliteExprType(Expr *p){
+ if( p==0 ) return SQLITE_SO_NUM;
+ while( p ) switch( p->op ){
+ case TK_PLUS:
+ case TK_MINUS:
+ case TK_STAR:
+ case TK_SLASH:
+ case TK_AND:
+ case TK_OR:
+ case TK_ISNULL:
+ case TK_NOTNULL:
+ case TK_NOT:
+ case TK_UMINUS:
+ case TK_UPLUS:
+ case TK_BITAND:
+ case TK_BITOR:
+ case TK_BITNOT:
+ case TK_LSHIFT:
+ case TK_RSHIFT:
+ case TK_REM:
+ case TK_INTEGER:
+ case TK_FLOAT:
+ case TK_IN:
+ case TK_BETWEEN:
+ case TK_GLOB:
+ case TK_LIKE:
+ return SQLITE_SO_NUM;
+
+ case TK_STRING:
+ case TK_NULL:
+ case TK_CONCAT:
+ case TK_VARIABLE:
+ return SQLITE_SO_TEXT;
+
+ case TK_LT:
+ case TK_LE:
+ case TK_GT:
+ case TK_GE:
+ case TK_NE:
+ case TK_EQ:
+ if( sqliteExprType(p->pLeft)==SQLITE_SO_NUM ){
+ return SQLITE_SO_NUM;
+ }
+ p = p->pRight;
+ break;
+
+ case TK_AS:
+ p = p->pLeft;
+ break;
+
+ case TK_COLUMN:
+ case TK_FUNCTION:
+ case TK_AGG_FUNCTION:
+ return p->dataType;
+
+ case TK_SELECT:
+ assert( p->pSelect );
+ assert( p->pSelect->pEList );
+ assert( p->pSelect->pEList->nExpr>0 );
+ p = p->pSelect->pEList->a[0].pExpr;
+ break;
+
+ case TK_CASE: {
+ if( p->pRight && sqliteExprType(p->pRight)==SQLITE_SO_NUM ){
+ return SQLITE_SO_NUM;
+ }
+ if( p->pList ){
+ int i;
+ ExprList *pList = p->pList;
+ for(i=1; i<pList->nExpr; i+=2){
+ if( sqliteExprType(pList->a[i].pExpr)==SQLITE_SO_NUM ){
+ return SQLITE_SO_NUM;
+ }
+ }
+ }
+ return SQLITE_SO_TEXT;
+ }
+
+ default:
+ assert( p->op==TK_ABORT ); /* Can't Happen */
+ break;
+ }
+ return SQLITE_SO_NUM;
+}
+
+/*
+** Generate code into the current Vdbe to evaluate the given
+** expression and leave the result on the top of stack.
+*/
+void sqliteExprCode(Parse *pParse, Expr *pExpr){
+ Vdbe *v = pParse->pVdbe;
+ int op;
+ if( v==0 || pExpr==0 ) return;
+ switch( pExpr->op ){
+ case TK_PLUS: op = OP_Add; break;
+ case TK_MINUS: op = OP_Subtract; break;
+ case TK_STAR: op = OP_Multiply; break;
+ case TK_SLASH: op = OP_Divide; break;
+ case TK_AND: op = OP_And; break;
+ case TK_OR: op = OP_Or; break;
+ case TK_LT: op = OP_Lt; break;
+ case TK_LE: op = OP_Le; break;
+ case TK_GT: op = OP_Gt; break;
+ case TK_GE: op = OP_Ge; break;
+ case TK_NE: op = OP_Ne; break;
+ case TK_EQ: op = OP_Eq; break;
+ case TK_ISNULL: op = OP_IsNull; break;
+ case TK_NOTNULL: op = OP_NotNull; break;
+ case TK_NOT: op = OP_Not; break;
+ case TK_UMINUS: op = OP_Negative; break;
+ case TK_BITAND: op = OP_BitAnd; break;
+ case TK_BITOR: op = OP_BitOr; break;
+ case TK_BITNOT: op = OP_BitNot; break;
+ case TK_LSHIFT: op = OP_ShiftLeft; break;
+ case TK_RSHIFT: op = OP_ShiftRight; break;
+ case TK_REM: op = OP_Remainder; break;
+ default: break;
+ }
+ switch( pExpr->op ){
+ case TK_COLUMN: {
+ if( pParse->useAgg ){
+ sqliteVdbeAddOp(v, OP_AggGet, 0, pExpr->iAgg);
+ }else if( pExpr->iColumn>=0 ){
+ sqliteVdbeAddOp(v, OP_Column, pExpr->iTable, pExpr->iColumn);
+ }else{
+ sqliteVdbeAddOp(v, OP_Recno, pExpr->iTable, 0);
+ }
+ break;
+ }
+ case TK_STRING:
+ case TK_FLOAT:
+ case TK_INTEGER: {
+ if( pExpr->op==TK_INTEGER && sqliteFitsIn32Bits(pExpr->token.z) ){
+ sqliteVdbeAddOp(v, OP_Integer, atoi(pExpr->token.z), 0);
+ }else{
+ sqliteVdbeAddOp(v, OP_String, 0, 0);
+ }
+ assert( pExpr->token.z );
+ sqliteVdbeChangeP3(v, -1, pExpr->token.z, pExpr->token.n);
+ sqliteVdbeDequoteP3(v, -1);
+ break;
+ }
+ case TK_NULL: {
+ sqliteVdbeAddOp(v, OP_String, 0, 0);
+ break;
+ }
+ case TK_VARIABLE: {
+ sqliteVdbeAddOp(v, OP_Variable, pExpr->iTable, 0);
+ break;
+ }
+ case TK_LT:
+ case TK_LE:
+ case TK_GT:
+ case TK_GE:
+ case TK_NE:
+ case TK_EQ: {
+ if( pParse->db->file_format>=4 && sqliteExprType(pExpr)==SQLITE_SO_TEXT ){
+ op += 6; /* Convert numeric opcodes to text opcodes */
+ }
+ /* Fall through into the next case */
+ }
+ case TK_AND:
+ case TK_OR:
+ case TK_PLUS:
+ case TK_STAR:
+ case TK_MINUS:
+ case TK_REM:
+ case TK_BITAND:
+ case TK_BITOR:
+ case TK_SLASH: {
+ sqliteExprCode(pParse, pExpr->pLeft);
+ sqliteExprCode(pParse, pExpr->pRight);
+ sqliteVdbeAddOp(v, op, 0, 0);
+ break;
+ }
+ case TK_LSHIFT:
+ case TK_RSHIFT: {
+ sqliteExprCode(pParse, pExpr->pRight);
+ sqliteExprCode(pParse, pExpr->pLeft);
+ sqliteVdbeAddOp(v, op, 0, 0);
+ break;
+ }
+ case TK_CONCAT: {
+ sqliteExprCode(pParse, pExpr->pLeft);
+ sqliteExprCode(pParse, pExpr->pRight);
+ sqliteVdbeAddOp(v, OP_Concat, 2, 0);
+ break;
+ }
+ case TK_UMINUS: {
+ assert( pExpr->pLeft );
+ if( pExpr->pLeft->op==TK_FLOAT || pExpr->pLeft->op==TK_INTEGER ){
+ Token *p = &pExpr->pLeft->token;
+ char *z = sqliteMalloc( p->n + 2 );
+ sprintf(z, "-%.*s", p->n, p->z);
+ if( pExpr->pLeft->op==TK_INTEGER && sqliteFitsIn32Bits(z) ){
+ sqliteVdbeAddOp(v, OP_Integer, atoi(z), 0);
+ }else{
+ sqliteVdbeAddOp(v, OP_String, 0, 0);
+ }
+ sqliteVdbeChangeP3(v, -1, z, p->n+1);
+ sqliteFree(z);
+ break;
+ }
+ /* Fall through into TK_NOT */
+ }
+ case TK_BITNOT:
+ case TK_NOT: {
+ sqliteExprCode(pParse, pExpr->pLeft);
+ sqliteVdbeAddOp(v, op, 0, 0);
+ break;
+ }
+ case TK_ISNULL:
+ case TK_NOTNULL: {
+ int dest;
+ sqliteVdbeAddOp(v, OP_Integer, 1, 0);
+ sqliteExprCode(pParse, pExpr->pLeft);
+ dest = sqliteVdbeCurrentAddr(v) + 2;
+ sqliteVdbeAddOp(v, op, 1, dest);
+ sqliteVdbeAddOp(v, OP_AddImm, -1, 0);
+ break;
+ }
+ case TK_AGG_FUNCTION: {
+ sqliteVdbeAddOp(v, OP_AggGet, 0, pExpr->iAgg);
+ break;
+ }
+ case TK_GLOB:
+ case TK_LIKE:
+ case TK_FUNCTION: {
+ ExprList *pList = pExpr->pList;
+ int nExpr = pList ? pList->nExpr : 0;
+ FuncDef *pDef;
+ int nId;
+ const char *zId;
+ getFunctionName(pExpr, &zId, &nId);
+ pDef = sqliteFindFunction(pParse->db, zId, nId, nExpr, 0);
+ assert( pDef!=0 );
+ nExpr = sqliteExprCodeExprList(pParse, pList, pDef->includeTypes);
+ sqliteVdbeOp3(v, OP_Function, nExpr, 0, (char*)pDef, P3_POINTER);
+ break;
+ }
+ case TK_SELECT: {
+ sqliteVdbeAddOp(v, OP_MemLoad, pExpr->iColumn, 0);
+ break;
+ }
+ case TK_IN: {
+ int addr;
+ sqliteVdbeAddOp(v, OP_Integer, 1, 0);
+ sqliteExprCode(pParse, pExpr->pLeft);
+ addr = sqliteVdbeCurrentAddr(v);
+ sqliteVdbeAddOp(v, OP_NotNull, -1, addr+4);
+ sqliteVdbeAddOp(v, OP_Pop, 2, 0);
+ sqliteVdbeAddOp(v, OP_String, 0, 0);
+ sqliteVdbeAddOp(v, OP_Goto, 0, addr+6);
+ if( pExpr->pSelect ){
+ sqliteVdbeAddOp(v, OP_Found, pExpr->iTable, addr+6);
+ }else{
+ sqliteVdbeAddOp(v, OP_SetFound, pExpr->iTable, addr+6);
+ }
+ sqliteVdbeAddOp(v, OP_AddImm, -1, 0);
+ break;
+ }
+ case TK_BETWEEN: {
+ sqliteExprCode(pParse, pExpr->pLeft);
+ sqliteVdbeAddOp(v, OP_Dup, 0, 0);
+ sqliteExprCode(pParse, pExpr->pList->a[0].pExpr);
+ sqliteVdbeAddOp(v, OP_Ge, 0, 0);
+ sqliteVdbeAddOp(v, OP_Pull, 1, 0);
+ sqliteExprCode(pParse, pExpr->pList->a[1].pExpr);
+ sqliteVdbeAddOp(v, OP_Le, 0, 0);
+ sqliteVdbeAddOp(v, OP_And, 0, 0);
+ break;
+ }
+ case TK_UPLUS:
+ case TK_AS: {
+ sqliteExprCode(pParse, pExpr->pLeft);
+ break;
+ }
+ case TK_CASE: {
+ int expr_end_label;
+ int jumpInst;
+ int addr;
+ int nExpr;
+ int i;
+
+ assert(pExpr->pList);
+ assert((pExpr->pList->nExpr % 2) == 0);
+ assert(pExpr->pList->nExpr > 0);
+ nExpr = pExpr->pList->nExpr;
+ expr_end_label = sqliteVdbeMakeLabel(v);
+ if( pExpr->pLeft ){
+ sqliteExprCode(pParse, pExpr->pLeft);
+ }
+ for(i=0; i<nExpr; i=i+2){
+ sqliteExprCode(pParse, pExpr->pList->a[i].pExpr);
+ if( pExpr->pLeft ){
+ sqliteVdbeAddOp(v, OP_Dup, 1, 1);
+ jumpInst = sqliteVdbeAddOp(v, OP_Ne, 1, 0);
+ sqliteVdbeAddOp(v, OP_Pop, 1, 0);
+ }else{
+ jumpInst = sqliteVdbeAddOp(v, OP_IfNot, 1, 0);
+ }
+ sqliteExprCode(pParse, pExpr->pList->a[i+1].pExpr);
+ sqliteVdbeAddOp(v, OP_Goto, 0, expr_end_label);
+ addr = sqliteVdbeCurrentAddr(v);
+ sqliteVdbeChangeP2(v, jumpInst, addr);
+ }
+ if( pExpr->pLeft ){
+ sqliteVdbeAddOp(v, OP_Pop, 1, 0);
+ }
+ if( pExpr->pRight ){
+ sqliteExprCode(pParse, pExpr->pRight);
+ }else{
+ sqliteVdbeAddOp(v, OP_String, 0, 0);
+ }
+ sqliteVdbeResolveLabel(v, expr_end_label);
+ break;
+ }
+ case TK_RAISE: {
+ if( !pParse->trigStack ){
+ sqliteErrorMsg(pParse,
+ "RAISE() may only be used within a trigger-program");
+ pParse->nErr++;
+ return;
+ }
+ if( pExpr->iColumn == OE_Rollback ||
+ pExpr->iColumn == OE_Abort ||
+ pExpr->iColumn == OE_Fail ){
+ sqliteVdbeOp3(v, OP_Halt, SQLITE_CONSTRAINT, pExpr->iColumn,
+ pExpr->token.z, pExpr->token.n);
+ sqliteVdbeDequoteP3(v, -1);
+ } else {
+ assert( pExpr->iColumn == OE_Ignore );
+ sqliteVdbeOp3(v, OP_Goto, 0, pParse->trigStack->ignoreJump,
+ "(IGNORE jump)", 0);
+ }
+ }
+ break;
+ }
+}
+
+/*
+** Generate code that pushes the value of every element of the given
+** expression list onto the stack. If the includeTypes flag is true,
+** then also push a string that is the datatype of each element onto
+** the stack after the value.
+**
+** Return the number of elements pushed onto the stack.
+*/
+int sqliteExprCodeExprList(
+ Parse *pParse, /* Parsing context */
+ ExprList *pList, /* The expression list to be coded */
+ int includeTypes /* TRUE to put datatypes on the stack too */
+){
+ struct ExprList_item *pItem;
+ int i, n;
+ Vdbe *v;
+ if( pList==0 ) return 0;
+ v = sqliteGetVdbe(pParse);
+ n = pList->nExpr;
+ for(pItem=pList->a, i=0; i<n; i++, pItem++){
+ sqliteExprCode(pParse, pItem->pExpr);
+ if( includeTypes ){
+ sqliteVdbeOp3(v, OP_String, 0, 0,
+ sqliteExprType(pItem->pExpr)==SQLITE_SO_NUM ? "numeric" : "text",
+ P3_STATIC);
+ }
+ }
+ return includeTypes ? n*2 : n;
+}
+
+/*
+** Generate code for a boolean expression such that a jump is made
+** to the label "dest" if the expression is true but execution
+** continues straight thru if the expression is false.
+**
+** If the expression evaluates to NULL (neither true nor false), then
+** take the jump if the jumpIfNull flag is true.
+*/
+void sqliteExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int jumpIfNull){
+ Vdbe *v = pParse->pVdbe;
+ int op = 0;
+ if( v==0 || pExpr==0 ) return;
+ switch( pExpr->op ){
+ case TK_LT: op = OP_Lt; break;
+ case TK_LE: op = OP_Le; break;
+ case TK_GT: op = OP_Gt; break;
+ case TK_GE: op = OP_Ge; break;
+ case TK_NE: op = OP_Ne; break;
+ case TK_EQ: op = OP_Eq; break;
+ case TK_ISNULL: op = OP_IsNull; break;
+ case TK_NOTNULL: op = OP_NotNull; break;
+ default: break;
+ }
+ switch( pExpr->op ){
+ case TK_AND: {
+ int d2 = sqliteVdbeMakeLabel(v);
+ sqliteExprIfFalse(pParse, pExpr->pLeft, d2, !jumpIfNull);
+ sqliteExprIfTrue(pParse, pExpr->pRight, dest, jumpIfNull);
+ sqliteVdbeResolveLabel(v, d2);
+ break;
+ }
+ case TK_OR: {
+ sqliteExprIfTrue(pParse, pExpr->pLeft, dest, jumpIfNull);
+ sqliteExprIfTrue(pParse, pExpr->pRight, dest, jumpIfNull);
+ break;
+ }
+ case TK_NOT: {
+ sqliteExprIfFalse(pParse, pExpr->pLeft, dest, jumpIfNull);
+ break;
+ }
+ case TK_LT:
+ case TK_LE:
+ case TK_GT:
+ case TK_GE:
+ case TK_NE:
+ case TK_EQ: {
+ sqliteExprCode(pParse, pExpr->pLeft);
+ sqliteExprCode(pParse, pExpr->pRight);
+ if( pParse->db->file_format>=4 && sqliteExprType(pExpr)==SQLITE_SO_TEXT ){
+ op += 6; /* Convert numeric opcodes to text opcodes */
+ }
+ sqliteVdbeAddOp(v, op, jumpIfNull, dest);
+ break;
+ }
+ case TK_ISNULL:
+ case TK_NOTNULL: {
+ sqliteExprCode(pParse, pExpr->pLeft);
+ sqliteVdbeAddOp(v, op, 1, dest);
+ break;
+ }
+ case TK_IN: {
+ int addr;
+ sqliteExprCode(pParse, pExpr->pLeft);
+ addr = sqliteVdbeCurrentAddr(v);
+ sqliteVdbeAddOp(v, OP_NotNull, -1, addr+3);
+ sqliteVdbeAddOp(v, OP_Pop, 1, 0);
+ sqliteVdbeAddOp(v, OP_Goto, 0, jumpIfNull ? dest : addr+4);
+ if( pExpr->pSelect ){
+ sqliteVdbeAddOp(v, OP_Found, pExpr->iTable, dest);
+ }else{
+ sqliteVdbeAddOp(v, OP_SetFound, pExpr->iTable, dest);
+ }
+ break;
+ }
+ case TK_BETWEEN: {
+ int addr;
+ sqliteExprCode(pParse, pExpr->pLeft);
+ sqliteVdbeAddOp(v, OP_Dup, 0, 0);
+ sqliteExprCode(pParse, pExpr->pList->a[0].pExpr);
+ addr = sqliteVdbeAddOp(v, OP_Lt, !jumpIfNull, 0);
+ sqliteExprCode(pParse, pExpr->pList->a[1].pExpr);
+ sqliteVdbeAddOp(v, OP_Le, jumpIfNull, dest);
+ sqliteVdbeAddOp(v, OP_Integer, 0, 0);
+ sqliteVdbeChangeP2(v, addr, sqliteVdbeCurrentAddr(v));
+ sqliteVdbeAddOp(v, OP_Pop, 1, 0);
+ break;
+ }
+ default: {
+ sqliteExprCode(pParse, pExpr);
+ sqliteVdbeAddOp(v, OP_If, jumpIfNull, dest);
+ break;
+ }
+ }
+}
+
+/*
+** Generate code for a boolean expression such that a jump is made
+** to the label "dest" if the expression is false but execution
+** continues straight thru if the expression is true.
+**
+** If the expression evaluates to NULL (neither true nor false) then
+** jump if jumpIfNull is true or fall through if jumpIfNull is false.
+*/
+void sqliteExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int jumpIfNull){
+ Vdbe *v = pParse->pVdbe;
+ int op = 0;
+ if( v==0 || pExpr==0 ) return;
+ switch( pExpr->op ){
+ case TK_LT: op = OP_Ge; break;
+ case TK_LE: op = OP_Gt; break;
+ case TK_GT: op = OP_Le; break;
+ case TK_GE: op = OP_Lt; break;
+ case TK_NE: op = OP_Eq; break;
+ case TK_EQ: op = OP_Ne; break;
+ case TK_ISNULL: op = OP_NotNull; break;
+ case TK_NOTNULL: op = OP_IsNull; break;
+ default: break;
+ }
+ switch( pExpr->op ){
+ case TK_AND: {
+ sqliteExprIfFalse(pParse, pExpr->pLeft, dest, jumpIfNull);
+ sqliteExprIfFalse(pParse, pExpr->pRight, dest, jumpIfNull);
+ break;
+ }
+ case TK_OR: {
+ int d2 = sqliteVdbeMakeLabel(v);
+ sqliteExprIfTrue(pParse, pExpr->pLeft, d2, !jumpIfNull);
+ sqliteExprIfFalse(pParse, pExpr->pRight, dest, jumpIfNull);
+ sqliteVdbeResolveLabel(v, d2);
+ break;
+ }
+ case TK_NOT: {
+ sqliteExprIfTrue(pParse, pExpr->pLeft, dest, jumpIfNull);
+ break;
+ }
+ case TK_LT:
+ case TK_LE:
+ case TK_GT:
+ case TK_GE:
+ case TK_NE:
+ case TK_EQ: {
+ if( pParse->db->file_format>=4 && sqliteExprType(pExpr)==SQLITE_SO_TEXT ){
+ /* Convert numeric comparison opcodes into text comparison opcodes.
+        ** This step depends on the fact that the text comparison opcodes are
+ ** always 6 greater than their corresponding numeric comparison
+ ** opcodes.
+ */
+ assert( OP_Eq+6 == OP_StrEq );
+ op += 6;
+ }
+ sqliteExprCode(pParse, pExpr->pLeft);
+ sqliteExprCode(pParse, pExpr->pRight);
+ sqliteVdbeAddOp(v, op, jumpIfNull, dest);
+ break;
+ }
+ case TK_ISNULL:
+ case TK_NOTNULL: {
+ sqliteExprCode(pParse, pExpr->pLeft);
+ sqliteVdbeAddOp(v, op, 1, dest);
+ break;
+ }
+ case TK_IN: {
+ int addr;
+ sqliteExprCode(pParse, pExpr->pLeft);
+ addr = sqliteVdbeCurrentAddr(v);
+ sqliteVdbeAddOp(v, OP_NotNull, -1, addr+3);
+ sqliteVdbeAddOp(v, OP_Pop, 1, 0);
+ sqliteVdbeAddOp(v, OP_Goto, 0, jumpIfNull ? dest : addr+4);
+ if( pExpr->pSelect ){
+ sqliteVdbeAddOp(v, OP_NotFound, pExpr->iTable, dest);
+ }else{
+ sqliteVdbeAddOp(v, OP_SetNotFound, pExpr->iTable, dest);
+ }
+ break;
+ }
+ case TK_BETWEEN: {
+ int addr;
+ sqliteExprCode(pParse, pExpr->pLeft);
+ sqliteVdbeAddOp(v, OP_Dup, 0, 0);
+ sqliteExprCode(pParse, pExpr->pList->a[0].pExpr);
+ addr = sqliteVdbeCurrentAddr(v);
+ sqliteVdbeAddOp(v, OP_Ge, !jumpIfNull, addr+3);
+ sqliteVdbeAddOp(v, OP_Pop, 1, 0);
+ sqliteVdbeAddOp(v, OP_Goto, 0, dest);
+ sqliteExprCode(pParse, pExpr->pList->a[1].pExpr);
+ sqliteVdbeAddOp(v, OP_Gt, jumpIfNull, dest);
+ break;
+ }
+ default: {
+ sqliteExprCode(pParse, pExpr);
+ sqliteVdbeAddOp(v, OP_IfNot, jumpIfNull, dest);
+ break;
+ }
+ }
+}
+
+/*
+** Do a deep comparison of two expression trees. Return TRUE (non-zero)
+** if they are identical and return FALSE if they differ in any way.
+*/
+int sqliteExprCompare(Expr *pA, Expr *pB){
+ int i;
+ if( pA==0 ){
+ return pB==0;
+ }else if( pB==0 ){
+ return 0;
+ }
+ if( pA->op!=pB->op ) return 0;
+ if( !sqliteExprCompare(pA->pLeft, pB->pLeft) ) return 0;
+ if( !sqliteExprCompare(pA->pRight, pB->pRight) ) return 0;
+ if( pA->pList ){
+ if( pB->pList==0 ) return 0;
+ if( pA->pList->nExpr!=pB->pList->nExpr ) return 0;
+ for(i=0; i<pA->pList->nExpr; i++){
+ if( !sqliteExprCompare(pA->pList->a[i].pExpr, pB->pList->a[i].pExpr) ){
+ return 0;
+ }
+ }
+ }else if( pB->pList ){
+ return 0;
+ }
+ if( pA->pSelect || pB->pSelect ) return 0;
+ if( pA->iTable!=pB->iTable || pA->iColumn!=pB->iColumn ) return 0;
+ if( pA->token.z ){
+ if( pB->token.z==0 ) return 0;
+ if( pB->token.n!=pA->token.n ) return 0;
+ if( sqliteStrNICmp(pA->token.z, pB->token.z, pB->token.n)!=0 ) return 0;
+ }
+ return 1;
+}
+
+/*
+** Add a new element to the pParse->aAgg[] array and return its index.
+*/
+static int appendAggInfo(Parse *pParse){
+ if( (pParse->nAgg & 0x7)==0 ){
+ int amt = pParse->nAgg + 8;
+ AggExpr *aAgg = sqliteRealloc(pParse->aAgg, amt*sizeof(pParse->aAgg[0]));
+ if( aAgg==0 ){
+ return -1;
+ }
+ pParse->aAgg = aAgg;
+ }
+ memset(&pParse->aAgg[pParse->nAgg], 0, sizeof(pParse->aAgg[0]));
+ return pParse->nAgg++;
+}
+
+/*
+** Analyze the given expression looking for aggregate functions and
+** for variables that need to be added to the pParse->aAgg[] array.
+** Make additional entries to the pParse->aAgg[] array as necessary.
+**
+** This routine should only be called after the expression has been
+** analyzed by sqliteExprResolveIds() and sqliteExprCheck().
+**
+** If errors are seen, leave an error message in zErrMsg and return
+** the number of errors.
+*/
+int sqliteExprAnalyzeAggregates(Parse *pParse, Expr *pExpr){
+ int i;
+ AggExpr *aAgg;
+ int nErr = 0;
+
+ if( pExpr==0 ) return 0;
+ switch( pExpr->op ){
+ case TK_COLUMN: {
+ aAgg = pParse->aAgg;
+ for(i=0; i<pParse->nAgg; i++){
+ if( aAgg[i].isAgg ) continue;
+ if( aAgg[i].pExpr->iTable==pExpr->iTable
+ && aAgg[i].pExpr->iColumn==pExpr->iColumn ){
+ break;
+ }
+ }
+ if( i>=pParse->nAgg ){
+ i = appendAggInfo(pParse);
+ if( i<0 ) return 1;
+ pParse->aAgg[i].isAgg = 0;
+ pParse->aAgg[i].pExpr = pExpr;
+ }
+ pExpr->iAgg = i;
+ break;
+ }
+ case TK_AGG_FUNCTION: {
+ aAgg = pParse->aAgg;
+ for(i=0; i<pParse->nAgg; i++){
+ if( !aAgg[i].isAgg ) continue;
+ if( sqliteExprCompare(aAgg[i].pExpr, pExpr) ){
+ break;
+ }
+ }
+ if( i>=pParse->nAgg ){
+ i = appendAggInfo(pParse);
+ if( i<0 ) return 1;
+ pParse->aAgg[i].isAgg = 1;
+ pParse->aAgg[i].pExpr = pExpr;
+ pParse->aAgg[i].pFunc = sqliteFindFunction(pParse->db,
+ pExpr->token.z, pExpr->token.n,
+ pExpr->pList ? pExpr->pList->nExpr : 0, 0);
+ }
+ pExpr->iAgg = i;
+ break;
+ }
+ default: {
+ if( pExpr->pLeft ){
+ nErr = sqliteExprAnalyzeAggregates(pParse, pExpr->pLeft);
+ }
+ if( nErr==0 && pExpr->pRight ){
+ nErr = sqliteExprAnalyzeAggregates(pParse, pExpr->pRight);
+ }
+ if( nErr==0 && pExpr->pList ){
+ int n = pExpr->pList->nExpr;
+ int i;
+ for(i=0; nErr==0 && i<n; i++){
+ nErr = sqliteExprAnalyzeAggregates(pParse, pExpr->pList->a[i].pExpr);
+ }
+ }
+ break;
+ }
+ }
+ return nErr;
+}
+
+/*
+** Locate a user function given a name and a number of arguments.
+** Return a pointer to the FuncDef structure that defines that
+** function, or return NULL if the function does not exist.
+**
+** If the createFlag argument is true, then a new (blank) FuncDef
+** structure is created and linked into the "db" structure if no
+** matching function previously existed. When createFlag is true
+** and the nArg parameter is -1, then only a function that accepts
+** any number of arguments will be returned.
+**
+** If createFlag is false and nArg is -1, then the first valid
+** function found is returned. A function is valid if either xFunc
+** or xStep is non-zero.
+*/
+FuncDef *sqliteFindFunction(
+ sqlite *db, /* An open database */
+ const char *zName, /* Name of the function. Not null-terminated */
+ int nName, /* Number of characters in the name */
+ int nArg, /* Number of arguments. -1 means any number */
+ int createFlag /* Create new entry if true and does not otherwise exist */
+){
+ FuncDef *pFirst, *p, *pMaybe;
+ pFirst = p = (FuncDef*)sqliteHashFind(&db->aFunc, zName, nName);
+ if( p && !createFlag && nArg<0 ){
+ while( p && p->xFunc==0 && p->xStep==0 ){ p = p->pNext; }
+ return p;
+ }
+ pMaybe = 0;
+ while( p && p->nArg!=nArg ){
+ if( p->nArg<0 && !createFlag && (p->xFunc || p->xStep) ) pMaybe = p;
+ p = p->pNext;
+ }
+ if( p && !createFlag && p->xFunc==0 && p->xStep==0 ){
+ return 0;
+ }
+ if( p==0 && pMaybe ){
+ assert( createFlag==0 );
+ return pMaybe;
+ }
+ if( p==0 && createFlag && (p = sqliteMalloc(sizeof(*p)))!=0 ){
+ p->nArg = nArg;
+ p->pNext = pFirst;
+ p->dataType = pFirst ? pFirst->dataType : SQLITE_NUMERIC;
+ sqliteHashInsert(&db->aFunc, zName, nName, (void*)p);
+ }
+ return p;
+}
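The comment above sqliteFindFunction() describes a two-step lookup: a definition whose nArg matches the call exactly is preferred, and a definition registered with nArg of -1 (any number of arguments) is kept as a fallback. The following standalone C sketch models only that precedence rule; the demo_def and resolve names are invented for the illustration and are not part of the SQLite sources.

#include <stdio.h>

/* Simplified stand-in for a chain of FuncDef entries that share one name;
** entries differ only in nArg (-1 means "accepts any number of arguments"). */
struct demo_def {
  int nArg;                  /* exact argument count, or -1 for varargs */
  struct demo_def *pNext;    /* next definition with the same name */
};

/* Resolve a call with nArg arguments: prefer an exact match on nArg and
** fall back to a varargs definition, mirroring the pMaybe logic above. */
static struct demo_def *resolve(struct demo_def *pList, int nArg){
  struct demo_def *p, *pMaybe = 0;
  for(p=pList; p; p=p->pNext){
    if( p->nArg==nArg ) return p;       /* exact match wins */
    if( p->nArg<0 ) pMaybe = p;         /* remember the varargs entry */
  }
  return pMaybe;
}

int main(void){
  struct demo_def anyArgs = { -1, 0 };        /* accepts any number of args */
  struct demo_def zeroArgs = { 0, &anyArgs }; /* zero-argument form */
  struct demo_def *hit;

  hit = resolve(&zeroArgs, 2);   /* no 2-argument entry: varargs is chosen */
  printf("2 args -> nArg=%d\n", hit ? hit->nArg : -999);
  hit = resolve(&zeroArgs, 0);   /* exact 0-argument entry is chosen */
  printf("0 args -> nArg=%d\n", hit ? hit->nArg : -999);
  return 0;
}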
diff --git a/usr/src/cmd/svc/configd/sqlite/src/func.c b/usr/src/cmd/svc/configd/sqlite/src/func.c
new file mode 100644
index 0000000000..6c3915f7a8
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/func.c
@@ -0,0 +1,661 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2002 February 23
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains the C functions that implement various SQL
+** functions of SQLite.
+**
+** There is only one exported symbol in this file - the function
+** sqliteRegisterBuiltinFunctions() found at the bottom of the file.
+** All other code has file scope.
+**
+** $Id: func.c,v 1.43.2.3 2004/07/18 23:03:11 drh Exp $
+*/
+#include <ctype.h>
+#include <math.h>
+#include <stdlib.h>
+#include <assert.h>
+#include "sqliteInt.h"
+#include "os.h"
+
+/*
+** Implementation of the non-aggregate min() and max() functions
+*/
+static void minmaxFunc(sqlite_func *context, int argc, const char **argv){
+ const char *zBest;
+ int i;
+ int (*xCompare)(const char*, const char*);
+ int mask; /* 0 for min() or 0xffffffff for max() */
+
+ if( argc==0 ) return;
+ mask = (int)sqlite_user_data(context);
+ zBest = argv[0];
+ if( zBest==0 ) return;
+ if( argv[1][0]=='n' ){
+ xCompare = sqliteCompare;
+ }else{
+ xCompare = strcmp;
+ }
+ for(i=2; i<argc; i+=2){
+ if( argv[i]==0 ) return;
+ if( (xCompare(argv[i], zBest)^mask)<0 ){
+ zBest = argv[i];
+ }
+ }
+ sqlite_set_result_string(context, zBest, -1);
+}
+
+/*
+** Return the type of the argument.
+*/
+static void typeofFunc(sqlite_func *context, int argc, const char **argv){
+ assert( argc==2 );
+ sqlite_set_result_string(context, argv[1], -1);
+}
+
+/*
+** Implementation of the length() function
+*/
+static void lengthFunc(sqlite_func *context, int argc, const char **argv){
+ const char *z;
+ int len;
+
+ assert( argc==1 );
+ z = argv[0];
+ if( z==0 ) return;
+#ifdef SQLITE_UTF8
+ for(len=0; *z; z++){ if( (0xc0&*z)!=0x80 ) len++; }
+#else
+ len = strlen(z);
+#endif
+ sqlite_set_result_int(context, len);
+}
+
+/*
+** Implementation of the abs() function
+*/
+static void absFunc(sqlite_func *context, int argc, const char **argv){
+ const char *z;
+ assert( argc==1 );
+ z = argv[0];
+ if( z==0 ) return;
+ if( z[0]=='-' && isdigit(z[1]) ) z++;
+ sqlite_set_result_string(context, z, -1);
+}
+
+/*
+** Implementation of the substr() function
+*/
+static void substrFunc(sqlite_func *context, int argc, const char **argv){
+ const char *z;
+#ifdef SQLITE_UTF8
+ const char *z2;
+ int i;
+#endif
+ int p1, p2, len;
+ assert( argc==3 );
+ z = argv[0];
+ if( z==0 ) return;
+  p1 = atoi(argv[1]?argv[1]:"0");   /* treat a NULL argument as 0 */
+  p2 = atoi(argv[2]?argv[2]:"0");
+#ifdef SQLITE_UTF8
+ for(len=0, z2=z; *z2; z2++){ if( (0xc0&*z2)!=0x80 ) len++; }
+#else
+ len = strlen(z);
+#endif
+ if( p1<0 ){
+ p1 += len;
+ if( p1<0 ){
+ p2 += p1;
+ p1 = 0;
+ }
+ }else if( p1>0 ){
+ p1--;
+ }
+ if( p1+p2>len ){
+ p2 = len-p1;
+ }
+#ifdef SQLITE_UTF8
+ for(i=0; i<p1 && z[i]; i++){
+ if( (z[i]&0xc0)==0x80 ) p1++;
+ }
+ while( z[i] && (z[i]&0xc0)==0x80 ){ i++; p1++; }
+ for(; i<p1+p2 && z[i]; i++){
+ if( (z[i]&0xc0)==0x80 ) p2++;
+ }
+ while( z[i] && (z[i]&0xc0)==0x80 ){ i++; p2++; }
+#endif
+ if( p2<0 ) p2 = 0;
+ sqlite_set_result_string(context, &z[p1], p2);
+}
+
+/*
+** Implementation of the round() function
+*/
+static void roundFunc(sqlite_func *context, int argc, const char **argv){
+ int n;
+ double r;
+ char zBuf[100];
+ assert( argc==1 || argc==2 );
+ if( argv[0]==0 || (argc==2 && argv[1]==0) ) return;
+ n = argc==2 ? atoi(argv[1]) : 0;
+ if( n>30 ) n = 30;
+ if( n<0 ) n = 0;
+ r = sqliteAtoF(argv[0], 0);
+ sprintf(zBuf,"%.*f",n,r);
+ sqlite_set_result_string(context, zBuf, -1);
+}
+
+/*
+** Implementation of the upper() and lower() SQL functions.
+*/
+static void upperFunc(sqlite_func *context, int argc, const char **argv){
+ unsigned char *z;
+ int i;
+ if( argc<1 || argv[0]==0 ) return;
+ z = (unsigned char*)sqlite_set_result_string(context, argv[0], -1);
+ if( z==0 ) return;
+ for(i=0; z[i]; i++){
+ if( islower(z[i]) ) z[i] = toupper(z[i]);
+ }
+}
+static void lowerFunc(sqlite_func *context, int argc, const char **argv){
+ unsigned char *z;
+ int i;
+ if( argc<1 || argv[0]==0 ) return;
+ z = (unsigned char*)sqlite_set_result_string(context, argv[0], -1);
+ if( z==0 ) return;
+ for(i=0; z[i]; i++){
+ if( isupper(z[i]) ) z[i] = tolower(z[i]);
+ }
+}
+
+/*
+** Implementation of the IFNULL(), NVL(), and COALESCE() functions.
+** All three do the same thing. They return the first non-NULL
+** argument.
+*/
+static void ifnullFunc(sqlite_func *context, int argc, const char **argv){
+ int i;
+ for(i=0; i<argc; i++){
+ if( argv[i] ){
+ sqlite_set_result_string(context, argv[i], -1);
+ break;
+ }
+ }
+}
+
+/*
+** Implementation of random(). Return a random integer.
+*/
+static void randomFunc(sqlite_func *context, int argc, const char **argv){
+ int r;
+ sqliteRandomness(sizeof(r), &r);
+ sqlite_set_result_int(context, r);
+}
+
+/*
+** Implementation of the last_insert_rowid() SQL function. The return
+** value is the same as the sqlite_last_insert_rowid() API function.
+*/
+static void last_insert_rowid(sqlite_func *context, int arg, const char **argv){
+ sqlite *db = sqlite_user_data(context);
+ sqlite_set_result_int(context, sqlite_last_insert_rowid(db));
+}
+
+/*
+** Implementation of the change_count() SQL function. The return
+** value is the same as the sqlite_changes() API function.
+*/
+static void change_count(sqlite_func *context, int arg, const char **argv){
+ sqlite *db = sqlite_user_data(context);
+ sqlite_set_result_int(context, sqlite_changes(db));
+}
+
+/*
+** Implementation of the last_statement_change_count() SQL function. The
+** return value is the same as the sqlite_last_statement_changes() API function.
+*/
+static void last_statement_change_count(sqlite_func *context, int arg,
+ const char **argv){
+ sqlite *db = sqlite_user_data(context);
+ sqlite_set_result_int(context, sqlite_last_statement_changes(db));
+}
+
+/*
+** Implementation of the like() SQL function. This function implements
+** the built-in LIKE operator. The first argument to the function is the
+** string and the second argument is the pattern. So, the SQL statement:
+**
+** A LIKE B
+**
+** is implemented as like(A,B).
+*/
+static void likeFunc(sqlite_func *context, int arg, const char **argv){
+ if( argv[0]==0 || argv[1]==0 ) return;
+ sqlite_set_result_int(context,
+ sqliteLikeCompare((const unsigned char*)argv[0],
+ (const unsigned char*)argv[1]));
+}
+
+/*
+** Implementation of the glob() SQL function. This function implements
+** the built-in GLOB operator. The first argument to the function is the
+** string and the second argument is the pattern. So, the SQL statement:
+**
+** A GLOB B
+**
+** is implemented as glob(A,B).
+*/
+static void globFunc(sqlite_func *context, int arg, const char **argv){
+ if( argv[0]==0 || argv[1]==0 ) return;
+ sqlite_set_result_int(context,
+ sqliteGlobCompare((const unsigned char*)argv[0],
+ (const unsigned char*)argv[1]));
+}
+
+/*
+** Implementation of the NULLIF(x,y) function. The result is the first
+** argument if the arguments are different. The result is NULL if the
+** arguments are equal to each other.
+*/
+static void nullifFunc(sqlite_func *context, int argc, const char **argv){
+ if( argv[0]!=0 && sqliteCompare(argv[0],argv[1])!=0 ){
+ sqlite_set_result_string(context, argv[0], -1);
+ }
+}
+
+/*
+** Implementation of the VERSION(*) function. The result is the version
+** of the SQLite library that is running.
+*/
+static void versionFunc(sqlite_func *context, int argc, const char **argv){
+ sqlite_set_result_string(context, sqlite_version, -1);
+}
+
+/*
+** EXPERIMENTAL - This is not an official function. The interface may
+** change. This function may disappear. Do not write code that depends
+** on this function.
+**
+** Implementation of the QUOTE() function. This function takes a single
+** argument. If the argument is numeric, the return value is the same as
+** the argument. If the argument is NULL, the return value is the string
+** "NULL". Otherwise, the argument is enclosed in single quotes with
+** single-quote escapes.
+*/
+static void quoteFunc(sqlite_func *context, int argc, const char **argv){
+ if( argc<1 ) return;
+ if( argv[0]==0 ){
+ sqlite_set_result_string(context, "NULL", 4);
+ }else if( sqliteIsNumber(argv[0]) ){
+ sqlite_set_result_string(context, argv[0], -1);
+ }else{
+ int i,j,n;
+ char *z;
+ for(i=n=0; argv[0][i]; i++){ if( argv[0][i]=='\'' ) n++; }
+ z = sqliteMalloc( i+n+3 );
+ if( z==0 ) return;
+ z[0] = '\'';
+ for(i=0, j=1; argv[0][i]; i++){
+ z[j++] = argv[0][i];
+ if( argv[0][i]=='\'' ){
+ z[j++] = '\'';
+ }
+ }
+ z[j++] = '\'';
+ z[j] = 0;
+ sqlite_set_result_string(context, z, j);
+ sqliteFree(z);
+ }
+}
+
+#ifdef SQLITE_SOUNDEX
+/*
+** Compute the soundex encoding of a word.
+*/
+static void soundexFunc(sqlite_func *context, int argc, const char **argv){
+ char zResult[8];
+ const char *zIn;
+ int i, j;
+ static const unsigned char iCode[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 2, 3, 0, 1, 2, 0, 0, 2, 2, 4, 5, 5, 0,
+ 1, 2, 6, 2, 3, 0, 1, 0, 2, 0, 2, 0, 0, 0, 0, 0,
+ 0, 0, 1, 2, 3, 0, 1, 2, 0, 0, 2, 2, 4, 5, 5, 0,
+ 1, 2, 6, 2, 3, 0, 1, 0, 2, 0, 2, 0, 0, 0, 0, 0,
+ };
+ assert( argc==1 );
+ zIn = argv[0];
+ for(i=0; zIn[i] && !isalpha(zIn[i]); i++){}
+ if( zIn[i] ){
+ zResult[0] = toupper(zIn[i]);
+ for(j=1; j<4 && zIn[i]; i++){
+ int code = iCode[zIn[i]&0x7f];
+ if( code>0 ){
+ zResult[j++] = code + '0';
+ }
+ }
+ while( j<4 ){
+ zResult[j++] = '0';
+ }
+ zResult[j] = 0;
+ sqlite_set_result_string(context, zResult, 4);
+ }else{
+ sqlite_set_result_string(context, "?000", 4);
+ }
+}
+#endif
+
+#ifdef SQLITE_TEST
+/*
+** This function generates a string of random characters. Used for
+** generating test data.
+*/
+static void randStr(sqlite_func *context, int argc, const char **argv){
+ static const unsigned char zSrc[] =
+ "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "0123456789"
+ ".-!,:*^+=_|?/<> ";
+ int iMin, iMax, n, r, i;
+ unsigned char zBuf[1000];
+ if( argc>=1 ){
+ iMin = atoi(argv[0]);
+ if( iMin<0 ) iMin = 0;
+ if( iMin>=sizeof(zBuf) ) iMin = sizeof(zBuf)-1;
+ }else{
+ iMin = 1;
+ }
+ if( argc>=2 ){
+ iMax = atoi(argv[1]);
+ if( iMax<iMin ) iMax = iMin;
+ if( iMax>=sizeof(zBuf) ) iMax = sizeof(zBuf)-1;
+ }else{
+ iMax = 50;
+ }
+ n = iMin;
+ if( iMax>iMin ){
+ sqliteRandomness(sizeof(r), &r);
+ r &= 0x7fffffff;
+ n += r%(iMax + 1 - iMin);
+ }
+ assert( n<sizeof(zBuf) );
+ sqliteRandomness(n, zBuf);
+ for(i=0; i<n; i++){
+ zBuf[i] = zSrc[zBuf[i]%(sizeof(zSrc)-1)];
+ }
+ zBuf[n] = 0;
+ sqlite_set_result_string(context, zBuf, n);
+}
+#endif
+
+/*
+** An instance of the following structure holds the context of a
+** sum() or avg() aggregate computation.
+*/
+typedef struct SumCtx SumCtx;
+struct SumCtx {
+ double sum; /* Sum of terms */
+ int cnt; /* Number of elements summed */
+};
+
+/*
+** Routines used to compute the sum or average.
+*/
+static void sumStep(sqlite_func *context, int argc, const char **argv){
+ SumCtx *p;
+ if( argc<1 ) return;
+ p = sqlite_aggregate_context(context, sizeof(*p));
+ if( p && argv[0] ){
+ p->sum += sqliteAtoF(argv[0], 0);
+ p->cnt++;
+ }
+}
+static void sumFinalize(sqlite_func *context){
+ SumCtx *p;
+ p = sqlite_aggregate_context(context, sizeof(*p));
+ sqlite_set_result_double(context, p ? p->sum : 0.0);
+}
+static void avgFinalize(sqlite_func *context){
+ SumCtx *p;
+ p = sqlite_aggregate_context(context, sizeof(*p));
+ if( p && p->cnt>0 ){
+ sqlite_set_result_double(context, p->sum/(double)p->cnt);
+ }
+}
+
+/*
+** An instance of the following structure holds the context of a
+** variance or standard deviation computation.
+*/
+typedef struct StdDevCtx StdDevCtx;
+struct StdDevCtx {
+ double sum; /* Sum of terms */
+ double sum2; /* Sum of the squares of terms */
+ int cnt; /* Number of terms counted */
+};
+
+#if 0 /* Omit because math library is required */
+/*
+** Routines used to compute the standard deviation as an aggregate.
+*/
+static void stdDevStep(sqlite_func *context, int argc, const char **argv){
+ StdDevCtx *p;
+ double x;
+ if( argc<1 ) return;
+ p = sqlite_aggregate_context(context, sizeof(*p));
+ if( p && argv[0] ){
+ x = sqliteAtoF(argv[0], 0);
+ p->sum += x;
+ p->sum2 += x*x;
+ p->cnt++;
+ }
+}
+static void stdDevFinalize(sqlite_func *context){
+ double rN = sqlite_aggregate_count(context);
+ StdDevCtx *p = sqlite_aggregate_context(context, sizeof(*p));
+ if( p && p->cnt>1 ){
+    double rCnt = p->cnt;
+ sqlite_set_result_double(context,
+ sqrt((p->sum2 - p->sum*p->sum/rCnt)/(rCnt-1.0)));
+ }
+}
+#endif
+
+/*
+** The following structure keeps track of state information for the
+** count() aggregate function.
+*/
+typedef struct CountCtx CountCtx;
+struct CountCtx {
+ int n;
+};
+
+/*
+** Routines to implement the count() aggregate function.
+*/
+static void countStep(sqlite_func *context, int argc, const char **argv){
+ CountCtx *p;
+ p = sqlite_aggregate_context(context, sizeof(*p));
+ if( (argc==0 || argv[0]) && p ){
+ p->n++;
+ }
+}
+static void countFinalize(sqlite_func *context){
+ CountCtx *p;
+ p = sqlite_aggregate_context(context, sizeof(*p));
+ sqlite_set_result_int(context, p ? p->n : 0);
+}
+
+/*
+** This function tracks state information for the min() and max()
+** aggregate functions.
+*/
+typedef struct MinMaxCtx MinMaxCtx;
+struct MinMaxCtx {
+ char *z; /* The best so far */
+ char zBuf[28]; /* Space that can be used for storage */
+};
+
+/*
+** Routines to implement min() and max() aggregate functions.
+*/
+static void minmaxStep(sqlite_func *context, int argc, const char **argv){
+ MinMaxCtx *p;
+ int (*xCompare)(const char*, const char*);
+ int mask; /* 0 for min() or 0xffffffff for max() */
+
+ assert( argc==2 );
+ if( argv[0]==0 ) return; /* Ignore NULL values */
+ if( argv[1][0]=='n' ){
+ xCompare = sqliteCompare;
+ }else{
+ xCompare = strcmp;
+ }
+ mask = (int)sqlite_user_data(context);
+ assert( mask==0 || mask==-1 );
+ p = sqlite_aggregate_context(context, sizeof(*p));
+ if( p==0 || argc<1 ) return;
+ if( p->z==0 || (xCompare(argv[0],p->z)^mask)<0 ){
+ int len;
+ if( p->zBuf[0] ){
+ sqliteFree(p->z);
+ }
+ len = strlen(argv[0]);
+ if( len < sizeof(p->zBuf)-1 ){
+ p->z = &p->zBuf[1];
+ p->zBuf[0] = 0;
+ }else{
+ p->z = sqliteMalloc( len+1 );
+ p->zBuf[0] = 1;
+ if( p->z==0 ) return;
+ }
+ strcpy(p->z, argv[0]);
+ }
+}
+static void minMaxFinalize(sqlite_func *context){
+ MinMaxCtx *p;
+ p = sqlite_aggregate_context(context, sizeof(*p));
+ if( p && p->z && p->zBuf[0]<2 ){
+ sqlite_set_result_string(context, p->z, strlen(p->z));
+ }
+ if( p && p->zBuf[0] ){
+ sqliteFree(p->z);
+ }
+}
+
+/*
+** This function registers all of the above C functions as SQL
+** functions. This should be the only routine in this file with
+** external linkage.
+*/
+void sqliteRegisterBuiltinFunctions(sqlite *db){
+ static struct {
+ char *zName;
+ signed char nArg;
+ signed char dataType;
+ u8 argType; /* 0: none. 1: db 2: (-1) */
+ void (*xFunc)(sqlite_func*,int,const char**);
+ } aFuncs[] = {
+ { "min", -1, SQLITE_ARGS, 0, minmaxFunc },
+ { "min", 0, 0, 0, 0 },
+ { "max", -1, SQLITE_ARGS, 2, minmaxFunc },
+ { "max", 0, 0, 2, 0 },
+ { "typeof", 1, SQLITE_TEXT, 0, typeofFunc },
+ { "length", 1, SQLITE_NUMERIC, 0, lengthFunc },
+ { "substr", 3, SQLITE_TEXT, 0, substrFunc },
+ { "abs", 1, SQLITE_NUMERIC, 0, absFunc },
+ { "round", 1, SQLITE_NUMERIC, 0, roundFunc },
+ { "round", 2, SQLITE_NUMERIC, 0, roundFunc },
+ { "upper", 1, SQLITE_TEXT, 0, upperFunc },
+ { "lower", 1, SQLITE_TEXT, 0, lowerFunc },
+ { "coalesce", -1, SQLITE_ARGS, 0, ifnullFunc },
+ { "coalesce", 0, 0, 0, 0 },
+ { "coalesce", 1, 0, 0, 0 },
+ { "ifnull", 2, SQLITE_ARGS, 0, ifnullFunc },
+ { "random", -1, SQLITE_NUMERIC, 0, randomFunc },
+ { "like", 2, SQLITE_NUMERIC, 0, likeFunc },
+ { "glob", 2, SQLITE_NUMERIC, 0, globFunc },
+ { "nullif", 2, SQLITE_ARGS, 0, nullifFunc },
+ { "sqlite_version",0,SQLITE_TEXT, 0, versionFunc},
+ { "quote", 1, SQLITE_ARGS, 0, quoteFunc },
+ { "last_insert_rowid", 0, SQLITE_NUMERIC, 1, last_insert_rowid },
+ { "change_count", 0, SQLITE_NUMERIC, 1, change_count },
+ { "last_statement_change_count",
+ 0, SQLITE_NUMERIC, 1, last_statement_change_count },
+#ifdef SQLITE_SOUNDEX
+ { "soundex", 1, SQLITE_TEXT, 0, soundexFunc},
+#endif
+#ifdef SQLITE_TEST
+ { "randstr", 2, SQLITE_TEXT, 0, randStr },
+#endif
+ };
+ static struct {
+ char *zName;
+ signed char nArg;
+ signed char dataType;
+ u8 argType;
+ void (*xStep)(sqlite_func*,int,const char**);
+ void (*xFinalize)(sqlite_func*);
+ } aAggs[] = {
+ { "min", 1, 0, 0, minmaxStep, minMaxFinalize },
+ { "max", 1, 0, 2, minmaxStep, minMaxFinalize },
+ { "sum", 1, SQLITE_NUMERIC, 0, sumStep, sumFinalize },
+ { "avg", 1, SQLITE_NUMERIC, 0, sumStep, avgFinalize },
+ { "count", 0, SQLITE_NUMERIC, 0, countStep, countFinalize },
+ { "count", 1, SQLITE_NUMERIC, 0, countStep, countFinalize },
+#if 0
+ { "stddev", 1, SQLITE_NUMERIC, 0, stdDevStep, stdDevFinalize },
+#endif
+ };
+ static const char *azTypeFuncs[] = { "min", "max", "typeof" };
+ int i;
+
+ for(i=0; i<sizeof(aFuncs)/sizeof(aFuncs[0]); i++){
+ void *pArg;
+ switch( aFuncs[i].argType ){
+ case 0: pArg = 0; break;
+ case 1: pArg = db; break;
+ case 2: pArg = (void*)(-1); break;
+ }
+ sqlite_create_function(db, aFuncs[i].zName,
+ aFuncs[i].nArg, aFuncs[i].xFunc, pArg);
+ if( aFuncs[i].xFunc ){
+ sqlite_function_type(db, aFuncs[i].zName, aFuncs[i].dataType);
+ }
+ }
+ for(i=0; i<sizeof(aAggs)/sizeof(aAggs[0]); i++){
+ void *pArg;
+ switch( aAggs[i].argType ){
+ case 0: pArg = 0; break;
+ case 1: pArg = db; break;
+ case 2: pArg = (void*)(-1); break;
+ }
+ sqlite_create_aggregate(db, aAggs[i].zName,
+ aAggs[i].nArg, aAggs[i].xStep, aAggs[i].xFinalize, pArg);
+ sqlite_function_type(db, aAggs[i].zName, aAggs[i].dataType);
+ }
+ for(i=0; i<sizeof(azTypeFuncs)/sizeof(azTypeFuncs[0]); i++){
+ int n = strlen(azTypeFuncs[i]);
+ FuncDef *p = sqliteHashFind(&db->aFunc, azTypeFuncs[i], n);
+ while( p ){
+ p->includeTypes = 1;
+ p = p->pNext;
+ }
+ }
+ sqliteRegisterDateTimeFunctions(db);
+}
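The registration loop above is also the pattern an application would use to add its own SQL functions with the SQLite 2.x API: one sqlite_create_function() call per (name, argument count) pair, optionally followed by sqlite_function_type(). The sketch below is illustrative only; the reverse() function and the registerReverse() helper are invented for the example, and it assumes the public sqlite.h header that declares the API used throughout this file.

#include <string.h>
#include "sqlite.h"   /* public SQLite 2.x API header (assumed available) */

/* reverse(X): return X with its characters reversed. Like the built-ins
** above, a NULL argument produces a NULL result (the result is simply
** never set). */
static void reverseFunc(sqlite_func *context, int argc, const char **argv){
  char zBuf[256];
  int i, n;
  if( argc<1 || argv[0]==0 ) return;
  n = strlen(argv[0]);
  if( n>(int)sizeof(zBuf)-1 ) n = sizeof(zBuf)-1;   /* keep the sketch simple */
  for(i=0; i<n; i++) zBuf[i] = argv[0][n-1-i];
  zBuf[n] = 0;
  sqlite_set_result_string(context, zBuf, n);
}

/* Registration mirrors one row of the aFuncs[] table above. */
static void registerReverse(sqlite *db){
  sqlite_create_function(db, "reverse", 1, reverseFunc, 0);
  sqlite_function_type(db, "reverse", SQLITE_TEXT);
}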
diff --git a/usr/src/cmd/svc/configd/sqlite/src/hash.c b/usr/src/cmd/svc/configd/sqlite/src/hash.c
new file mode 100644
index 0000000000..fba18bb33b
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/hash.c
@@ -0,0 +1,359 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 22
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This is the implementation of generic hash-tables
+** used in SQLite.
+**
+** $Id: hash.c,v 1.11 2004/01/08 02:17:33 drh Exp $
+*/
+#include "sqliteInt.h"
+#include <assert.h>
+
+/* Turn bulk memory into a hash table object by initializing the
+** fields of the Hash structure.
+**
+** "new" is a pointer to the hash table that is to be initialized.
+** keyClass is one of the constants SQLITE_HASH_INT, SQLITE_HASH_POINTER,
+** SQLITE_HASH_BINARY, or SQLITE_HASH_STRING. The value of keyClass
+** determines what kind of key the hash table will use. "copyKey" is
+** true if the hash table should make its own private copy of keys and
+** false if it should just use the supplied pointer. CopyKey only makes
+** sense for SQLITE_HASH_STRING and SQLITE_HASH_BINARY and is ignored
+** for other key classes.
+*/
+void sqliteHashInit(Hash *new, int keyClass, int copyKey){
+ assert( new!=0 );
+ assert( keyClass>=SQLITE_HASH_INT && keyClass<=SQLITE_HASH_BINARY );
+ new->keyClass = keyClass;
+ new->copyKey = copyKey &&
+ (keyClass==SQLITE_HASH_STRING || keyClass==SQLITE_HASH_BINARY);
+ new->first = 0;
+ new->count = 0;
+ new->htsize = 0;
+ new->ht = 0;
+}
+
+/* Remove all entries from a hash table. Reclaim all memory.
+** Call this routine to delete a hash table or to reset a hash table
+** to the empty state.
+*/
+void sqliteHashClear(Hash *pH){
+ HashElem *elem; /* For looping over all elements of the table */
+
+ assert( pH!=0 );
+ elem = pH->first;
+ pH->first = 0;
+ if( pH->ht ) sqliteFree(pH->ht);
+ pH->ht = 0;
+ pH->htsize = 0;
+ while( elem ){
+ HashElem *next_elem = elem->next;
+ if( pH->copyKey && elem->pKey ){
+ sqliteFree(elem->pKey);
+ }
+ sqliteFree(elem);
+ elem = next_elem;
+ }
+ pH->count = 0;
+}
+
+/*
+** Hash and comparison functions when the mode is SQLITE_HASH_INT
+*/
+static int intHash(const void *pKey, int nKey){
+ return nKey ^ (nKey<<8) ^ (nKey>>8);
+}
+static int intCompare(const void *pKey1, int n1, const void *pKey2, int n2){
+ return n2 - n1;
+}
+
+#if 0 /* NOT USED */
+/*
+** Hash and comparison functions when the mode is SQLITE_HASH_POINTER
+*/
+static int ptrHash(const void *pKey, int nKey){
+ uptr x = Addr(pKey);
+ return x ^ (x<<8) ^ (x>>8);
+}
+static int ptrCompare(const void *pKey1, int n1, const void *pKey2, int n2){
+ if( pKey1==pKey2 ) return 0;
+ if( pKey1<pKey2 ) return -1;
+ return 1;
+}
+#endif
+
+/*
+** Hash and comparison functions when the mode is SQLITE_HASH_STRING
+*/
+static int strHash(const void *pKey, int nKey){
+ return sqliteHashNoCase((const char*)pKey, nKey);
+}
+static int strCompare(const void *pKey1, int n1, const void *pKey2, int n2){
+ if( n1!=n2 ) return n2-n1;
+ return sqliteStrNICmp((const char*)pKey1,(const char*)pKey2,n1);
+}
+
+/*
+** Hash and comparison functions when the mode is SQLITE_HASH_BINARY
+*/
+static int binHash(const void *pKey, int nKey){
+ int h = 0;
+ const char *z = (const char *)pKey;
+ while( nKey-- > 0 ){
+ h = (h<<3) ^ h ^ *(z++);
+ }
+ return h & 0x7fffffff;
+}
+static int binCompare(const void *pKey1, int n1, const void *pKey2, int n2){
+ if( n1!=n2 ) return n2-n1;
+ return memcmp(pKey1,pKey2,n1);
+}
+
+/*
+** Return a pointer to the appropriate hash function given the key class.
+**
+** The C syntax in this function definition may be unfamiliar to some
+** programmers, so we provide the following additional explanation:
+**
+** The name of the function is "hashFunction". The function takes a
+** single parameter "keyClass". The return value of hashFunction()
+** is a pointer to another function. Specifically, the return value
+** of hashFunction() is a pointer to a function that takes two parameters
+** with types "const void*" and "int" and returns an "int".
+*/
+static int (*hashFunction(int keyClass))(const void*,int){
+ switch( keyClass ){
+ case SQLITE_HASH_INT: return &intHash;
+ /* case SQLITE_HASH_POINTER: return &ptrHash; // NOT USED */
+ case SQLITE_HASH_STRING: return &strHash;
+    case SQLITE_HASH_BINARY:  return &binHash;
+ default: break;
+ }
+ return 0;
+}
+
+/*
+** Return a pointer to the appropriate comparison function given the
+** key class.
+**
+** For help in interpreting the obscure C code in the function definition,
+** see the header comment on the previous function.
+*/
+static int (*compareFunction(int keyClass))(const void*,int,const void*,int){
+ switch( keyClass ){
+ case SQLITE_HASH_INT: return &intCompare;
+ /* case SQLITE_HASH_POINTER: return &ptrCompare; // NOT USED */
+ case SQLITE_HASH_STRING: return &strCompare;
+ case SQLITE_HASH_BINARY: return &binCompare;
+ default: break;
+ }
+ return 0;
+}
+
+
+/* Resize the hash table so that it contains "new_size" buckets.
+** "new_size" must be a power of 2. The hash table might fail
+** to resize if sqliteMalloc() fails.
+*/
+static void rehash(Hash *pH, int new_size){
+ struct _ht *new_ht; /* The new hash table */
+ HashElem *elem, *next_elem; /* For looping over existing elements */
+ HashElem *x; /* Element being copied to new hash table */
+ int (*xHash)(const void*,int); /* The hash function */
+
+ assert( (new_size & (new_size-1))==0 );
+ new_ht = (struct _ht *)sqliteMalloc( new_size*sizeof(struct _ht) );
+ if( new_ht==0 ) return;
+ if( pH->ht ) sqliteFree(pH->ht);
+ pH->ht = new_ht;
+ pH->htsize = new_size;
+ xHash = hashFunction(pH->keyClass);
+ for(elem=pH->first, pH->first=0; elem; elem = next_elem){
+ int h = (*xHash)(elem->pKey, elem->nKey) & (new_size-1);
+ next_elem = elem->next;
+ x = new_ht[h].chain;
+ if( x ){
+ elem->next = x;
+ elem->prev = x->prev;
+ if( x->prev ) x->prev->next = elem;
+ else pH->first = elem;
+ x->prev = elem;
+ }else{
+ elem->next = pH->first;
+ if( pH->first ) pH->first->prev = elem;
+ elem->prev = 0;
+ pH->first = elem;
+ }
+ new_ht[h].chain = elem;
+ new_ht[h].count++;
+ }
+}
+
+/* This function (for internal use only) locates an element in a
+** hash table that matches the given key. The hash for this key has
+** already been computed and is passed as the 4th parameter.
+*/
+static HashElem *findElementGivenHash(
+ const Hash *pH, /* The pH to be searched */
+ const void *pKey, /* The key we are searching for */
+ int nKey,
+ int h /* The hash for this key. */
+){
+ HashElem *elem; /* Used to loop thru the element list */
+ int count; /* Number of elements left to test */
+ int (*xCompare)(const void*,int,const void*,int); /* comparison function */
+
+ if( pH->ht ){
+ elem = pH->ht[h].chain;
+ count = pH->ht[h].count;
+ xCompare = compareFunction(pH->keyClass);
+ while( count-- && elem ){
+ if( (*xCompare)(elem->pKey,elem->nKey,pKey,nKey)==0 ){
+ return elem;
+ }
+ elem = elem->next;
+ }
+ }
+ return 0;
+}
+
+/* Remove a single entry from the hash table given a pointer to that
+** element and a hash on the element's key.
+*/
+static void removeElementGivenHash(
+ Hash *pH, /* The pH containing "elem" */
+ HashElem* elem, /* The element to be removed from the pH */
+ int h /* Hash value for the element */
+){
+ if( elem->prev ){
+ elem->prev->next = elem->next;
+ }else{
+ pH->first = elem->next;
+ }
+ if( elem->next ){
+ elem->next->prev = elem->prev;
+ }
+ if( pH->ht[h].chain==elem ){
+ pH->ht[h].chain = elem->next;
+ }
+ pH->ht[h].count--;
+ if( pH->ht[h].count<=0 ){
+ pH->ht[h].chain = 0;
+ }
+ if( pH->copyKey && elem->pKey ){
+ sqliteFree(elem->pKey);
+ }
+ sqliteFree( elem );
+ pH->count--;
+}
+
+/* Attempt to locate an element of the hash table pH with a key
+** that matches pKey,nKey. Return the data for this element if it is
+** found, or NULL if there is no match.
+*/
+void *sqliteHashFind(const Hash *pH, const void *pKey, int nKey){
+ int h; /* A hash on key */
+ HashElem *elem; /* The element that matches key */
+ int (*xHash)(const void*,int); /* The hash function */
+
+ if( pH==0 || pH->ht==0 ) return 0;
+ xHash = hashFunction(pH->keyClass);
+ assert( xHash!=0 );
+ h = (*xHash)(pKey,nKey);
+ assert( (pH->htsize & (pH->htsize-1))==0 );
+ elem = findElementGivenHash(pH,pKey,nKey, h & (pH->htsize-1));
+ return elem ? elem->data : 0;
+}
+
+/* Insert an element into the hash table pH. The key is pKey,nKey
+** and the data is "data".
+**
+** If no element exists with a matching key, then a new
+** element is created. A copy of the key is made if the copyKey
+** flag is set. NULL is returned.
+**
+** If another element already exists with the same key, then the
+** new data replaces the old data and the old data is returned.
+** The key is not copied in this instance. If a malloc fails, then
+** the new data is returned and the hash table is unchanged.
+**
+** If the "data" parameter to this function is NULL, then the
+** element corresponding to "key" is removed from the hash table.
+*/
+void *sqliteHashInsert(Hash *pH, const void *pKey, int nKey, void *data){
+ int hraw; /* Raw hash value of the key */
+ int h; /* the hash of the key modulo hash table size */
+ HashElem *elem; /* Used to loop thru the element list */
+ HashElem *new_elem; /* New element added to the pH */
+ int (*xHash)(const void*,int); /* The hash function */
+
+ assert( pH!=0 );
+ xHash = hashFunction(pH->keyClass);
+ assert( xHash!=0 );
+ hraw = (*xHash)(pKey, nKey);
+ assert( (pH->htsize & (pH->htsize-1))==0 );
+ h = hraw & (pH->htsize-1);
+ elem = findElementGivenHash(pH,pKey,nKey,h);
+ if( elem ){
+ void *old_data = elem->data;
+ if( data==0 ){
+ removeElementGivenHash(pH,elem,h);
+ }else{
+ elem->data = data;
+ }
+ return old_data;
+ }
+ if( data==0 ) return 0;
+ new_elem = (HashElem*)sqliteMalloc( sizeof(HashElem) );
+ if( new_elem==0 ) return data;
+ if( pH->copyKey && pKey!=0 ){
+ new_elem->pKey = sqliteMallocRaw( nKey );
+ if( new_elem->pKey==0 ){
+ sqliteFree(new_elem);
+ return data;
+ }
+ memcpy((void*)new_elem->pKey, pKey, nKey);
+ }else{
+ new_elem->pKey = (void*)pKey;
+ }
+ new_elem->nKey = nKey;
+ pH->count++;
+ if( pH->htsize==0 ) rehash(pH,8);
+ if( pH->htsize==0 ){
+ pH->count = 0;
+ sqliteFree(new_elem);
+ return data;
+ }
+ if( pH->count > pH->htsize ){
+ rehash(pH,pH->htsize*2);
+ }
+ assert( (pH->htsize & (pH->htsize-1))==0 );
+ h = hraw & (pH->htsize-1);
+ elem = pH->ht[h].chain;
+ if( elem ){
+ new_elem->next = elem;
+ new_elem->prev = elem->prev;
+ if( elem->prev ){ elem->prev->next = new_elem; }
+ else { pH->first = new_elem; }
+ elem->prev = new_elem;
+ }else{
+ new_elem->next = pH->first;
+ new_elem->prev = 0;
+ if( pH->first ){ pH->first->prev = new_elem; }
+ pH->first = new_elem;
+ }
+ pH->ht[h].count++;
+ pH->ht[h].chain = new_elem;
+ new_elem->data = data;
+ return 0;
+}
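The header comment on hashFunction() above walks through the declarator syntax for a function that returns a function pointer. A minimal standalone example of the same shape, using names (pickOp, add, sub) invented purely for the illustration:

#include <stdio.h>

static int add(int a, int b){ return a+b; }
static int sub(int a, int b){ return a-b; }

/* pickOp() takes one int and returns a pointer to a function that takes
** two ints and returns an int -- the same declarator shape as
** hashFunction() and compareFunction() above. */
static int (*pickOp(int useAdd))(int,int){
  return useAdd ? &add : &sub;
}

int main(void){
  int (*op)(int,int) = pickOp(1);
  printf("%d\n", op(2, 3));     /* prints 5 */
  return 0;
}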
diff --git a/usr/src/cmd/svc/configd/sqlite/src/hash.h b/usr/src/cmd/svc/configd/sqlite/src/hash.h
new file mode 100644
index 0000000000..89671655c2
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/hash.h
@@ -0,0 +1,112 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 22
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This is the header file for the generic hash-table implementation
+** used in SQLite.
+**
+** $Id: hash.h,v 1.6 2004/01/08 02:17:33 drh Exp $
+*/
+#ifndef _SQLITE_HASH_H_
+#define _SQLITE_HASH_H_
+
+/* Forward declarations of structures. */
+typedef struct Hash Hash;
+typedef struct HashElem HashElem;
+
+/* A complete hash table is an instance of the following structure.
+** The internals of this structure are intended to be opaque -- client
+** code should not attempt to access or modify the fields of this structure
+** directly. Change this structure only by using the routines below.
+** However, many of the "procedures" and "functions" for modifying and
+** accessing this structure are really macros, so we can't really make
+** this structure opaque.
+*/
+struct Hash {
+ char keyClass; /* SQLITE_HASH_INT, _POINTER, _STRING, _BINARY */
+ char copyKey; /* True if copy of key made on insert */
+ int count; /* Number of entries in this table */
+ HashElem *first; /* The first element of the array */
+ int htsize; /* Number of buckets in the hash table */
+ struct _ht { /* the hash table */
+ int count; /* Number of entries with this hash */
+ HashElem *chain; /* Pointer to first entry with this hash */
+ } *ht;
+};
+
+/* Each element in the hash table is an instance of the following
+** structure. All elements are stored on a single doubly-linked list.
+**
+** Again, this structure is intended to be opaque, but it can't really
+** be opaque because it is used by macros.
+*/
+struct HashElem {
+ HashElem *next, *prev; /* Next and previous elements in the table */
+ void *data; /* Data associated with this element */
+ void *pKey; int nKey; /* Key associated with this element */
+};
+
+/*
+** There are 4 different modes of operation for a hash table:
+**
+** SQLITE_HASH_INT nKey is used as the key and pKey is ignored.
+**
+** SQLITE_HASH_POINTER pKey is used as the key and nKey is ignored.
+**
+** SQLITE_HASH_STRING pKey points to a string that is nKey bytes long
+** (including the null-terminator, if any). Case
+** is ignored in comparisons.
+**
+** SQLITE_HASH_BINARY pKey points to binary data nKey bytes long.
+** memcmp() is used to compare keys.
+**
+** A copy of the key is made for SQLITE_HASH_STRING and SQLITE_HASH_BINARY
+** if the copyKey parameter to HashInit is 1.
+*/
+#define SQLITE_HASH_INT 1
+/* #define SQLITE_HASH_POINTER 2 // NOT USED */
+#define SQLITE_HASH_STRING 3
+#define SQLITE_HASH_BINARY 4
+
+/*
+** Access routines. To delete, insert a NULL pointer.
+*/
+void sqliteHashInit(Hash*, int keytype, int copyKey);
+void *sqliteHashInsert(Hash*, const void *pKey, int nKey, void *pData);
+void *sqliteHashFind(const Hash*, const void *pKey, int nKey);
+void sqliteHashClear(Hash*);
+
+/*
+** Macros for looping over all elements of a hash table. The idiom is
+** like this:
+**
+** Hash h;
+** HashElem *p;
+** ...
+** for(p=sqliteHashFirst(&h); p; p=sqliteHashNext(p)){
+** SomeStructure *pData = sqliteHashData(p);
+** // do something with pData
+** }
+*/
+#define sqliteHashFirst(H) ((H)->first)
+#define sqliteHashNext(E) ((E)->next)
+#define sqliteHashData(E) ((E)->data)
+#define sqliteHashKey(E) ((E)->pKey)
+#define sqliteHashKeysize(E) ((E)->nKey)
+
+/*
+** Number of entries in a hash table
+*/
+#define sqliteHashCount(H) ((H)->count)
+
+#endif /* _SQLITE_HASH_H_ */
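Putting the access routines and looping macros above together gives the usual init/insert/find/iterate/clear life cycle. A short usage sketch follows; it assumes the code is compiled and linked with the hash.c routines (which in turn rely on sqliteMalloc() and friends from the rest of the library), and the demoHashUsage name is invented for the example.

#include <stdio.h>
#include <string.h>
#include "hash.h"

/* Build a small case-insensitive, string-keyed table, look one key up,
** walk every entry, then release all memory. */
static void demoHashUsage(void){
  Hash h;
  HashElem *p;
  static char *apKey[]   = { "alpha", "beta", "gamma" };
  static char *apValue[] = { "one",   "two",  "three" };
  int i;

  /* copyKey==1: the table keeps private copies of the key strings */
  sqliteHashInit(&h, SQLITE_HASH_STRING, 1);
  for(i=0; i<3; i++){
    sqliteHashInsert(&h, apKey[i], strlen(apKey[i])+1, apValue[i]);
  }

  printf("beta -> %s\n", (char*)sqliteHashFind(&h, "beta", 5));

  for(p=sqliteHashFirst(&h); p; p=sqliteHashNext(p)){
    printf("%s = %s\n", (char*)sqliteHashKey(p), (char*)sqliteHashData(p));
  }

  sqliteHashInsert(&h, "alpha", 6, 0);  /* NULL data deletes one entry */
  sqliteHashClear(&h);                  /* reclaim everything else */
}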
diff --git a/usr/src/cmd/svc/configd/sqlite/src/insert.c b/usr/src/cmd/svc/configd/sqlite/src/insert.c
new file mode 100644
index 0000000000..334acbf941
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/insert.c
@@ -0,0 +1,922 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains C code routines that are called by the parser
+** to handle INSERT statements in SQLite.
+**
+** $Id: insert.c,v 1.94 2004/02/24 01:05:33 drh Exp $
+*/
+#include "sqliteInt.h"
+
+/*
+** This routine is called to handle SQL of the following forms:
+**
+** insert into TABLE (IDLIST) values(EXPRLIST)
+** insert into TABLE (IDLIST) select
+**
+** The IDLIST following the table name is always optional. If omitted,
+** then a list of all columns for the table is substituted. The IDLIST
+** appears in the pColumn parameter. pColumn is NULL if IDLIST is omitted.
+**
+** The pList parameter holds EXPRLIST in the first form of the INSERT
+** statement above, and pSelect is NULL. For the second form, pList is
+** NULL and pSelect is a pointer to the select statement used to generate
+** data for the insert.
+**
+** The code generated follows one of three templates. For a simple
+** insert with data coming from a VALUES clause, the code executes
+** once straight down through. The template looks like this:
+**
+** open write cursor to <table> and its indices
+** puts VALUES clause expressions onto the stack
+** write the resulting record into <table>
+** cleanup
+**
+** If the statement is of the form
+**
+** INSERT INTO <table> SELECT ...
+**
+** and the SELECT clause does not read from <table> at any time, then
+** the generated code follows this template:
+**
+** goto B
+** A: setup for the SELECT
+** loop over the tables in the SELECT
+** gosub C
+** end loop
+** cleanup after the SELECT
+** goto D
+** B: open write cursor to <table> and its indices
+** goto A
+** C: insert the select result into <table>
+** return
+** D: cleanup
+**
+** The third template is used if the insert statement takes its
+** values from a SELECT but the data is being inserted into a table
+** that is also read as part of the SELECT. In the third form,
+** we have to use an intermediate table to store the results of
+** the select. The template is like this:
+**
+** goto B
+** A: setup for the SELECT
+** loop over the tables in the SELECT
+** gosub C
+** end loop
+** cleanup after the SELECT
+** goto D
+** C: insert the select result into the intermediate table
+** return
+** B: open a cursor to an intermediate table
+** goto A
+** D: open write cursor to <table> and its indices
+** loop over the intermediate table
+**           transfer values from the intermediate table into <table>
+** end the loop
+** cleanup
+*/
+void sqliteInsert(
+ Parse *pParse, /* Parser context */
+ SrcList *pTabList, /* Name of table into which we are inserting */
+ ExprList *pList, /* List of values to be inserted */
+ Select *pSelect, /* A SELECT statement to use as the data source */
+ IdList *pColumn, /* Column names corresponding to IDLIST. */
+ int onError /* How to handle constraint errors */
+){
+ Table *pTab; /* The table to insert into */
+ char *zTab; /* Name of the table into which we are inserting */
+ const char *zDb; /* Name of the database holding this table */
+ int i, j, idx; /* Loop counters */
+ Vdbe *v; /* Generate code into this virtual machine */
+ Index *pIdx; /* For looping over indices of the table */
+ int nColumn; /* Number of columns in the data */
+ int base; /* VDBE Cursor number for pTab */
+ int iCont, iBreak; /* Beginning and end of the loop over srcTab */
+ sqlite *db; /* The main database structure */
+ int keyColumn = -1; /* Column that is the INTEGER PRIMARY KEY */
+ int endOfLoop; /* Label for the end of the insertion loop */
+ int useTempTable; /* Store SELECT results in intermediate table */
+ int srcTab; /* Data comes from this temporary cursor if >=0 */
+ int iSelectLoop; /* Address of code that implements the SELECT */
+ int iCleanup; /* Address of the cleanup code */
+ int iInsertBlock; /* Address of the subroutine used to insert data */
+ int iCntMem; /* Memory cell used for the row counter */
+ int isView; /* True if attempting to insert into a view */
+
+ int row_triggers_exist = 0; /* True if there are FOR EACH ROW triggers */
+ int before_triggers; /* True if there are BEFORE triggers */
+ int after_triggers; /* True if there are AFTER triggers */
+ int newIdx = -1; /* Cursor for the NEW table */
+
+ if( pParse->nErr || sqlite_malloc_failed ) goto insert_cleanup;
+ db = pParse->db;
+
+ /* Locate the table into which we will be inserting new information.
+ */
+ assert( pTabList->nSrc==1 );
+ zTab = pTabList->a[0].zName;
+ if( zTab==0 ) goto insert_cleanup;
+ pTab = sqliteSrcListLookup(pParse, pTabList);
+ if( pTab==0 ){
+ goto insert_cleanup;
+ }
+ assert( pTab->iDb<db->nDb );
+ zDb = db->aDb[pTab->iDb].zName;
+ if( sqliteAuthCheck(pParse, SQLITE_INSERT, pTab->zName, 0, zDb) ){
+ goto insert_cleanup;
+ }
+
+ /* Ensure that:
+ * (a) the table is not read-only,
+   * (b) if it is a view, ON INSERT triggers exist
+ */
+ before_triggers = sqliteTriggersExist(pParse, pTab->pTrigger, TK_INSERT,
+ TK_BEFORE, TK_ROW, 0);
+ after_triggers = sqliteTriggersExist(pParse, pTab->pTrigger, TK_INSERT,
+ TK_AFTER, TK_ROW, 0);
+ row_triggers_exist = before_triggers || after_triggers;
+ isView = pTab->pSelect!=0;
+ if( sqliteIsReadOnly(pParse, pTab, before_triggers) ){
+ goto insert_cleanup;
+ }
+ if( pTab==0 ) goto insert_cleanup;
+
+ /* If pTab is really a view, make sure it has been initialized.
+ */
+ if( isView && sqliteViewGetColumnNames(pParse, pTab) ){
+ goto insert_cleanup;
+ }
+
+ /* Allocate a VDBE
+ */
+ v = sqliteGetVdbe(pParse);
+ if( v==0 ) goto insert_cleanup;
+ sqliteBeginWriteOperation(pParse, pSelect || row_triggers_exist, pTab->iDb);
+
+ /* if there are row triggers, allocate a temp table for new.* references. */
+ if( row_triggers_exist ){
+ newIdx = pParse->nTab++;
+ }
+
+ /* Figure out how many columns of data are supplied. If the data
+ ** is coming from a SELECT statement, then this step also generates
+ ** all the code to implement the SELECT statement and invoke a subroutine
+ ** to process each row of the result. (Template 2.) If the SELECT
+  ** statement uses the table that is being inserted into, then the
+ ** subroutine is also coded here. That subroutine stores the SELECT
+ ** results in a temporary table. (Template 3.)
+ */
+ if( pSelect ){
+ /* Data is coming from a SELECT. Generate code to implement that SELECT
+ */
+ int rc, iInitCode;
+ iInitCode = sqliteVdbeAddOp(v, OP_Goto, 0, 0);
+ iSelectLoop = sqliteVdbeCurrentAddr(v);
+ iInsertBlock = sqliteVdbeMakeLabel(v);
+ rc = sqliteSelect(pParse, pSelect, SRT_Subroutine, iInsertBlock, 0,0,0);
+ if( rc || pParse->nErr || sqlite_malloc_failed ) goto insert_cleanup;
+ iCleanup = sqliteVdbeMakeLabel(v);
+ sqliteVdbeAddOp(v, OP_Goto, 0, iCleanup);
+ assert( pSelect->pEList );
+ nColumn = pSelect->pEList->nExpr;
+
+ /* Set useTempTable to TRUE if the result of the SELECT statement
+ ** should be written into a temporary table. Set to FALSE if each
+ ** row of the SELECT can be written directly into the result table.
+ **
+ ** A temp table must be used if the table being updated is also one
+ ** of the tables being read by the SELECT statement. Also use a
+ ** temp table in the case of row triggers.
+ */
+ if( row_triggers_exist ){
+ useTempTable = 1;
+ }else{
+ int addr = sqliteVdbeFindOp(v, OP_OpenRead, pTab->tnum);
+ useTempTable = 0;
+ if( addr>0 ){
+ VdbeOp *pOp = sqliteVdbeGetOp(v, addr-2);
+ if( pOp->opcode==OP_Integer && pOp->p1==pTab->iDb ){
+ useTempTable = 1;
+ }
+ }
+ }
+
+ if( useTempTable ){
+ /* Generate the subroutine that SELECT calls to process each row of
+ ** the result. Store the result in a temporary table
+ */
+ srcTab = pParse->nTab++;
+ sqliteVdbeResolveLabel(v, iInsertBlock);
+ sqliteVdbeAddOp(v, OP_MakeRecord, nColumn, 0);
+ sqliteVdbeAddOp(v, OP_NewRecno, srcTab, 0);
+ sqliteVdbeAddOp(v, OP_Pull, 1, 0);
+ sqliteVdbeAddOp(v, OP_PutIntKey, srcTab, 0);
+ sqliteVdbeAddOp(v, OP_Return, 0, 0);
+
+ /* The following code runs first because the GOTO at the very top
+ ** of the program jumps to it. Create the temporary table, then jump
+ ** back up and execute the SELECT code above.
+ */
+ sqliteVdbeChangeP2(v, iInitCode, sqliteVdbeCurrentAddr(v));
+ sqliteVdbeAddOp(v, OP_OpenTemp, srcTab, 0);
+ sqliteVdbeAddOp(v, OP_Goto, 0, iSelectLoop);
+ sqliteVdbeResolveLabel(v, iCleanup);
+ }else{
+ sqliteVdbeChangeP2(v, iInitCode, sqliteVdbeCurrentAddr(v));
+ }
+ }else{
+ /* This is the case if the data for the INSERT is coming from a VALUES
+ ** clause
+ */
+ SrcList dummy;
+ assert( pList!=0 );
+ srcTab = -1;
+ useTempTable = 0;
+ assert( pList );
+ nColumn = pList->nExpr;
+ dummy.nSrc = 0;
+ for(i=0; i<nColumn; i++){
+ if( sqliteExprResolveIds(pParse, &dummy, 0, pList->a[i].pExpr) ){
+ goto insert_cleanup;
+ }
+ if( sqliteExprCheck(pParse, pList->a[i].pExpr, 0, 0) ){
+ goto insert_cleanup;
+ }
+ }
+ }
+
+ /* Make sure the number of columns in the source data matches the number
+ ** of columns to be inserted into the table.
+ */
+ if( pColumn==0 && nColumn!=pTab->nCol ){
+ sqliteErrorMsg(pParse,
+ "table %S has %d columns but %d values were supplied",
+ pTabList, 0, pTab->nCol, nColumn);
+ goto insert_cleanup;
+ }
+ if( pColumn!=0 && nColumn!=pColumn->nId ){
+ sqliteErrorMsg(pParse, "%d values for %d columns", nColumn, pColumn->nId);
+ goto insert_cleanup;
+ }
+
+ /* If the INSERT statement included an IDLIST term, then make sure
+ ** all elements of the IDLIST really are columns of the table and
+ ** remember the column indices.
+ **
+ ** If the table has an INTEGER PRIMARY KEY column and that column
+ ** is named in the IDLIST, then record in the keyColumn variable
+ ** the index into IDLIST of the primary key column. keyColumn is
+ ** the index of the primary key as it appears in IDLIST, not as
+  ** it appears in the original table. (The index of the primary
+ ** key in the original table is pTab->iPKey.)
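+  **
+  ** For example (a hypothetical schema), given
+  **
+  **     CREATE TABLE t1(a, b INTEGER PRIMARY KEY);
+  **     INSERT INTO t1(b, a) VALUES(5, 'x');
+  **
+  ** keyColumn is set to 0, the position of "b" within the IDLIST,
+  ** while pTab->iPKey is 1, the position of "b" within the table.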
+ */
+ if( pColumn ){
+ for(i=0; i<pColumn->nId; i++){
+ pColumn->a[i].idx = -1;
+ }
+ for(i=0; i<pColumn->nId; i++){
+ for(j=0; j<pTab->nCol; j++){
+ if( sqliteStrICmp(pColumn->a[i].zName, pTab->aCol[j].zName)==0 ){
+ pColumn->a[i].idx = j;
+ if( j==pTab->iPKey ){
+ keyColumn = i;
+ }
+ break;
+ }
+ }
+ if( j>=pTab->nCol ){
+ if( sqliteIsRowid(pColumn->a[i].zName) ){
+ keyColumn = i;
+ }else{
+ sqliteErrorMsg(pParse, "table %S has no column named %s",
+ pTabList, 0, pColumn->a[i].zName);
+ pParse->nErr++;
+ goto insert_cleanup;
+ }
+ }
+ }
+ }
+
+ /* If there is no IDLIST term but the table has an integer primary
+  ** key, then set the keyColumn variable to the primary key column index
+ ** in the original table definition.
+ */
+ if( pColumn==0 ){
+ keyColumn = pTab->iPKey;
+ }
+
+ /* Open the temp table for FOR EACH ROW triggers
+ */
+ if( row_triggers_exist ){
+ sqliteVdbeAddOp(v, OP_OpenPseudo, newIdx, 0);
+ }
+
+ /* Initialize the count of rows to be inserted
+ */
+ if( db->flags & SQLITE_CountRows ){
+ iCntMem = pParse->nMem++;
+ sqliteVdbeAddOp(v, OP_Integer, 0, 0);
+ sqliteVdbeAddOp(v, OP_MemStore, iCntMem, 1);
+ }
+
+ /* Open tables and indices if there are no row triggers */
+ if( !row_triggers_exist ){
+ base = pParse->nTab;
+ idx = sqliteOpenTableAndIndices(pParse, pTab, base);
+ pParse->nTab += idx;
+ }
+
+ /* If the data source is a temporary table, then we have to create
+ ** a loop because there might be multiple rows of data. If the data
+ ** source is a subroutine call from the SELECT statement, then we need
+ ** to launch the SELECT statement processing.
+ */
+ if( useTempTable ){
+ iBreak = sqliteVdbeMakeLabel(v);
+ sqliteVdbeAddOp(v, OP_Rewind, srcTab, iBreak);
+ iCont = sqliteVdbeCurrentAddr(v);
+ }else if( pSelect ){
+ sqliteVdbeAddOp(v, OP_Goto, 0, iSelectLoop);
+ sqliteVdbeResolveLabel(v, iInsertBlock);
+ }
+
+ /* Run the BEFORE and INSTEAD OF triggers, if there are any
+ */
+ endOfLoop = sqliteVdbeMakeLabel(v);
+ if( before_triggers ){
+
+ /* build the NEW.* reference row. Note that if there is an INTEGER
+ ** PRIMARY KEY into which a NULL is being inserted, that NULL will be
+ ** translated into a unique ID for the row. But on a BEFORE trigger,
+ ** we do not know what the unique ID will be (because the insert has
+ ** not happened yet) so we substitute a rowid of -1
+ */
+ if( keyColumn<0 ){
+ sqliteVdbeAddOp(v, OP_Integer, -1, 0);
+ }else if( useTempTable ){
+ sqliteVdbeAddOp(v, OP_Column, srcTab, keyColumn);
+ }else if( pSelect ){
+ sqliteVdbeAddOp(v, OP_Dup, nColumn - keyColumn - 1, 1);
+ }else{
+ sqliteExprCode(pParse, pList->a[keyColumn].pExpr);
+ sqliteVdbeAddOp(v, OP_NotNull, -1, sqliteVdbeCurrentAddr(v)+3);
+ sqliteVdbeAddOp(v, OP_Pop, 1, 0);
+ sqliteVdbeAddOp(v, OP_Integer, -1, 0);
+ sqliteVdbeAddOp(v, OP_MustBeInt, 0, 0);
+ }
+
+ /* Create the new column data
+ */
+ for(i=0; i<pTab->nCol; i++){
+ if( pColumn==0 ){
+ j = i;
+ }else{
+ for(j=0; j<pColumn->nId; j++){
+ if( pColumn->a[j].idx==i ) break;
+ }
+ }
+ if( pColumn && j>=pColumn->nId ){
+ sqliteVdbeOp3(v, OP_String, 0, 0, pTab->aCol[i].zDflt, P3_STATIC);
+ }else if( useTempTable ){
+ sqliteVdbeAddOp(v, OP_Column, srcTab, j);
+ }else if( pSelect ){
+ sqliteVdbeAddOp(v, OP_Dup, nColumn-j-1, 1);
+ }else{
+ sqliteExprCode(pParse, pList->a[j].pExpr);
+ }
+ }
+ sqliteVdbeAddOp(v, OP_MakeRecord, pTab->nCol, 0);
+ sqliteVdbeAddOp(v, OP_PutIntKey, newIdx, 0);
+
+ /* Fire BEFORE or INSTEAD OF triggers */
+ if( sqliteCodeRowTrigger(pParse, TK_INSERT, 0, TK_BEFORE, pTab,
+ newIdx, -1, onError, endOfLoop) ){
+ goto insert_cleanup;
+ }
+ }
+
+  /* If any triggers exist, the opening of tables and indices is deferred
+ ** until now.
+ */
+ if( row_triggers_exist && !isView ){
+ base = pParse->nTab;
+ idx = sqliteOpenTableAndIndices(pParse, pTab, base);
+ pParse->nTab += idx;
+ }
+
+ /* Push the record number for the new entry onto the stack. The
+  ** record number is a randomly generated integer created by NewRecno
+ ** except when the table has an INTEGER PRIMARY KEY column, in which
+ ** case the record number is the same as that column.
+ */
+ if( !isView ){
+ if( keyColumn>=0 ){
+ if( useTempTable ){
+ sqliteVdbeAddOp(v, OP_Column, srcTab, keyColumn);
+ }else if( pSelect ){
+ sqliteVdbeAddOp(v, OP_Dup, nColumn - keyColumn - 1, 1);
+ }else{
+ sqliteExprCode(pParse, pList->a[keyColumn].pExpr);
+ }
+ /* If the PRIMARY KEY expression is NULL, then use OP_NewRecno
+ ** to generate a unique primary key value.
+ */
+ sqliteVdbeAddOp(v, OP_NotNull, -1, sqliteVdbeCurrentAddr(v)+3);
+ sqliteVdbeAddOp(v, OP_Pop, 1, 0);
+ sqliteVdbeAddOp(v, OP_NewRecno, base, 0);
+ sqliteVdbeAddOp(v, OP_MustBeInt, 0, 0);
+ }else{
+ sqliteVdbeAddOp(v, OP_NewRecno, base, 0);
+ }
+
+    /* Push onto the stack the data for all columns of the new entry, beginning
+ ** with the first column.
+ */
+ for(i=0; i<pTab->nCol; i++){
+ if( i==pTab->iPKey ){
+ /* The value of the INTEGER PRIMARY KEY column is always a NULL.
+ ** Whenever this column is read, the record number will be substituted
+        ** in its place. So we fill this column with a NULL to avoid
+ ** taking up data space with information that will never be used. */
+ sqliteVdbeAddOp(v, OP_String, 0, 0);
+ continue;
+ }
+ if( pColumn==0 ){
+ j = i;
+ }else{
+ for(j=0; j<pColumn->nId; j++){
+ if( pColumn->a[j].idx==i ) break;
+ }
+ }
+ if( pColumn && j>=pColumn->nId ){
+ sqliteVdbeOp3(v, OP_String, 0, 0, pTab->aCol[i].zDflt, P3_STATIC);
+ }else if( useTempTable ){
+ sqliteVdbeAddOp(v, OP_Column, srcTab, j);
+ }else if( pSelect ){
+ sqliteVdbeAddOp(v, OP_Dup, i+nColumn-j, 1);
+ }else{
+ sqliteExprCode(pParse, pList->a[j].pExpr);
+ }
+ }
+
+ /* Generate code to check constraints and generate index keys and
+ ** do the insertion.
+ */
+ sqliteGenerateConstraintChecks(pParse, pTab, base, 0, keyColumn>=0,
+ 0, onError, endOfLoop);
+ sqliteCompleteInsertion(pParse, pTab, base, 0,0,0,
+ after_triggers ? newIdx : -1);
+ }
+
+ /* Update the count of rows that are inserted
+ */
+ if( (db->flags & SQLITE_CountRows)!=0 ){
+ sqliteVdbeAddOp(v, OP_MemIncr, iCntMem, 0);
+ }
+
+ if( row_triggers_exist ){
+ /* Close all tables opened */
+ if( !isView ){
+ sqliteVdbeAddOp(v, OP_Close, base, 0);
+ for(idx=1, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, idx++){
+ sqliteVdbeAddOp(v, OP_Close, idx+base, 0);
+ }
+ }
+
+ /* Code AFTER triggers */
+ if( sqliteCodeRowTrigger(pParse, TK_INSERT, 0, TK_AFTER, pTab, newIdx, -1,
+ onError, endOfLoop) ){
+ goto insert_cleanup;
+ }
+ }
+
+ /* The bottom of the loop, if the data source is a SELECT statement
+ */
+ sqliteVdbeResolveLabel(v, endOfLoop);
+ if( useTempTable ){
+ sqliteVdbeAddOp(v, OP_Next, srcTab, iCont);
+ sqliteVdbeResolveLabel(v, iBreak);
+ sqliteVdbeAddOp(v, OP_Close, srcTab, 0);
+ }else if( pSelect ){
+ sqliteVdbeAddOp(v, OP_Pop, nColumn, 0);
+ sqliteVdbeAddOp(v, OP_Return, 0, 0);
+ sqliteVdbeResolveLabel(v, iCleanup);
+ }
+
+ if( !row_triggers_exist ){
+ /* Close all tables opened */
+ sqliteVdbeAddOp(v, OP_Close, base, 0);
+ for(idx=1, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, idx++){
+ sqliteVdbeAddOp(v, OP_Close, idx+base, 0);
+ }
+ }
+
+ sqliteVdbeAddOp(v, OP_SetCounts, 0, 0);
+ sqliteEndWriteOperation(pParse);
+
+ /*
+ ** Return the number of rows inserted.
+ */
+ if( db->flags & SQLITE_CountRows ){
+ sqliteVdbeOp3(v, OP_ColumnName, 0, 1, "rows inserted", P3_STATIC);
+ sqliteVdbeAddOp(v, OP_MemLoad, iCntMem, 0);
+ sqliteVdbeAddOp(v, OP_Callback, 1, 0);
+ }
+
+insert_cleanup:
+ sqliteSrcListDelete(pTabList);
+ if( pList ) sqliteExprListDelete(pList);
+ if( pSelect ) sqliteSelectDelete(pSelect);
+ sqliteIdListDelete(pColumn);
+}
+
+/*
+** Generate code to do a constraint check prior to an INSERT or an UPDATE.
+**
+** When this routine is called, the stack contains (from bottom to top)
+** the following values:
+**
+** 1. The recno of the row to be updated before the update. This
+** value is omitted unless we are doing an UPDATE that involves a
+** change to the record number.
+**
+** 2. The recno of the row after the update.
+**
+** 3. The data in the first column of the entry after the update.
+**
+** i. Data from middle columns...
+**
+** N. The data in the last column of the entry after the update.
+**
+** The old recno shown as entry (1) above is omitted unless both isUpdate
+** and recnoChng are 1. isUpdate is true for UPDATEs and false for
+** INSERTs and recnoChng is true if the record number is being changed.
+**
+** The code generated by this routine pushes additional entries onto
+** the stack which are the keys for new index entries for the new record.
+** The order of index keys is the same as the order of the indices on
+** the pTable->pIndex list. A key is only created for index i if
+** aIdxUsed!=0 and aIdxUsed[i]!=0.
+**
+** This routine also generates code to check constraints. NOT NULL,
+** CHECK, and UNIQUE constraints are all checked. If a constraint fails,
+** then the appropriate action is performed. There are five possible
+** actions: ROLLBACK, ABORT, FAIL, REPLACE, and IGNORE.
+**
+** Constraint type Action What Happens
+** --------------- ---------- ----------------------------------------
+** any ROLLBACK The current transaction is rolled back and
+** sqlite_exec() returns immediately with a
+** return code of SQLITE_CONSTRAINT.
+**
+** any ABORT Back out changes from the current command
+** only (do not do a complete rollback) then
+** cause sqlite_exec() to return immediately
+** with SQLITE_CONSTRAINT.
+**
+** any FAIL Sqlite_exec() returns immediately with a
+** return code of SQLITE_CONSTRAINT. The
+** transaction is not rolled back and any
+** prior changes are retained.
+**
+** any IGNORE The record number and data is popped from
+** the stack and there is an immediate jump
+** to label ignoreDest.
+**
+**    NOT NULL         REPLACE      The NULL value is replaced by the default
+** value for that column. If the default value
+** is NULL, the action is the same as ABORT.
+**
+** UNIQUE REPLACE The other row that conflicts with the row
+** being inserted is removed.
+**
+**    CHECK            REPLACE      Illegal.  This results in an exception.
+**
+** Which action to take is determined by the overrideError parameter.
+** Or if overrideError==OE_Default, then the pParse->onError parameter
+** is used. Or if pParse->onError==OE_Default then the onError value
+** for the constraint is used.
+**
+** The calling routine must open a read/write cursor for pTab with
+** cursor number "base". All indices of pTab must also have open
+** read/write cursors with cursor number base+i for the i-th cursor.
+** Except, if there is no possibility of a REPLACE action then
+** cursors do not need to be open for indices where aIdxUsed[i]==0.
+**
+** If the isUpdate flag is true, it means that the "base" cursor is
+** initially pointing to an entry that is being updated. The isUpdate
+** flag causes extra code to be generated so that the "base" cursor
+** is still pointing at the same entry after the routine returns.
+** Without the isUpdate flag, the "base" cursor might be moved.
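+**
+** For example (a sketch of the intended use), an INSERT OR IGNORE
+** statement reaches this routine with overrideError==OE_Ignore, so
+** every constraint conflict resolves to the IGNORE action regardless
+** of the per-constraint ON CONFLICT settings.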
+*/
+void sqliteGenerateConstraintChecks(
+ Parse *pParse, /* The parser context */
+ Table *pTab, /* the table into which we are inserting */
+ int base, /* Index of a read/write cursor pointing at pTab */
+ char *aIdxUsed, /* Which indices are used. NULL means all are used */
+ int recnoChng, /* True if the record number will change */
+ int isUpdate, /* True for UPDATE, False for INSERT */
+ int overrideError, /* Override onError to this if not OE_Default */
+ int ignoreDest /* Jump to this label on an OE_Ignore resolution */
+){
+ int i;
+ Vdbe *v;
+ int nCol;
+ int onError;
+ int addr;
+ int extra;
+ int iCur;
+ Index *pIdx;
+ int seenReplace = 0;
+ int jumpInst1, jumpInst2;
+ int contAddr;
+ int hasTwoRecnos = (isUpdate && recnoChng);
+
+ v = sqliteGetVdbe(pParse);
+ assert( v!=0 );
+ assert( pTab->pSelect==0 ); /* This table is not a VIEW */
+ nCol = pTab->nCol;
+
+ /* Test all NOT NULL constraints.
+ */
+ for(i=0; i<nCol; i++){
+ if( i==pTab->iPKey ){
+ continue;
+ }
+ onError = pTab->aCol[i].notNull;
+ if( onError==OE_None ) continue;
+ if( overrideError!=OE_Default ){
+ onError = overrideError;
+ }else if( pParse->db->onError!=OE_Default ){
+ onError = pParse->db->onError;
+ }else if( onError==OE_Default ){
+ onError = OE_Abort;
+ }
+ if( onError==OE_Replace && pTab->aCol[i].zDflt==0 ){
+ onError = OE_Abort;
+ }
+ sqliteVdbeAddOp(v, OP_Dup, nCol-1-i, 1);
+ addr = sqliteVdbeAddOp(v, OP_NotNull, 1, 0);
+ switch( onError ){
+ case OE_Rollback:
+ case OE_Abort:
+ case OE_Fail: {
+ char *zMsg = 0;
+ sqliteVdbeAddOp(v, OP_Halt, SQLITE_CONSTRAINT, onError);
+ sqliteSetString(&zMsg, pTab->zName, ".", pTab->aCol[i].zName,
+ " may not be NULL", (char*)0);
+ sqliteVdbeChangeP3(v, -1, zMsg, P3_DYNAMIC);
+ break;
+ }
+ case OE_Ignore: {
+ sqliteVdbeAddOp(v, OP_Pop, nCol+1+hasTwoRecnos, 0);
+ sqliteVdbeAddOp(v, OP_Goto, 0, ignoreDest);
+ break;
+ }
+ case OE_Replace: {
+ sqliteVdbeOp3(v, OP_String, 0, 0, pTab->aCol[i].zDflt, P3_STATIC);
+ sqliteVdbeAddOp(v, OP_Push, nCol-i, 0);
+ break;
+ }
+ default: assert(0);
+ }
+ sqliteVdbeChangeP2(v, addr, sqliteVdbeCurrentAddr(v));
+ }
+
+ /* Test all CHECK constraints
+ */
+ /**** TBD ****/
+
+ /* If we have an INTEGER PRIMARY KEY, make sure the primary key
+ ** of the new record does not previously exist. Except, if this
+ ** is an UPDATE and the primary key is not changing, that is OK.
+ */
+ if( recnoChng ){
+ onError = pTab->keyConf;
+ if( overrideError!=OE_Default ){
+ onError = overrideError;
+ }else if( pParse->db->onError!=OE_Default ){
+ onError = pParse->db->onError;
+ }else if( onError==OE_Default ){
+ onError = OE_Abort;
+ }
+
+ if( isUpdate ){
+ sqliteVdbeAddOp(v, OP_Dup, nCol+1, 1);
+ sqliteVdbeAddOp(v, OP_Dup, nCol+1, 1);
+ jumpInst1 = sqliteVdbeAddOp(v, OP_Eq, 0, 0);
+ }
+ sqliteVdbeAddOp(v, OP_Dup, nCol, 1);
+ jumpInst2 = sqliteVdbeAddOp(v, OP_NotExists, base, 0);
+ switch( onError ){
+ default: {
+ onError = OE_Abort;
+ /* Fall thru into the next case */
+ }
+ case OE_Rollback:
+ case OE_Abort:
+ case OE_Fail: {
+ sqliteVdbeOp3(v, OP_Halt, SQLITE_CONSTRAINT, onError,
+ "PRIMARY KEY must be unique", P3_STATIC);
+ break;
+ }
+ case OE_Replace: {
+ sqliteGenerateRowIndexDelete(pParse->db, v, pTab, base, 0);
+ if( isUpdate ){
+ sqliteVdbeAddOp(v, OP_Dup, nCol+hasTwoRecnos, 1);
+ sqliteVdbeAddOp(v, OP_MoveTo, base, 0);
+ }
+ seenReplace = 1;
+ break;
+ }
+ case OE_Ignore: {
+ assert( seenReplace==0 );
+ sqliteVdbeAddOp(v, OP_Pop, nCol+1+hasTwoRecnos, 0);
+ sqliteVdbeAddOp(v, OP_Goto, 0, ignoreDest);
+ break;
+ }
+ }
+ contAddr = sqliteVdbeCurrentAddr(v);
+ sqliteVdbeChangeP2(v, jumpInst2, contAddr);
+ if( isUpdate ){
+ sqliteVdbeChangeP2(v, jumpInst1, contAddr);
+ sqliteVdbeAddOp(v, OP_Dup, nCol+1, 1);
+ sqliteVdbeAddOp(v, OP_MoveTo, base, 0);
+ }
+ }
+
+ /* Test all UNIQUE constraints by creating entries for each UNIQUE
+ ** index and making sure that duplicate entries do not already exist.
+ ** Add the new records to the indices as we go.
+ */
+ extra = -1;
+ for(iCur=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, iCur++){
+ if( aIdxUsed && aIdxUsed[iCur]==0 ) continue; /* Skip unused indices */
+ extra++;
+
+ /* Create a key for accessing the index entry */
+ sqliteVdbeAddOp(v, OP_Dup, nCol+extra, 1);
+ for(i=0; i<pIdx->nColumn; i++){
+ int idx = pIdx->aiColumn[i];
+ if( idx==pTab->iPKey ){
+ sqliteVdbeAddOp(v, OP_Dup, i+extra+nCol+1, 1);
+ }else{
+ sqliteVdbeAddOp(v, OP_Dup, i+extra+nCol-idx, 1);
+ }
+ }
+ jumpInst1 = sqliteVdbeAddOp(v, OP_MakeIdxKey, pIdx->nColumn, 0);
+ if( pParse->db->file_format>=4 ) sqliteAddIdxKeyType(v, pIdx);
+
+ /* Find out what action to take in case there is an indexing conflict */
+ onError = pIdx->onError;
+ if( onError==OE_None ) continue; /* pIdx is not a UNIQUE index */
+ if( overrideError!=OE_Default ){
+ onError = overrideError;
+ }else if( pParse->db->onError!=OE_Default ){
+ onError = pParse->db->onError;
+ }else if( onError==OE_Default ){
+ onError = OE_Abort;
+ }
+ if( seenReplace ){
+ if( onError==OE_Ignore ) onError = OE_Replace;
+ else if( onError==OE_Fail ) onError = OE_Abort;
+ }
+
+
+ /* Check to see if the new index entry will be unique */
+ sqliteVdbeAddOp(v, OP_Dup, extra+nCol+1+hasTwoRecnos, 1);
+ jumpInst2 = sqliteVdbeAddOp(v, OP_IsUnique, base+iCur+1, 0);
+
+ /* Generate code that executes if the new index entry is not unique */
+ switch( onError ){
+ case OE_Rollback:
+ case OE_Abort:
+ case OE_Fail: {
+ int j, n1, n2;
+ char zErrMsg[200];
+ strcpy(zErrMsg, pIdx->nColumn>1 ? "columns " : "column ");
+ n1 = strlen(zErrMsg);
+ for(j=0; j<pIdx->nColumn && n1<sizeof(zErrMsg)-30; j++){
+ char *zCol = pTab->aCol[pIdx->aiColumn[j]].zName;
+ n2 = strlen(zCol);
+ if( j>0 ){
+ strcpy(&zErrMsg[n1], ", ");
+ n1 += 2;
+ }
+ if( n1+n2>sizeof(zErrMsg)-30 ){
+ strcpy(&zErrMsg[n1], "...");
+ n1 += 3;
+ break;
+ }else{
+ strcpy(&zErrMsg[n1], zCol);
+ n1 += n2;
+ }
+ }
+ strcpy(&zErrMsg[n1],
+ pIdx->nColumn>1 ? " are not unique" : " is not unique");
+ sqliteVdbeOp3(v, OP_Halt, SQLITE_CONSTRAINT, onError, zErrMsg, 0);
+ break;
+ }
+ case OE_Ignore: {
+ assert( seenReplace==0 );
+ sqliteVdbeAddOp(v, OP_Pop, nCol+extra+3+hasTwoRecnos, 0);
+ sqliteVdbeAddOp(v, OP_Goto, 0, ignoreDest);
+ break;
+ }
+ case OE_Replace: {
+ sqliteGenerateRowDelete(pParse->db, v, pTab, base, 0);
+ if( isUpdate ){
+ sqliteVdbeAddOp(v, OP_Dup, nCol+extra+1+hasTwoRecnos, 1);
+ sqliteVdbeAddOp(v, OP_MoveTo, base, 0);
+ }
+ seenReplace = 1;
+ break;
+ }
+ default: assert(0);
+ }
+ contAddr = sqliteVdbeCurrentAddr(v);
+#if NULL_DISTINCT_FOR_UNIQUE
+ sqliteVdbeChangeP2(v, jumpInst1, contAddr);
+#endif
+ sqliteVdbeChangeP2(v, jumpInst2, contAddr);
+ }
+}
+
+/*
+** This routine generates code to finish the INSERT or UPDATE operation
+** that was started by a prior call to sqliteGenerateConstraintChecks.
+** The stack must contain keys for all active indices followed by data
+** and the recno for the new entry. This routine creates the new
+** entries in all indices and in the main table.
+**
+** The arguments to this routine should be the same as the first six
+** arguments to sqliteGenerateConstraintChecks.
+*/
+void sqliteCompleteInsertion(
+ Parse *pParse, /* The parser context */
+ Table *pTab, /* the table into which we are inserting */
+ int base, /* Index of a read/write cursor pointing at pTab */
+ char *aIdxUsed, /* Which indices are used. NULL means all are used */
+ int recnoChng, /* True if the record number will change */
+ int isUpdate, /* True for UPDATE, False for INSERT */
+ int newIdx /* Index of NEW table for triggers. -1 if none */
+){
+ int i;
+ Vdbe *v;
+ int nIdx;
+ Index *pIdx;
+
+ v = sqliteGetVdbe(pParse);
+ assert( v!=0 );
+ assert( pTab->pSelect==0 ); /* This table is not a VIEW */
+ for(nIdx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdx++){}
+ for(i=nIdx-1; i>=0; i--){
+ if( aIdxUsed && aIdxUsed[i]==0 ) continue;
+ sqliteVdbeAddOp(v, OP_IdxPut, base+i+1, 0);
+ }
+ sqliteVdbeAddOp(v, OP_MakeRecord, pTab->nCol, 0);
+ if( newIdx>=0 ){
+ sqliteVdbeAddOp(v, OP_Dup, 1, 0);
+ sqliteVdbeAddOp(v, OP_Dup, 1, 0);
+ sqliteVdbeAddOp(v, OP_PutIntKey, newIdx, 0);
+ }
+ sqliteVdbeAddOp(v, OP_PutIntKey, base,
+ (pParse->trigStack?0:OPFLAG_NCHANGE) |
+ (isUpdate?0:OPFLAG_LASTROWID) | OPFLAG_CSCHANGE);
+ if( isUpdate && recnoChng ){
+ sqliteVdbeAddOp(v, OP_Pop, 1, 0);
+ }
+}
+
+/*
+** Generate code that will open write cursors for a table and for all
+** indices of that table. The "base" parameter is the cursor number used
+** for the table. Indices are opened on subsequent cursors.
+**
+** Return the total number of cursors opened. This is always at least
+** 1 (for the main table) plus one for each index of the table.
+*/
+int sqliteOpenTableAndIndices(Parse *pParse, Table *pTab, int base){
+ int i;
+ Index *pIdx;
+ Vdbe *v = sqliteGetVdbe(pParse);
+ assert( v!=0 );
+ sqliteVdbeAddOp(v, OP_Integer, pTab->iDb, 0);
+ sqliteVdbeOp3(v, OP_OpenWrite, base, pTab->tnum, pTab->zName, P3_STATIC);
+ for(i=1, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){
+ sqliteVdbeAddOp(v, OP_Integer, pIdx->iDb, 0);
+ sqliteVdbeOp3(v, OP_OpenWrite, i+base, pIdx->tnum, pIdx->zName, P3_STATIC);
+ }
+ return i;
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/src/main.c b/usr/src/cmd/svc/configd/sqlite/src/main.c
new file mode 100644
index 0000000000..41e7afa4dc
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/main.c
@@ -0,0 +1,1146 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** Main file for the SQLite library. The routines in this file
+** implement the programmer interface to the library. Routines in
+** other files are for internal use by SQLite and should not be
+** accessed by users of the library.
+**
+** $Id: main.c,v 1.164.2.2 2004/06/26 14:40:05 drh Exp $
+*/
+#include "sqliteInt.h"
+#include "os.h"
+#include <ctype.h>
+
+/*
+** A pointer to this structure is used to communicate information
+** from sqliteInit into the sqliteInitCallback.
+*/
+typedef struct {
+ sqlite *db; /* The database being initialized */
+ char **pzErrMsg; /* Error message stored here */
+} InitData;
+
+/*
+** Fill the InitData structure with an error message that indicates
+** that the database is corrupt.
+*/
+static void corruptSchema(InitData *pData, const char *zExtra){
+ sqliteSetString(pData->pzErrMsg, "malformed database schema",
+ zExtra!=0 && zExtra[0]!=0 ? " - " : (char*)0, zExtra, (char*)0);
+}
+
+/*
+** This is the callback routine for the code that initializes the
+** database. See sqliteInit() below for additional information.
+**
+** Each callback contains the following information:
+**
+** argv[0] = "file-format" or "schema-cookie" or "table" or "index"
+** argv[1] = table or index name or meta statement type.
+** argv[2] = root page number for table or index. NULL for meta.
+** argv[3] = SQL text for a CREATE TABLE or CREATE INDEX statement.
+** argv[4] = "1" for temporary files, "0" for main database, "2" or more
+** for auxiliary database files.
+**
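+** For instance (an illustrative row, not taken from any real database),
+** the callback for an ordinary table in the main database might see:
+**
+**   argv[] = { "table", "t1", "3", "CREATE TABLE t1(a,b)", "0" }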
+*/
+static
+int sqliteInitCallback(void *pInit, int argc, char **argv, char **azColName){
+ InitData *pData = (InitData*)pInit;
+ int nErr = 0;
+
+ assert( argc==5 );
+ if( argv==0 ) return 0; /* Might happen if EMPTY_RESULT_CALLBACKS are on */
+ if( argv[0]==0 ){
+ corruptSchema(pData, 0);
+ return 1;
+ }
+ switch( argv[0][0] ){
+ case 'v':
+ case 'i':
+ case 't': { /* CREATE TABLE, CREATE INDEX, or CREATE VIEW statements */
+ sqlite *db = pData->db;
+ if( argv[2]==0 || argv[4]==0 ){
+ corruptSchema(pData, 0);
+ return 1;
+ }
+ if( argv[3] && argv[3][0] ){
+ /* Call the parser to process a CREATE TABLE, INDEX or VIEW.
+ ** But because db->init.busy is set to 1, no VDBE code is generated
+ ** or executed. All the parser does is build the internal data
+ ** structures that describe the table, index, or view.
+ */
+ char *zErr;
+ assert( db->init.busy );
+ db->init.iDb = atoi(argv[4]);
+ assert( db->init.iDb>=0 && db->init.iDb<db->nDb );
+ db->init.newTnum = atoi(argv[2]);
+ if( sqlite_exec(db, argv[3], 0, 0, &zErr) ){
+ corruptSchema(pData, zErr);
+ sqlite_freemem(zErr);
+ }
+ db->init.iDb = 0;
+ }else{
+ /* If the SQL column is blank it means this is an index that
+ ** was created to be the PRIMARY KEY or to fulfill a UNIQUE
+ ** constraint for a CREATE TABLE. The index should have already
+ ** been created when we processed the CREATE TABLE. All we have
+ ** to do here is record the root page number for that index.
+ */
+ int iDb;
+ Index *pIndex;
+
+ iDb = atoi(argv[4]);
+ assert( iDb>=0 && iDb<db->nDb );
+ pIndex = sqliteFindIndex(db, argv[1], db->aDb[iDb].zName);
+ if( pIndex==0 || pIndex->tnum!=0 ){
+ /* This can occur if there exists an index on a TEMP table which
+        ** has the same name as another index on a permanent table. Since
+ ** the permanent table is hidden by the TEMP table, we can also
+ ** safely ignore the index on the permanent table.
+ */
+ /* Do Nothing */;
+ }else{
+ pIndex->tnum = atoi(argv[2]);
+ }
+ }
+ break;
+ }
+ default: {
+ /* This can not happen! */
+ nErr = 1;
+ assert( nErr==0 );
+ }
+ }
+ return nErr;
+}
+
+/*
+** This is a callback procedure used to reconstruct a table. The
+** name of the table to be reconstructed is passed in as argv[0].
+**
+** This routine is used to automatically upgrade a database from
+** format version 1 or 2 to version 3. The correct operation of
+** this routine relies on the fact that no indices are used when
+** copying a table out to a temporary file.
+**
+** The change from version 2 to version 3 occurred between SQLite
+** version 2.5.6 and 2.6.0 on 2002-July-18.
+*/
+static
+int upgrade_3_callback(void *pInit, int argc, char **argv, char **NotUsed){
+ InitData *pData = (InitData*)pInit;
+ int rc;
+ Table *pTab;
+ Trigger *pTrig;
+ char *zErr = 0;
+
+ pTab = sqliteFindTable(pData->db, argv[0], 0);
+ assert( pTab!=0 );
+ assert( sqliteStrICmp(pTab->zName, argv[0])==0 );
+ if( pTab ){
+ pTrig = pTab->pTrigger;
+ pTab->pTrigger = 0; /* Disable all triggers before rebuilding the table */
+ }
+ rc = sqlite_exec_printf(pData->db,
+ "CREATE TEMP TABLE sqlite_x AS SELECT * FROM '%q'; "
+ "DELETE FROM '%q'; "
+ "INSERT INTO '%q' SELECT * FROM sqlite_x; "
+ "DROP TABLE sqlite_x;",
+ 0, 0, &zErr, argv[0], argv[0], argv[0]);
+ if( zErr ){
+ if( *pData->pzErrMsg ) sqlite_freemem(*pData->pzErrMsg);
+ *pData->pzErrMsg = zErr;
+ }
+
+ /* If an error occurred in the SQL above, then the transaction will
+  ** roll back, which will delete the internal symbol tables. This will
+  ** also delete the structure that pTab points to. In case that
+ ** happened, we need to refetch pTab.
+ */
+ pTab = sqliteFindTable(pData->db, argv[0], 0);
+ if( pTab ){
+ assert( sqliteStrICmp(pTab->zName, argv[0])==0 );
+ pTab->pTrigger = pTrig; /* Re-enable triggers */
+ }
+ return rc!=SQLITE_OK;
+}
+
+
+
+/*
+** Attempt to read the database schema and initialize internal
+** data structures for a single database file. The index of the
+** database file is given by iDb. iDb==0 is used for the main
+** database. iDb==1 should never be used. iDb>=2 is used for
+** auxiliary databases. Return one of the SQLITE_ error codes to
+** indicate success or failure.
+*/
+static int sqliteInitOne(sqlite *db, int iDb, char **pzErrMsg){
+ int rc;
+ BtCursor *curMain;
+ int size;
+ Table *pTab;
+ char const *azArg[6];
+ char zDbNum[30];
+ int meta[SQLITE_N_BTREE_META];
+ InitData initData;
+ char const *zMasterSchema;
+ char const *zMasterName;
+ char *zSql = 0;
+
+ /*
+ ** The master database table has a structure like this
+ */
+ static char master_schema[] =
+ "CREATE TABLE sqlite_master(\n"
+ " type text,\n"
+ " name text,\n"
+ " tbl_name text,\n"
+ " rootpage integer,\n"
+ " sql text\n"
+ ")"
+ ;
+ static char temp_master_schema[] =
+ "CREATE TEMP TABLE sqlite_temp_master(\n"
+ " type text,\n"
+ " name text,\n"
+ " tbl_name text,\n"
+ " rootpage integer,\n"
+ " sql text\n"
+ ")"
+ ;
+
+ assert( iDb>=0 && iDb<db->nDb );
+
+  /* zMasterSchema is set to point at the master schema appropriate for
+  ** the database being initialised.  zMasterName is the name of the
+  ** master table.
+ */
+ if( iDb==1 ){
+ zMasterSchema = temp_master_schema;
+ zMasterName = TEMP_MASTER_NAME;
+ }else{
+ zMasterSchema = master_schema;
+ zMasterName = MASTER_NAME;
+ }
+
+ /* Construct the schema table.
+ */
+ sqliteSafetyOff(db);
+ azArg[0] = "table";
+ azArg[1] = zMasterName;
+ azArg[2] = "2";
+ azArg[3] = zMasterSchema;
+ sprintf(zDbNum, "%d", iDb);
+ azArg[4] = zDbNum;
+ azArg[5] = 0;
+ initData.db = db;
+ initData.pzErrMsg = pzErrMsg;
+ sqliteInitCallback(&initData, 5, (char **)azArg, 0);
+ pTab = sqliteFindTable(db, zMasterName, db->aDb[iDb].zName);
+ if( pTab ){
+ pTab->readOnly = 1;
+ }else{
+ return SQLITE_NOMEM;
+ }
+ sqliteSafetyOn(db);
+
+ /* Create a cursor to hold the database open
+ */
+ if( db->aDb[iDb].pBt==0 ) return SQLITE_OK;
+ rc = sqliteBtreeCursor(db->aDb[iDb].pBt, 2, 0, &curMain);
+ if( rc ){
+ sqliteSetString(pzErrMsg, sqlite_error_string(rc), (char*)0);
+ return rc;
+ }
+
+ /* Get the database meta information
+ */
+ rc = sqliteBtreeGetMeta(db->aDb[iDb].pBt, meta);
+ if( rc ){
+ sqliteSetString(pzErrMsg, sqlite_error_string(rc), (char*)0);
+ sqliteBtreeCloseCursor(curMain);
+ return rc;
+ }
+ db->aDb[iDb].schema_cookie = meta[1];
+ if( iDb==0 ){
+ db->next_cookie = meta[1];
+ db->file_format = meta[2];
+ size = meta[3];
+ if( size==0 ){ size = MAX_PAGES; }
+ db->cache_size = size;
+ db->safety_level = meta[4];
+ if( meta[6]>0 && meta[6]<=2 && db->temp_store==0 ){
+ db->temp_store = meta[6];
+ }
+ if( db->safety_level==0 ) db->safety_level = 2;
+
+ /*
+ ** file_format==1 Version 2.1.0.
+ ** file_format==2 Version 2.2.0. Add support for INTEGER PRIMARY KEY.
+ ** file_format==3 Version 2.6.0. Fix empty-string index bug.
+ ** file_format==4 Version 2.7.0. Add support for separate numeric and
+ ** text datatypes.
+ */
+ if( db->file_format==0 ){
+ /* This happens if the database was initially empty */
+ db->file_format = 4;
+ }else if( db->file_format>4 ){
+ sqliteBtreeCloseCursor(curMain);
+ sqliteSetString(pzErrMsg, "unsupported file format", (char*)0);
+ return SQLITE_ERROR;
+ }
+ }else if( iDb!=1 && (db->file_format!=meta[2] || db->file_format<4) ){
+ assert( db->file_format>=4 );
+ if( meta[2]==0 ){
+ sqliteSetString(pzErrMsg, "cannot attach empty database: ",
+ db->aDb[iDb].zName, (char*)0);
+ }else{
+ sqliteSetString(pzErrMsg, "incompatible file format in auxiliary "
+ "database: ", db->aDb[iDb].zName, (char*)0);
+ }
+ sqliteBtreeClose(db->aDb[iDb].pBt);
+ db->aDb[iDb].pBt = 0;
+ return SQLITE_FORMAT;
+ }
+ sqliteBtreeSetCacheSize(db->aDb[iDb].pBt, db->cache_size);
+ sqliteBtreeSetSafetyLevel(db->aDb[iDb].pBt, meta[4]==0 ? 2 : meta[4]);
+
+ /* Read the schema information out of the schema tables
+ */
+ assert( db->init.busy );
+ sqliteSafetyOff(db);
+
+ /* The following SQL will read the schema from the master tables.
+ ** The first version works with SQLite file formats 2 or greater.
+ ** The second version is for format 1 files.
+ **
+ ** Beginning with file format 2, the rowid for new table entries
+ ** (including entries in sqlite_master) is an increasing integer.
+ ** So for file format 2 and later, we can play back sqlite_master
+ ** and all the CREATE statements will appear in the right order.
+ ** But with file format 1, table entries were random and so we
+ ** have to make sure the CREATE TABLEs occur before their corresponding
+ ** CREATE INDEXs. (We don't have to deal with CREATE VIEW or
+ ** CREATE TRIGGER in file format 1 because those constructs did
+ ** not exist then.)
+ */
+ if( db->file_format>=2 ){
+ sqliteSetString(&zSql,
+ "SELECT type, name, rootpage, sql, ", zDbNum, " FROM \"",
+ db->aDb[iDb].zName, "\".", zMasterName, (char*)0);
+ }else{
+ sqliteSetString(&zSql,
+ "SELECT type, name, rootpage, sql, ", zDbNum, " FROM \"",
+ db->aDb[iDb].zName, "\".", zMasterName,
+ " WHERE type IN ('table', 'index')"
+ " ORDER BY CASE type WHEN 'table' THEN 0 ELSE 1 END", (char*)0);
+ }
+ rc = sqlite_exec(db, zSql, sqliteInitCallback, &initData, 0);
+
+ sqliteFree(zSql);
+ sqliteSafetyOn(db);
+ sqliteBtreeCloseCursor(curMain);
+ if( sqlite_malloc_failed ){
+ sqliteSetString(pzErrMsg, "out of memory", (char*)0);
+ rc = SQLITE_NOMEM;
+ sqliteResetInternalSchema(db, 0);
+ }
+ if( rc==SQLITE_OK ){
+ DbSetProperty(db, iDb, DB_SchemaLoaded);
+ }else{
+ sqliteResetInternalSchema(db, iDb);
+ }
+ return rc;
+}
+
+/*
+** Initialize all database files - the main database file, the file
+** used to store temporary tables, and any additional database files
+** created using ATTACH statements. Return a success code. If an
+** error occurs, write an error message into *pzErrMsg.
+**
+** After the database is initialized, the SQLITE_Initialized
+** bit is set in the flags field of the sqlite structure. An
+** attempt is made to initialize the database as soon as it
+** is opened. If that fails (perhaps because another process
+** has the sqlite_master table locked) then another attempt
+** is made the first time the database is accessed.
+*/
+int sqliteInit(sqlite *db, char **pzErrMsg){
+ int i, rc;
+
+ if( db->init.busy ) return SQLITE_OK;
+ assert( (db->flags & SQLITE_Initialized)==0 );
+ rc = SQLITE_OK;
+ db->init.busy = 1;
+ for(i=0; rc==SQLITE_OK && i<db->nDb; i++){
+ if( DbHasProperty(db, i, DB_SchemaLoaded) || i==1 ) continue;
+ rc = sqliteInitOne(db, i, pzErrMsg);
+ if( rc ){
+ sqliteResetInternalSchema(db, i);
+ }
+ }
+
+ /* Once all the other databases have been initialised, load the schema
+ ** for the TEMP database. This is loaded last, as the TEMP database
+ ** schema may contain references to objects in other databases.
+ */
+ if( rc==SQLITE_OK && db->nDb>1 && !DbHasProperty(db, 1, DB_SchemaLoaded) ){
+ rc = sqliteInitOne(db, 1, pzErrMsg);
+ if( rc ){
+ sqliteResetInternalSchema(db, 1);
+ }
+ }
+
+ db->init.busy = 0;
+ if( rc==SQLITE_OK ){
+ db->flags |= SQLITE_Initialized;
+ sqliteCommitInternalChanges(db);
+ }
+
+ /* If the database is in formats 1 or 2, then upgrade it to
+ ** version 3. This will reconstruct all indices. If the
+ ** upgrade fails for any reason (ex: out of disk space, database
+ ** is read only, interrupt received, etc.) then fail the init.
+ */
+ if( rc==SQLITE_OK && db->file_format<3 ){
+ char *zErr = 0;
+ InitData initData;
+ int meta[SQLITE_N_BTREE_META];
+
+ db->magic = SQLITE_MAGIC_OPEN;
+ initData.db = db;
+ initData.pzErrMsg = &zErr;
+ db->file_format = 3;
+ rc = sqlite_exec(db,
+ "BEGIN; SELECT name FROM sqlite_master WHERE type='table';",
+ upgrade_3_callback,
+ &initData,
+ &zErr);
+ if( rc==SQLITE_OK ){
+ sqliteBtreeGetMeta(db->aDb[0].pBt, meta);
+ meta[2] = 4;
+ sqliteBtreeUpdateMeta(db->aDb[0].pBt, meta);
+ sqlite_exec(db, "COMMIT", 0, 0, 0);
+ }
+ if( rc!=SQLITE_OK ){
+ sqliteSetString(pzErrMsg,
+ "unable to upgrade database to the version 2.6 format",
+ zErr ? ": " : 0, zErr, (char*)0);
+ }
+ sqlite_freemem(zErr);
+ }
+
+ if( rc!=SQLITE_OK ){
+ db->flags &= ~SQLITE_Initialized;
+ }
+ return rc;
+}
+
+/*
+** The version of the library
+*/
+const char rcsid[] = "@(#) \044Id: SQLite version " SQLITE_VERSION " $";
+const char sqlite_version[] = SQLITE_VERSION;
+
+/*
+** Does the library expect data to be encoded as UTF-8 or iso8859? The
+** following global constant always lets us know.
+*/
+#ifdef SQLITE_UTF8
+const char sqlite_encoding[] = "UTF-8";
+#else
+const char sqlite_encoding[] = "iso8859";
+#endif
+
+/*
+** Open a new SQLite database. Construct an "sqlite" structure to define
+** the state of this database and return a pointer to that structure.
+**
+** An attempt is made to initialize the in-memory data structures that
+** hold the database schema. But if this fails (because the schema file
+** is locked) then that step is deferred until the first call to
+** sqlite_exec().
+*/
+sqlite *sqlite_open(const char *zFilename, int mode, char **pzErrMsg){
+ sqlite *db;
+ int rc, i;
+
+ /* Allocate the sqlite data structure */
+ db = sqliteMalloc( sizeof(sqlite) );
+ if( pzErrMsg ) *pzErrMsg = 0;
+ if( db==0 ) goto no_mem_on_open;
+ db->onError = OE_Default;
+ db->priorNewRowid = 0;
+ db->magic = SQLITE_MAGIC_BUSY;
+ db->nDb = 2;
+ db->aDb = db->aDbStatic;
+ /* db->flags |= SQLITE_ShortColNames; */
+ sqliteHashInit(&db->aFunc, SQLITE_HASH_STRING, 1);
+ for(i=0; i<db->nDb; i++){
+ sqliteHashInit(&db->aDb[i].tblHash, SQLITE_HASH_STRING, 0);
+ sqliteHashInit(&db->aDb[i].idxHash, SQLITE_HASH_STRING, 0);
+ sqliteHashInit(&db->aDb[i].trigHash, SQLITE_HASH_STRING, 0);
+ sqliteHashInit(&db->aDb[i].aFKey, SQLITE_HASH_STRING, 1);
+ }
+
+ /* Open the backend database driver */
+ if( zFilename[0]==':' && strcmp(zFilename,":memory:")==0 ){
+ db->temp_store = 2;
+ }
+ rc = sqliteBtreeFactory(db, zFilename, 0, MAX_PAGES, &db->aDb[0].pBt);
+ if( rc!=SQLITE_OK ){
+ switch( rc ){
+ default: {
+ sqliteSetString(pzErrMsg, "unable to open database: ",
+ zFilename, (char*)0);
+ }
+ }
+ sqliteFree(db);
+ sqliteStrRealloc(pzErrMsg);
+ return 0;
+ }
+ db->aDb[0].zName = "main";
+ db->aDb[1].zName = "temp";
+
+ /* Attempt to read the schema */
+ sqliteRegisterBuiltinFunctions(db);
+ rc = sqliteInit(db, pzErrMsg);
+ db->magic = SQLITE_MAGIC_OPEN;
+ if( sqlite_malloc_failed ){
+ sqlite_close(db);
+ goto no_mem_on_open;
+ }else if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){
+ sqlite_close(db);
+ sqliteStrRealloc(pzErrMsg);
+ return 0;
+ }else if( pzErrMsg ){
+ sqliteFree(*pzErrMsg);
+ *pzErrMsg = 0;
+ }
+
+ /* Return a pointer to the newly opened database structure */
+ return db;
+
+no_mem_on_open:
+ sqliteSetString(pzErrMsg, "out of memory", (char*)0);
+ sqliteStrRealloc(pzErrMsg);
+ return 0;
+}
+
+/*
+** Return the ROWID of the most recent insert
+*/
+int sqlite_last_insert_rowid(sqlite *db){
+ return db->lastRowid;
+}
+
+/*
+** Return the number of changes in the most recent call to sqlite_exec().
+*/
+int sqlite_changes(sqlite *db){
+ return db->nChange;
+}
+
+/*
+** Return the number of changes produced by the last INSERT, UPDATE, or
+** DELETE statement to complete execution. The count does not include
+** changes due to SQL statements executed in trigger programs that were
+** triggered by that statement.
+*/
+int sqlite_last_statement_changes(sqlite *db){
+ return db->lsChange;
+}
+
+/*
+** Close an existing SQLite database
+*/
+void sqlite_close(sqlite *db){
+ HashElem *i;
+ int j;
+ db->want_to_close = 1;
+ if( sqliteSafetyCheck(db) || sqliteSafetyOn(db) ){
+ /* printf("DID NOT CLOSE\n"); fflush(stdout); */
+ return;
+ }
+ db->magic = SQLITE_MAGIC_CLOSED;
+ for(j=0; j<db->nDb; j++){
+ struct Db *pDb = &db->aDb[j];
+ if( pDb->pBt ){
+ sqliteBtreeClose(pDb->pBt);
+ pDb->pBt = 0;
+ }
+ }
+ sqliteResetInternalSchema(db, 0);
+ assert( db->nDb<=2 );
+ assert( db->aDb==db->aDbStatic );
+ for(i=sqliteHashFirst(&db->aFunc); i; i=sqliteHashNext(i)){
+ FuncDef *pFunc, *pNext;
+ for(pFunc = (FuncDef*)sqliteHashData(i); pFunc; pFunc=pNext){
+ pNext = pFunc->pNext;
+ sqliteFree(pFunc);
+ }
+ }
+ sqliteHashClear(&db->aFunc);
+ sqliteFree(db);
+}
+
+/*
+** Rollback all database files.
+*/
+void sqliteRollbackAll(sqlite *db){
+ int i;
+ for(i=0; i<db->nDb; i++){
+ if( db->aDb[i].pBt ){
+ sqliteBtreeRollback(db->aDb[i].pBt);
+ db->aDb[i].inTrans = 0;
+ }
+ }
+ sqliteResetInternalSchema(db, 0);
+ /* sqliteRollbackInternalChanges(db); */
+}
+
+/*
+** Execute SQL code. Return one of the SQLITE_ success/failure
+** codes. Also write an error message into memory obtained from
+** malloc() and make *pzErrMsg point to that message.
+**
+** If the SQL is a query, then for each row in the query result
+** the xCallback() function is called. pArg becomes the first
+** argument to xCallback(). If xCallback=NULL then no callback
+** is invoked, even for queries.
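+**
+** A minimal usage sketch (show_row, t1, and zErrMsg are example names;
+** a non-zero return from the callback aborts the query with SQLITE_ABORT):
+**
+**     static int show_row(void *pArg, int argc, char **argv, char **azCol){
+**       int i;
+**       for(i=0; i<argc; i++){
+**         printf("%s = %s\n", azCol[i], argv[i] ? argv[i] : "NULL");
+**       }
+**       return 0;
+**     }
+**
+**     rc = sqlite_exec(db, "SELECT * FROM t1;", show_row, 0, &zErrMsg);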
+*/
+int sqlite_exec(
+ sqlite *db, /* The database on which the SQL executes */
+ const char *zSql, /* The SQL to be executed */
+ sqlite_callback xCallback, /* Invoke this callback routine */
+ void *pArg, /* First argument to xCallback() */
+ char **pzErrMsg /* Write error messages here */
+){
+ int rc = SQLITE_OK;
+ const char *zLeftover;
+ sqlite_vm *pVm;
+ int nRetry = 0;
+ int nChange = 0;
+ int nCallback;
+
+ if( zSql==0 ) return SQLITE_OK;
+ while( rc==SQLITE_OK && zSql[0] ){
+ pVm = 0;
+ rc = sqlite_compile(db, zSql, &zLeftover, &pVm, pzErrMsg);
+ if( rc!=SQLITE_OK ){
+ assert( pVm==0 || sqlite_malloc_failed );
+ return rc;
+ }
+ if( pVm==0 ){
+ /* This happens if the zSql input contained only whitespace */
+ break;
+ }
+ db->nChange += nChange;
+ nCallback = 0;
+ while(1){
+ int nArg;
+ char **azArg, **azCol;
+ rc = sqlite_step(pVm, &nArg, (const char***)&azArg,(const char***)&azCol);
+ if( rc==SQLITE_ROW ){
+ if( xCallback!=0 && xCallback(pArg, nArg, azArg, azCol) ){
+ sqlite_finalize(pVm, 0);
+ return SQLITE_ABORT;
+ }
+ nCallback++;
+ }else{
+ if( rc==SQLITE_DONE && nCallback==0
+ && (db->flags & SQLITE_NullCallback)!=0 && xCallback!=0 ){
+ xCallback(pArg, nArg, azArg, azCol);
+ }
+ rc = sqlite_finalize(pVm, pzErrMsg);
+ if( rc==SQLITE_SCHEMA && nRetry<2 ){
+ nRetry++;
+ rc = SQLITE_OK;
+ break;
+ }
+ if( db->pVdbe==0 ){
+ nChange = db->nChange;
+ }
+ nRetry = 0;
+ zSql = zLeftover;
+ while( isspace(zSql[0]) ) zSql++;
+ break;
+ }
+ }
+ }
+ return rc;
+}
+
+
+/*
+** Compile a single statement of SQL into a virtual machine. Return one
+** of the SQLITE_ success/failure codes. Also write an error message into
+** memory obtained from malloc() and make *pzErrMsg point to that message.
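+**
+** A typical compile/step/finalize sequence looks roughly like this
+** (a sketch only; error handling is omitted):
+**
+**     sqlite_vm *pVm;
+**     const char *zTail;
+**     int nCol;
+**     const char **azVal, **azCol;
+**
+**     sqlite_compile(db, zSql, &zTail, &pVm, &zErrMsg);
+**     while( sqlite_step(pVm, &nCol, &azVal, &azCol)==SQLITE_ROW ){
+**       ... process azVal[0..nCol-1] ...
+**     }
+**     sqlite_finalize(pVm, &zErrMsg);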
+*/
+int sqlite_compile(
+ sqlite *db, /* The database on which the SQL executes */
+ const char *zSql, /* The SQL to be executed */
+ const char **pzTail, /* OUT: Next statement after the first */
+ sqlite_vm **ppVm, /* OUT: The virtual machine */
+ char **pzErrMsg /* OUT: Write error messages here */
+){
+ Parse sParse;
+
+ if( pzErrMsg ) *pzErrMsg = 0;
+ if( sqliteSafetyOn(db) ) goto exec_misuse;
+ if( !db->init.busy ){
+ if( (db->flags & SQLITE_Initialized)==0 ){
+ int rc, cnt = 1;
+ while( (rc = sqliteInit(db, pzErrMsg))==SQLITE_BUSY
+ && db->xBusyCallback
+ && db->xBusyCallback(db->pBusyArg, "", cnt++)!=0 ){}
+ if( rc!=SQLITE_OK ){
+ sqliteStrRealloc(pzErrMsg);
+ sqliteSafetyOff(db);
+ return rc;
+ }
+ if( pzErrMsg ){
+ sqliteFree(*pzErrMsg);
+ *pzErrMsg = 0;
+ }
+ }
+ if( db->file_format<3 ){
+ sqliteSafetyOff(db);
+ sqliteSetString(pzErrMsg, "obsolete database file format", (char*)0);
+ return SQLITE_ERROR;
+ }
+ }
+ assert( (db->flags & SQLITE_Initialized)!=0 || db->init.busy );
+ if( db->pVdbe==0 ){ db->nChange = 0; }
+ memset(&sParse, 0, sizeof(sParse));
+ sParse.db = db;
+ sqliteRunParser(&sParse, zSql, pzErrMsg);
+ if( db->xTrace && !db->init.busy ){
+    /* Trace only the statement that was compiled.
+    ** Make a copy of that part of the SQL string since zSql is const
+    ** and we must pass a zero-terminated string to the trace function.
+    ** The copy is unnecessary if the tail pointer is pointing at the
+    ** beginning or end of the SQL string.
+ */
+ if( sParse.zTail && sParse.zTail!=zSql && *sParse.zTail ){
+ char *tmpSql = sqliteStrNDup(zSql, sParse.zTail - zSql);
+ if( tmpSql ){
+ db->xTrace(db->pTraceArg, tmpSql);
+ free(tmpSql);
+ }else{
+ /* If a memory error occurred during the copy,
+ ** trace entire SQL string and fall through to the
+ ** sqlite_malloc_failed test to report the error.
+ */
+ db->xTrace(db->pTraceArg, zSql);
+ }
+ }else{
+ db->xTrace(db->pTraceArg, zSql);
+ }
+ }
+ if( sqlite_malloc_failed ){
+ sqliteSetString(pzErrMsg, "out of memory", (char*)0);
+ sParse.rc = SQLITE_NOMEM;
+ sqliteRollbackAll(db);
+ sqliteResetInternalSchema(db, 0);
+ db->flags &= ~SQLITE_InTrans;
+ }
+ if( sParse.rc==SQLITE_DONE ) sParse.rc = SQLITE_OK;
+ if( sParse.rc!=SQLITE_OK && pzErrMsg && *pzErrMsg==0 ){
+ sqliteSetString(pzErrMsg, sqlite_error_string(sParse.rc), (char*)0);
+ }
+ sqliteStrRealloc(pzErrMsg);
+ if( sParse.rc==SQLITE_SCHEMA ){
+ sqliteResetInternalSchema(db, 0);
+ }
+ assert( ppVm );
+ *ppVm = (sqlite_vm*)sParse.pVdbe;
+ if( pzTail ) *pzTail = sParse.zTail;
+ if( sqliteSafetyOff(db) ) goto exec_misuse;
+ return sParse.rc;
+
+exec_misuse:
+ if( pzErrMsg ){
+ *pzErrMsg = 0;
+ sqliteSetString(pzErrMsg, sqlite_error_string(SQLITE_MISUSE), (char*)0);
+ sqliteStrRealloc(pzErrMsg);
+ }
+ return SQLITE_MISUSE;
+}
+
+
+/*
+** The following routine destroys a virtual machine that is created by
+** the sqlite_compile() routine.
+**
+** The integer returned is an SQLITE_ success/failure code that describes
+** the result of executing the virtual machine. An error message is
+** written into memory obtained from malloc and *pzErrMsg is made to
+** point to that error if pzErrMsg is not NULL. The calling routine
+** should use sqlite_freemem() to delete the message when it has finished
+** with it.
+*/
+int sqlite_finalize(
+ sqlite_vm *pVm, /* The virtual machine to be destroyed */
+ char **pzErrMsg /* OUT: Write error messages here */
+){
+ int rc = sqliteVdbeFinalize((Vdbe*)pVm, pzErrMsg);
+ sqliteStrRealloc(pzErrMsg);
+ return rc;
+}
+
+/*
+** Terminate the current execution of a virtual machine then
+** reset the virtual machine back to its starting state so that it
+** can be reused. Any error message resulting from the prior execution
+** is written into *pzErrMsg. A success code from the prior execution
+** is returned.
+*/
+int sqlite_reset(
+ sqlite_vm *pVm, /* The virtual machine to be destroyed */
+ char **pzErrMsg /* OUT: Write error messages here */
+){
+ int rc = sqliteVdbeReset((Vdbe*)pVm, pzErrMsg);
+ sqliteVdbeMakeReady((Vdbe*)pVm, -1, 0);
+ sqliteStrRealloc(pzErrMsg);
+ return rc;
+}
+
+/*
+** Return a static string that describes the kind of error specified in the
+** argument.
+*/
+const char *sqlite_error_string(int rc){
+ const char *z;
+ switch( rc ){
+ case SQLITE_OK: z = "not an error"; break;
+ case SQLITE_ERROR: z = "SQL logic error or missing database"; break;
+ case SQLITE_INTERNAL: z = "internal SQLite implementation flaw"; break;
+ case SQLITE_PERM: z = "access permission denied"; break;
+ case SQLITE_ABORT: z = "callback requested query abort"; break;
+ case SQLITE_BUSY: z = "database is locked"; break;
+ case SQLITE_LOCKED: z = "database table is locked"; break;
+ case SQLITE_NOMEM: z = "out of memory"; break;
+ case SQLITE_READONLY: z = "attempt to write a readonly database"; break;
+ case SQLITE_INTERRUPT: z = "interrupted"; break;
+ case SQLITE_IOERR: z = "disk I/O error"; break;
+ case SQLITE_CORRUPT: z = "database disk image is malformed"; break;
+ case SQLITE_NOTFOUND: z = "table or record not found"; break;
+ case SQLITE_FULL: z = "database is full"; break;
+ case SQLITE_CANTOPEN: z = "unable to open database file"; break;
+ case SQLITE_PROTOCOL: z = "database locking protocol failure"; break;
+ case SQLITE_EMPTY: z = "table contains no data"; break;
+ case SQLITE_SCHEMA: z = "database schema has changed"; break;
+ case SQLITE_TOOBIG: z = "too much data for one table row"; break;
+ case SQLITE_CONSTRAINT: z = "constraint failed"; break;
+ case SQLITE_MISMATCH: z = "datatype mismatch"; break;
+ case SQLITE_MISUSE: z = "library routine called out of sequence";break;
+ case SQLITE_NOLFS: z = "kernel lacks large file support"; break;
+ case SQLITE_AUTH: z = "authorization denied"; break;
+ case SQLITE_FORMAT: z = "auxiliary database format error"; break;
+ case SQLITE_RANGE: z = "bind index out of range"; break;
+ case SQLITE_NOTADB: z = "file is encrypted or is not a database";break;
+ default: z = "unknown error"; break;
+ }
+ return z;
+}
+
+/*
+** This routine implements a busy callback that sleeps and tries
+** again until a timeout value is reached. The timeout value is
+** an integer number of milliseconds passed in as the first
+** argument.
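+**
+** With SQLITE_MIN_SLEEP_MS==1, the delays[] and totals[] tables below
+** implement a progressive backoff: roughly 287 milliseconds are spent
+** over the first 13 retries and each later retry sleeps 100 milliseconds.
+** Otherwise the handler simply sleeps one second per retry until the
+** timeout is exceeded.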
+*/
+static int sqliteDefaultBusyCallback(
+ void *Timeout, /* Maximum amount of time to wait */
+ const char *NotUsed, /* The name of the table that is busy */
+ int count /* Number of times table has been busy */
+){
+#if SQLITE_MIN_SLEEP_MS==1
+ static const char delays[] =
+ { 1, 2, 5, 10, 15, 20, 25, 25, 25, 50, 50, 50, 100};
+ static const short int totals[] =
+ { 0, 1, 3, 8, 18, 33, 53, 78, 103, 128, 178, 228, 287};
+# define NDELAY (sizeof(delays)/sizeof(delays[0]))
+ int timeout = (int)(long)Timeout;
+ int delay, prior;
+
+ if( count <= NDELAY ){
+ delay = delays[count-1];
+ prior = totals[count-1];
+ }else{
+ delay = delays[NDELAY-1];
+ prior = totals[NDELAY-1] + delay*(count-NDELAY-1);
+ }
+ if( prior + delay > timeout ){
+ delay = timeout - prior;
+ if( delay<=0 ) return 0;
+ }
+ sqliteOsSleep(delay);
+ return 1;
+#else
+ int timeout = (int)(long)Timeout;
+ if( (count+1)*1000 > timeout ){
+ return 0;
+ }
+ sqliteOsSleep(1000);
+ return 1;
+#endif
+}
+
+/*
+** This routine sets the busy callback for an Sqlite database to the
+** given callback function with the given argument.
+*/
+void sqlite_busy_handler(
+ sqlite *db,
+ int (*xBusy)(void*,const char*,int),
+ void *pArg
+){
+ db->xBusyCallback = xBusy;
+ db->pBusyArg = pArg;
+}
+
+#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
+/*
+** This routine sets the progress callback for an Sqlite database to the
+** given callback function with the given argument. The progress callback will
+** be invoked every nOps opcodes.
+*/
+void sqlite_progress_handler(
+ sqlite *db,
+ int nOps,
+ int (*xProgress)(void*),
+ void *pArg
+){
+ if( nOps>0 ){
+ db->xProgress = xProgress;
+ db->nProgressOps = nOps;
+ db->pProgressArg = pArg;
+ }else{
+ db->xProgress = 0;
+ db->nProgressOps = 0;
+ db->pProgressArg = 0;
+ }
+}
+#endif
+
+
+/*
+** This routine installs a default busy handler that waits for the
+** specified number of milliseconds before returning 0.
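+**
+** For example, sqlite_busy_timeout(db, 2000) keeps retrying a locked
+** database for roughly two seconds before the pending operation gives
+** up and returns SQLITE_BUSY; sqlite_busy_timeout(db, 0) removes the
+** handler again.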
+*/
+void sqlite_busy_timeout(sqlite *db, int ms){
+ if( ms>0 ){
+ sqlite_busy_handler(db, sqliteDefaultBusyCallback, (void*)(long)ms);
+ }else{
+ sqlite_busy_handler(db, 0, 0);
+ }
+}
+
+/*
+** Cause any pending operation to stop at its earliest opportunity.
+*/
+void sqlite_interrupt(sqlite *db){
+ db->flags |= SQLITE_Interrupt;
+}
+
+/*
+** Windows systems should call this routine to free memory that
+** is returned in the errmsg parameter of sqlite_open() when
+** SQLite is a DLL. For some reason, it does not work to call free()
+** directly.
+**
+** Note that we need to call free() not sqliteFree() here, since every
+** string that is exported from SQLite should have already passed through
+** sqliteStrRealloc().
+*/
+void sqlite_freemem(void *p){ free(p); }
+
+/*
+** Windows systems need functions to call to return the sqlite_version
+** and sqlite_encoding strings since they are unable to access constants
+** within DLLs.
+*/
+const char *sqlite_libversion(void){ return sqlite_version; }
+const char *sqlite_libencoding(void){ return sqlite_encoding; }
+
+/*
+** Create new user-defined functions. The sqlite_create_function()
+** routine creates a regular function and sqlite_create_aggregate()
+** creates an aggregate function.
+**
+** Passing a NULL xFunc argument or NULL xStep and xFinalize arguments
+** disables the function. Calling sqlite_create_function() with the
+** same name and number of arguments as a prior call to
+** sqlite_create_aggregate() disables the prior call to
+** sqlite_create_aggregate(), and vice versa.
+**
+** If nArg is -1 it means that this function will accept any number
+** of arguments, including 0. The maximum allowed value of nArg is 127.
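+**
+** A registration sketch ("half" and halfFunc are made-up names; the
+** callback must match the xFunc signature below):
+**
+**     static void halfFunc(sqlite_func *context, int argc, const char **argv){
+**       if( argc==1 && argv[0] ){
+**         sqlite_set_result_double(context, atof(argv[0])/2.0);
+**       }
+**     }
+**
+**     sqlite_create_function(db, "half", 1, halfFunc, 0);
+**     sqlite_function_type(db, "half", SQLITE_NUMERIC);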
+*/
+int sqlite_create_function(
+ sqlite *db, /* Add the function to this database connection */
+ const char *zName, /* Name of the function to add */
+ int nArg, /* Number of arguments */
+ void (*xFunc)(sqlite_func*,int,const char**), /* The implementation */
+ void *pUserData /* User data */
+){
+ FuncDef *p;
+ int nName;
+ if( db==0 || zName==0 || sqliteSafetyCheck(db) ) return 1;
+ if( nArg<-1 || nArg>127 ) return 1;
+ nName = strlen(zName);
+ if( nName>255 ) return 1;
+ p = sqliteFindFunction(db, zName, nName, nArg, 1);
+ if( p==0 ) return 1;
+ p->xFunc = xFunc;
+ p->xStep = 0;
+ p->xFinalize = 0;
+ p->pUserData = pUserData;
+ return 0;
+}
+int sqlite_create_aggregate(
+ sqlite *db, /* Add the function to this database connection */
+ const char *zName, /* Name of the function to add */
+ int nArg, /* Number of arguments */
+ void (*xStep)(sqlite_func*,int,const char**), /* The step function */
+ void (*xFinalize)(sqlite_func*), /* The finalizer */
+ void *pUserData /* User data */
+){
+ FuncDef *p;
+ int nName;
+ if( db==0 || zName==0 || sqliteSafetyCheck(db) ) return 1;
+ if( nArg<-1 || nArg>127 ) return 1;
+ nName = strlen(zName);
+ if( nName>255 ) return 1;
+ p = sqliteFindFunction(db, zName, nName, nArg, 1);
+ if( p==0 ) return 1;
+ p->xFunc = 0;
+ p->xStep = xStep;
+ p->xFinalize = xFinalize;
+ p->pUserData = pUserData;
+ return 0;
+}
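+
+/*
+** Illustrative sketch (not part of the original file): registering a
+** trivial one-argument scalar function with sqlite_create_function().
+** The "echo" name and its implementation are invented for this example;
+** the md5sum() aggregate registered in md5.c shows the aggregate case.
+*/
+#if 0
+static void echoFunc(sqlite_func *context, int argc, const char **argv){
+  if( argc>0 && argv[0] ){
+    sqlite_set_result_string(context, argv[0], strlen(argv[0]));
+  }
+}
+static void exampleRegisterEcho(sqlite *db){
+  sqlite_create_function(db, "echo", 1, echoFunc, 0);
+}
+#endif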
+
+/*
+** Change the datatype for all functions with a given name. See the
+** header comment for the prototype of this function in sqlite.h for
+** additional information.
+*/
+int sqlite_function_type(sqlite *db, const char *zName, int dataType){
+ FuncDef *p = (FuncDef*)sqliteHashFind(&db->aFunc, zName, strlen(zName));
+ while( p ){
+ p->dataType = dataType;
+ p = p->pNext;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Register a trace function. The pArg from the previously registered trace
+** is returned.
+**
+** A NULL trace function means that no tracing is executed. A non-NULL
+** trace is a pointer to a function that is invoked at the start of each
+** sqlite_exec().
+*/
+void *sqlite_trace(sqlite *db, void (*xTrace)(void*,const char*), void *pArg){
+ void *pOld = db->pTraceArg;
+ db->xTrace = xTrace;
+ db->pTraceArg = pArg;
+ return pOld;
+}
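+
+/*
+** Illustrative sketch (not part of the original file): a trace callback
+** that echoes each statement to stderr. Requires <stdio.h>; the names are
+** invented for this example.
+*/
+#if 0
+static void exampleTrace(void *pArg, const char *zSql){
+  fprintf(stderr, "SQL: %s\n", zSql);
+}
+static void exampleInstallTrace(sqlite *db){
+  void *pOldArg = sqlite_trace(db, exampleTrace, 0);
+  (void)pOldArg;   /* pArg of any previously registered trace */
+}
+#endif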
+
+/*** EXPERIMENTAL ***
+**
+** Register a function to be invoked when a transaction commits.
+** If the function returns non-zero, then the commit becomes a
+** rollback.
+*/
+void *sqlite_commit_hook(
+ sqlite *db, /* Attach the hook to this database */
+ int (*xCallback)(void*), /* Function to invoke on each commit */
+ void *pArg /* Argument to the function */
+){
+ void *pOld = db->pCommitArg;
+ db->xCommitCallback = xCallback;
+ db->pCommitArg = pArg;
+ return pOld;
+}
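+
+/*
+** Illustrative sketch (not part of the original file): a commit hook that
+** allows every commit. Returning non-zero from the callback would turn the
+** commit into a rollback, as described above. Names are invented for this
+** example.
+*/
+#if 0
+static int exampleCommitHook(void *pArg){
+  return 0;   /* allow the commit to proceed */
+}
+static void exampleInstallCommitHook(sqlite *db){
+  sqlite_commit_hook(db, exampleCommitHook, 0);
+}
+#endif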
+
+
+/*
+** This routine is called to create a connection to a database BTree
+** driver. If zFilename is the name of a file, then that file is
+** opened and used. If zFilename is the magic name ":memory:" then
+** the database is stored in memory (and is thus forgotten as soon as
+** the connection is closed.) If zFilename is NULL then the database
+** is for temporary use only and is deleted as soon as the connection
+** is closed.
+**
+** A temporary database can be either a disk file (that is automatically
+** deleted when the file is closed) or a set of red-black trees held in memory,
+** depending on the values of the TEMP_STORE compile-time macro and the
+** db->temp_store variable, according to the following chart:
+**
+** TEMP_STORE db->temp_store Location of temporary database
+** ---------- -------------- ------------------------------
+** 0 any file
+** 1 1 file
+** 1 2 memory
+** 1 0 file
+** 2 1 file
+** 2 2 memory
+** 2 0 memory
+** 3 any memory
+*/
+int sqliteBtreeFactory(
+ const sqlite *db, /* Main database when opening aux otherwise 0 */
+ const char *zFilename, /* Name of the file containing the BTree database */
+ int omitJournal, /* if TRUE then do not journal this file */
+ int nCache, /* How many pages in the page cache */
+ Btree **ppBtree){ /* Pointer to new Btree object written here */
+
+ assert( ppBtree != 0);
+
+#ifndef SQLITE_OMIT_INMEMORYDB
+ if( zFilename==0 ){
+ if (TEMP_STORE == 0) {
+ /* Always use file based temporary DB */
+ return sqliteBtreeOpen(0, omitJournal, nCache, ppBtree);
+ } else if (TEMP_STORE == 1 || TEMP_STORE == 2) {
+ /* Switch depending on compile-time and/or runtime settings. */
+ int location = db->temp_store==0 ? TEMP_STORE : db->temp_store;
+
+ if (location == 1) {
+ return sqliteBtreeOpen(zFilename, omitJournal, nCache, ppBtree);
+ } else {
+ return sqliteRbtreeOpen(0, 0, 0, ppBtree);
+ }
+ } else {
+ /* Always use in-core DB */
+ return sqliteRbtreeOpen(0, 0, 0, ppBtree);
+ }
+ }else if( zFilename[0]==':' && strcmp(zFilename,":memory:")==0 ){
+ return sqliteRbtreeOpen(0, 0, 0, ppBtree);
+ }else
+#endif
+ {
+ return sqliteBtreeOpen(zFilename, omitJournal, nCache, ppBtree);
+ }
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/src/md5.c b/usr/src/cmd/svc/configd/sqlite/src/md5.c
new file mode 100644
index 0000000000..5b61b5eb34
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/md5.c
@@ -0,0 +1,388 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** SQLite uses this code for testing only. It is not a part of
+** the SQLite library. This file implements two new TCL commands
+** "md5" and "md5file" that compute md5 checksums on arbitrary text
+** and on complete files. These commands are used by the "testfixture"
+** program to help verify the correct operation of the SQLite library.
+**
+** The original use of these TCL commands was to test the ROLLBACK
+** feature of SQLite. First compute the MD5-checksum of the database.
+** Then make some changes but rollback the changes rather than commit
+** them. Compute a second MD5-checksum of the file and verify that the
+** two checksums are the same. Such is the original use of this code.
+** New uses may have been added since this comment was written.
+*/
+/*
+ * This code implements the MD5 message-digest algorithm.
+ * The algorithm is due to Ron Rivest. This code was
+ * written by Colin Plumb in 1993, no copyright is claimed.
+ * This code is in the public domain; do with it what you wish.
+ *
+ * Equivalent code is available from RSA Data Security, Inc.
+ * This code has been tested against that, and is equivalent,
+ * except that you don't need to include two pages of legalese
+ * with every copy.
+ *
+ * To compute the message digest of a chunk of bytes, declare an
+ * MD5Context structure, pass it to MD5Init, call MD5Update as
+ * needed on buffers full of bytes, and then call MD5Final, which
+ * will fill a supplied 16-byte array with the digest.
+ */
+#include <tcl.h>
+#include <string.h>
+#include "sqlite.h"
+
+/*
+ * If compiled on a machine that doesn't have a 32-bit integer,
+ * you just set "uint32" to the appropriate datatype for an
+ * unsigned 32-bit integer. For example:
+ *
+ * cc -Duint32='unsigned long' md5.c
+ *
+ */
+#ifndef uint32
+# define uint32 unsigned int
+#endif
+
+struct Context {
+ uint32 buf[4];
+ uint32 bits[2];
+ unsigned char in[64];
+};
+typedef char MD5Context[88];
+
+/*
+ * Note: this code is harmless on little-endian machines.
+ */
+static void byteReverse (unsigned char *buf, unsigned longs){
+ uint32 t;
+ do {
+ t = (uint32)((unsigned)buf[3]<<8 | buf[2]) << 16 |
+ ((unsigned)buf[1]<<8 | buf[0]);
+ *(uint32 *)buf = t;
+ buf += 4;
+ } while (--longs);
+}
+/* The four core functions - F1 is optimized somewhat */
+
+/* #define F1(x, y, z) (x & y | ~x & z) */
+#define F1(x, y, z) (z ^ (x & (y ^ z)))
+#define F2(x, y, z) F1(z, x, y)
+#define F3(x, y, z) (x ^ y ^ z)
+#define F4(x, y, z) (y ^ (x | ~z))
+
+/* This is the central step in the MD5 algorithm. */
+#define MD5STEP(f, w, x, y, z, data, s) \
+ ( w += f(x, y, z) + data, w = w<<s | w>>(32-s), w += x )
+
+/*
+ * The core of the MD5 algorithm, this alters an existing MD5 hash to
+ * reflect the addition of 16 longwords of new data. MD5Update blocks
+ * the data and converts bytes into longwords for this routine.
+ */
+static void MD5Transform(uint32 buf[4], const uint32 in[16]){
+ register uint32 a, b, c, d;
+
+ a = buf[0];
+ b = buf[1];
+ c = buf[2];
+ d = buf[3];
+
+ MD5STEP(F1, a, b, c, d, in[ 0]+0xd76aa478, 7);
+ MD5STEP(F1, d, a, b, c, in[ 1]+0xe8c7b756, 12);
+ MD5STEP(F1, c, d, a, b, in[ 2]+0x242070db, 17);
+ MD5STEP(F1, b, c, d, a, in[ 3]+0xc1bdceee, 22);
+ MD5STEP(F1, a, b, c, d, in[ 4]+0xf57c0faf, 7);
+ MD5STEP(F1, d, a, b, c, in[ 5]+0x4787c62a, 12);
+ MD5STEP(F1, c, d, a, b, in[ 6]+0xa8304613, 17);
+ MD5STEP(F1, b, c, d, a, in[ 7]+0xfd469501, 22);
+ MD5STEP(F1, a, b, c, d, in[ 8]+0x698098d8, 7);
+ MD5STEP(F1, d, a, b, c, in[ 9]+0x8b44f7af, 12);
+ MD5STEP(F1, c, d, a, b, in[10]+0xffff5bb1, 17);
+ MD5STEP(F1, b, c, d, a, in[11]+0x895cd7be, 22);
+ MD5STEP(F1, a, b, c, d, in[12]+0x6b901122, 7);
+ MD5STEP(F1, d, a, b, c, in[13]+0xfd987193, 12);
+ MD5STEP(F1, c, d, a, b, in[14]+0xa679438e, 17);
+ MD5STEP(F1, b, c, d, a, in[15]+0x49b40821, 22);
+
+ MD5STEP(F2, a, b, c, d, in[ 1]+0xf61e2562, 5);
+ MD5STEP(F2, d, a, b, c, in[ 6]+0xc040b340, 9);
+ MD5STEP(F2, c, d, a, b, in[11]+0x265e5a51, 14);
+ MD5STEP(F2, b, c, d, a, in[ 0]+0xe9b6c7aa, 20);
+ MD5STEP(F2, a, b, c, d, in[ 5]+0xd62f105d, 5);
+ MD5STEP(F2, d, a, b, c, in[10]+0x02441453, 9);
+ MD5STEP(F2, c, d, a, b, in[15]+0xd8a1e681, 14);
+ MD5STEP(F2, b, c, d, a, in[ 4]+0xe7d3fbc8, 20);
+ MD5STEP(F2, a, b, c, d, in[ 9]+0x21e1cde6, 5);
+ MD5STEP(F2, d, a, b, c, in[14]+0xc33707d6, 9);
+ MD5STEP(F2, c, d, a, b, in[ 3]+0xf4d50d87, 14);
+ MD5STEP(F2, b, c, d, a, in[ 8]+0x455a14ed, 20);
+ MD5STEP(F2, a, b, c, d, in[13]+0xa9e3e905, 5);
+ MD5STEP(F2, d, a, b, c, in[ 2]+0xfcefa3f8, 9);
+ MD5STEP(F2, c, d, a, b, in[ 7]+0x676f02d9, 14);
+ MD5STEP(F2, b, c, d, a, in[12]+0x8d2a4c8a, 20);
+
+ MD5STEP(F3, a, b, c, d, in[ 5]+0xfffa3942, 4);
+ MD5STEP(F3, d, a, b, c, in[ 8]+0x8771f681, 11);
+ MD5STEP(F3, c, d, a, b, in[11]+0x6d9d6122, 16);
+ MD5STEP(F3, b, c, d, a, in[14]+0xfde5380c, 23);
+ MD5STEP(F3, a, b, c, d, in[ 1]+0xa4beea44, 4);
+ MD5STEP(F3, d, a, b, c, in[ 4]+0x4bdecfa9, 11);
+ MD5STEP(F3, c, d, a, b, in[ 7]+0xf6bb4b60, 16);
+ MD5STEP(F3, b, c, d, a, in[10]+0xbebfbc70, 23);
+ MD5STEP(F3, a, b, c, d, in[13]+0x289b7ec6, 4);
+ MD5STEP(F3, d, a, b, c, in[ 0]+0xeaa127fa, 11);
+ MD5STEP(F3, c, d, a, b, in[ 3]+0xd4ef3085, 16);
+ MD5STEP(F3, b, c, d, a, in[ 6]+0x04881d05, 23);
+ MD5STEP(F3, a, b, c, d, in[ 9]+0xd9d4d039, 4);
+ MD5STEP(F3, d, a, b, c, in[12]+0xe6db99e5, 11);
+ MD5STEP(F3, c, d, a, b, in[15]+0x1fa27cf8, 16);
+ MD5STEP(F3, b, c, d, a, in[ 2]+0xc4ac5665, 23);
+
+ MD5STEP(F4, a, b, c, d, in[ 0]+0xf4292244, 6);
+ MD5STEP(F4, d, a, b, c, in[ 7]+0x432aff97, 10);
+ MD5STEP(F4, c, d, a, b, in[14]+0xab9423a7, 15);
+ MD5STEP(F4, b, c, d, a, in[ 5]+0xfc93a039, 21);
+ MD5STEP(F4, a, b, c, d, in[12]+0x655b59c3, 6);
+ MD5STEP(F4, d, a, b, c, in[ 3]+0x8f0ccc92, 10);
+ MD5STEP(F4, c, d, a, b, in[10]+0xffeff47d, 15);
+ MD5STEP(F4, b, c, d, a, in[ 1]+0x85845dd1, 21);
+ MD5STEP(F4, a, b, c, d, in[ 8]+0x6fa87e4f, 6);
+ MD5STEP(F4, d, a, b, c, in[15]+0xfe2ce6e0, 10);
+ MD5STEP(F4, c, d, a, b, in[ 6]+0xa3014314, 15);
+ MD5STEP(F4, b, c, d, a, in[13]+0x4e0811a1, 21);
+ MD5STEP(F4, a, b, c, d, in[ 4]+0xf7537e82, 6);
+ MD5STEP(F4, d, a, b, c, in[11]+0xbd3af235, 10);
+ MD5STEP(F4, c, d, a, b, in[ 2]+0x2ad7d2bb, 15);
+ MD5STEP(F4, b, c, d, a, in[ 9]+0xeb86d391, 21);
+
+ buf[0] += a;
+ buf[1] += b;
+ buf[2] += c;
+ buf[3] += d;
+}
+
+/*
+ * Start MD5 accumulation. Set bit count to 0 and buffer to mysterious
+ * initialization constants.
+ */
+static void MD5Init(MD5Context *pCtx){
+ struct Context *ctx = (struct Context *)pCtx;
+ ctx->buf[0] = 0x67452301;
+ ctx->buf[1] = 0xefcdab89;
+ ctx->buf[2] = 0x98badcfe;
+ ctx->buf[3] = 0x10325476;
+ ctx->bits[0] = 0;
+ ctx->bits[1] = 0;
+}
+
+/*
+ * Update context to reflect the concatenation of another buffer full
+ * of bytes.
+ */
+static
+void MD5Update(MD5Context *pCtx, const unsigned char *buf, unsigned int len){
+ struct Context *ctx = (struct Context *)pCtx;
+ uint32 t;
+
+ /* Update bitcount */
+
+ t = ctx->bits[0];
+ if ((ctx->bits[0] = t + ((uint32)len << 3)) < t)
+ ctx->bits[1]++; /* Carry from low to high */
+ ctx->bits[1] += len >> 29;
+
+ t = (t >> 3) & 0x3f; /* Bytes already in shsInfo->data */
+
+ /* Handle any leading odd-sized chunks */
+
+ if ( t ) {
+ unsigned char *p = (unsigned char *)ctx->in + t;
+
+ t = 64-t;
+ if (len < t) {
+ memcpy(p, buf, len);
+ return;
+ }
+ memcpy(p, buf, t);
+ byteReverse(ctx->in, 16);
+ MD5Transform(ctx->buf, (uint32 *)ctx->in);
+ buf += t;
+ len -= t;
+ }
+
+ /* Process data in 64-byte chunks */
+
+ while (len >= 64) {
+ memcpy(ctx->in, buf, 64);
+ byteReverse(ctx->in, 16);
+ MD5Transform(ctx->buf, (uint32 *)ctx->in);
+ buf += 64;
+ len -= 64;
+ }
+
+ /* Handle any remaining bytes of data. */
+
+ memcpy(ctx->in, buf, len);
+}
+
+/*
+ * Final wrapup - pad to 64-byte boundary with the bit pattern
+ * 1 0* (64-bit count of bits processed, MSB-first)
+ */
+static void MD5Final(unsigned char digest[16], MD5Context *pCtx){
+ struct Context *ctx = (struct Context *)pCtx;
+ unsigned count;
+ unsigned char *p;
+
+ /* Compute number of bytes mod 64 */
+ count = (ctx->bits[0] >> 3) & 0x3F;
+
+ /* Set the first char of padding to 0x80. This is safe since there is
+ always at least one byte free */
+ p = ctx->in + count;
+ *p++ = 0x80;
+
+ /* Bytes of padding needed to make 64 bytes */
+ count = 64 - 1 - count;
+
+ /* Pad out to 56 mod 64 */
+ if (count < 8) {
+ /* Two lots of padding: Pad the first block to 64 bytes */
+ memset(p, 0, count);
+ byteReverse(ctx->in, 16);
+ MD5Transform(ctx->buf, (uint32 *)ctx->in);
+
+ /* Now fill the next block with 56 bytes */
+ memset(ctx->in, 0, 56);
+ } else {
+ /* Pad block to 56 bytes */
+ memset(p, 0, count-8);
+ }
+ byteReverse(ctx->in, 14);
+
+ /* Append length in bits and transform */
+ ((uint32 *)ctx->in)[ 14 ] = ctx->bits[0];
+ ((uint32 *)ctx->in)[ 15 ] = ctx->bits[1];
+
+ MD5Transform(ctx->buf, (uint32 *)ctx->in);
+ byteReverse((unsigned char *)ctx->buf, 4);
+ memcpy(digest, ctx->buf, 16);
+  memset(ctx, 0, sizeof(*ctx)); /* In case it's sensitive */
+}
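+
+/*
+** Illustrative sketch (not part of the original file): the
+** MD5Init/MD5Update/MD5Final calling sequence described at the top of this
+** file, applied to a short in-memory string. The buffer contents are
+** arbitrary.
+*/
+#if 0
+static void exampleDigest(void){
+  MD5Context ctx;
+  unsigned char digest[16];
+  static const char zText[] = "hello world";
+
+  MD5Init(&ctx);
+  MD5Update(&ctx, (const unsigned char*)zText, (unsigned)strlen(zText));
+  MD5Final(digest, &ctx);
+  /* digest[] now holds the 16-byte MD5 checksum of zText */
+}
+#endif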
+
+/*
+** Convert a digest into base-16. digest should be declared as
+** "unsigned char digest[16]" in the calling function. The MD5
+** digest is stored in the first 16 bytes. zBuf should
+** be "char zBuf[33]".
+*/
+static void DigestToBase16(unsigned char *digest, char *zBuf){
+ static char const zEncode[] = "0123456789abcdef";
+ int i, j;
+
+ for(j=i=0; i<16; i++){
+ int a = digest[i];
+ zBuf[j++] = zEncode[(a>>4)&0xf];
+ zBuf[j++] = zEncode[a & 0xf];
+ }
+ zBuf[j] = 0;
+}
+
+/*
+** A TCL command for md5. The argument is the text to be hashed. The
+** result is the hash in base-16.
+*/
+static int md5_cmd(void*cd, Tcl_Interp *interp, int argc, const char **argv){
+ MD5Context ctx;
+ unsigned char digest[16];
+
+ if( argc!=2 ){
+ Tcl_AppendResult(interp,"wrong # args: should be \"", argv[0],
+ " TEXT\"", 0);
+ return TCL_ERROR;
+ }
+ MD5Init(&ctx);
+ MD5Update(&ctx, (unsigned char*)argv[1], (unsigned)strlen(argv[1]));
+ MD5Final(digest, &ctx);
+ DigestToBase16(digest, interp->result);
+ return TCL_OK;
+}
+
+/*
+** A TCL command to take the md5 hash of a file. The argument is the
+** name of the file.
+*/
+static int md5file_cmd(void*cd, Tcl_Interp*interp, int argc, const char **argv){
+ FILE *in;
+ MD5Context ctx;
+ unsigned char digest[16];
+ char zBuf[10240];
+
+ if( argc!=2 ){
+ Tcl_AppendResult(interp,"wrong # args: should be \"", argv[0],
+ " FILENAME\"", 0);
+ return TCL_ERROR;
+ }
+ in = fopen(argv[1],"rb");
+ if( in==0 ){
+ Tcl_AppendResult(interp,"unable to open file \"", argv[1],
+ "\" for reading", 0);
+ return TCL_ERROR;
+ }
+ MD5Init(&ctx);
+ for(;;){
+ int n;
+ n = fread(zBuf, 1, sizeof(zBuf), in);
+ if( n<=0 ) break;
+ MD5Update(&ctx, (unsigned char*)zBuf, (unsigned)n);
+ }
+ fclose(in);
+ MD5Final(digest, &ctx);
+ DigestToBase16(digest, interp->result);
+ return TCL_OK;
+}
+
+/*
+** Register the two TCL commands above with the TCL interpreter.
+*/
+int Md5_Init(Tcl_Interp *interp){
+ Tcl_CreateCommand(interp, "md5", (Tcl_CmdProc*)md5_cmd, 0, 0);
+ Tcl_CreateCommand(interp, "md5file", (Tcl_CmdProc*)md5file_cmd, 0, 0);
+ return TCL_OK;
+}
+
+/*
+** During testing, the special md5sum() aggregate function is available
+** inside SQLite. The following routines implement that function.
+*/
+static void md5step(sqlite_func *context, int argc, const char **argv){
+ MD5Context *p;
+ int i;
+ if( argc<1 ) return;
+ p = sqlite_aggregate_context(context, sizeof(*p));
+ if( p==0 ) return;
+ if( sqlite_aggregate_count(context)==1 ){
+ MD5Init(p);
+ }
+ for(i=0; i<argc; i++){
+ if( argv[i] ){
+ MD5Update(p, (unsigned char*)argv[i], strlen(argv[i]));
+ }
+ }
+}
+static void md5finalize(sqlite_func *context){
+ MD5Context *p;
+ unsigned char digest[16];
+ char zBuf[33];
+ p = sqlite_aggregate_context(context, sizeof(*p));
+ MD5Final(digest,p);
+ DigestToBase16(digest, zBuf);
+ sqlite_set_result_string(context, zBuf, strlen(zBuf));
+}
+void Md5_Register(sqlite *db){
+ sqlite_create_aggregate(db, "md5sum", -1, md5step, md5finalize, 0);
+}
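+
+/*
+** Illustrative sketch (not part of the original file): once Md5_Register()
+** has been called on a connection, the md5sum() aggregate can be used from
+** SQL through the usual sqlite_exec() entry point. The table and column
+** names are invented for this example.
+*/
+#if 0
+static void exampleMd5sum(sqlite *db, char **pzErr){
+  Md5_Register(db);
+  sqlite_exec(db, "SELECT md5sum(x) FROM t1", 0, 0, pzErr);
+}
+#endif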
diff --git a/usr/src/cmd/svc/configd/sqlite/src/os.c b/usr/src/cmd/svc/configd/sqlite/src/os.c
new file mode 100644
index 0000000000..93af251869
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/os.c
@@ -0,0 +1,1848 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 16
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains code that is specific to particular operating
+** systems. The purpose of this file is to provide a uniform abstraction
+** on which the rest of SQLite can operate.
+*/
+#include "os.h" /* Must be first to enable large file support */
+#include "sqliteInt.h"
+
+#if OS_UNIX
+# include <time.h>
+# include <errno.h>
+# include <unistd.h>
+# ifndef O_LARGEFILE
+# define O_LARGEFILE 0
+# endif
+# ifdef SQLITE_DISABLE_LFS
+# undef O_LARGEFILE
+# define O_LARGEFILE 0
+# endif
+# ifndef O_NOFOLLOW
+# define O_NOFOLLOW 0
+# endif
+# ifndef O_BINARY
+# define O_BINARY 0
+# endif
+#endif
+
+
+#if OS_WIN
+# include <winbase.h>
+#endif
+
+#if OS_MAC
+# include <extras.h>
+# include <path2fss.h>
+# include <TextUtils.h>
+# include <FinderRegistry.h>
+# include <Folders.h>
+# include <Timer.h>
+# include <OSUtils.h>
+#endif
+
+/*
+** The DJGPP compiler environment looks mostly like Unix, but it
+** lacks the fcntl() system call. So redefine fcntl() to be something
+** that always succeeds. This means that locking does not occur under
+** DJGPP. But it's DOS - what did you expect?
+*/
+#ifdef __DJGPP__
+# define fcntl(A,B,C) 0
+#endif
+
+/*
+** Macros used to determine whether or not to use threads. The
+** SQLITE_UNIX_THREADS macro is defined if we are synchronizing for
+** Posix threads and SQLITE_W32_THREADS is defined if we are
+** synchronizing using Win32 threads.
+*/
+#if OS_UNIX && defined(THREADSAFE) && THREADSAFE
+# include <pthread.h>
+# define SQLITE_UNIX_THREADS 1
+#endif
+#if OS_WIN && defined(THREADSAFE) && THREADSAFE
+# define SQLITE_W32_THREADS 1
+#endif
+#if OS_MAC && defined(THREADSAFE) && THREADSAFE
+# include <Multiprocessing.h>
+# define SQLITE_MACOS_MULTITASKING 1
+#endif
+
+/*
+** Macros for performance tracing. Normally turned off
+*/
+#if 0
+static int last_page = 0;
+__inline__ unsigned long long int hwtime(void){
+ unsigned long long int x;
+ __asm__("rdtsc\n\t"
+ "mov %%edx, %%ecx\n\t"
+ :"=A" (x));
+ return x;
+}
+static unsigned long long int g_start;
+static unsigned int elapse;
+#define TIMER_START g_start=hwtime()
+#define TIMER_END elapse=hwtime()-g_start
+#define SEEK(X) last_page=(X)
+#define TRACE1(X) fprintf(stderr,X)
+#define TRACE2(X,Y) fprintf(stderr,X,Y)
+#define TRACE3(X,Y,Z) fprintf(stderr,X,Y,Z)
+#define TRACE4(X,Y,Z,A) fprintf(stderr,X,Y,Z,A)
+#define TRACE5(X,Y,Z,A,B) fprintf(stderr,X,Y,Z,A,B)
+#else
+#define TIMER_START
+#define TIMER_END
+#define SEEK(X)
+#define TRACE1(X)
+#define TRACE2(X,Y)
+#define TRACE3(X,Y,Z)
+#define TRACE4(X,Y,Z,A)
+#define TRACE5(X,Y,Z,A,B)
+#endif
+
+
+#if OS_UNIX
+/*
+** Here is the dirt on POSIX advisory locks: ANSI STD 1003.1 (1996)
+** section 6.5.2.2 lines 483 through 490 specify that when a process
+** sets or clears a lock, that operation overrides any prior locks set
+** by the same process. It does not explicitly say so, but this implies
+** that it overrides locks set by the same process using a different
+** file descriptor. Consider this test case:
+**
+** int fd1 = open("./file1", O_RDWR|O_CREAT, 0644);
+** int fd2 = open("./file2", O_RDWR|O_CREAT, 0644);
+**
+** Suppose ./file1 and ./file2 are really the same file (because
+** one is a hard or symbolic link to the other) then if you set
+** an exclusive lock on fd1, then try to get an exclusive lock
+** on fd2, it works. I would have expected the second lock to
+** fail since there was already a lock on the file due to fd1.
+** But not so. Since both locks came from the same process, the
+** second overrides the first, even though they were on different
+** file descriptors opened on different file names.
+**
+** Bummer. If you ask me, this is broken. Badly broken. It means
+** that we cannot use POSIX locks to synchronize file access among
+** competing threads of the same process. POSIX locks will work fine
+** to synchronize access for threads in separate processes, but not
+** threads within the same process.
+**
+** To work around the problem, SQLite has to manage file locks internally
+** on its own. Whenever a new database is opened, we have to find the
+** specific inode of the database file (the inode is determined by the
+** st_dev and st_ino fields of the stat structure that fstat() fills in)
+** and check for locks already existing on that inode. When locks are
+** created or removed, we have to look at our own internal record of the
+** locks to see if another thread has previously set a lock on that same
+** inode.
+**
+** The OsFile structure for POSIX is no longer just an integer file
+** descriptor. It is now a structure that holds the integer file
+** descriptor and a pointer to a structure that describes the internal
+** locks on the corresponding inode. There is one locking structure
+** per inode, so if the same inode is opened twice, both OsFile structures
+** point to the same locking structure. The locking structure keeps
+** a reference count (so we will know when to delete it) and a "cnt"
+** field that tells us its internal lock status. cnt==0 means the
+** file is unlocked. cnt==-1 means the file has an exclusive lock.
+** cnt>0 means there are cnt shared locks on the file.
+**
+** Any attempt to lock or unlock a file first checks the locking
+** structure. The fcntl() system call is only invoked to set a
+** POSIX lock if the internal lock structure transitions between
+** a locked and an unlocked state.
+**
+** 2004-Jan-11:
+** More recent discoveries about POSIX advisory locks. (The more
+** I discover, the more I realize that POSIX advisory locks are
+** an abomination.)
+**
+** If you close a file descriptor that points to a file that has locks,
+** all locks on that file that are owned by the current process are
+** released. To work around this problem, each OsFile structure contains
+** a pointer to an openCnt structure. There is one openCnt structure
+** per open inode, which means that multiple OsFiles can point to a single
+** openCnt. When an attempt is made to close an OsFile, if there are
+** other OsFiles open on the same inode that are holding locks, the call
+** to close() the file descriptor is deferred until all of the locks clear.
+** The openCnt structure keeps a list of file descriptors that need to
+** be closed and that list is walked (and cleared) when the last lock
+** clears.
+**
+** First, under Linux threads, because each thread has a separate
+** process ID, lock operations in one thread do not override locks
+** to the same file in other threads. Linux threads behave like
+** separate processes in this respect. But, if you close a file
+** descriptor in linux threads, all locks are cleared, even locks
+** on other threads and even though the other threads have different
+** process IDs. Linux threads is inconsistent in this respect.
+** (I'm beginning to think that linux threads is an abomination too.)
+** The consequence of this all is that the hash table for the lockInfo
+** structure has to include the process id as part of its key because
+** locks in different threads are treated as distinct. But the
+** openCnt structure should not include the process id in its
+** key because close() clears lock on all threads, not just the current
+** thread. Were it not for this goofiness in linux threads, we could
+** combine the lockInfo and openCnt structures into a single structure.
+*/
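+
+/*
+** Illustrative sketch (not part of the original file): the scenario
+** described above, assuming ./file2 is a hard or symbolic link to ./file1.
+** Both fcntl() calls succeed within a single process, even though fd1 and
+** fd2 name the same underlying file. Names are invented for this example.
+*/
+#if 0
+static void exampleSameProcessLocks(void){
+  int fd1 = open("./file1", O_RDWR|O_CREAT, 0644);
+  int fd2 = open("./file2", O_RDWR|O_CREAT, 0644);
+  struct flock lock;
+
+  lock.l_type = F_WRLCK;
+  lock.l_whence = SEEK_SET;
+  lock.l_start = lock.l_len = 0L;
+  fcntl(fd1, F_SETLK, &lock);   /* exclusive lock via fd1 */
+  fcntl(fd2, F_SETLK, &lock);   /* also succeeds: same process overrides it */
+}
+#endif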
+
+/*
+** An instance of the following structure serves as the key used
+** to locate a particular lockInfo structure given its inode. Note
+** that we have to include the process ID as part of the key. On some
+** threading implementations (ex: linux), each thread has a separate
+** process ID.
+*/
+struct lockKey {
+ dev_t dev; /* Device number */
+ ino_t ino; /* Inode number */
+ pid_t pid; /* Process ID */
+};
+
+/*
+** An instance of the following structure is allocated for each open
+** inode on each thread with a different process ID. (Threads have
+** different process IDs on linux, but not on most other unixes.)
+**
+** A single inode can have multiple file descriptors, so each OsFile
+** structure contains a pointer to an instance of this object and this
+** object keeps a count of the number of OsFiles pointing to it.
+*/
+struct lockInfo {
+ struct lockKey key; /* The lookup key */
+ int cnt; /* 0: unlocked. -1: write lock. 1...: read lock. */
+ int nRef; /* Number of pointers to this structure */
+};
+
+/*
+** An instance of the following structure serves as the key used
+** to locate a particular openCnt structure given its inode. This
+** is the same as the lockKey except that the process ID is omitted.
+*/
+struct openKey {
+ dev_t dev; /* Device number */
+ ino_t ino; /* Inode number */
+};
+
+/*
+** An instance of the following structure is allocated for each open
+** inode. This structure keeps track of the number of locks on that
+** inode. If a close is attempted against an inode that is holding
+** locks, the close is deferred until all locks clear by adding the
+** file descriptor to be closed to the pending list.
+*/
+struct openCnt {
+ struct openKey key; /* The lookup key */
+ int nRef; /* Number of pointers to this structure */
+ int nLock; /* Number of outstanding locks */
+ int nPending; /* Number of pending close() operations */
+ int *aPending; /* Malloced space holding fd's awaiting a close() */
+};
+
+/*
+** These hash tables map inodes and process IDs into lockInfo and openCnt
+** structures. Access to these hash tables must be protected by a mutex.
+*/
+static Hash lockHash = { SQLITE_HASH_BINARY, 0, 0, 0, 0, 0 };
+static Hash openHash = { SQLITE_HASH_BINARY, 0, 0, 0, 0, 0 };
+
+/*
+** Release a lockInfo structure previously allocated by findLockInfo().
+*/
+static void releaseLockInfo(struct lockInfo *pLock){
+ pLock->nRef--;
+ if( pLock->nRef==0 ){
+ sqliteHashInsert(&lockHash, &pLock->key, sizeof(pLock->key), 0);
+ sqliteFree(pLock);
+ }
+}
+
+/*
+** Release an openCnt structure previously allocated by findLockInfo().
+*/
+static void releaseOpenCnt(struct openCnt *pOpen){
+ pOpen->nRef--;
+ if( pOpen->nRef==0 ){
+ sqliteHashInsert(&openHash, &pOpen->key, sizeof(pOpen->key), 0);
+ sqliteFree(pOpen->aPending);
+ sqliteFree(pOpen);
+ }
+}
+
+/*
+** Given a file descriptor, locate lockInfo and openCnt structures that
+** describe that file descriptor. Create new ones if necessary. The
+** return values might be unset if an error occurs.
+**
+** Return the number of errors.
+*/
+int findLockInfo(
+ int fd, /* The file descriptor used in the key */
+ struct lockInfo **ppLock, /* Return the lockInfo structure here */
+ struct openCnt **ppOpen /* Return the openCnt structure here */
+){
+ int rc;
+ struct lockKey key1;
+ struct openKey key2;
+ struct stat statbuf;
+ struct lockInfo *pLock;
+ struct openCnt *pOpen;
+ rc = fstat(fd, &statbuf);
+ if( rc!=0 ) return 1;
+ memset(&key1, 0, sizeof(key1));
+ key1.dev = statbuf.st_dev;
+ key1.ino = statbuf.st_ino;
+ key1.pid = getpid();
+ memset(&key2, 0, sizeof(key2));
+ key2.dev = statbuf.st_dev;
+ key2.ino = statbuf.st_ino;
+ pLock = (struct lockInfo*)sqliteHashFind(&lockHash, &key1, sizeof(key1));
+ if( pLock==0 ){
+ struct lockInfo *pOld;
+ pLock = sqliteMallocRaw( sizeof(*pLock) );
+ if( pLock==0 ) return 1;
+ pLock->key = key1;
+ pLock->nRef = 1;
+ pLock->cnt = 0;
+ pOld = sqliteHashInsert(&lockHash, &pLock->key, sizeof(key1), pLock);
+ if( pOld!=0 ){
+ assert( pOld==pLock );
+ sqliteFree(pLock);
+ return 1;
+ }
+ }else{
+ pLock->nRef++;
+ }
+ *ppLock = pLock;
+ pOpen = (struct openCnt*)sqliteHashFind(&openHash, &key2, sizeof(key2));
+ if( pOpen==0 ){
+ struct openCnt *pOld;
+ pOpen = sqliteMallocRaw( sizeof(*pOpen) );
+ if( pOpen==0 ){
+ releaseLockInfo(pLock);
+ return 1;
+ }
+ pOpen->key = key2;
+ pOpen->nRef = 1;
+ pOpen->nLock = 0;
+ pOpen->nPending = 0;
+ pOpen->aPending = 0;
+ pOld = sqliteHashInsert(&openHash, &pOpen->key, sizeof(key2), pOpen);
+ if( pOld!=0 ){
+ assert( pOld==pOpen );
+ sqliteFree(pOpen);
+ releaseLockInfo(pLock);
+ return 1;
+ }
+ }else{
+ pOpen->nRef++;
+ }
+ *ppOpen = pOpen;
+ return 0;
+}
+
+#endif /** POSIX advisory lock work-around **/
+
+/*
+** If we compile with the SQLITE_TEST macro set, then the following block
+** of code will give us the ability to simulate a disk I/O error. This
+** is used for testing the I/O recovery logic.
+*/
+#ifdef SQLITE_TEST
+int sqlite_io_error_pending = 0;
+#define SimulateIOError(A) \
+ if( sqlite_io_error_pending ) \
+ if( sqlite_io_error_pending-- == 1 ){ local_ioerr(); return A; }
+static void local_ioerr(){
+ sqlite_io_error_pending = 0; /* Really just a place to set a breakpoint */
+}
+#else
+#define SimulateIOError(A)
+#endif
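+
+/*
+** Illustrative sketch (not part of the original file): how a test harness
+** might use the hook above. Setting sqlite_io_error_pending to N makes the
+** Nth subsequent I/O primitive that checks SimulateIOError() fail. The
+** function name is invented, and the variable only exists under
+** SQLITE_TEST.
+*/
+#if 0
+static void exampleArmIOError(void){
+  sqlite_io_error_pending = 3;   /* the 3rd I/O call from now will fail */
+}
+#endif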
+
+/*
+** When testing, keep a count of the number of open files.
+*/
+#ifdef SQLITE_TEST
+int sqlite_open_file_count = 0;
+#define OpenCounter(X) sqlite_open_file_count+=(X)
+#else
+#define OpenCounter(X)
+#endif
+
+
+/*
+** Delete the named file
+*/
+int sqliteOsDelete(const char *zFilename){
+#if OS_UNIX
+ unlink(zFilename);
+#endif
+#if OS_WIN
+ DeleteFile(zFilename);
+#endif
+#if OS_MAC
+ unlink(zFilename);
+#endif
+ return SQLITE_OK;
+}
+
+/*
+** Return TRUE if the named file exists.
+*/
+int sqliteOsFileExists(const char *zFilename){
+#if OS_UNIX
+ return access(zFilename, 0)==0;
+#endif
+#if OS_WIN
+ return GetFileAttributes(zFilename) != 0xffffffff;
+#endif
+#if OS_MAC
+ return access(zFilename, 0)==0;
+#endif
+}
+
+
+#if 0 /* NOT USED */
+/*
+** Change the name of an existing file.
+*/
+int sqliteOsFileRename(const char *zOldName, const char *zNewName){
+#if OS_UNIX
+ if( link(zOldName, zNewName) ){
+ return SQLITE_ERROR;
+ }
+ unlink(zOldName);
+ return SQLITE_OK;
+#endif
+#if OS_WIN
+ if( !MoveFile(zOldName, zNewName) ){
+ return SQLITE_ERROR;
+ }
+ return SQLITE_OK;
+#endif
+#if OS_MAC
+ /**** FIX ME ***/
+ return SQLITE_ERROR;
+#endif
+}
+#endif /* NOT USED */
+
+/*
+** Attempt to open a file for both reading and writing. If that
+** fails, try opening it read-only. If the file does not exist,
+** try to create it.
+**
+** On success, a handle for the open file is written to *id
+** and *pReadonly is set to 0 if the file was opened for reading and
+** writing or 1 if the file was opened read-only. The function returns
+** SQLITE_OK.
+**
+** On failure, the function returns SQLITE_CANTOPEN and leaves
+** *id and *pReadonly unchanged.
+*/
+int sqliteOsOpenReadWrite(
+ const char *zFilename,
+ OsFile *id,
+ int *pReadonly
+){
+#if OS_UNIX
+ int rc;
+ id->dirfd = -1;
+ id->fd = open(zFilename, O_RDWR|O_CREAT|O_LARGEFILE|O_BINARY, 0644);
+ if( id->fd<0 ){
+#ifdef EISDIR
+ if( errno==EISDIR ){
+ return SQLITE_CANTOPEN;
+ }
+#endif
+ id->fd = open(zFilename, O_RDONLY|O_LARGEFILE|O_BINARY);
+ if( id->fd<0 ){
+ return SQLITE_CANTOPEN;
+ }
+ *pReadonly = 1;
+ }else{
+ *pReadonly = 0;
+ }
+ sqliteOsEnterMutex();
+ rc = findLockInfo(id->fd, &id->pLock, &id->pOpen);
+ sqliteOsLeaveMutex();
+ if( rc ){
+ close(id->fd);
+ return SQLITE_NOMEM;
+ }
+ id->locked = 0;
+ TRACE3("OPEN %-3d %s\n", id->fd, zFilename);
+ OpenCounter(+1);
+ return SQLITE_OK;
+#endif
+#if OS_WIN
+ HANDLE h = CreateFile(zFilename,
+ GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE,
+ NULL,
+ OPEN_ALWAYS,
+ FILE_ATTRIBUTE_NORMAL | FILE_FLAG_RANDOM_ACCESS,
+ NULL
+ );
+ if( h==INVALID_HANDLE_VALUE ){
+ h = CreateFile(zFilename,
+ GENERIC_READ,
+ FILE_SHARE_READ,
+ NULL,
+ OPEN_ALWAYS,
+ FILE_ATTRIBUTE_NORMAL | FILE_FLAG_RANDOM_ACCESS,
+ NULL
+ );
+ if( h==INVALID_HANDLE_VALUE ){
+ return SQLITE_CANTOPEN;
+ }
+ *pReadonly = 1;
+ }else{
+ *pReadonly = 0;
+ }
+ id->h = h;
+ id->locked = 0;
+ OpenCounter(+1);
+ return SQLITE_OK;
+#endif
+#if OS_MAC
+ FSSpec fsSpec;
+# ifdef _LARGE_FILE
+ HFSUniStr255 dfName;
+ FSRef fsRef;
+ if( __path2fss(zFilename, &fsSpec) != noErr ){
+ if( HCreate(fsSpec.vRefNum, fsSpec.parID, fsSpec.name, 'SQLI', cDocumentFile) != noErr )
+ return SQLITE_CANTOPEN;
+ }
+ if( FSpMakeFSRef(&fsSpec, &fsRef) != noErr )
+ return SQLITE_CANTOPEN;
+ FSGetDataForkName(&dfName);
+ if( FSOpenFork(&fsRef, dfName.length, dfName.unicode,
+ fsRdWrShPerm, &(id->refNum)) != noErr ){
+ if( FSOpenFork(&fsRef, dfName.length, dfName.unicode,
+ fsRdWrPerm, &(id->refNum)) != noErr ){
+ if (FSOpenFork(&fsRef, dfName.length, dfName.unicode,
+ fsRdPerm, &(id->refNum)) != noErr )
+ return SQLITE_CANTOPEN;
+ else
+ *pReadonly = 1;
+ } else
+ *pReadonly = 0;
+ } else
+ *pReadonly = 0;
+# else
+ __path2fss(zFilename, &fsSpec);
+ if( !sqliteOsFileExists(zFilename) ){
+ if( HCreate(fsSpec.vRefNum, fsSpec.parID, fsSpec.name, 'SQLI', cDocumentFile) != noErr )
+ return SQLITE_CANTOPEN;
+ }
+ if( HOpenDF(fsSpec.vRefNum, fsSpec.parID, fsSpec.name, fsRdWrShPerm, &(id->refNum)) != noErr ){
+ if( HOpenDF(fsSpec.vRefNum, fsSpec.parID, fsSpec.name, fsRdWrPerm, &(id->refNum)) != noErr ){
+ if( HOpenDF(fsSpec.vRefNum, fsSpec.parID, fsSpec.name, fsRdPerm, &(id->refNum)) != noErr )
+ return SQLITE_CANTOPEN;
+ else
+ *pReadonly = 1;
+ } else
+ *pReadonly = 0;
+ } else
+ *pReadonly = 0;
+# endif
+ if( HOpenRF(fsSpec.vRefNum, fsSpec.parID, fsSpec.name, fsRdWrShPerm, &(id->refNumRF)) != noErr){
+ id->refNumRF = -1;
+ }
+ id->locked = 0;
+ id->delOnClose = 0;
+ OpenCounter(+1);
+ return SQLITE_OK;
+#endif
+}
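+
+/*
+** Illustrative sketch (not part of the original file): typical use of the
+** routine above by higher layers. The file name is arbitrary and OsFile
+** comes from os.h.
+*/
+#if 0
+static void exampleOpen(void){
+  OsFile fd;
+  int readOnly;
+  if( sqliteOsOpenReadWrite("test.db", &fd, &readOnly)==SQLITE_OK ){
+    /* readOnly is 1 if only a read-only descriptor could be obtained */
+    sqliteOsClose(&fd);
+  }
+}
+#endif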
+
+
+/*
+** Attempt to open a new file for exclusive access by this process.
+** The file will be opened for both reading and writing. To avoid
+** a potential security problem, we do not allow the file to have
+** previously existed. Nor do we allow the file to be a symbolic
+** link.
+**
+** If delFlag is true, then make arrangements to automatically delete
+** the file when it is closed.
+**
+** On success, write the file handle into *id and return SQLITE_OK.
+**
+** On failure, return SQLITE_CANTOPEN.
+*/
+int sqliteOsOpenExclusive(const char *zFilename, OsFile *id, int delFlag){
+#if OS_UNIX
+ int rc;
+ if( access(zFilename, 0)==0 ){
+ return SQLITE_CANTOPEN;
+ }
+ id->dirfd = -1;
+ id->fd = open(zFilename,
+ O_RDWR|O_CREAT|O_EXCL|O_NOFOLLOW|O_LARGEFILE|O_BINARY, 0600);
+ if( id->fd<0 ){
+ return SQLITE_CANTOPEN;
+ }
+ sqliteOsEnterMutex();
+ rc = findLockInfo(id->fd, &id->pLock, &id->pOpen);
+ sqliteOsLeaveMutex();
+ if( rc ){
+ close(id->fd);
+ unlink(zFilename);
+ return SQLITE_NOMEM;
+ }
+ id->locked = 0;
+ if( delFlag ){
+ unlink(zFilename);
+ }
+ TRACE3("OPEN-EX %-3d %s\n", id->fd, zFilename);
+ OpenCounter(+1);
+ return SQLITE_OK;
+#endif
+#if OS_WIN
+ HANDLE h;
+ int fileflags;
+ if( delFlag ){
+ fileflags = FILE_ATTRIBUTE_TEMPORARY | FILE_FLAG_RANDOM_ACCESS
+ | FILE_FLAG_DELETE_ON_CLOSE;
+ }else{
+ fileflags = FILE_FLAG_RANDOM_ACCESS;
+ }
+ h = CreateFile(zFilename,
+ GENERIC_READ | GENERIC_WRITE,
+ 0,
+ NULL,
+ CREATE_ALWAYS,
+ fileflags,
+ NULL
+ );
+ if( h==INVALID_HANDLE_VALUE ){
+ return SQLITE_CANTOPEN;
+ }
+ id->h = h;
+ id->locked = 0;
+ OpenCounter(+1);
+ return SQLITE_OK;
+#endif
+#if OS_MAC
+ FSSpec fsSpec;
+# ifdef _LARGE_FILE
+ HFSUniStr255 dfName;
+ FSRef fsRef;
+ __path2fss(zFilename, &fsSpec);
+ if( HCreate(fsSpec.vRefNum, fsSpec.parID, fsSpec.name, 'SQLI', cDocumentFile) != noErr )
+ return SQLITE_CANTOPEN;
+ if( FSpMakeFSRef(&fsSpec, &fsRef) != noErr )
+ return SQLITE_CANTOPEN;
+ FSGetDataForkName(&dfName);
+ if( FSOpenFork(&fsRef, dfName.length, dfName.unicode,
+ fsRdWrPerm, &(id->refNum)) != noErr )
+ return SQLITE_CANTOPEN;
+# else
+ __path2fss(zFilename, &fsSpec);
+ if( HCreate(fsSpec.vRefNum, fsSpec.parID, fsSpec.name, 'SQLI', cDocumentFile) != noErr )
+ return SQLITE_CANTOPEN;
+ if( HOpenDF(fsSpec.vRefNum, fsSpec.parID, fsSpec.name, fsRdWrPerm, &(id->refNum)) != noErr )
+ return SQLITE_CANTOPEN;
+# endif
+ id->refNumRF = -1;
+ id->locked = 0;
+ id->delOnClose = delFlag;
+ if (delFlag)
+ id->pathToDel = sqliteOsFullPathname(zFilename);
+ OpenCounter(+1);
+ return SQLITE_OK;
+#endif
+}
+
+/*
+** Attempt to open a new file for read-only access.
+**
+** On success, write the file handle into *id and return SQLITE_OK.
+**
+** On failure, return SQLITE_CANTOPEN.
+*/
+int sqliteOsOpenReadOnly(const char *zFilename, OsFile *id){
+#if OS_UNIX
+ int rc;
+ id->dirfd = -1;
+ id->fd = open(zFilename, O_RDONLY|O_LARGEFILE|O_BINARY);
+ if( id->fd<0 ){
+ return SQLITE_CANTOPEN;
+ }
+ sqliteOsEnterMutex();
+ rc = findLockInfo(id->fd, &id->pLock, &id->pOpen);
+ sqliteOsLeaveMutex();
+ if( rc ){
+ close(id->fd);
+ return SQLITE_NOMEM;
+ }
+ id->locked = 0;
+ TRACE3("OPEN-RO %-3d %s\n", id->fd, zFilename);
+ OpenCounter(+1);
+ return SQLITE_OK;
+#endif
+#if OS_WIN
+ HANDLE h = CreateFile(zFilename,
+ GENERIC_READ,
+ 0,
+ NULL,
+ OPEN_EXISTING,
+ FILE_ATTRIBUTE_NORMAL | FILE_FLAG_RANDOM_ACCESS,
+ NULL
+ );
+ if( h==INVALID_HANDLE_VALUE ){
+ return SQLITE_CANTOPEN;
+ }
+ id->h = h;
+ id->locked = 0;
+ OpenCounter(+1);
+ return SQLITE_OK;
+#endif
+#if OS_MAC
+ FSSpec fsSpec;
+# ifdef _LARGE_FILE
+ HFSUniStr255 dfName;
+ FSRef fsRef;
+ if( __path2fss(zFilename, &fsSpec) != noErr )
+ return SQLITE_CANTOPEN;
+ if( FSpMakeFSRef(&fsSpec, &fsRef) != noErr )
+ return SQLITE_CANTOPEN;
+ FSGetDataForkName(&dfName);
+ if( FSOpenFork(&fsRef, dfName.length, dfName.unicode,
+ fsRdPerm, &(id->refNum)) != noErr )
+ return SQLITE_CANTOPEN;
+# else
+ __path2fss(zFilename, &fsSpec);
+ if( HOpenDF(fsSpec.vRefNum, fsSpec.parID, fsSpec.name, fsRdPerm, &(id->refNum)) != noErr )
+ return SQLITE_CANTOPEN;
+# endif
+ if( HOpenRF(fsSpec.vRefNum, fsSpec.parID, fsSpec.name, fsRdWrShPerm, &(id->refNumRF)) != noErr){
+ id->refNumRF = -1;
+ }
+ id->locked = 0;
+ id->delOnClose = 0;
+ OpenCounter(+1);
+ return SQLITE_OK;
+#endif
+}
+
+/*
+** Attempt to open a file descriptor for the directory that contains a
+** file. This file descriptor can be used to fsync() the directory
+** in order to make sure the creation of a new file is actually written
+** to disk.
+**
+** This routine is only meaningful for Unix. It is a no-op under
+** windows since windows does not support hard links.
+**
+** On success, the handle at *id for a previously opened file is
+** updated with the new directory file descriptor and SQLITE_OK is
+** returned.
+**
+** On failure, the function returns SQLITE_CANTOPEN and leaves
+** *id unchanged.
+*/
+int sqliteOsOpenDirectory(
+ const char *zDirname,
+ OsFile *id
+){
+#if OS_UNIX
+ if( id->fd<0 ){
+ /* Do not open the directory if the corresponding file is not already
+ ** open. */
+ return SQLITE_CANTOPEN;
+ }
+ assert( id->dirfd<0 );
+ id->dirfd = open(zDirname, O_RDONLY|O_BINARY, 0644);
+ if( id->dirfd<0 ){
+ return SQLITE_CANTOPEN;
+ }
+ TRACE3("OPENDIR %-3d %s\n", id->dirfd, zDirname);
+#endif
+ return SQLITE_OK;
+}
+
+/*
+** If the following global variable points to a string which is the
+** name of a directory, then that directory will be used to store
+** temporary files.
+*/
+const char *sqlite_temp_directory = 0;
+
+/*
+** Create a temporary file name in zBuf. zBuf must be big enough to
+** hold at least SQLITE_TEMPNAME_SIZE characters.
+*/
+int sqliteOsTempFileName(char *zBuf){
+#if OS_UNIX
+ static const char *azDirs[] = {
+ 0,
+ "/var/tmp",
+ "/usr/tmp",
+ "/tmp",
+ ".",
+ };
+ static unsigned char zChars[] =
+ "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "0123456789";
+ int i, j;
+ struct stat buf;
+ const char *zDir = ".";
+ azDirs[0] = sqlite_temp_directory;
+ for(i=0; i<sizeof(azDirs)/sizeof(azDirs[0]); i++){
+ if( azDirs[i]==0 ) continue;
+ if( stat(azDirs[i], &buf) ) continue;
+ if( !S_ISDIR(buf.st_mode) ) continue;
+ if( access(azDirs[i], 07) ) continue;
+ zDir = azDirs[i];
+ break;
+ }
+ do{
+ sprintf(zBuf, "%s/"TEMP_FILE_PREFIX, zDir);
+ j = strlen(zBuf);
+ sqliteRandomness(15, &zBuf[j]);
+ for(i=0; i<15; i++, j++){
+ zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ];
+ }
+ zBuf[j] = 0;
+ }while( access(zBuf,0)==0 );
+#endif
+#if OS_WIN
+ static char zChars[] =
+ "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "0123456789";
+ int i, j;
+ const char *zDir;
+ char zTempPath[SQLITE_TEMPNAME_SIZE];
+ if( sqlite_temp_directory==0 ){
+ GetTempPath(SQLITE_TEMPNAME_SIZE-30, zTempPath);
+ for(i=strlen(zTempPath); i>0 && zTempPath[i-1]=='\\'; i--){}
+ zTempPath[i] = 0;
+ zDir = zTempPath;
+ }else{
+ zDir = sqlite_temp_directory;
+ }
+ for(;;){
+ sprintf(zBuf, "%s\\"TEMP_FILE_PREFIX, zDir);
+ j = strlen(zBuf);
+ sqliteRandomness(15, &zBuf[j]);
+ for(i=0; i<15; i++, j++){
+ zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ];
+ }
+ zBuf[j] = 0;
+ if( !sqliteOsFileExists(zBuf) ) break;
+ }
+#endif
+#if OS_MAC
+ static char zChars[] =
+ "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "0123456789";
+ int i, j;
+ char *zDir;
+ char zTempPath[SQLITE_TEMPNAME_SIZE];
+ char zdirName[32];
+ CInfoPBRec infoRec;
+ Str31 dirName;
+ memset(&infoRec, 0, sizeof(infoRec));
+ memset(zTempPath, 0, SQLITE_TEMPNAME_SIZE);
+ if( sqlite_temp_directory!=0 ){
+ zDir = sqlite_temp_directory;
+ }else if( FindFolder(kOnSystemDisk, kTemporaryFolderType, kCreateFolder,
+ &(infoRec.dirInfo.ioVRefNum), &(infoRec.dirInfo.ioDrParID)) == noErr ){
+ infoRec.dirInfo.ioNamePtr = dirName;
+ do{
+ infoRec.dirInfo.ioFDirIndex = -1;
+ infoRec.dirInfo.ioDrDirID = infoRec.dirInfo.ioDrParID;
+ if( PBGetCatInfoSync(&infoRec) == noErr ){
+ CopyPascalStringToC(dirName, zdirName);
+ i = strlen(zdirName);
+ memmove(&(zTempPath[i+1]), zTempPath, strlen(zTempPath));
+ strcpy(zTempPath, zdirName);
+ zTempPath[i] = ':';
+ }else{
+ *zTempPath = 0;
+ break;
+ }
+ } while( infoRec.dirInfo.ioDrDirID != fsRtDirID );
+ zDir = zTempPath;
+ }
+ if( zDir[0]==0 ){
+ getcwd(zTempPath, SQLITE_TEMPNAME_SIZE-24);
+ zDir = zTempPath;
+ }
+ for(;;){
+ sprintf(zBuf, "%s"TEMP_FILE_PREFIX, zDir);
+ j = strlen(zBuf);
+ sqliteRandomness(15, &zBuf[j]);
+ for(i=0; i<15; i++, j++){
+ zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ];
+ }
+ zBuf[j] = 0;
+ if( !sqliteOsFileExists(zBuf) ) break;
+ }
+#endif
+ return SQLITE_OK;
+}
+
+/*
+** Close a file.
+*/
+int sqliteOsClose(OsFile *id){
+#if OS_UNIX
+ sqliteOsUnlock(id);
+ if( id->dirfd>=0 ) close(id->dirfd);
+ id->dirfd = -1;
+ sqliteOsEnterMutex();
+ if( id->pOpen->nLock ){
+ /* If there are outstanding locks, do not actually close the file just
+ ** yet because that would clear those locks. Instead, add the file
+ ** descriptor to pOpen->aPending. It will be automatically closed when
+ ** the last lock is cleared.
+ */
+ int *aNew;
+ struct openCnt *pOpen = id->pOpen;
+ pOpen->nPending++;
+ aNew = sqliteRealloc( pOpen->aPending, pOpen->nPending*sizeof(int) );
+ if( aNew==0 ){
+ /* If a malloc fails, just leak the file descriptor */
+ }else{
+ pOpen->aPending = aNew;
+ pOpen->aPending[pOpen->nPending-1] = id->fd;
+ }
+ }else{
+ /* There are no outstanding locks so we can close the file immediately */
+ close(id->fd);
+ }
+ releaseLockInfo(id->pLock);
+ releaseOpenCnt(id->pOpen);
+ sqliteOsLeaveMutex();
+ TRACE2("CLOSE %-3d\n", id->fd);
+ OpenCounter(-1);
+ return SQLITE_OK;
+#endif
+#if OS_WIN
+ CloseHandle(id->h);
+ OpenCounter(-1);
+ return SQLITE_OK;
+#endif
+#if OS_MAC
+ if( id->refNumRF!=-1 )
+ FSClose(id->refNumRF);
+# ifdef _LARGE_FILE
+ FSCloseFork(id->refNum);
+# else
+ FSClose(id->refNum);
+# endif
+ if( id->delOnClose ){
+ unlink(id->pathToDel);
+ sqliteFree(id->pathToDel);
+ }
+ OpenCounter(-1);
+ return SQLITE_OK;
+#endif
+}
+
+/*
+** Read data from a file into a buffer. Return SQLITE_OK if all
+** bytes were read successfully and SQLITE_IOERR if anything goes
+** wrong.
+*/
+int sqliteOsRead(OsFile *id, void *pBuf, int amt){
+#if OS_UNIX
+ int got;
+ SimulateIOError(SQLITE_IOERR);
+ TIMER_START;
+ got = read(id->fd, pBuf, amt);
+ TIMER_END;
+ TRACE4("READ %-3d %7d %d\n", id->fd, last_page, elapse);
+ SEEK(0);
+ /* if( got<0 ) got = 0; */
+ if( got==amt ){
+ return SQLITE_OK;
+ }else{
+ return SQLITE_IOERR;
+ }
+#endif
+#if OS_WIN
+ DWORD got;
+ SimulateIOError(SQLITE_IOERR);
+ TRACE2("READ %d\n", last_page);
+ if( !ReadFile(id->h, pBuf, amt, &got, 0) ){
+ got = 0;
+ }
+ if( got==(DWORD)amt ){
+ return SQLITE_OK;
+ }else{
+ return SQLITE_IOERR;
+ }
+#endif
+#if OS_MAC
+ int got;
+ SimulateIOError(SQLITE_IOERR);
+ TRACE2("READ %d\n", last_page);
+# ifdef _LARGE_FILE
+ FSReadFork(id->refNum, fsAtMark, 0, (ByteCount)amt, pBuf, (ByteCount*)&got);
+# else
+ got = amt;
+ FSRead(id->refNum, &got, pBuf);
+# endif
+ if( got==amt ){
+ return SQLITE_OK;
+ }else{
+ return SQLITE_IOERR;
+ }
+#endif
+}
+
+/*
+** Write data from a buffer into a file. Return SQLITE_OK on success
+** or some other error code on failure.
+*/
+int sqliteOsWrite(OsFile *id, const void *pBuf, int amt){
+#if OS_UNIX
+ int wrote = 0;
+ SimulateIOError(SQLITE_IOERR);
+ TIMER_START;
+ while( amt>0 && (wrote = write(id->fd, pBuf, amt))>0 ){
+ amt -= wrote;
+ pBuf = &((char*)pBuf)[wrote];
+ }
+ TIMER_END;
+ TRACE4("WRITE %-3d %7d %d\n", id->fd, last_page, elapse);
+ SEEK(0);
+ if( amt>0 ){
+ return SQLITE_FULL;
+ }
+ return SQLITE_OK;
+#endif
+#if OS_WIN
+ int rc;
+ DWORD wrote;
+ SimulateIOError(SQLITE_IOERR);
+ TRACE2("WRITE %d\n", last_page);
+ while( amt>0 && (rc = WriteFile(id->h, pBuf, amt, &wrote, 0))!=0 && wrote>0 ){
+ amt -= wrote;
+ pBuf = &((char*)pBuf)[wrote];
+ }
+ if( !rc || amt>(int)wrote ){
+ return SQLITE_FULL;
+ }
+ return SQLITE_OK;
+#endif
+#if OS_MAC
+ OSErr oserr;
+ int wrote = 0;
+ SimulateIOError(SQLITE_IOERR);
+ TRACE2("WRITE %d\n", last_page);
+ while( amt>0 ){
+# ifdef _LARGE_FILE
+ oserr = FSWriteFork(id->refNum, fsAtMark, 0,
+ (ByteCount)amt, pBuf, (ByteCount*)&wrote);
+# else
+ wrote = amt;
+ oserr = FSWrite(id->refNum, &wrote, pBuf);
+# endif
+ if( wrote == 0 || oserr != noErr)
+ break;
+ amt -= wrote;
+ pBuf = &((char*)pBuf)[wrote];
+ }
+ if( oserr != noErr || amt>wrote ){
+ return SQLITE_FULL;
+ }
+ return SQLITE_OK;
+#endif
+}
+
+/*
+** Move the read/write pointer in a file.
+*/
+int sqliteOsSeek(OsFile *id, off_t offset){
+ SEEK(offset/1024 + 1);
+#if OS_UNIX
+ lseek(id->fd, offset, SEEK_SET);
+ return SQLITE_OK;
+#endif
+#if OS_WIN
+ {
+ LONG upperBits = offset>>32;
+ LONG lowerBits = offset & 0xffffffff;
+ DWORD rc;
+ rc = SetFilePointer(id->h, lowerBits, &upperBits, FILE_BEGIN);
+ /* TRACE3("SEEK rc=0x%x upper=0x%x\n", rc, upperBits); */
+ }
+ return SQLITE_OK;
+#endif
+#if OS_MAC
+ {
+ off_t curSize;
+ if( sqliteOsFileSize(id, &curSize) != SQLITE_OK ){
+ return SQLITE_IOERR;
+ }
+ if( offset >= curSize ){
+ if( sqliteOsTruncate(id, offset+1) != SQLITE_OK ){
+ return SQLITE_IOERR;
+ }
+ }
+# ifdef _LARGE_FILE
+ if( FSSetForkPosition(id->refNum, fsFromStart, offset) != noErr ){
+# else
+ if( SetFPos(id->refNum, fsFromStart, offset) != noErr ){
+# endif
+ return SQLITE_IOERR;
+ }else{
+ return SQLITE_OK;
+ }
+ }
+#endif
+}
+
+/*
+** Make sure all writes to a particular file are committed to disk.
+**
+** Under Unix, also make sure that the directory entry for the file
+** has been created by fsync-ing the directory that contains the file.
+** If we do not do this and we encounter a power failure, the directory
+** entry for the journal might not exist after we reboot. The next
+** SQLite to access the file will not know that the journal exists (because
+** the directory entry for the journal was never created) and the transaction
+** will not roll back - possibly leading to database corruption.
+*/
+int sqliteOsSync(OsFile *id){
+#if OS_UNIX
+ SimulateIOError(SQLITE_IOERR);
+ TRACE2("SYNC %-3d\n", id->fd);
+ if( fsync(id->fd) ){
+ return SQLITE_IOERR;
+ }else{
+ if( id->dirfd>=0 ){
+ TRACE2("DIRSYNC %-3d\n", id->dirfd);
+ fsync(id->dirfd);
+ close(id->dirfd); /* Only need to sync once, so close the directory */
+ id->dirfd = -1; /* when we are done. */
+ }
+ return SQLITE_OK;
+ }
+#endif
+#if OS_WIN
+ if( FlushFileBuffers(id->h) ){
+ return SQLITE_OK;
+ }else{
+ return SQLITE_IOERR;
+ }
+#endif
+#if OS_MAC
+# ifdef _LARGE_FILE
+ if( FSFlushFork(id->refNum) != noErr ){
+# else
+ ParamBlockRec params;
+ memset(&params, 0, sizeof(ParamBlockRec));
+ params.ioParam.ioRefNum = id->refNum;
+ if( PBFlushFileSync(&params) != noErr ){
+# endif
+ return SQLITE_IOERR;
+ }else{
+ return SQLITE_OK;
+ }
+#endif
+}
+
+/*
+** Truncate an open file to a specified size
+*/
+int sqliteOsTruncate(OsFile *id, off_t nByte){
+ SimulateIOError(SQLITE_IOERR);
+#if OS_UNIX
+ return ftruncate(id->fd, nByte)==0 ? SQLITE_OK : SQLITE_IOERR;
+#endif
+#if OS_WIN
+ {
+ LONG upperBits = nByte>>32;
+ SetFilePointer(id->h, nByte, &upperBits, FILE_BEGIN);
+ SetEndOfFile(id->h);
+ }
+ return SQLITE_OK;
+#endif
+#if OS_MAC
+# ifdef _LARGE_FILE
+ if( FSSetForkSize(id->refNum, fsFromStart, nByte) != noErr){
+# else
+ if( SetEOF(id->refNum, nByte) != noErr ){
+# endif
+ return SQLITE_IOERR;
+ }else{
+ return SQLITE_OK;
+ }
+#endif
+}
+
+/*
+** Determine the current size of a file in bytes
+*/
+int sqliteOsFileSize(OsFile *id, off_t *pSize){
+#if OS_UNIX
+ struct stat buf;
+ SimulateIOError(SQLITE_IOERR);
+ if( fstat(id->fd, &buf)!=0 ){
+ return SQLITE_IOERR;
+ }
+ *pSize = buf.st_size;
+ return SQLITE_OK;
+#endif
+#if OS_WIN
+ DWORD upperBits, lowerBits;
+ SimulateIOError(SQLITE_IOERR);
+ lowerBits = GetFileSize(id->h, &upperBits);
+ *pSize = (((off_t)upperBits)<<32) + lowerBits;
+ return SQLITE_OK;
+#endif
+#if OS_MAC
+# ifdef _LARGE_FILE
+ if( FSGetForkSize(id->refNum, pSize) != noErr){
+# else
+ if( GetEOF(id->refNum, pSize) != noErr ){
+# endif
+ return SQLITE_IOERR;
+ }else{
+ return SQLITE_OK;
+ }
+#endif
+}
+
+#if OS_WIN
+/*
+** Return true (non-zero) if we are running under WinNT, Win2K or WinXP.
+** Return false (zero) for Win95, Win98, or WinME.
+**
+** Here is an interesting observation: Win95, Win98, and WinME lack
+** the LockFileEx() API. But we can still statically link against that
+** API as long as we don't call it when running Win95/98/ME. A call to
+** this routine is used to determine if the host is Win95/98/ME or
+** WinNT/2K/XP so that we will know whether or not we can safely call
+** the LockFileEx() API.
+*/
+int isNT(void){
+ static int osType = 0; /* 0=unknown 1=win95 2=winNT */
+ if( osType==0 ){
+ OSVERSIONINFO sInfo;
+ sInfo.dwOSVersionInfoSize = sizeof(sInfo);
+ GetVersionEx(&sInfo);
+ osType = sInfo.dwPlatformId==VER_PLATFORM_WIN32_NT ? 2 : 1;
+ }
+ return osType==2;
+}
+#endif
+
+/*
+** Windows file locking notes: [similar issues apply to MacOS]
+**
+** We cannot use LockFileEx() or UnlockFileEx() on Win95/98/ME because
+** those functions are not available. So we use only LockFile() and
+** UnlockFile().
+**
+** LockFile() prevents not just writing but also reading by other processes.
+** (This is a design error on the part of Windows, but there is nothing
+** we can do about that.) So the region used for locking is at the
+** end of the file where it is unlikely to ever interfere with an
+** actual read attempt.
+**
+** A database read lock is obtained by locking a single randomly-chosen
+** byte out of a specific range of bytes. The lock byte is obtained at
+** random so two separate readers can probably access the file at the
+** same time, unless they are unlucky and choose the same lock byte.
+** A database write lock is obtained by locking all bytes in the range.
+** There can only be one writer.
+**
+** A lock is obtained on the first byte of the lock range before acquiring
+** either a read lock or a write lock. This prevents two processes from
+** attempting to get a lock at the same time. The semantics of
+** sqliteOsReadLock() require that if there is already a write lock, that
+** lock is converted into a read lock atomically. The lock on the first
+** byte allows us to drop the old write lock and get the read lock without
+** another process jumping into the middle and messing us up. The same
+** argument applies to sqliteOsWriteLock().
+**
+** On WinNT/2K/XP systems, LockFileEx() and UnlockFileEx() are available,
+** which means we can use reader/writer locks. When reader/writer locks
+** are used, the lock is placed on the same range of bytes that is used
+** for probabilistic locking in Win95/98/ME. Hence, the locking scheme
+** will support two or more Win95 readers or two or more WinNT readers.
+** But a single Win95 reader will lock out all WinNT readers and a single
+** WinNT reader will lock out all other Win95 readers.
+**
+** Note: On MacOS we use the resource fork for locking.
+**
+** The following #defines specify the range of bytes used for locking.
+** N_LOCKBYTE is the number of bytes available for doing the locking.
+** The first byte used to hold the lock while the lock is changing does
+** not count toward this number. FIRST_LOCKBYTE is the address of
+** the first byte in the range of bytes used for locking.
+*/
+#define N_LOCKBYTE 10239
+#if OS_MAC
+# define FIRST_LOCKBYTE (0x000fffff - N_LOCKBYTE)
+#else
+# define FIRST_LOCKBYTE (0xffffffff - N_LOCKBYTE)
+#endif
+
+/*
+** Change the status of the lock on the file "id" to be a readlock.
+** If the file was write locked, then this reduces the lock to a read.
+** If the file was read locked, then this acquires a new read lock.
+**
+** Return SQLITE_OK on success and SQLITE_BUSY on failure. If this
+** library was compiled with large file support (LFS) but LFS is not
+** available on the host, then an SQLITE_NOLFS is returned.
+*/
+int sqliteOsReadLock(OsFile *id){
+#if OS_UNIX
+ int rc;
+ sqliteOsEnterMutex();
+ if( id->pLock->cnt>0 ){
+ if( !id->locked ){
+ id->pLock->cnt++;
+ id->locked = 1;
+ id->pOpen->nLock++;
+ }
+ rc = SQLITE_OK;
+ }else if( id->locked || id->pLock->cnt==0 ){
+ struct flock lock;
+ int s;
+ lock.l_type = F_RDLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = lock.l_len = 0L;
+ s = fcntl(id->fd, F_SETLK, &lock);
+ if( s!=0 ){
+ rc = (errno==EINVAL) ? SQLITE_NOLFS : SQLITE_BUSY;
+ }else{
+ rc = SQLITE_OK;
+ if( !id->locked ){
+ id->pOpen->nLock++;
+ id->locked = 1;
+ }
+ id->pLock->cnt = 1;
+ }
+ }else{
+ rc = SQLITE_BUSY;
+ }
+ sqliteOsLeaveMutex();
+ return rc;
+#endif
+#if OS_WIN
+ int rc;
+ if( id->locked>0 ){
+ rc = SQLITE_OK;
+ }else{
+ int lk;
+ int res;
+ int cnt = 100;
+ sqliteRandomness(sizeof(lk), &lk);
+ lk = (lk & 0x7fffffff)%N_LOCKBYTE + 1;
+ while( cnt-->0 && (res = LockFile(id->h, FIRST_LOCKBYTE, 0, 1, 0))==0 ){
+ Sleep(1);
+ }
+ if( res ){
+ UnlockFile(id->h, FIRST_LOCKBYTE+1, 0, N_LOCKBYTE, 0);
+ if( isNT() ){
+ OVERLAPPED ovlp;
+ ovlp.Offset = FIRST_LOCKBYTE+1;
+ ovlp.OffsetHigh = 0;
+ ovlp.hEvent = 0;
+ res = LockFileEx(id->h, LOCKFILE_FAIL_IMMEDIATELY,
+ 0, N_LOCKBYTE, 0, &ovlp);
+ }else{
+ res = LockFile(id->h, FIRST_LOCKBYTE+lk, 0, 1, 0);
+ }
+ UnlockFile(id->h, FIRST_LOCKBYTE, 0, 1, 0);
+ }
+ if( res ){
+ id->locked = lk;
+ rc = SQLITE_OK;
+ }else{
+ rc = SQLITE_BUSY;
+ }
+ }
+ return rc;
+#endif
+#if OS_MAC
+ int rc;
+ if( id->locked>0 || id->refNumRF == -1 ){
+ rc = SQLITE_OK;
+ }else{
+ int lk;
+ OSErr res;
+ int cnt = 5;
+ ParamBlockRec params;
+ sqliteRandomness(sizeof(lk), &lk);
+ lk = (lk & 0x7fffffff)%N_LOCKBYTE + 1;
+ memset(&params, 0, sizeof(params));
+ params.ioParam.ioRefNum = id->refNumRF;
+ params.ioParam.ioPosMode = fsFromStart;
+ params.ioParam.ioPosOffset = FIRST_LOCKBYTE;
+ params.ioParam.ioReqCount = 1;
+ while( cnt-->0 && (res = PBLockRangeSync(&params))!=noErr ){
+ UInt32 finalTicks;
+ Delay(1, &finalTicks); /* 1/60 sec */
+ }
+ if( res == noErr ){
+ params.ioParam.ioPosOffset = FIRST_LOCKBYTE+1;
+ params.ioParam.ioReqCount = N_LOCKBYTE;
+ PBUnlockRangeSync(&params);
+ params.ioParam.ioPosOffset = FIRST_LOCKBYTE+lk;
+ params.ioParam.ioReqCount = 1;
+ res = PBLockRangeSync(&params);
+ params.ioParam.ioPosOffset = FIRST_LOCKBYTE;
+ params.ioParam.ioReqCount = 1;
+ PBUnlockRangeSync(&params);
+ }
+ if( res == noErr ){
+ id->locked = lk;
+ rc = SQLITE_OK;
+ }else{
+ rc = SQLITE_BUSY;
+ }
+ }
+ return rc;
+#endif
+}
+
+/*
+** Change the lock status to be an exclusive or write lock. Return
+** SQLITE_OK on success and SQLITE_BUSY on failure. If this
+** library was compiled with large file support (LFS) but LFS is not
+** available on the host, then an SQLITE_NOLFS is returned.
+*/
+int sqliteOsWriteLock(OsFile *id){
+#if OS_UNIX
+ int rc;
+ sqliteOsEnterMutex();
+ if( id->pLock->cnt==0 || (id->pLock->cnt==1 && id->locked==1) ){
+ struct flock lock;
+ int s;
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = lock.l_len = 0L;
+ s = fcntl(id->fd, F_SETLK, &lock);
+ if( s!=0 ){
+ rc = (errno==EINVAL) ? SQLITE_NOLFS : SQLITE_BUSY;
+ }else{
+ rc = SQLITE_OK;
+ if( !id->locked ){
+ id->pOpen->nLock++;
+ id->locked = 1;
+ }
+ id->pLock->cnt = -1;
+ }
+ }else{
+ rc = SQLITE_BUSY;
+ }
+ sqliteOsLeaveMutex();
+ return rc;
+#endif
+#if OS_WIN
+ int rc;
+ if( id->locked<0 ){
+ rc = SQLITE_OK;
+ }else{
+ int res;
+ int cnt = 100;
+ while( cnt-->0 && (res = LockFile(id->h, FIRST_LOCKBYTE, 0, 1, 0))==0 ){
+ Sleep(1);
+ }
+ if( res ){
+ if( id->locked>0 ){
+ if( isNT() ){
+ UnlockFile(id->h, FIRST_LOCKBYTE+1, 0, N_LOCKBYTE, 0);
+ }else{
+ res = UnlockFile(id->h, FIRST_LOCKBYTE + id->locked, 0, 1, 0);
+ }
+ }
+ if( res ){
+ res = LockFile(id->h, FIRST_LOCKBYTE+1, 0, N_LOCKBYTE, 0);
+ }else{
+ res = 0;
+ }
+ UnlockFile(id->h, FIRST_LOCKBYTE, 0, 1, 0);
+ }
+ if( res ){
+ id->locked = -1;
+ rc = SQLITE_OK;
+ }else{
+ rc = SQLITE_BUSY;
+ }
+ }
+ return rc;
+#endif
+#if OS_MAC
+ int rc;
+ if( id->locked<0 || id->refNumRF == -1 ){
+ rc = SQLITE_OK;
+ }else{
+ OSErr res;
+ int cnt = 5;
+ ParamBlockRec params;
+ memset(&params, 0, sizeof(params));
+ params.ioParam.ioRefNum = id->refNumRF;
+ params.ioParam.ioPosMode = fsFromStart;
+ params.ioParam.ioPosOffset = FIRST_LOCKBYTE;
+ params.ioParam.ioReqCount = 1;
+ while( cnt-->0 && (res = PBLockRangeSync(&params))!=noErr ){
+ UInt32 finalTicks;
+ Delay(1, &finalTicks); /* 1/60 sec */
+ }
+ if( res == noErr ){
+ params.ioParam.ioPosOffset = FIRST_LOCKBYTE + id->locked;
+ params.ioParam.ioReqCount = 1;
+ if( id->locked==0
+ || PBUnlockRangeSync(&params)==noErr ){
+ params.ioParam.ioPosOffset = FIRST_LOCKBYTE+1;
+ params.ioParam.ioReqCount = N_LOCKBYTE;
+ res = PBLockRangeSync(&params);
+ }else{
+ res = afpRangeNotLocked;
+ }
+ params.ioParam.ioPosOffset = FIRST_LOCKBYTE;
+ params.ioParam.ioReqCount = 1;
+ PBUnlockRangeSync(&params);
+ }
+ if( res == noErr ){
+ id->locked = -1;
+ rc = SQLITE_OK;
+ }else{
+ rc = SQLITE_BUSY;
+ }
+ }
+ return rc;
+#endif
+}
+
+/*
+** Unlock the given file descriptor. If the file descriptor was
+** not previously locked, then this routine is a no-op. If this
+** library was compiled with large file support (LFS) but LFS is not
+** available on the host, then an SQLITE_NOLFS is returned.
+*/
+int sqliteOsUnlock(OsFile *id){
+#if OS_UNIX
+ int rc;
+ if( !id->locked ) return SQLITE_OK;
+ sqliteOsEnterMutex();
+ assert( id->pLock->cnt!=0 );
+ if( id->pLock->cnt>1 ){
+ id->pLock->cnt--;
+ rc = SQLITE_OK;
+ }else{
+ struct flock lock;
+ int s;
+ lock.l_type = F_UNLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = lock.l_len = 0L;
+ s = fcntl(id->fd, F_SETLK, &lock);
+ if( s!=0 ){
+ rc = (errno==EINVAL) ? SQLITE_NOLFS : SQLITE_BUSY;
+ }else{
+ rc = SQLITE_OK;
+ id->pLock->cnt = 0;
+ }
+ }
+ if( rc==SQLITE_OK ){
+ /* Decrement the count of locks against this same file. When the
+ ** count reaches zero, close any other file descriptors whose close
+ ** was deferred because of outstanding locks.
+ */
+ struct openCnt *pOpen = id->pOpen;
+ pOpen->nLock--;
+ assert( pOpen->nLock>=0 );
+ if( pOpen->nLock==0 && pOpen->nPending>0 ){
+ int i;
+ for(i=0; i<pOpen->nPending; i++){
+ close(pOpen->aPending[i]);
+ }
+ sqliteFree(pOpen->aPending);
+ pOpen->nPending = 0;
+ pOpen->aPending = 0;
+ }
+ }
+ sqliteOsLeaveMutex();
+ id->locked = 0;
+ return rc;
+#endif
+#if OS_WIN
+ int rc;
+ if( id->locked==0 ){
+ rc = SQLITE_OK;
+ }else if( isNT() || id->locked<0 ){
+ UnlockFile(id->h, FIRST_LOCKBYTE+1, 0, N_LOCKBYTE, 0);
+ rc = SQLITE_OK;
+ id->locked = 0;
+ }else{
+ UnlockFile(id->h, FIRST_LOCKBYTE+id->locked, 0, 1, 0);
+ rc = SQLITE_OK;
+ id->locked = 0;
+ }
+ return rc;
+#endif
+#if OS_MAC
+ int rc;
+ ParamBlockRec params;
+ memset(&params, 0, sizeof(params));
+ params.ioParam.ioRefNum = id->refNumRF;
+ params.ioParam.ioPosMode = fsFromStart;
+ if( id->locked==0 || id->refNumRF == -1 ){
+ rc = SQLITE_OK;
+ }else if( id->locked<0 ){
+ params.ioParam.ioPosOffset = FIRST_LOCKBYTE+1;
+ params.ioParam.ioReqCount = N_LOCKBYTE;
+ PBUnlockRangeSync(&params);
+ rc = SQLITE_OK;
+ id->locked = 0;
+ }else{
+ params.ioParam.ioPosOffset = FIRST_LOCKBYTE+id->locked;
+ params.ioParam.ioReqCount = 1;
+ PBUnlockRangeSync(&params);
+ rc = SQLITE_OK;
+ id->locked = 0;
+ }
+ return rc;
+#endif
+}
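+
+/*
+** Illustrative sketch of the locking protocol above; nothing in the library
+** runs this and the file name is arbitrary.  A handle starts out unlocked,
+** may take a shared read lock, may upgrade it to an exclusive write lock,
+** and finally drops whatever it holds with sqliteOsUnlock():
+**
+**     OsFile f;
+**     int readOnly = 0;
+**     if( sqliteOsOpenReadWrite("example.db", &f, &readOnly)==SQLITE_OK ){
+**       if( sqliteOsReadLock(&f)==SQLITE_OK ){
+**         if( sqliteOsWriteLock(&f)==SQLITE_OK ){
+**           sqliteOsUnlock(&f);
+**         }
+**       }
+**       sqliteOsClose(&f);
+**     }
+*/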
+
+/*
+** Get information to seed the random number generator. The seed
+** is written into the buffer zBuf[256]. The calling function must
+** supply a sufficiently large buffer.
+*/
+int sqliteOsRandomSeed(char *zBuf){
+ /* We have to initialize zBuf to prevent valgrind from reporting
+ ** errors. The reports issued by valgrind are incorrect - we would
+ ** prefer that the randomness be increased by making use of the
+ ** uninitialized space in zBuf - but valgrind errors tend to worry
+ ** some users. Rather than argue, it seems easier just to initialize
+ ** the whole array and silence valgrind, even if that means less randomness
+ ** in the random seed.
+ **
+ ** When testing, initializing zBuf[] to zero is all we do. That means
+  ** that we always use the same random number sequence. This makes the
+ ** tests repeatable.
+ */
+ memset(zBuf, 0, 256);
+#if OS_UNIX && !defined(SQLITE_TEST)
+ {
+ int pid;
+ time((time_t*)zBuf);
+ pid = getpid();
+ memcpy(&zBuf[sizeof(time_t)], &pid, sizeof(pid));
+ }
+#endif
+#if OS_WIN && !defined(SQLITE_TEST)
+ GetSystemTime((LPSYSTEMTIME)zBuf);
+#endif
+#if OS_MAC
+ {
+ int pid;
+ Microseconds((UnsignedWide*)zBuf);
+ pid = getpid();
+ memcpy(&zBuf[sizeof(UnsignedWide)], &pid, sizeof(pid));
+ }
+#endif
+ return SQLITE_OK;
+}
+
+/*
+** Sleep for a little while. Return the amount of time slept.
+*/
+int sqliteOsSleep(int ms){
+#if OS_UNIX
+#if defined(HAVE_USLEEP) && HAVE_USLEEP
+ usleep(ms*1000);
+ return ms;
+#else
+ sleep((ms+999)/1000);
+ return 1000*((ms+999)/1000);
+#endif
+#endif
+#if OS_WIN
+ Sleep(ms);
+ return ms;
+#endif
+#if OS_MAC
+ UInt32 finalTicks;
+ UInt32 ticks = (((UInt32)ms+16)*3)/50; /* 1/60 sec per tick */
+ Delay(ticks, &finalTicks);
+ return (int)((ticks*50)/3);
+#endif
+}
+
+/*
+** Static variables used for thread synchronization
+*/
+static int inMutex = 0;
+#ifdef SQLITE_UNIX_THREADS
+ static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+#endif
+#ifdef SQLITE_W32_THREADS
+ static CRITICAL_SECTION cs;
+#endif
+#ifdef SQLITE_MACOS_MULTITASKING
+ static MPCriticalRegionID criticalRegion;
+#endif
+
+/*
+** The following pair of routines implements mutual exclusion for
+** multi-threaded processes. Only a single thread is allowed to
+** execute code that is surrounded by EnterMutex() and LeaveMutex().
+**
+** SQLite uses only a single Mutex. There is not much critical
+** code and what little there is executes quickly and without blocking.
+*/
+void sqliteOsEnterMutex(){
+#ifdef SQLITE_UNIX_THREADS
+ pthread_mutex_lock(&mutex);
+#endif
+#ifdef SQLITE_W32_THREADS
+ static int isInit = 0;
+ while( !isInit ){
+ static long lock = 0;
+ if( InterlockedIncrement(&lock)==1 ){
+ InitializeCriticalSection(&cs);
+ isInit = 1;
+ }else{
+ Sleep(1);
+ }
+ }
+ EnterCriticalSection(&cs);
+#endif
+#ifdef SQLITE_MACOS_MULTITASKING
+ static volatile int notInit = 1;
+ if( notInit ){
+ if( notInit == 2 ) /* as close as you can get to thread safe init */
+ MPYield();
+ else{
+ notInit = 2;
+ MPCreateCriticalRegion(&criticalRegion);
+ notInit = 0;
+ }
+ }
+ MPEnterCriticalRegion(criticalRegion, kDurationForever);
+#endif
+ assert( !inMutex );
+ inMutex = 1;
+}
+void sqliteOsLeaveMutex(){
+ assert( inMutex );
+ inMutex = 0;
+#ifdef SQLITE_UNIX_THREADS
+ pthread_mutex_unlock(&mutex);
+#endif
+#ifdef SQLITE_W32_THREADS
+ LeaveCriticalSection(&cs);
+#endif
+#ifdef SQLITE_MACOS_MULTITASKING
+ MPExitCriticalRegion(criticalRegion);
+#endif
+}
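+
+/*
+** Illustrative sketch (nothing in the library runs this; sharedCounter is a
+** hypothetical variable): a caller brackets a short critical section with
+** the pair above, for example
+**
+**     sqliteOsEnterMutex();
+**     sharedCounter++;
+**     sqliteOsLeaveMutex();
+**
+** The assert()s above check that calls do not nest, so the bracketed region
+** must not call sqliteOsEnterMutex() again.
+*/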
+
+/*
+** Turn a relative pathname into a full pathname. Return a pointer
+** to the full pathname stored in space obtained from sqliteMalloc().
+** The calling function is responsible for freeing this space once it
+** is no longer needed.
+*/
+char *sqliteOsFullPathname(const char *zRelative){
+#if OS_UNIX
+ char *zFull = 0;
+ if( zRelative[0]=='/' ){
+ sqliteSetString(&zFull, zRelative, (char*)0);
+ }else{
+ char zBuf[5000];
+ sqliteSetString(&zFull, getcwd(zBuf, sizeof(zBuf)), "/", zRelative,
+ (char*)0);
+ }
+ return zFull;
+#endif
+#if OS_WIN
+ char *zNotUsed;
+ char *zFull;
+ int nByte;
+ nByte = GetFullPathName(zRelative, 0, 0, &zNotUsed) + 1;
+ zFull = sqliteMalloc( nByte );
+ if( zFull==0 ) return 0;
+ GetFullPathName(zRelative, nByte, zFull, &zNotUsed);
+ return zFull;
+#endif
+#if OS_MAC
+ char *zFull = 0;
+ if( zRelative[0]==':' ){
+ char zBuf[_MAX_PATH+1];
+ sqliteSetString(&zFull, getcwd(zBuf, sizeof(zBuf)), &(zRelative[1]),
+ (char*)0);
+ }else{
+ if( strchr(zRelative, ':') ){
+ sqliteSetString(&zFull, zRelative, (char*)0);
+ }else{
+ char zBuf[_MAX_PATH+1];
+ sqliteSetString(&zFull, getcwd(zBuf, sizeof(zBuf)), zRelative, (char*)0);
+ }
+ }
+ return zFull;
+#endif
+}
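+
+/*
+** Illustrative use of the routine above (the relative path is arbitrary):
+** the returned string comes from sqliteMalloc() and the caller releases it
+** with sqliteFree() when done.
+**
+**     char *zAbs = sqliteOsFullPathname("data/test.db");
+**     if( zAbs ){
+**       ...
+**       sqliteFree(zAbs);
+**     }
+*/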
+
+/*
+** The following variable, if set to a non-zero value, becomes the result
+** returned from sqliteOsCurrentTime(). This is used for testing.
+*/
+#ifdef SQLITE_TEST
+int sqlite_current_time = 0;
+#endif
+
+/*
+** Find the current time (in Universal Coordinated Time). Write the
+** current time and date as a Julian Day number into *prNow and
+** return 0. Return 1 if the time and date cannot be found.
+*/
+int sqliteOsCurrentTime(double *prNow){
+#if OS_UNIX
+ time_t t;
+ time(&t);
+ *prNow = t/86400.0 + 2440587.5;
+#endif
+#if OS_WIN
+ FILETIME ft;
+ /* FILETIME structure is a 64-bit value representing the number of
+ 100-nanosecond intervals since January 1, 1601 (= JD 2305813.5).
+ */
+ double now;
+ GetSystemTimeAsFileTime( &ft );
+ now = ((double)ft.dwHighDateTime) * 4294967296.0;
+ *prNow = (now + ft.dwLowDateTime)/864000000000.0 + 2305813.5;
+#endif
+#ifdef SQLITE_TEST
+ if( sqlite_current_time ){
+ *prNow = sqlite_current_time/86400.0 + 2440587.5;
+ }
+#endif
+ return 0;
+}
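+
+/*
+** Worked example for the conversion above (illustrative only): Julian Day
+** 2440587.5 is 1970-01-01 00:00:00 UTC, the Unix epoch, so t==0 yields
+** 0/86400.0 + 2440587.5 == 2440587.5, and every further 86400 seconds of
+** t adds exactly 1.0 to *prNow.
+*/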
diff --git a/usr/src/cmd/svc/configd/sqlite/src/os.h b/usr/src/cmd/svc/configd/sqlite/src/os.h
new file mode 100644
index 0000000000..e50e3d713b
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/os.h
@@ -0,0 +1,194 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 16
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This header file (together with its companion C source-code file
+** "os.c") attempts to abstract the underlying operating system so that
+** the SQLite library will work on both POSIX and Windows systems.
+*/
+#ifndef _SQLITE_OS_H_
+#define _SQLITE_OS_H_
+
+/*
+** Helpful hint: To get this to compile on HP/UX, add -D_INCLUDE_POSIX_SOURCE
+** to the compiler command line.
+*/
+
+/*
+** These #defines should enable >2GB file support on Posix if the
+** underlying operating system supports it. If the OS lacks
+** large file support, or if the OS is Windows, these should be no-ops.
+**
+** Large file support can be disabled using the -DSQLITE_DISABLE_LFS switch
+** on the compiler command line. This is necessary if you are compiling
+** on a recent machine (ex: RedHat 7.2) but you want your code to work
+** on an older machine (ex: RedHat 6.0). If you compile on RedHat 7.2
+** without this option, LFS is enabled. But LFS does not exist in the kernel
+** in RedHat 6.0, so the code won't work. Hence, for maximum binary
+** portability you should omit LFS.
+**
+** The same is true for MacOS. LFS is only supported on MacOS 9 and later.
+*/
+#ifndef SQLITE_DISABLE_LFS
+# define _LARGE_FILE 1
+# ifndef _FILE_OFFSET_BITS
+# define _FILE_OFFSET_BITS 64
+# endif
+# define _LARGEFILE_SOURCE 1
+#endif
+
+/*
+** Temporary files are named starting with this prefix followed by 16 random
+** alphanumeric characters, and no file extension. They are stored in the
+** OS's standard temporary file directory, and are deleted prior to exit.
+** If sqlite is being embedded in another program, you may wish to change the
+** prefix to reflect your program's name, so that if your program exits
+** prematurely, old temporary files can be easily identified. This can be done
+** using -DTEMP_FILE_PREFIX=myprefix_ on the compiler command line.
+*/
+#ifndef TEMP_FILE_PREFIX
+# define TEMP_FILE_PREFIX "sqlite_"
+#endif
+
+/*
+** Figure out if we are dealing with Unix, Windows or MacOS.
+**
+** N.B. MacOS means Mac Classic (or Carbon). Treat Darwin (OS X) as Unix.
+** The MacOS build is designed to use CodeWarrior (tested with v8)
+*/
+#ifndef OS_UNIX
+# ifndef OS_WIN
+# ifndef OS_MAC
+# if defined(__MACOS__)
+# define OS_MAC 1
+# define OS_WIN 0
+# define OS_UNIX 0
+# elif defined(_WIN32) || defined(WIN32) || defined(__CYGWIN__) || defined(__MINGW32__) || defined(__BORLANDC__)
+# define OS_MAC 0
+# define OS_WIN 1
+# define OS_UNIX 0
+# else
+# define OS_MAC 0
+# define OS_WIN 0
+# define OS_UNIX 1
+# endif
+# else
+# define OS_WIN 0
+# define OS_UNIX 0
+# endif
+# else
+# define OS_MAC 0
+# define OS_UNIX 0
+# endif
+#else
+# define OS_MAC 0
+# ifndef OS_WIN
+# define OS_WIN 0
+# endif
+#endif
+
+/*
+** A handle for an open file is stored in an OsFile object.
+*/
+#if OS_UNIX
+# include <sys/types.h>
+# include <sys/stat.h>
+# include <fcntl.h>
+# include <unistd.h>
+ typedef struct OsFile OsFile;
+ struct OsFile {
+ struct openCnt *pOpen; /* Info about all open fd's on this inode */
+ struct lockInfo *pLock; /* Info about locks on this inode */
+ int fd; /* The file descriptor */
+ int locked; /* True if this instance holds the lock */
+ int dirfd; /* File descriptor for the directory */
+ };
+# define SQLITE_TEMPNAME_SIZE 200
+# if defined(HAVE_USLEEP) && HAVE_USLEEP
+# define SQLITE_MIN_SLEEP_MS 1
+# else
+# define SQLITE_MIN_SLEEP_MS 1000
+# endif
+#endif
+
+#if OS_WIN
+#include <windows.h>
+#include <winbase.h>
+ typedef struct OsFile OsFile;
+ struct OsFile {
+ HANDLE h; /* Handle for accessing the file */
+ int locked; /* 0: unlocked, <0: write lock, >0: read lock */
+ };
+# if defined(_MSC_VER) || defined(__BORLANDC__)
+ typedef __int64 off_t;
+# else
+# if !defined(_CYGWIN_TYPES_H)
+ typedef long long off_t;
+# if defined(__MINGW32__)
+# define _OFF_T_
+# endif
+# endif
+# endif
+# define SQLITE_TEMPNAME_SIZE (MAX_PATH+50)
+# define SQLITE_MIN_SLEEP_MS 1
+#endif
+
+#if OS_MAC
+# include <unistd.h>
+# include <Files.h>
+ typedef struct OsFile OsFile;
+ struct OsFile {
+ SInt16 refNum; /* Data fork/file reference number */
+ SInt16 refNumRF; /* Resource fork reference number (for locking) */
+ int locked; /* 0: unlocked, <0: write lock, >0: read lock */
+ int delOnClose; /* True if file is to be deleted on close */
+ char *pathToDel; /* Name of file to delete on close */
+ };
+# ifdef _LARGE_FILE
+ typedef SInt64 off_t;
+# else
+ typedef SInt32 off_t;
+# endif
+# define SQLITE_TEMPNAME_SIZE _MAX_PATH
+# define SQLITE_MIN_SLEEP_MS 17
+#endif
+
+int sqliteOsDelete(const char*);
+int sqliteOsFileExists(const char*);
+int sqliteOsFileRename(const char*, const char*);
+int sqliteOsOpenReadWrite(const char*, OsFile*, int*);
+int sqliteOsOpenExclusive(const char*, OsFile*, int);
+int sqliteOsOpenReadOnly(const char*, OsFile*);
+int sqliteOsOpenDirectory(const char*, OsFile*);
+int sqliteOsTempFileName(char*);
+int sqliteOsClose(OsFile*);
+int sqliteOsRead(OsFile*, void*, int amt);
+int sqliteOsWrite(OsFile*, const void*, int amt);
+int sqliteOsSeek(OsFile*, off_t offset);
+int sqliteOsSync(OsFile*);
+int sqliteOsTruncate(OsFile*, off_t size);
+int sqliteOsFileSize(OsFile*, off_t *pSize);
+int sqliteOsReadLock(OsFile*);
+int sqliteOsWriteLock(OsFile*);
+int sqliteOsUnlock(OsFile*);
+int sqliteOsRandomSeed(char*);
+int sqliteOsSleep(int ms);
+int sqliteOsCurrentTime(double*);
+void sqliteOsEnterMutex(void);
+void sqliteOsLeaveMutex(void);
+char *sqliteOsFullPathname(const char*);
+
+
+
+#endif /* _SQLITE_OS_H_ */
diff --git a/usr/src/cmd/svc/configd/sqlite/src/pager.c b/usr/src/cmd/svc/configd/sqlite/src/pager.c
new file mode 100644
index 0000000000..276dd0ae42
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/pager.c
@@ -0,0 +1,2229 @@
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This is the implementation of the page cache subsystem or "pager".
+**
+** The pager is used to access a database disk file. It implements
+** atomic commit and rollback through the use of a journal file that
+** is separate from the database file. The pager also implements file
+** locking to prevent two processes from writing the same database
+** file simultaneously, or one process from reading the database while
+** another is writing.
+**
+** @(#) $Id: pager.c,v 1.101 2004/02/25 02:20:41 drh Exp $
+*/
+#include "os.h" /* Must be first to enable large file support */
+#include "sqliteInt.h"
+#include "pager.h"
+#include <assert.h>
+#include <string.h>
+
+/*
+** Macros for troubleshooting. Normally turned off
+*/
+#if 0
+static Pager *mainPager = 0;
+#define SET_PAGER(X) if( mainPager==0 ) mainPager = (X)
+#define CLR_PAGER(X) if( mainPager==(X) ) mainPager = 0
+#define TRACE1(X) if( pPager==mainPager ) fprintf(stderr,X)
+#define TRACE2(X,Y) if( pPager==mainPager ) fprintf(stderr,X,Y)
+#define TRACE3(X,Y,Z) if( pPager==mainPager ) fprintf(stderr,X,Y,Z)
+#else
+#define SET_PAGER(X)
+#define CLR_PAGER(X)
+#define TRACE1(X)
+#define TRACE2(X,Y)
+#define TRACE3(X,Y,Z)
+#endif
+
+
+/*
+** The page cache as a whole is always in one of the following
+** states:
+**
+** SQLITE_UNLOCK The page cache is not currently reading or
+** writing the database file. There is no
+** data held in memory. This is the initial
+** state.
+**
+** SQLITE_READLOCK The page cache is reading the database.
+** Writing is not permitted. There can be
+** multiple readers accessing the same database
+** file at the same time.
+**
+** SQLITE_WRITELOCK The page cache is writing the database.
+** Access is exclusive. No other processes or
+** threads can be reading or writing while one
+** process is writing.
+**
+** The page cache comes up in SQLITE_UNLOCK. The first time a
+** sqlite_page_get() occurs, the state transitions to SQLITE_READLOCK.
+** After all pages have been released using sqlite_page_unref(),
+** the state transitions back to SQLITE_UNLOCK. The first time
+** that sqlite_page_write() is called, the state transitions to
+** SQLITE_WRITELOCK. (Note that sqlite_page_write() can only be
+** called on an outstanding page which means that the pager must
+** be in SQLITE_READLOCK before it transitions to SQLITE_WRITELOCK.)
+** The sqlite_page_rollback() and sqlite_page_commit() functions
+** transition the state from SQLITE_WRITELOCK back to SQLITE_READLOCK.
+*/
+#define SQLITE_UNLOCK 0
+#define SQLITE_READLOCK 1
+#define SQLITE_WRITELOCK 2
+
+
+/*
+** Each in-memory image of a page begins with the following header.
+** This header is only visible to this pager module. The client
+** code that calls pager sees only the data that follows the header.
+**
+** Client code should call sqlitepager_write() on a page prior to making
+** any modifications to that page. The first time sqlitepager_write()
+** is called, the original page contents are written into the rollback
+** journal and PgHdr.inJournal and PgHdr.needSync are set. Later, once
+** the journal page has made it onto the disk surface, PgHdr.needSync
+** is cleared. The modified page cannot be written back into the original
+** database file until the journal page has been synced to disk and the
+** PgHdr.needSync has been cleared.
+**
+** The PgHdr.dirty flag is set when sqlitepager_write() is called and
+** is cleared again when the page content is written back to the original
+** database file.
+*/
+typedef struct PgHdr PgHdr;
+struct PgHdr {
+ Pager *pPager; /* The pager to which this page belongs */
+ Pgno pgno; /* The page number for this page */
+ PgHdr *pNextHash, *pPrevHash; /* Hash collision chain for PgHdr.pgno */
+ int nRef; /* Number of users of this page */
+ PgHdr *pNextFree, *pPrevFree; /* Freelist of pages where nRef==0 */
+ PgHdr *pNextAll, *pPrevAll; /* A list of all pages */
+ PgHdr *pNextCkpt, *pPrevCkpt; /* List of pages in the checkpoint journal */
+ u8 inJournal; /* TRUE if has been written to journal */
+ u8 inCkpt; /* TRUE if written to the checkpoint journal */
+ u8 dirty; /* TRUE if we need to write back changes */
+ u8 needSync; /* Sync journal before writing this page */
+ u8 alwaysRollback; /* Disable dont_rollback() for this page */
+ PgHdr *pDirty; /* Dirty pages sorted by PgHdr.pgno */
+ /* SQLITE_PAGE_SIZE bytes of page data follow this header */
+ /* Pager.nExtra bytes of local data follow the page data */
+};
+
+
+/*
+** A macro used for invoking the codec if there is one
+*/
+#ifdef SQLITE_HAS_CODEC
+# define CODEC(P,D,N,X) if( P->xCodec ){ P->xCodec(P->pCodecArg,D,N,X); }
+#else
+# define CODEC(P,D,N,X)
+#endif
+
+/*
+** Convert a pointer to a PgHdr into a pointer to its data
+** and back again.
+*/
+#define PGHDR_TO_DATA(P) ((void*)(&(P)[1]))
+#define DATA_TO_PGHDR(D) (&((PgHdr*)(D))[-1])
+#define PGHDR_TO_EXTRA(P) ((void*)&((char*)(&(P)[1]))[SQLITE_PAGE_SIZE])
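+
+/*
+** Illustrative note: each page allocation is laid out as
+**
+**     [ PgHdr | SQLITE_PAGE_SIZE bytes of data | Pager.nExtra bytes ]
+**
+** so for any page header pPg the two macros invert each other,
+**
+**     void *pData = PGHDR_TO_DATA(pPg);
+**     assert( DATA_TO_PGHDR(pData)==pPg );
+**
+** and PGHDR_TO_EXTRA(pPg) is simply (char*)pData + SQLITE_PAGE_SIZE.
+*/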
+
+/*
+** How big to make the hash table used for locating in-memory pages
+** by page number.
+*/
+#define N_PG_HASH 2048
+
+/*
+** Hash a page number
+*/
+#define pager_hash(PN) ((PN)&(N_PG_HASH-1))
+
+/*
+** An open page cache is an instance of the following structure.
+*/
+struct Pager {
+ char *zFilename; /* Name of the database file */
+ char *zJournal; /* Name of the journal file */
+  char *zDirectory;           /* Directory holding database and journal files */
+ OsFile fd, jfd; /* File descriptors for database and journal */
+ OsFile cpfd; /* File descriptor for the checkpoint journal */
+ int dbSize; /* Number of pages in the file */
+ int origDbSize; /* dbSize before the current change */
+ int ckptSize; /* Size of database (in pages) at ckpt_begin() */
+ off_t ckptJSize; /* Size of journal at ckpt_begin() */
+ int nRec; /* Number of pages written to the journal */
+ u32 cksumInit; /* Quasi-random value added to every checksum */
+ int ckptNRec; /* Number of records in the checkpoint journal */
+ int nExtra; /* Add this many bytes to each in-memory page */
+ void (*xDestructor)(void*); /* Call this routine when freeing pages */
+ int nPage; /* Total number of in-memory pages */
+ int nRef; /* Number of in-memory pages with PgHdr.nRef>0 */
+ int mxPage; /* Maximum number of pages to hold in cache */
+  int nHit, nMiss, nOvfl;     /* Cache hits, misses, and LRU overflows */
+ void (*xCodec)(void*,void*,Pgno,int); /* Routine for en/decoding data */
+ void *pCodecArg; /* First argument to xCodec() */
+  u8 journalOpen;             /* True if journal file descriptor is valid */
+ u8 journalStarted; /* True if header of journal is synced */
+ u8 useJournal; /* Use a rollback journal on this file */
+ u8 ckptOpen; /* True if the checkpoint journal is open */
+  u8 ckptInUse;               /* True if we are in a checkpoint */
+  u8 ckptAutoopen;            /* Open ckpt journal when main journal is opened */
+ u8 noSync; /* Do not sync the journal if true */
+ u8 fullSync; /* Do extra syncs of the journal for robustness */
+ u8 state; /* SQLITE_UNLOCK, _READLOCK or _WRITELOCK */
+ u8 errMask; /* One of several kinds of errors */
+ u8 tempFile; /* zFilename is a temporary file */
+ u8 readOnly; /* True for a read-only database */
+ u8 needSync; /* True if an fsync() is needed on the journal */
+ u8 dirtyFile; /* True if database file has changed in any way */
+ u8 alwaysRollback; /* Disable dont_rollback() for all pages */
+ u8 *aInJournal; /* One bit for each page in the database file */
+ u8 *aInCkpt; /* One bit for each page in the database */
+ PgHdr *pFirst, *pLast; /* List of free pages */
+ PgHdr *pFirstSynced; /* First free page with PgHdr.needSync==0 */
+ PgHdr *pAll; /* List of all pages */
+ PgHdr *pCkpt; /* List of pages in the checkpoint journal */
+  PgHdr *aHash[N_PG_HASH];    /* Hash table to map page number to PgHdr */
+};
+
+/*
+** These are bits that can be set in Pager.errMask.
+*/
+#define PAGER_ERR_FULL 0x01 /* a write() failed */
+#define PAGER_ERR_MEM 0x02 /* malloc() failed */
+#define PAGER_ERR_LOCK 0x04 /* error in the locking protocol */
+#define PAGER_ERR_CORRUPT 0x08 /* database or journal corruption */
+#define PAGER_ERR_DISK 0x10 /* general disk I/O error - bad hard drive? */
+
+/*
+** The journal file contains page records in the following
+** format.
+**
+** Actually, this structure is the complete page record for pager
+** formats less than 3. Beginning with format 3, this record is surrounded
+** by two checksums.
+*/
+typedef struct PageRecord PageRecord;
+struct PageRecord {
+ Pgno pgno; /* The page number */
+ char aData[SQLITE_PAGE_SIZE]; /* Original data for page pgno */
+};
+
+/*
+** Journal files begin with the following magic string. The data
+** was obtained from /dev/random. It is used only as a sanity check.
+**
+** There are three journal formats (so far). The 1st journal format writes
+** 32-bit integers in the byte-order of the host machine. New
+** formats write integers as big-endian. All new journals use the
+** new format, but we have to be able to read an older journal in order
+** to rollback journals created by older versions of the library.
+**
+** The 3rd journal format (added for 2.8.0) adds additional sanity
+** checking information to the journal. If the power fails while the
+** journal is being written, semi-random garbage data might appear in
+** the journal file after power is restored. If an attempt is then made
+** to roll the journal back, the database could be corrupted. The additional
+** sanity checking data is an attempt to discover the garbage in the
+** journal and ignore it.
+**
+** The sanity checking information for the 3rd journal format consists
+** of a 32-bit checksum on each page of data. The checksum covers both
+** the page number and the SQLITE_PAGE_SIZE bytes of data for the page.
+** This cksum is initialized to a 32-bit random value that appears in the
+** journal file right after the header. The random initializer is important,
+** because garbage data that appears at the end of a journal is likely
+** data that was once in other files that have now been deleted. If the
+** garbage data came from an obsolete journal file, the checksums might
+** be correct. But by initializing the checksum to a random value which
+** is different for every journal, we minimize that risk.
+*/
+static const unsigned char aJournalMagic1[] = {
+ 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd4,
+};
+static const unsigned char aJournalMagic2[] = {
+ 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd5,
+};
+static const unsigned char aJournalMagic3[] = {
+ 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd6,
+};
+#define JOURNAL_FORMAT_1 1
+#define JOURNAL_FORMAT_2 2
+#define JOURNAL_FORMAT_3 3
+
+/*
+** The following integer determines what format to use when creating
+** new primary journal files. By default we always use format 3.
+** When testing, we can set this value to older journal formats in order to
+** make sure that newer versions of the library are able to rollback older
+** journal files.
+**
+** Note that checkpoint journals always use format 2 and omit the header.
+*/
+#ifdef SQLITE_TEST
+int journal_format = 3;
+#else
+# define journal_format 3
+#endif
+
+/*
+** The size of the header and of each page in the journal varies according
+** to which journal format is being used. The following macros figure out
+** the sizes based on format numbers.
+*/
+#define JOURNAL_HDR_SZ(X) \
+ (sizeof(aJournalMagic1) + sizeof(Pgno) + ((X)>=3)*2*sizeof(u32))
+#define JOURNAL_PG_SZ(X) \
+ (SQLITE_PAGE_SIZE + sizeof(Pgno) + ((X)>=3)*sizeof(u32))
+
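+/*
+** Worked example (illustrative, assuming the default SQLITE_PAGE_SIZE of
+** 1024 and 4-byte Pgno and u32 types): the magic prefix is 8 bytes, so
+**
+**     JOURNAL_HDR_SZ(2) == 8 + 4          == 12
+**     JOURNAL_HDR_SZ(3) == 8 + 4 + 2*4    == 20
+**     JOURNAL_PG_SZ(2)  == 1024 + 4       == 1028
+**     JOURNAL_PG_SZ(3)  == 1024 + 4 + 4   == 1032
+**
+** A format-3 journal holding nRec pages is therefore 20 + nRec*1032 bytes,
+** which is how nRec is recovered from the file size when the header stores
+** 0xffffffff (see pager_playback() below).
+*/
+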
+/*
+** Enable reference count tracking here:
+*/
+#ifdef SQLITE_TEST
+ int pager_refinfo_enable = 0;
+ static void pager_refinfo(PgHdr *p){
+ static int cnt = 0;
+ if( !pager_refinfo_enable ) return;
+ printf(
+ "REFCNT: %4d addr=0x%08x nRef=%d\n",
+ p->pgno, (int)PGHDR_TO_DATA(p), p->nRef
+ );
+ cnt++; /* Something to set a breakpoint on */
+ }
+# define REFINFO(X) pager_refinfo(X)
+#else
+# define REFINFO(X)
+#endif
+
+/*
+** Read a 32-bit integer from the given file descriptor. Store the integer
+** that is read in *pRes. Return SQLITE_OK if everything worked, or an
+** error code if something goes wrong.
+**
+** If the journal format is 2 or 3, read a big-endian integer. If the
+** journal format is 1, read an integer in the native byte-order of the
+** host machine.
+*/
+static int read32bits(int format, OsFile *fd, u32 *pRes){
+ u32 res;
+ int rc;
+ rc = sqliteOsRead(fd, &res, sizeof(res));
+ if( rc==SQLITE_OK && format>JOURNAL_FORMAT_1 ){
+ unsigned char ac[4];
+ memcpy(ac, &res, 4);
+ res = (ac[0]<<24) | (ac[1]<<16) | (ac[2]<<8) | ac[3];
+ }
+ *pRes = res;
+ return rc;
+}
+
+/*
+** Write a 32-bit integer into the given file descriptor. Return SQLITE_OK
+** on success or an error code if something goes wrong.
+**
+** If the journal format is 2 or 3, write the integer as 4 big-endian
+** bytes. If the journal format is 1, write the integer in the native
+** byte order. In normal operation, only formats 2 and 3 are used.
+** Journal format 1 is only used for testing.
+*/
+static int write32bits(OsFile *fd, u32 val){
+ unsigned char ac[4];
+ if( journal_format<=1 ){
+ return sqliteOsWrite(fd, &val, 4);
+ }
+ ac[0] = (val>>24) & 0xff;
+ ac[1] = (val>>16) & 0xff;
+ ac[2] = (val>>8) & 0xff;
+ ac[3] = val & 0xff;
+ return sqliteOsWrite(fd, ac, 4);
+}
+
+/*
+** Write a 32-bit integer into a page header right before the
+** page data. This will overwrite the PgHdr.pDirty pointer.
+**
+** The integer is big-endian for formats 2 and 3 and native byte order
+** for journal format 1.
+*/
+static void store32bits(u32 val, PgHdr *p, int offset){
+ unsigned char *ac;
+ ac = &((unsigned char*)PGHDR_TO_DATA(p))[offset];
+ if( journal_format<=1 ){
+ memcpy(ac, &val, 4);
+ }else{
+ ac[0] = (val>>24) & 0xff;
+ ac[1] = (val>>16) & 0xff;
+ ac[2] = (val>>8) & 0xff;
+ ac[3] = val & 0xff;
+ }
+}
+
+
+/*
+** Convert the bits in the pPager->errMask into an appropriate
+** return code.
+*/
+static int pager_errcode(Pager *pPager){
+ int rc = SQLITE_OK;
+ if( pPager->errMask & PAGER_ERR_LOCK ) rc = SQLITE_PROTOCOL;
+ if( pPager->errMask & PAGER_ERR_DISK ) rc = SQLITE_IOERR;
+ if( pPager->errMask & PAGER_ERR_FULL ) rc = SQLITE_FULL;
+ if( pPager->errMask & PAGER_ERR_MEM ) rc = SQLITE_NOMEM;
+ if( pPager->errMask & PAGER_ERR_CORRUPT ) rc = SQLITE_CORRUPT;
+ return rc;
+}
+
+/*
+** Add or remove a page from the list of all pages that are in the
+** checkpoint journal.
+**
+** The Pager keeps a separate list of pages that are currently in
+** the checkpoint journal. This helps the sqlitepager_ckpt_commit()
+** routine run MUCH faster for the common case where there are many
+** pages in memory but only a few are in the checkpoint journal.
+*/
+static void page_add_to_ckpt_list(PgHdr *pPg){
+ Pager *pPager = pPg->pPager;
+ if( pPg->inCkpt ) return;
+ assert( pPg->pPrevCkpt==0 && pPg->pNextCkpt==0 );
+ pPg->pPrevCkpt = 0;
+ if( pPager->pCkpt ){
+ pPager->pCkpt->pPrevCkpt = pPg;
+ }
+ pPg->pNextCkpt = pPager->pCkpt;
+ pPager->pCkpt = pPg;
+ pPg->inCkpt = 1;
+}
+static void page_remove_from_ckpt_list(PgHdr *pPg){
+ if( !pPg->inCkpt ) return;
+ if( pPg->pPrevCkpt ){
+ assert( pPg->pPrevCkpt->pNextCkpt==pPg );
+ pPg->pPrevCkpt->pNextCkpt = pPg->pNextCkpt;
+ }else{
+ assert( pPg->pPager->pCkpt==pPg );
+ pPg->pPager->pCkpt = pPg->pNextCkpt;
+ }
+ if( pPg->pNextCkpt ){
+ assert( pPg->pNextCkpt->pPrevCkpt==pPg );
+ pPg->pNextCkpt->pPrevCkpt = pPg->pPrevCkpt;
+ }
+ pPg->pNextCkpt = 0;
+ pPg->pPrevCkpt = 0;
+ pPg->inCkpt = 0;
+}
+
+/*
+** Find a page in the hash table given its page number. Return
+** a pointer to the page or NULL if not found.
+*/
+static PgHdr *pager_lookup(Pager *pPager, Pgno pgno){
+ PgHdr *p = pPager->aHash[pager_hash(pgno)];
+ while( p && p->pgno!=pgno ){
+ p = p->pNextHash;
+ }
+ return p;
+}
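+
+/*
+** Illustrative note: with N_PG_HASH==2048 the hash is just the low eleven
+** bits of the page number, so pages 1, 2049, and 4097 all land in bucket 1
+** and are told apart by walking the pNextHash chain above.
+*/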
+
+/*
+** Unlock the database and clear the in-memory cache. This routine
+** sets the state of the pager back to what it was when it was first
+** opened. Any outstanding pages are invalidated and subsequent attempts
+** to access those pages will likely result in a coredump.
+*/
+static void pager_reset(Pager *pPager){
+ PgHdr *pPg, *pNext;
+ for(pPg=pPager->pAll; pPg; pPg=pNext){
+ pNext = pPg->pNextAll;
+ sqliteFree(pPg);
+ }
+ pPager->pFirst = 0;
+ pPager->pFirstSynced = 0;
+ pPager->pLast = 0;
+ pPager->pAll = 0;
+ memset(pPager->aHash, 0, sizeof(pPager->aHash));
+ pPager->nPage = 0;
+ if( pPager->state>=SQLITE_WRITELOCK ){
+ sqlitepager_rollback(pPager);
+ }
+ sqliteOsUnlock(&pPager->fd);
+ pPager->state = SQLITE_UNLOCK;
+ pPager->dbSize = -1;
+ pPager->nRef = 0;
+ assert( pPager->journalOpen==0 );
+}
+
+/*
+** When this routine is called, the pager has the journal file open and
+** a write lock on the database. This routine releases the database
+** write lock and acquires a read lock in its place. The journal file
+** is deleted and closed.
+**
+** TODO: Consider keeping the journal file open for temporary databases.
+** This might give a performance improvement on Windows, where opening
+** a file is an expensive operation.
+*/
+static int pager_unwritelock(Pager *pPager){
+ int rc;
+ PgHdr *pPg;
+ if( pPager->state<SQLITE_WRITELOCK ) return SQLITE_OK;
+ sqlitepager_ckpt_commit(pPager);
+ if( pPager->ckptOpen ){
+ sqliteOsClose(&pPager->cpfd);
+ pPager->ckptOpen = 0;
+ }
+ if( pPager->journalOpen ){
+ sqliteOsClose(&pPager->jfd);
+ pPager->journalOpen = 0;
+ sqliteOsDelete(pPager->zJournal);
+ sqliteFree( pPager->aInJournal );
+ pPager->aInJournal = 0;
+ for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){
+ pPg->inJournal = 0;
+ pPg->dirty = 0;
+ pPg->needSync = 0;
+ }
+ }else{
+ assert( pPager->dirtyFile==0 || pPager->useJournal==0 );
+ }
+ rc = sqliteOsReadLock(&pPager->fd);
+ if( rc==SQLITE_OK ){
+ pPager->state = SQLITE_READLOCK;
+ }else{
+ /* This can only happen if a process does a BEGIN, then forks and the
+ ** child process does the COMMIT. Because of the semantics of unix
+ ** file locking, the unlock will fail.
+ */
+ pPager->state = SQLITE_UNLOCK;
+ }
+ return rc;
+}
+
+/*
+** Compute and return a checksum for the page of data.
+**
+** This is not a real checksum. It is really just the sum of the
+** random initial value and the page number. We considered doing a checksum
+** of the database, but that was found to be too slow.
+*/
+static u32 pager_cksum(Pager *pPager, Pgno pgno, const char *aData){
+ u32 cksum = pPager->cksumInit + pgno;
+ return cksum;
+}
+
+/*
+** Read a single page from the journal file opened on file descriptor
+** jfd. Playback this one page.
+**
+** There are three different journal formats. The format parameter determines
+** which format is used by the journal that is played back.
+*/
+static int pager_playback_one_page(Pager *pPager, OsFile *jfd, int format){
+ int rc;
+ PgHdr *pPg; /* An existing page in the cache */
+ PageRecord pgRec;
+ u32 cksum;
+
+ rc = read32bits(format, jfd, &pgRec.pgno);
+ if( rc!=SQLITE_OK ) return rc;
+ rc = sqliteOsRead(jfd, &pgRec.aData, sizeof(pgRec.aData));
+ if( rc!=SQLITE_OK ) return rc;
+
+  /* Sanity checking on the page. This is more important than I originally
+ ** thought. If a power failure occurs while the journal is being written,
+ ** it could cause invalid data to be written into the journal. We need to
+ ** detect this invalid data (with high probability) and ignore it.
+ */
+ if( pgRec.pgno==0 ){
+ return SQLITE_DONE;
+ }
+ if( pgRec.pgno>(unsigned)pPager->dbSize ){
+ return SQLITE_OK;
+ }
+ if( format>=JOURNAL_FORMAT_3 ){
+ rc = read32bits(format, jfd, &cksum);
+ if( rc ) return rc;
+ if( pager_cksum(pPager, pgRec.pgno, pgRec.aData)!=cksum ){
+ return SQLITE_DONE;
+ }
+ }
+
+ /* Playback the page. Update the in-memory copy of the page
+ ** at the same time, if there is one.
+ */
+ pPg = pager_lookup(pPager, pgRec.pgno);
+ TRACE2("PLAYBACK %d\n", pgRec.pgno);
+ sqliteOsSeek(&pPager->fd, (pgRec.pgno-1)*(off_t)SQLITE_PAGE_SIZE);
+ rc = sqliteOsWrite(&pPager->fd, pgRec.aData, SQLITE_PAGE_SIZE);
+ if( pPg ){
+ /* No page should ever be rolled back that is in use, except for page
+ ** 1 which is held in use in order to keep the lock on the database
+ ** active. However, such a page may be rolled back as a result of an
+ ** internal error resulting in an automatic call to
+ ** sqlitepager_rollback(), so we can't assert() it.
+ */
+ /* assert( pPg->nRef==0 || pPg->pgno==1 ) */
+ memcpy(PGHDR_TO_DATA(pPg), pgRec.aData, SQLITE_PAGE_SIZE);
+ memset(PGHDR_TO_EXTRA(pPg), 0, pPager->nExtra);
+ pPg->dirty = 0;
+ pPg->needSync = 0;
+ CODEC(pPager, PGHDR_TO_DATA(pPg), pPg->pgno, 3);
+ }
+ return rc;
+}
+
+/*
+** Playback the journal and thus restore the database file to
+** the state it was in before we started making changes.
+**
+** The journal file format is as follows:
+**
+** * 8 byte prefix. One of the aJournalMagic123 vectors defined
+** above. The format of the journal file is determined by which
+** of the three prefix vectors is seen.
+** * 4 byte big-endian integer which is the number of valid page records
+** in the journal. If this value is 0xffffffff, then compute the
+** number of page records from the journal size. This field appears
+** in format 3 only.
+** * 4 byte big-endian integer which is the initial value for the
+** sanity checksum. This field appears in format 3 only.
+** * 4 byte integer which is the number of pages to truncate the
+** database to during a rollback.
+**    *  Zero or more page instances, each as follows:
+** + 4 byte page number.
+** + SQLITE_PAGE_SIZE bytes of data.
+** + 4 byte checksum (format 3 only)
+**
+** When we speak of the journal header, we mean the first 4 bullets above.
+** Each entry in the journal is an instance of the 5th bullet. Note that
+** bullets 2 and 3 only appear in format-3 journals.
+**
+** Call the value from the second bullet "nRec". nRec is the number of
+** valid page entries in the journal. In most cases, you can compute the
+** value of nRec from the size of the journal file. But if a power
+** failure occurred while the journal was being written, it could be the
+** case that the size of the journal file had already been increased but
+** the extra entries had not yet made it safely to disk. In such a case,
+** the value of nRec computed from the file size would be too large. For
+** that reason, we always use the nRec value in the header.
+**
+** If the nRec value is 0xffffffff it means that nRec should be computed
+** from the file size. This value is used when the user selects the
+** no-sync option for the journal. A power failure could lead to corruption
+** in this case. But for things like temporary table (which will be
+** deleted when the power is restored) we don't care.
+**
+** Journal formats 1 and 2 do not have an nRec value in the header so we
+** have to compute nRec from the file size. This has risks (as described
+** above) which is why all persistent tables have been changed to use
+** format 3.
+**
+** If the file opened as the journal file is not a well-formed
+** journal file then the database will likely already be
+** corrupted, so the PAGER_ERR_CORRUPT bit is set in pPager->errMask
+** and SQLITE_CORRUPT is returned. If it all works, then this routine
+** returns SQLITE_OK.
+*/
+static int pager_playback(Pager *pPager, int useJournalSize){
+ off_t szJ; /* Size of the journal file in bytes */
+ int nRec; /* Number of Records in the journal */
+ int i; /* Loop counter */
+ Pgno mxPg = 0; /* Size of the original file in pages */
+ int format; /* Format of the journal file. */
+ unsigned char aMagic[sizeof(aJournalMagic1)];
+ int rc;
+
+ /* Figure out how many records are in the journal. Abort early if
+ ** the journal is empty.
+ */
+ assert( pPager->journalOpen );
+ sqliteOsSeek(&pPager->jfd, 0);
+ rc = sqliteOsFileSize(&pPager->jfd, &szJ);
+ if( rc!=SQLITE_OK ){
+ goto end_playback;
+ }
+
+ /* If the journal file is too small to contain a complete header,
+ ** it must mean that the process that created the journal was just
+ ** beginning to write the journal file when it died. In that case,
+ ** the database file should have still been completely unchanged.
+ ** Nothing needs to be rolled back. We can safely ignore this journal.
+ */
+ if( szJ < sizeof(aMagic)+sizeof(Pgno) ){
+ goto end_playback;
+ }
+
+ /* Read the beginning of the journal and truncate the
+ ** database file back to its original size.
+ */
+ rc = sqliteOsRead(&pPager->jfd, aMagic, sizeof(aMagic));
+ if( rc!=SQLITE_OK ){
+ rc = SQLITE_PROTOCOL;
+ goto end_playback;
+ }
+ if( memcmp(aMagic, aJournalMagic3, sizeof(aMagic))==0 ){
+ format = JOURNAL_FORMAT_3;
+ }else if( memcmp(aMagic, aJournalMagic2, sizeof(aMagic))==0 ){
+ format = JOURNAL_FORMAT_2;
+ }else if( memcmp(aMagic, aJournalMagic1, sizeof(aMagic))==0 ){
+ format = JOURNAL_FORMAT_1;
+ }else{
+ rc = SQLITE_PROTOCOL;
+ goto end_playback;
+ }
+ if( format>=JOURNAL_FORMAT_3 ){
+ if( szJ < sizeof(aMagic) + 3*sizeof(u32) ){
+ /* Ignore the journal if it is too small to contain a complete
+ ** header. We already did this test once above, but at the prior
+ ** test, we did not know the journal format and so we had to assume
+ ** the smallest possible header. Now we know the header is bigger
+ ** than the minimum so we test again.
+ */
+ goto end_playback;
+ }
+ rc = read32bits(format, &pPager->jfd, (u32*)&nRec);
+ if( rc ) goto end_playback;
+ rc = read32bits(format, &pPager->jfd, &pPager->cksumInit);
+ if( rc ) goto end_playback;
+ if( nRec==0xffffffff || useJournalSize ){
+ nRec = (szJ - JOURNAL_HDR_SZ(3))/JOURNAL_PG_SZ(3);
+ }
+ }else{
+ nRec = (szJ - JOURNAL_HDR_SZ(2))/JOURNAL_PG_SZ(2);
+ assert( nRec*JOURNAL_PG_SZ(2)+JOURNAL_HDR_SZ(2)==szJ );
+ }
+ rc = read32bits(format, &pPager->jfd, &mxPg);
+ if( rc!=SQLITE_OK ){
+ goto end_playback;
+ }
+ assert( pPager->origDbSize==0 || pPager->origDbSize==mxPg );
+ rc = sqliteOsTruncate(&pPager->fd, SQLITE_PAGE_SIZE*(off_t)mxPg);
+ if( rc!=SQLITE_OK ){
+ goto end_playback;
+ }
+ pPager->dbSize = mxPg;
+
+ /* Copy original pages out of the journal and back into the database file.
+ */
+ for(i=0; i<nRec; i++){
+ rc = pager_playback_one_page(pPager, &pPager->jfd, format);
+ if( rc!=SQLITE_OK ){
+ if( rc==SQLITE_DONE ){
+ rc = SQLITE_OK;
+ }
+ break;
+ }
+ }
+
+ /* Pages that have been written to the journal but never synced
+  ** were not restored by the loop above. We have to restore those
+ ** pages by reading them back from the original database.
+ */
+ if( rc==SQLITE_OK ){
+ PgHdr *pPg;
+ for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){
+ char zBuf[SQLITE_PAGE_SIZE];
+ if( !pPg->dirty ) continue;
+ if( (int)pPg->pgno <= pPager->origDbSize ){
+ sqliteOsSeek(&pPager->fd, SQLITE_PAGE_SIZE*(off_t)(pPg->pgno-1));
+ rc = sqliteOsRead(&pPager->fd, zBuf, SQLITE_PAGE_SIZE);
+ TRACE2("REFETCH %d\n", pPg->pgno);
+ CODEC(pPager, zBuf, pPg->pgno, 2);
+ if( rc ) break;
+ }else{
+ memset(zBuf, 0, SQLITE_PAGE_SIZE);
+ }
+ if( pPg->nRef==0 || memcmp(zBuf, PGHDR_TO_DATA(pPg), SQLITE_PAGE_SIZE) ){
+ memcpy(PGHDR_TO_DATA(pPg), zBuf, SQLITE_PAGE_SIZE);
+ memset(PGHDR_TO_EXTRA(pPg), 0, pPager->nExtra);
+ }
+ pPg->needSync = 0;
+ pPg->dirty = 0;
+ }
+ }
+
+end_playback:
+ if( rc!=SQLITE_OK ){
+ pager_unwritelock(pPager);
+ pPager->errMask |= PAGER_ERR_CORRUPT;
+ rc = SQLITE_CORRUPT;
+ }else{
+ rc = pager_unwritelock(pPager);
+ }
+ return rc;
+}
+
+/*
+** Playback the checkpoint journal.
+**
+** This is similar to playing back the transaction journal but with
+** a few extra twists.
+**
+** (1) The number of pages in the database file at the start of
+** the checkpoint is stored in pPager->ckptSize, not in the
+** journal file itself.
+**
+** (2) In addition to playing back the checkpoint journal, also
+** playback all pages of the transaction journal beginning
+** at offset pPager->ckptJSize.
+*/
+static int pager_ckpt_playback(Pager *pPager){
+ off_t szJ; /* Size of the full journal */
+ int nRec; /* Number of Records */
+ int i; /* Loop counter */
+ int rc;
+
+ /* Truncate the database back to its original size.
+ */
+ rc = sqliteOsTruncate(&pPager->fd, SQLITE_PAGE_SIZE*(off_t)pPager->ckptSize);
+ pPager->dbSize = pPager->ckptSize;
+
+ /* Figure out how many records are in the checkpoint journal.
+ */
+ assert( pPager->ckptInUse && pPager->journalOpen );
+ sqliteOsSeek(&pPager->cpfd, 0);
+ nRec = pPager->ckptNRec;
+
+ /* Copy original pages out of the checkpoint journal and back into the
+ ** database file. Note that the checkpoint journal always uses format
+ ** 2 instead of format 3 since it does not need to be concerned with
+ ** power failures corrupting the journal and can thus omit the checksums.
+ */
+ for(i=nRec-1; i>=0; i--){
+ rc = pager_playback_one_page(pPager, &pPager->cpfd, 2);
+ assert( rc!=SQLITE_DONE );
+ if( rc!=SQLITE_OK ) goto end_ckpt_playback;
+ }
+
+ /* Figure out how many pages need to be copied out of the transaction
+ ** journal.
+ */
+ rc = sqliteOsSeek(&pPager->jfd, pPager->ckptJSize);
+ if( rc!=SQLITE_OK ){
+ goto end_ckpt_playback;
+ }
+ rc = sqliteOsFileSize(&pPager->jfd, &szJ);
+ if( rc!=SQLITE_OK ){
+ goto end_ckpt_playback;
+ }
+ nRec = (szJ - pPager->ckptJSize)/JOURNAL_PG_SZ(journal_format);
+ for(i=nRec-1; i>=0; i--){
+ rc = pager_playback_one_page(pPager, &pPager->jfd, journal_format);
+ if( rc!=SQLITE_OK ){
+ assert( rc!=SQLITE_DONE );
+ goto end_ckpt_playback;
+ }
+ }
+
+end_ckpt_playback:
+ if( rc!=SQLITE_OK ){
+ pPager->errMask |= PAGER_ERR_CORRUPT;
+ rc = SQLITE_CORRUPT;
+ }
+ return rc;
+}
+
+/*
+** Change the maximum number of in-memory pages that are allowed.
+**
+** The maximum number is the absolute value of the mxPage parameter.
+** If mxPage is negative, the noSync flag is also set. noSync bypasses
+** calls to sqliteOsSync(). The pager runs much faster with noSync on,
+** but if the operating system crashes or there is an abrupt power
+** failure, the database file might be left in an inconsistent and
+** unrepairable state.
+*/
+void sqlitepager_set_cachesize(Pager *pPager, int mxPage){
+ if( mxPage>=0 ){
+ pPager->noSync = pPager->tempFile;
+ if( pPager->noSync==0 ) pPager->needSync = 0;
+ }else{
+ pPager->noSync = 1;
+ mxPage = -mxPage;
+ }
+ if( mxPage>10 ){
+ pPager->mxPage = mxPage;
+ }
+}
+
+/*
+** Adjust the robustness of the database to damage due to OS crashes
+** or power failures by changing the number of syncs()s when writing
+** the rollback journal. There are three levels:
+**
+** OFF sqliteOsSync() is never called. This is the default
+** for temporary and transient files.
+**
+** NORMAL The journal is synced once before writes begin on the
+** database. This is normally adequate protection, but
+** it is theoretically possible, though very unlikely,
+**             that an inopportune power failure could leave the journal
+** in a state which would cause damage to the database
+** when it is rolled back.
+**
+** FULL The journal is synced twice before writes begin on the
+** database (with some additional information - the nRec field
+** of the journal header - being written in between the two
+** syncs). If we assume that writing a
+** single disk sector is atomic, then this mode provides
+** assurance that the journal will not be corrupted to the
+** point of causing damage to the database during rollback.
+**
+** Numeric values associated with these states are OFF==1, NORMAL=2,
+** and FULL=3.
+*/
+void sqlitepager_set_safety_level(Pager *pPager, int level){
+ pPager->noSync = level==1 || pPager->tempFile;
+ pPager->fullSync = level==3 && !pPager->tempFile;
+ if( pPager->noSync==0 ) pPager->needSync = 0;
+}
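+
+/*
+** Illustrative calls (the values are arbitrary examples, not defaults):
+**
+**     sqlitepager_set_cachesize(pPager, 500);
+**
+** caches up to 500 pages, and a negative argument such as -500 keeps the
+** same limit while also setting the noSync flag.  Independently,
+**
+**     sqlitepager_set_safety_level(pPager, 3);
+**
+** selects the FULL double-sync journal behavior described above.
+*/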
+
+/*
+** Open a temporary file. Write the name of the file into zName
+** (zName must be at least SQLITE_TEMPNAME_SIZE bytes long.) Write
+** the file descriptor into *fd. Return SQLITE_OK on success or some
+** other error code if we fail.
+**
+** The OS will automatically delete the temporary file when it is
+** closed.
+*/
+static int sqlitepager_opentemp(char *zFile, OsFile *fd){
+ int cnt = 8;
+ int rc;
+ do{
+ cnt--;
+ sqliteOsTempFileName(zFile);
+ rc = sqliteOsOpenExclusive(zFile, fd, 1);
+ }while( cnt>0 && rc!=SQLITE_OK );
+ return rc;
+}
+
+/*
+** Create a new page cache and put a pointer to the page cache in *ppPager.
+** The file to be cached need not exist. The file is not locked until
+** the first call to sqlitepager_get() and is only held open until the
+** last page is released using sqlitepager_unref().
+**
+** If zFilename is NULL then a randomly-named temporary file is created
+** and used as the file to be cached. The file will be deleted
+** automatically when it is closed.
+*/
+int sqlitepager_open(
+ Pager **ppPager, /* Return the Pager structure here */
+ const char *zFilename, /* Name of the database file to open */
+ int mxPage, /* Max number of in-memory cache pages */
+  int nExtra,              /* Extra bytes appended to each in-memory page */
+ int useJournal /* TRUE to use a rollback journal on this file */
+){
+ Pager *pPager;
+ char *zFullPathname;
+ int nameLen;
+ OsFile fd;
+ int rc, i;
+ int tempFile;
+ int readOnly = 0;
+ char zTemp[SQLITE_TEMPNAME_SIZE];
+
+ *ppPager = 0;
+ if( sqlite_malloc_failed ){
+ return SQLITE_NOMEM;
+ }
+ if( zFilename && zFilename[0] ){
+ zFullPathname = sqliteOsFullPathname(zFilename);
+ rc = sqliteOsOpenReadWrite(zFullPathname, &fd, &readOnly);
+ tempFile = 0;
+ }else{
+ rc = sqlitepager_opentemp(zTemp, &fd);
+ zFilename = zTemp;
+ zFullPathname = sqliteOsFullPathname(zFilename);
+ tempFile = 1;
+ }
+ if( sqlite_malloc_failed ){
+ return SQLITE_NOMEM;
+ }
+ if( rc!=SQLITE_OK ){
+ sqliteFree(zFullPathname);
+ return SQLITE_CANTOPEN;
+ }
+ nameLen = strlen(zFullPathname);
+ pPager = sqliteMalloc( sizeof(*pPager) + nameLen*3 + 30 );
+ if( pPager==0 ){
+ sqliteOsClose(&fd);
+ sqliteFree(zFullPathname);
+ return SQLITE_NOMEM;
+ }
+ SET_PAGER(pPager);
+ pPager->zFilename = (char*)&pPager[1];
+ pPager->zDirectory = &pPager->zFilename[nameLen+1];
+ pPager->zJournal = &pPager->zDirectory[nameLen+1];
+ strcpy(pPager->zFilename, zFullPathname);
+ strcpy(pPager->zDirectory, zFullPathname);
+ for(i=nameLen; i>0 && pPager->zDirectory[i-1]!='/'; i--){}
+ if( i>0 ) pPager->zDirectory[i-1] = 0;
+ strcpy(pPager->zJournal, zFullPathname);
+ sqliteFree(zFullPathname);
+ strcpy(&pPager->zJournal[nameLen], "-journal");
+ pPager->fd = fd;
+ pPager->journalOpen = 0;
+ pPager->useJournal = useJournal;
+ pPager->ckptOpen = 0;
+ pPager->ckptInUse = 0;
+ pPager->nRef = 0;
+ pPager->dbSize = -1;
+ pPager->ckptSize = 0;
+ pPager->ckptJSize = 0;
+ pPager->nPage = 0;
+ pPager->mxPage = mxPage>5 ? mxPage : 10;
+ pPager->state = SQLITE_UNLOCK;
+ pPager->errMask = 0;
+ pPager->tempFile = tempFile;
+ pPager->readOnly = readOnly;
+ pPager->needSync = 0;
+ pPager->noSync = pPager->tempFile || !useJournal;
+ pPager->pFirst = 0;
+ pPager->pFirstSynced = 0;
+ pPager->pLast = 0;
+ pPager->nExtra = nExtra;
+ memset(pPager->aHash, 0, sizeof(pPager->aHash));
+ *ppPager = pPager;
+ return SQLITE_OK;
+}
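+
+/*
+** Illustrative use of the routine above (the file name and cache size are
+** arbitrary): open a pager on a database file, ask how many pages it holds,
+** and close it again.
+**
+**     Pager *pPager;
+**     if( sqlitepager_open(&pPager, "test.db", 100, 0, 1)==SQLITE_OK ){
+**       int nPage = sqlitepager_pagecount(pPager);
+**       ...
+**       sqlitepager_close(pPager);
+**     }
+*/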
+
+/*
+** Set the destructor for this pager. If not NULL, the destructor is called
+** when the reference count on each page reaches zero. The destructor can
+** be used to clean up information in the extra segment appended to each page.
+**
+** The destructor is not called as a result of sqlitepager_close().
+** Destructors are only called by sqlitepager_unref().
+*/
+void sqlitepager_set_destructor(Pager *pPager, void (*xDesc)(void*)){
+ pPager->xDestructor = xDesc;
+}
+
+/*
+** Return the total number of pages in the disk file associated with
+** pPager.
+*/
+int sqlitepager_pagecount(Pager *pPager){
+ off_t n;
+ assert( pPager!=0 );
+ if( pPager->dbSize>=0 ){
+ return pPager->dbSize;
+ }
+ if( sqliteOsFileSize(&pPager->fd, &n)!=SQLITE_OK ){
+ pPager->errMask |= PAGER_ERR_DISK;
+ return 0;
+ }
+ n /= SQLITE_PAGE_SIZE;
+ if( pPager->state!=SQLITE_UNLOCK ){
+ pPager->dbSize = n;
+ }
+ return n;
+}
+
+/*
+** Forward declaration
+*/
+static int syncJournal(Pager*);
+
+/*
+** Truncate the file to the number of pages specified.
+*/
+int sqlitepager_truncate(Pager *pPager, Pgno nPage){
+ int rc;
+ if( pPager->dbSize<0 ){
+ sqlitepager_pagecount(pPager);
+ }
+ if( pPager->errMask!=0 ){
+ rc = pager_errcode(pPager);
+ return rc;
+ }
+ if( nPage>=(unsigned)pPager->dbSize ){
+ return SQLITE_OK;
+ }
+ syncJournal(pPager);
+ rc = sqliteOsTruncate(&pPager->fd, SQLITE_PAGE_SIZE*(off_t)nPage);
+ if( rc==SQLITE_OK ){
+ pPager->dbSize = nPage;
+ }
+ return rc;
+}
+
+/*
+** Shutdown the page cache. Free all memory and close all files.
+**
+** If a transaction was in progress when this routine is called, that
+** transaction is rolled back. All outstanding pages are invalidated
+** and their memory is freed. Any attempt to use a page associated
+** with this page cache after this function returns will likely
+** result in a coredump.
+*/
+int sqlitepager_close(Pager *pPager){
+ PgHdr *pPg, *pNext;
+ switch( pPager->state ){
+ case SQLITE_WRITELOCK: {
+ sqlitepager_rollback(pPager);
+ sqliteOsUnlock(&pPager->fd);
+ assert( pPager->journalOpen==0 );
+ break;
+ }
+ case SQLITE_READLOCK: {
+ sqliteOsUnlock(&pPager->fd);
+ break;
+ }
+ default: {
+ /* Do nothing */
+ break;
+ }
+ }
+ for(pPg=pPager->pAll; pPg; pPg=pNext){
+ pNext = pPg->pNextAll;
+ sqliteFree(pPg);
+ }
+ sqliteOsClose(&pPager->fd);
+ assert( pPager->journalOpen==0 );
+ /* Temp files are automatically deleted by the OS
+ ** if( pPager->tempFile ){
+ ** sqliteOsDelete(pPager->zFilename);
+ ** }
+ */
+ CLR_PAGER(pPager);
+ if( pPager->zFilename!=(char*)&pPager[1] ){
+ assert( 0 ); /* Cannot happen */
+ sqliteFree(pPager->zFilename);
+ sqliteFree(pPager->zJournal);
+ sqliteFree(pPager->zDirectory);
+ }
+ sqliteFree(pPager);
+ return SQLITE_OK;
+}
+
+/*
+** Return the page number for the given page data.
+*/
+Pgno sqlitepager_pagenumber(void *pData){
+ PgHdr *p = DATA_TO_PGHDR(pData);
+ return p->pgno;
+}
+
+/*
+** Increment the reference count for a page. If the page is
+** currently on the freelist (the reference count is zero) then
+** remove it from the freelist.
+*/
+#define page_ref(P) ((P)->nRef==0?_page_ref(P):(void)(P)->nRef++)
+static void _page_ref(PgHdr *pPg){
+ if( pPg->nRef==0 ){
+ /* The page is currently on the freelist. Remove it. */
+ if( pPg==pPg->pPager->pFirstSynced ){
+ PgHdr *p = pPg->pNextFree;
+ while( p && p->needSync ){ p = p->pNextFree; }
+ pPg->pPager->pFirstSynced = p;
+ }
+ if( pPg->pPrevFree ){
+ pPg->pPrevFree->pNextFree = pPg->pNextFree;
+ }else{
+ pPg->pPager->pFirst = pPg->pNextFree;
+ }
+ if( pPg->pNextFree ){
+ pPg->pNextFree->pPrevFree = pPg->pPrevFree;
+ }else{
+ pPg->pPager->pLast = pPg->pPrevFree;
+ }
+ pPg->pPager->nRef++;
+ }
+ pPg->nRef++;
+ REFINFO(pPg);
+}
+
+/*
+** Increment the reference count for a page. The input pointer is
+** a reference to the page data.
+*/
+int sqlitepager_ref(void *pData){
+ PgHdr *pPg = DATA_TO_PGHDR(pData);
+ page_ref(pPg);
+ return SQLITE_OK;
+}
+
+/*
+** Sync the journal. In other words, make sure all the pages that have
+** been written to the journal have actually reached the surface of the
+** disk. It is not safe to modify the original database file until after
+** the journal has been synced. If the original database is modified before
+** the journal is synced and a power failure occurs, the unsynced journal
+** data would be lost and we would be unable to completely rollback the
+** database changes. Database corruption would occur.
+**
+** This routine also updates the nRec field in the header of the journal.
+** (See comments on the pager_playback() routine for additional information.)
+** If the sync mode is FULL, two syncs will occur. First the whole journal
+** is synced, then the nRec field is updated, then a second sync occurs.
+**
+** For temporary databases, we do not care if we are able to rollback
+** after a power failure, so no sync occurs.
+**
+** This routine clears the needSync field of every page currently held in
+** memory.
+*/
+static int syncJournal(Pager *pPager){
+ PgHdr *pPg;
+ int rc = SQLITE_OK;
+
+ /* Sync the journal before modifying the main database
+ ** (assuming there is a journal and it needs to be synced.)
+ */
+ if( pPager->needSync ){
+ if( !pPager->tempFile ){
+ assert( pPager->journalOpen );
+ /* assert( !pPager->noSync ); // noSync might be set if synchronous
+ ** was turned off after the transaction was started. Ticket #615 */
+#ifndef NDEBUG
+ {
+ /* Make sure the pPager->nRec counter we are keeping agrees
+ ** with the nRec computed from the size of the journal file.
+ */
+ off_t hdrSz, pgSz, jSz;
+ hdrSz = JOURNAL_HDR_SZ(journal_format);
+ pgSz = JOURNAL_PG_SZ(journal_format);
+ rc = sqliteOsFileSize(&pPager->jfd, &jSz);
+ if( rc!=0 ) return rc;
+ assert( pPager->nRec*pgSz+hdrSz==jSz );
+ }
+#endif
+ if( journal_format>=3 ){
+ /* Write the nRec value into the journal file header */
+ off_t szJ;
+ if( pPager->fullSync ){
+ TRACE1("SYNC\n");
+ rc = sqliteOsSync(&pPager->jfd);
+ if( rc!=0 ) return rc;
+ }
+ sqliteOsSeek(&pPager->jfd, sizeof(aJournalMagic1));
+ rc = write32bits(&pPager->jfd, pPager->nRec);
+ if( rc ) return rc;
+ szJ = JOURNAL_HDR_SZ(journal_format) +
+ pPager->nRec*JOURNAL_PG_SZ(journal_format);
+ sqliteOsSeek(&pPager->jfd, szJ);
+ }
+ TRACE1("SYNC\n");
+ rc = sqliteOsSync(&pPager->jfd);
+ if( rc!=0 ) return rc;
+ pPager->journalStarted = 1;
+ }
+ pPager->needSync = 0;
+
+ /* Erase the needSync flag from every page.
+ */
+ for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){
+ pPg->needSync = 0;
+ }
+ pPager->pFirstSynced = pPager->pFirst;
+ }
+
+#ifndef NDEBUG
+ /* If the Pager.needSync flag is clear then the PgHdr.needSync
+ ** flag must also be clear for all pages. Verify that this
+ ** invariant is true.
+ */
+ else{
+ for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){
+ assert( pPg->needSync==0 );
+ }
+ assert( pPager->pFirstSynced==pPager->pFirst );
+ }
+#endif
+
+ return rc;
+}
+
+/*
+** Given a list of pages (connected by the PgHdr.pDirty pointer) write
+** every one of those pages out to the database file and mark them all
+** as clean.
+*/
+static int pager_write_pagelist(PgHdr *pList){
+ Pager *pPager;
+ int rc;
+
+ if( pList==0 ) return SQLITE_OK;
+ pPager = pList->pPager;
+ while( pList ){
+ assert( pList->dirty );
+ sqliteOsSeek(&pPager->fd, (pList->pgno-1)*(off_t)SQLITE_PAGE_SIZE);
+ CODEC(pPager, PGHDR_TO_DATA(pList), pList->pgno, 6);
+ TRACE2("STORE %d\n", pList->pgno);
+ rc = sqliteOsWrite(&pPager->fd, PGHDR_TO_DATA(pList), SQLITE_PAGE_SIZE);
+ CODEC(pPager, PGHDR_TO_DATA(pList), pList->pgno, 0);
+ if( rc ) return rc;
+ pList->dirty = 0;
+ pList = pList->pDirty;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Collect every dirty page into a dirty list and
+** return a pointer to the head of that list. All pages are
+** collected even if they are still in use.
+*/
+static PgHdr *pager_get_all_dirty_pages(Pager *pPager){
+ PgHdr *p, *pList;
+ pList = 0;
+ for(p=pPager->pAll; p; p=p->pNextAll){
+ if( p->dirty ){
+ p->pDirty = pList;
+ pList = p;
+ }
+ }
+ return pList;
+}
+
+/*
+** Acquire a page.
+**
+** A read lock on the disk file is obtained when the first page is acquired.
+** This read lock is dropped when the last page is released.
+**
+** A _get works for any page number greater than 0. If the database
+** file is smaller than the requested page, then no actual disk
+** read occurs and the memory image of the page is initialized to
+** all zeros. The extra data appended to a page is always initialized
+** to zeros the first time a page is loaded into memory.
+**
+** The acquisition might fail for several reasons. In all cases,
+** an appropriate error code is returned and *ppPage is set to NULL.
+**
+** See also sqlitepager_lookup(). Both this routine and _lookup() attempt
+** to find a page in the in-memory cache first. If the page is not already
+** in memory, this routine goes to disk to read it in whereas _lookup()
+** just returns 0. This routine acquires a read-lock the first time it
+** has to go to disk, and could also playback an old journal if necessary.
+** Since _lookup() never goes to disk, it never has to deal with locks
+** or journal files.
+*/
+int sqlitepager_get(Pager *pPager, Pgno pgno, void **ppPage){
+ PgHdr *pPg;
+ int rc;
+
+ /* Make sure we have not hit any critical errors.
+ */
+ assert( pPager!=0 );
+ assert( pgno!=0 );
+ *ppPage = 0;
+ if( pPager->errMask & ~(PAGER_ERR_FULL) ){
+ return pager_errcode(pPager);
+ }
+
+ /* If this is the first page accessed, then get a read lock
+ ** on the database file.
+ */
+ if( pPager->nRef==0 ){
+ rc = sqliteOsReadLock(&pPager->fd);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ pPager->state = SQLITE_READLOCK;
+
+ /* If a journal file exists, try to play it back.
+ */
+ if( pPager->useJournal && sqliteOsFileExists(pPager->zJournal) ){
+ int rc;
+
+ /* Get a write lock on the database
+ */
+ rc = sqliteOsWriteLock(&pPager->fd);
+ if( rc!=SQLITE_OK ){
+ if( sqliteOsUnlock(&pPager->fd)!=SQLITE_OK ){
+ /* This should never happen! */
+ rc = SQLITE_INTERNAL;
+ }
+ return rc;
+ }
+ pPager->state = SQLITE_WRITELOCK;
+
+ /* Open the journal for reading only. Return SQLITE_BUSY if
+ ** we are unable to open the journal file.
+ **
+ ** The journal file does not need to be locked itself. The
+ ** journal file is never open unless the main database file holds
+ ** a write lock, so there is never any chance of two or more
+ ** processes opening the journal at the same time.
+ */
+ rc = sqliteOsOpenReadOnly(pPager->zJournal, &pPager->jfd);
+ if( rc!=SQLITE_OK ){
+ rc = sqliteOsUnlock(&pPager->fd);
+ assert( rc==SQLITE_OK );
+ return SQLITE_BUSY;
+ }
+ pPager->journalOpen = 1;
+ pPager->journalStarted = 0;
+
+ /* Playback and delete the journal. Drop the database write
+ ** lock and reacquire the read lock.
+ */
+ rc = pager_playback(pPager, 0);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ }
+ pPg = 0;
+ }else{
+ /* Search for page in cache */
+ pPg = pager_lookup(pPager, pgno);
+ }
+ if( pPg==0 ){
+ /* The requested page is not in the page cache. */
+ int h;
+ pPager->nMiss++;
+ if( pPager->nPage<pPager->mxPage || pPager->pFirst==0 ){
+ /* Create a new page */
+ pPg = sqliteMallocRaw( sizeof(*pPg) + SQLITE_PAGE_SIZE
+ + sizeof(u32) + pPager->nExtra );
+ if( pPg==0 ){
+ pager_unwritelock(pPager);
+ pPager->errMask |= PAGER_ERR_MEM;
+ return SQLITE_NOMEM;
+ }
+ memset(pPg, 0, sizeof(*pPg));
+ pPg->pPager = pPager;
+ pPg->pNextAll = pPager->pAll;
+ if( pPager->pAll ){
+ pPager->pAll->pPrevAll = pPg;
+ }
+ pPg->pPrevAll = 0;
+ pPager->pAll = pPg;
+ pPager->nPage++;
+ }else{
+ /* Find a page to recycle. Try to locate a page that does not
+ ** require us to do an fsync() on the journal.
+ */
+ pPg = pPager->pFirstSynced;
+
+ /* If we could not find a page that does not require an fsync()
+ ** on the journal file then fsync the journal file. This is a
+ ** very slow operation, so we work hard to avoid it. But sometimes
+ ** it can't be helped.
+ */
+ if( pPg==0 ){
+ int rc = syncJournal(pPager);
+ if( rc!=0 ){
+ sqlitepager_rollback(pPager);
+ return SQLITE_IOERR;
+ }
+ pPg = pPager->pFirst;
+ }
+ assert( pPg->nRef==0 );
+
+ /* Write the page to the database file if it is dirty.
+ */
+ if( pPg->dirty ){
+ assert( pPg->needSync==0 );
+ pPg->pDirty = 0;
+ rc = pager_write_pagelist( pPg );
+ if( rc!=SQLITE_OK ){
+ sqlitepager_rollback(pPager);
+ return SQLITE_IOERR;
+ }
+ }
+ assert( pPg->dirty==0 );
+
+ /* If the page we are recycling is marked as alwaysRollback, then
+ ** set the global alwaysRollback flag, thus disabling the
+ ** sqlite_dont_rollback() optimization for the rest of this transaction.
+ ** It is necessary to do this because the page marked alwaysRollback
+ ** might be reloaded at a later time but at that point we won't remember
+    ** that it was marked alwaysRollback. This means that all pages must
+ ** be marked as alwaysRollback from here on out.
+ */
+ if( pPg->alwaysRollback ){
+ pPager->alwaysRollback = 1;
+ }
+
+ /* Unlink the old page from the free list and the hash table
+ */
+ if( pPg==pPager->pFirstSynced ){
+ PgHdr *p = pPg->pNextFree;
+ while( p && p->needSync ){ p = p->pNextFree; }
+ pPager->pFirstSynced = p;
+ }
+ if( pPg->pPrevFree ){
+ pPg->pPrevFree->pNextFree = pPg->pNextFree;
+ }else{
+ assert( pPager->pFirst==pPg );
+ pPager->pFirst = pPg->pNextFree;
+ }
+ if( pPg->pNextFree ){
+ pPg->pNextFree->pPrevFree = pPg->pPrevFree;
+ }else{
+ assert( pPager->pLast==pPg );
+ pPager->pLast = pPg->pPrevFree;
+ }
+ pPg->pNextFree = pPg->pPrevFree = 0;
+ if( pPg->pNextHash ){
+ pPg->pNextHash->pPrevHash = pPg->pPrevHash;
+ }
+ if( pPg->pPrevHash ){
+ pPg->pPrevHash->pNextHash = pPg->pNextHash;
+ }else{
+ h = pager_hash(pPg->pgno);
+ assert( pPager->aHash[h]==pPg );
+ pPager->aHash[h] = pPg->pNextHash;
+ }
+ pPg->pNextHash = pPg->pPrevHash = 0;
+ pPager->nOvfl++;
+ }
+ pPg->pgno = pgno;
+ if( pPager->aInJournal && (int)pgno<=pPager->origDbSize ){
+ sqliteCheckMemory(pPager->aInJournal, pgno/8);
+ assert( pPager->journalOpen );
+ pPg->inJournal = (pPager->aInJournal[pgno/8] & (1<<(pgno&7)))!=0;
+ pPg->needSync = 0;
+ }else{
+ pPg->inJournal = 0;
+ pPg->needSync = 0;
+ }
+ if( pPager->aInCkpt && (int)pgno<=pPager->ckptSize
+ && (pPager->aInCkpt[pgno/8] & (1<<(pgno&7)))!=0 ){
+ page_add_to_ckpt_list(pPg);
+ }else{
+ page_remove_from_ckpt_list(pPg);
+ }
+ pPg->dirty = 0;
+ pPg->nRef = 1;
+ REFINFO(pPg);
+ pPager->nRef++;
+ h = pager_hash(pgno);
+ pPg->pNextHash = pPager->aHash[h];
+ pPager->aHash[h] = pPg;
+ if( pPg->pNextHash ){
+ assert( pPg->pNextHash->pPrevHash==0 );
+ pPg->pNextHash->pPrevHash = pPg;
+ }
+ if( pPager->nExtra>0 ){
+ memset(PGHDR_TO_EXTRA(pPg), 0, pPager->nExtra);
+ }
+ if( pPager->dbSize<0 ) sqlitepager_pagecount(pPager);
+ if( pPager->errMask!=0 ){
+ sqlitepager_unref(PGHDR_TO_DATA(pPg));
+ rc = pager_errcode(pPager);
+ return rc;
+ }
+ if( pPager->dbSize<(int)pgno ){
+ memset(PGHDR_TO_DATA(pPg), 0, SQLITE_PAGE_SIZE);
+ }else{
+ int rc;
+ sqliteOsSeek(&pPager->fd, (pgno-1)*(off_t)SQLITE_PAGE_SIZE);
+ rc = sqliteOsRead(&pPager->fd, PGHDR_TO_DATA(pPg), SQLITE_PAGE_SIZE);
+ TRACE2("FETCH %d\n", pPg->pgno);
+ CODEC(pPager, PGHDR_TO_DATA(pPg), pPg->pgno, 3);
+ if( rc!=SQLITE_OK ){
+ off_t fileSize;
+ if( sqliteOsFileSize(&pPager->fd,&fileSize)!=SQLITE_OK
+ || fileSize>=pgno*SQLITE_PAGE_SIZE ){
+ sqlitepager_unref(PGHDR_TO_DATA(pPg));
+ return rc;
+ }else{
+ memset(PGHDR_TO_DATA(pPg), 0, SQLITE_PAGE_SIZE);
+ }
+ }
+ }
+ }else{
+ /* The requested page is in the page cache. */
+ pPager->nHit++;
+ page_ref(pPg);
+ }
+ *ppPage = PGHDR_TO_DATA(pPg);
+ return SQLITE_OK;
+}
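+
+/*
+** A minimal usage sketch (not part of the original sources): open a pager,
+** acquire page 1, then release everything.  The filename, cache size, and
+** nExtra values below are illustrative only.
+**
+**     Pager *pPager;
+**     void *pPage;
+**     if( sqlitepager_open(&pPager, "test.db", 100, 0, 1)==SQLITE_OK ){
+**       if( sqlitepager_get(pPager, 1, &pPage)==SQLITE_OK ){
+**         ... read up to SQLITE_PAGE_SIZE bytes at pPage ...
+**         sqlitepager_unref(pPage);
+**       }
+**       sqlitepager_close(pPager);
+**     }
+*/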
+
+/*
+** Acquire a page if it is already in the in-memory cache. Do
+** not read the page from disk. Return a pointer to the page,
+** or 0 if the page is not in cache.
+**
+** See also sqlitepager_get(). The difference between this routine
+** and sqlitepager_get() is that _get() will go to the disk and read
+** in the page if the page is not already in cache. This routine
+** returns NULL if the page is not in cache or if a disk I/O error
+** has ever happened.
+*/
+void *sqlitepager_lookup(Pager *pPager, Pgno pgno){
+ PgHdr *pPg;
+
+ assert( pPager!=0 );
+ assert( pgno!=0 );
+ if( pPager->errMask & ~(PAGER_ERR_FULL) ){
+ return 0;
+ }
+ /* if( pPager->nRef==0 ){
+ ** return 0;
+ ** }
+ */
+ pPg = pager_lookup(pPager, pgno);
+ if( pPg==0 ) return 0;
+ page_ref(pPg);
+ return PGHDR_TO_DATA(pPg);
+}
+
+/*
+** Release a page.
+**
+** If the number of references to the page drops to zero, then the
+** page is added to the LRU list. When all references to all pages
+** are released, a rollback occurs and the lock on the database is
+** removed.
+*/
+int sqlitepager_unref(void *pData){
+ PgHdr *pPg;
+
+ /* Decrement the reference count for this page
+ */
+ pPg = DATA_TO_PGHDR(pData);
+ assert( pPg->nRef>0 );
+ pPg->nRef--;
+ REFINFO(pPg);
+
+ /* When the number of references to a page reach 0, call the
+ ** destructor and add the page to the freelist.
+ */
+ if( pPg->nRef==0 ){
+ Pager *pPager;
+ pPager = pPg->pPager;
+ pPg->pNextFree = 0;
+ pPg->pPrevFree = pPager->pLast;
+ pPager->pLast = pPg;
+ if( pPg->pPrevFree ){
+ pPg->pPrevFree->pNextFree = pPg;
+ }else{
+ pPager->pFirst = pPg;
+ }
+ if( pPg->needSync==0 && pPager->pFirstSynced==0 ){
+ pPager->pFirstSynced = pPg;
+ }
+ if( pPager->xDestructor ){
+ pPager->xDestructor(pData);
+ }
+
+ /* When all pages reach the freelist, drop the read lock from
+ ** the database file.
+ */
+ pPager->nRef--;
+ assert( pPager->nRef>=0 );
+ if( pPager->nRef==0 ){
+ pager_reset(pPager);
+ }
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Create a journal file for pPager. There should already be a write
+** lock on the database file when this routine is called.
+**
+** Return SQLITE_OK if everything goes well. Return an error code and release the
+** write lock if anything goes wrong.
+*/
+static int pager_open_journal(Pager *pPager){
+ int rc;
+ assert( pPager->state==SQLITE_WRITELOCK );
+ assert( pPager->journalOpen==0 );
+ assert( pPager->useJournal );
+ sqlitepager_pagecount(pPager);
+ pPager->aInJournal = sqliteMalloc( pPager->dbSize/8 + 1 );
+ if( pPager->aInJournal==0 ){
+ sqliteOsReadLock(&pPager->fd);
+ pPager->state = SQLITE_READLOCK;
+ return SQLITE_NOMEM;
+ }
+ rc = sqliteOsOpenExclusive(pPager->zJournal, &pPager->jfd,pPager->tempFile);
+ if( rc!=SQLITE_OK ){
+ sqliteFree(pPager->aInJournal);
+ pPager->aInJournal = 0;
+ sqliteOsReadLock(&pPager->fd);
+ pPager->state = SQLITE_READLOCK;
+ return SQLITE_CANTOPEN;
+ }
+ sqliteOsOpenDirectory(pPager->zDirectory, &pPager->jfd);
+ pPager->journalOpen = 1;
+ pPager->journalStarted = 0;
+ pPager->needSync = 0;
+ pPager->alwaysRollback = 0;
+ pPager->nRec = 0;
+ if( pPager->errMask!=0 ){
+ rc = pager_errcode(pPager);
+ return rc;
+ }
+ pPager->origDbSize = pPager->dbSize;
+ if( journal_format==JOURNAL_FORMAT_3 ){
+ rc = sqliteOsWrite(&pPager->jfd, aJournalMagic3, sizeof(aJournalMagic3));
+ if( rc==SQLITE_OK ){
+ rc = write32bits(&pPager->jfd, pPager->noSync ? 0xffffffff : 0);
+ }
+ if( rc==SQLITE_OK ){
+ sqliteRandomness(sizeof(pPager->cksumInit), &pPager->cksumInit);
+ rc = write32bits(&pPager->jfd, pPager->cksumInit);
+ }
+ }else if( journal_format==JOURNAL_FORMAT_2 ){
+ rc = sqliteOsWrite(&pPager->jfd, aJournalMagic2, sizeof(aJournalMagic2));
+ }else{
+ assert( journal_format==JOURNAL_FORMAT_1 );
+ rc = sqliteOsWrite(&pPager->jfd, aJournalMagic1, sizeof(aJournalMagic1));
+ }
+ if( rc==SQLITE_OK ){
+ rc = write32bits(&pPager->jfd, pPager->dbSize);
+ }
+ if( pPager->ckptAutoopen && rc==SQLITE_OK ){
+ rc = sqlitepager_ckpt_begin(pPager);
+ }
+ if( rc!=SQLITE_OK ){
+ rc = pager_unwritelock(pPager);
+ if( rc==SQLITE_OK ){
+ rc = SQLITE_FULL;
+ }
+ }
+ return rc;
+}
+
+/*
+** Acquire a write-lock on the database. The lock is removed when
+** any of the following happen:
+**
+** * sqlitepager_commit() is called.
+** * sqlitepager_rollback() is called.
+** * sqlitepager_close() is called.
+**    * sqlitepager_unref() is called on every outstanding page.
+**
+** The parameter to this routine is a pointer to any open page of the
+** database file. Nothing changes about the page - it is used merely
+** to acquire a pointer to the Pager structure and as proof that there
+** is already a read-lock on the database.
+**
+** A journal file is opened if this is not a temporary file. For
+** temporary files, the opening of the journal file is deferred until
+** there is an actual need to write to the journal.
+**
+** If the database is already write-locked, this routine is a no-op.
+*/
+int sqlitepager_begin(void *pData){
+ PgHdr *pPg = DATA_TO_PGHDR(pData);
+ Pager *pPager = pPg->pPager;
+ int rc = SQLITE_OK;
+ assert( pPg->nRef>0 );
+ assert( pPager->state!=SQLITE_UNLOCK );
+ if( pPager->state==SQLITE_READLOCK ){
+ assert( pPager->aInJournal==0 );
+ rc = sqliteOsWriteLock(&pPager->fd);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ pPager->state = SQLITE_WRITELOCK;
+ pPager->dirtyFile = 0;
+ TRACE1("TRANSACTION\n");
+ if( pPager->useJournal && !pPager->tempFile ){
+ rc = pager_open_journal(pPager);
+ }
+ }
+ return rc;
+}
+
+/*
+** Mark a data page as writeable. The page is written into the journal
+** if it is not there already. This routine must be called before making
+** changes to a page.
+**
+** The first time this routine is called, the pager creates a new
+** journal and acquires a write lock on the database. If the write
+** lock could not be acquired, this routine returns SQLITE_BUSY. The
+** calling routine must check for that return value and be careful not to
+** change any page data until this routine returns SQLITE_OK.
+**
+** If the journal file could not be written because the disk is full,
+** then this routine returns SQLITE_FULL and does an immediate rollback.
+** All subsequent write attempts also return SQLITE_FULL until there
+** is a call to sqlitepager_commit() or sqlitepager_rollback() to
+** reset.
+*/
+int sqlitepager_write(void *pData){
+ PgHdr *pPg = DATA_TO_PGHDR(pData);
+ Pager *pPager = pPg->pPager;
+ int rc = SQLITE_OK;
+
+ /* Check for errors
+ */
+ if( pPager->errMask ){
+ return pager_errcode(pPager);
+ }
+ if( pPager->readOnly ){
+ return SQLITE_PERM;
+ }
+
+ /* Mark the page as dirty. If the page has already been written
+ ** to the journal then we can return right away.
+ */
+ pPg->dirty = 1;
+ if( pPg->inJournal && (pPg->inCkpt || pPager->ckptInUse==0) ){
+ pPager->dirtyFile = 1;
+ return SQLITE_OK;
+ }
+
+ /* If we get this far, it means that the page needs to be
+  ** written to the transaction journal or the checkpoint journal
+ ** or both.
+ **
+ ** First check to see that the transaction journal exists and
+ ** create it if it does not.
+ */
+ assert( pPager->state!=SQLITE_UNLOCK );
+ rc = sqlitepager_begin(pData);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ assert( pPager->state==SQLITE_WRITELOCK );
+ if( !pPager->journalOpen && pPager->useJournal ){
+ rc = pager_open_journal(pPager);
+ if( rc!=SQLITE_OK ) return rc;
+ }
+ assert( pPager->journalOpen || !pPager->useJournal );
+ pPager->dirtyFile = 1;
+
+ /* The transaction journal now exists and we have a write lock on the
+ ** main database file. Write the current page to the transaction
+ ** journal if it is not there already.
+ */
+ if( !pPg->inJournal && pPager->useJournal ){
+ if( (int)pPg->pgno <= pPager->origDbSize ){
+ int szPg;
+ u32 saved;
+ if( journal_format>=JOURNAL_FORMAT_3 ){
+ u32 cksum = pager_cksum(pPager, pPg->pgno, pData);
+ saved = *(u32*)PGHDR_TO_EXTRA(pPg);
+ store32bits(cksum, pPg, SQLITE_PAGE_SIZE);
+ szPg = SQLITE_PAGE_SIZE+8;
+ }else{
+ szPg = SQLITE_PAGE_SIZE+4;
+ }
+ store32bits(pPg->pgno, pPg, -4);
+ CODEC(pPager, pData, pPg->pgno, 7);
+ rc = sqliteOsWrite(&pPager->jfd, &((char*)pData)[-4], szPg);
+ TRACE3("JOURNAL %d %d\n", pPg->pgno, pPg->needSync);
+ CODEC(pPager, pData, pPg->pgno, 0);
+ if( journal_format>=JOURNAL_FORMAT_3 ){
+ *(u32*)PGHDR_TO_EXTRA(pPg) = saved;
+ }
+ if( rc!=SQLITE_OK ){
+ sqlitepager_rollback(pPager);
+ pPager->errMask |= PAGER_ERR_FULL;
+ return rc;
+ }
+ pPager->nRec++;
+ assert( pPager->aInJournal!=0 );
+ pPager->aInJournal[pPg->pgno/8] |= 1<<(pPg->pgno&7);
+ pPg->needSync = !pPager->noSync;
+ pPg->inJournal = 1;
+ if( pPager->ckptInUse ){
+ pPager->aInCkpt[pPg->pgno/8] |= 1<<(pPg->pgno&7);
+ page_add_to_ckpt_list(pPg);
+ }
+ }else{
+ pPg->needSync = !pPager->journalStarted && !pPager->noSync;
+ TRACE3("APPEND %d %d\n", pPg->pgno, pPg->needSync);
+ }
+ if( pPg->needSync ){
+ pPager->needSync = 1;
+ }
+ }
+
+ /* If the checkpoint journal is open and the page is not in it,
+ ** then write the current page to the checkpoint journal. Note that
+  ** the checkpoint journal always uses the simpler format 2 that lacks
+ ** checksums. The header is also omitted from the checkpoint journal.
+ */
+ if( pPager->ckptInUse && !pPg->inCkpt && (int)pPg->pgno<=pPager->ckptSize ){
+ assert( pPg->inJournal || (int)pPg->pgno>pPager->origDbSize );
+ store32bits(pPg->pgno, pPg, -4);
+ CODEC(pPager, pData, pPg->pgno, 7);
+ rc = sqliteOsWrite(&pPager->cpfd, &((char*)pData)[-4], SQLITE_PAGE_SIZE+4);
+ TRACE2("CKPT-JOURNAL %d\n", pPg->pgno);
+ CODEC(pPager, pData, pPg->pgno, 0);
+ if( rc!=SQLITE_OK ){
+ sqlitepager_rollback(pPager);
+ pPager->errMask |= PAGER_ERR_FULL;
+ return rc;
+ }
+ pPager->ckptNRec++;
+ assert( pPager->aInCkpt!=0 );
+ pPager->aInCkpt[pPg->pgno/8] |= 1<<(pPg->pgno&7);
+ page_add_to_ckpt_list(pPg);
+ }
+
+ /* Update the database size and return.
+ */
+ if( pPager->dbSize<(int)pPg->pgno ){
+ pPager->dbSize = pPg->pgno;
+ }
+ return rc;
+}
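+
+/*
+** A minimal usage sketch (not part of the original sources): modify one
+** page inside a transaction.  sqlitepager_write() implicitly begins the
+** transaction and journals the original page content, so it must be
+** called before the page data is changed.  Error handling is abbreviated.
+**
+**     void *pPage;
+**     if( sqlitepager_get(pPager, 2, &pPage)==SQLITE_OK ){
+**       if( sqlitepager_write(pPage)==SQLITE_OK ){
+**         memset(pPage, 0, SQLITE_PAGE_SIZE);
+**         if( sqlitepager_commit(pPager)!=SQLITE_OK ){
+**           sqlitepager_rollback(pPager);
+**         }
+**       }
+**       sqlitepager_unref(pPage);
+**     }
+*/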
+
+/*
+** Return TRUE if the page given in the argument was previously passed
+** to sqlitepager_write(). In other words, return TRUE if it is ok
+** to change the content of the page.
+*/
+int sqlitepager_iswriteable(void *pData){
+ PgHdr *pPg = DATA_TO_PGHDR(pData);
+ return pPg->dirty;
+}
+
+/*
+** Replace the content of a single page with the information in the third
+** argument.
+*/
+int sqlitepager_overwrite(Pager *pPager, Pgno pgno, void *pData){
+ void *pPage;
+ int rc;
+
+ rc = sqlitepager_get(pPager, pgno, &pPage);
+ if( rc==SQLITE_OK ){
+ rc = sqlitepager_write(pPage);
+ if( rc==SQLITE_OK ){
+ memcpy(pPage, pData, SQLITE_PAGE_SIZE);
+ }
+ sqlitepager_unref(pPage);
+ }
+ return rc;
+}
+
+/*
+** A call to this routine tells the pager that it is not necessary to
+** write the information on page "pgno" back to the disk, even though
+** that page might be marked as dirty.
+**
+** The overlying software layer calls this routine when all of the data
+** on the given page is unused. The pager marks the page as clean so
+** that it does not get written to disk.
+**
+** Tests show that this optimization, together with the
+** sqlitepager_dont_rollback() below, more than doubles the speed
+** of large INSERT operations and quadruples the speed of large DELETEs.
+**
+** When this routine is called, set the alwaysRollback flag to true.
+** Subsequent calls to sqlitepager_dont_rollback() for the same page
+** will thereafter be ignored. This is necessary to avoid a problem
+** where a page with data is added to the freelist during one part of
+** a transaction then removed from the freelist during a later part
+** of the same transaction and reused for some other purpose. When it
+** is first added to the freelist, this routine is called. When reused,
+** the dont_rollback() routine is called. But because the page contains
+** critical data, we still need to be sure it gets rolled back in spite
+** of the dont_rollback() call.
+*/
+void sqlitepager_dont_write(Pager *pPager, Pgno pgno){
+ PgHdr *pPg;
+
+  pPg = pager_lookup(pPager, pgno);
+  if( pPg==0 ) return;
+  pPg->alwaysRollback = 1;
+  if( pPg->dirty ){
+ if( pPager->dbSize==(int)pPg->pgno && pPager->origDbSize<pPager->dbSize ){
+      /* If this page is the last page in the file and the file has grown
+ ** during the current transaction, then do NOT mark the page as clean.
+ ** When the database file grows, we must make sure that the last page
+ ** gets written at least once so that the disk file will be the correct
+ ** size. If you do not write this page and the size of the file
+ ** on the disk ends up being too small, that can lead to database
+ ** corruption during the next transaction.
+ */
+ }else{
+ TRACE2("DONT_WRITE %d\n", pgno);
+ pPg->dirty = 0;
+ }
+ }
+}
+
+/*
+** A call to this routine tells the pager that if a rollback occurs,
+** it is not necessary to restore the data on the given page. This
+** means that the pager does not have to record the given page in the
+** rollback journal.
+*/
+void sqlitepager_dont_rollback(void *pData){
+ PgHdr *pPg = DATA_TO_PGHDR(pData);
+ Pager *pPager = pPg->pPager;
+
+ if( pPager->state!=SQLITE_WRITELOCK || pPager->journalOpen==0 ) return;
+ if( pPg->alwaysRollback || pPager->alwaysRollback ) return;
+ if( !pPg->inJournal && (int)pPg->pgno <= pPager->origDbSize ){
+ assert( pPager->aInJournal!=0 );
+ pPager->aInJournal[pPg->pgno/8] |= 1<<(pPg->pgno&7);
+ pPg->inJournal = 1;
+ if( pPager->ckptInUse ){
+ pPager->aInCkpt[pPg->pgno/8] |= 1<<(pPg->pgno&7);
+ page_add_to_ckpt_list(pPg);
+ }
+ TRACE2("DONT_ROLLBACK %d\n", pPg->pgno);
+ }
+ if( pPager->ckptInUse && !pPg->inCkpt && (int)pPg->pgno<=pPager->ckptSize ){
+ assert( pPg->inJournal || (int)pPg->pgno>pPager->origDbSize );
+ assert( pPager->aInCkpt!=0 );
+ pPager->aInCkpt[pPg->pgno/8] |= 1<<(pPg->pgno&7);
+ page_add_to_ckpt_list(pPg);
+ }
+}
+
+/*
+** Commit all changes to the database and release the write lock.
+**
+** If the commit fails for any reason, a rollback attempt is made
+** and an error code is returned. If the commit worked, SQLITE_OK
+** is returned.
+*/
+int sqlitepager_commit(Pager *pPager){
+ int rc;
+ PgHdr *pPg;
+
+ if( pPager->errMask==PAGER_ERR_FULL ){
+ rc = sqlitepager_rollback(pPager);
+ if( rc==SQLITE_OK ){
+ rc = SQLITE_FULL;
+ }
+ return rc;
+ }
+ if( pPager->errMask!=0 ){
+ rc = pager_errcode(pPager);
+ return rc;
+ }
+ if( pPager->state!=SQLITE_WRITELOCK ){
+ return SQLITE_ERROR;
+ }
+ TRACE1("COMMIT\n");
+ if( pPager->dirtyFile==0 ){
+ /* Exit early (without doing the time-consuming sqliteOsSync() calls)
+ ** if there have been no changes to the database file. */
+ assert( pPager->needSync==0 );
+ rc = pager_unwritelock(pPager);
+ pPager->dbSize = -1;
+ return rc;
+ }
+ assert( pPager->journalOpen );
+ rc = syncJournal(pPager);
+ if( rc!=SQLITE_OK ){
+ goto commit_abort;
+ }
+ pPg = pager_get_all_dirty_pages(pPager);
+ if( pPg ){
+ rc = pager_write_pagelist(pPg);
+ if( rc || (!pPager->noSync && sqliteOsSync(&pPager->fd)!=SQLITE_OK) ){
+ goto commit_abort;
+ }
+ }
+ rc = pager_unwritelock(pPager);
+ pPager->dbSize = -1;
+ return rc;
+
+ /* Jump here if anything goes wrong during the commit process.
+ */
+commit_abort:
+ rc = sqlitepager_rollback(pPager);
+ if( rc==SQLITE_OK ){
+ rc = SQLITE_FULL;
+ }
+ return rc;
+}
+
+/*
+** Rollback all changes. The database falls back to read-only mode.
+** All in-memory cache pages revert to their original data contents.
+** The journal is deleted.
+**
+** This routine cannot fail unless some other process is not following
+** the correct locking protocol (SQLITE_PROTOCOL) or unless some other
+** process is writing trash into the journal file (SQLITE_CORRUPT) or
+** unless a prior malloc() failed (SQLITE_NOMEM). Appropriate error
+** codes are returned for all these occasions. Otherwise,
+** SQLITE_OK is returned.
+*/
+int sqlitepager_rollback(Pager *pPager){
+ int rc;
+ TRACE1("ROLLBACK\n");
+ if( !pPager->dirtyFile || !pPager->journalOpen ){
+ rc = pager_unwritelock(pPager);
+ pPager->dbSize = -1;
+ return rc;
+ }
+
+ if( pPager->errMask!=0 && pPager->errMask!=PAGER_ERR_FULL ){
+ if( pPager->state>=SQLITE_WRITELOCK ){
+ pager_playback(pPager, 1);
+ }
+ return pager_errcode(pPager);
+ }
+ if( pPager->state!=SQLITE_WRITELOCK ){
+ return SQLITE_OK;
+ }
+ rc = pager_playback(pPager, 1);
+ if( rc!=SQLITE_OK ){
+ rc = SQLITE_CORRUPT;
+ pPager->errMask |= PAGER_ERR_CORRUPT;
+ }
+ pPager->dbSize = -1;
+ return rc;
+}
+
+/*
+** Return TRUE if the database file is opened read-only. Return FALSE
+** if the database is (in theory) writable.
+*/
+int sqlitepager_isreadonly(Pager *pPager){
+ return pPager->readOnly;
+}
+
+/*
+** This routine is used for testing and analysis only.
+*/
+int *sqlitepager_stats(Pager *pPager){
+ static int a[9];
+ a[0] = pPager->nRef;
+ a[1] = pPager->nPage;
+ a[2] = pPager->mxPage;
+ a[3] = pPager->dbSize;
+ a[4] = pPager->state;
+ a[5] = pPager->errMask;
+ a[6] = pPager->nHit;
+ a[7] = pPager->nMiss;
+ a[8] = pPager->nOvfl;
+ return a;
+}
+
+/*
+** Set the checkpoint.
+**
+** This routine should be called with the transaction journal already
+** open. A new checkpoint journal is created that can be used to rollback
+** changes of a single SQL command within a larger transaction.
+*/
+int sqlitepager_ckpt_begin(Pager *pPager){
+ int rc;
+ char zTemp[SQLITE_TEMPNAME_SIZE];
+ if( !pPager->journalOpen ){
+ pPager->ckptAutoopen = 1;
+ return SQLITE_OK;
+ }
+ assert( pPager->journalOpen );
+ assert( !pPager->ckptInUse );
+ pPager->aInCkpt = sqliteMalloc( pPager->dbSize/8 + 1 );
+ if( pPager->aInCkpt==0 ){
+ sqliteOsReadLock(&pPager->fd);
+ return SQLITE_NOMEM;
+ }
+#ifndef NDEBUG
+ rc = sqliteOsFileSize(&pPager->jfd, &pPager->ckptJSize);
+ if( rc ) goto ckpt_begin_failed;
+ assert( pPager->ckptJSize ==
+ pPager->nRec*JOURNAL_PG_SZ(journal_format)+JOURNAL_HDR_SZ(journal_format) );
+#endif
+ pPager->ckptJSize = pPager->nRec*JOURNAL_PG_SZ(journal_format)
+ + JOURNAL_HDR_SZ(journal_format);
+ pPager->ckptSize = pPager->dbSize;
+ if( !pPager->ckptOpen ){
+ rc = sqlitepager_opentemp(zTemp, &pPager->cpfd);
+ if( rc ) goto ckpt_begin_failed;
+ pPager->ckptOpen = 1;
+ pPager->ckptNRec = 0;
+ }
+ pPager->ckptInUse = 1;
+ return SQLITE_OK;
+
+ckpt_begin_failed:
+ if( pPager->aInCkpt ){
+ sqliteFree(pPager->aInCkpt);
+ pPager->aInCkpt = 0;
+ }
+ return rc;
+}
+
+/*
+** Commit a checkpoint.
+*/
+int sqlitepager_ckpt_commit(Pager *pPager){
+ if( pPager->ckptInUse ){
+ PgHdr *pPg, *pNext;
+ sqliteOsSeek(&pPager->cpfd, 0);
+ /* sqliteOsTruncate(&pPager->cpfd, 0); */
+ pPager->ckptNRec = 0;
+ pPager->ckptInUse = 0;
+ sqliteFree( pPager->aInCkpt );
+ pPager->aInCkpt = 0;
+ for(pPg=pPager->pCkpt; pPg; pPg=pNext){
+ pNext = pPg->pNextCkpt;
+ assert( pPg->inCkpt );
+ pPg->inCkpt = 0;
+ pPg->pPrevCkpt = pPg->pNextCkpt = 0;
+ }
+ pPager->pCkpt = 0;
+ }
+ pPager->ckptAutoopen = 0;
+ return SQLITE_OK;
+}
+
+/*
+** Rollback a checkpoint.
+*/
+int sqlitepager_ckpt_rollback(Pager *pPager){
+ int rc;
+ if( pPager->ckptInUse ){
+ rc = pager_ckpt_playback(pPager);
+ sqlitepager_ckpt_commit(pPager);
+ }else{
+ rc = SQLITE_OK;
+ }
+ pPager->ckptAutoopen = 0;
+ return rc;
+}
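+
+/*
+** A minimal usage sketch (not part of the original sources): use a
+** checkpoint to undo the effects of a single statement within a larger
+** transaction.  "stmt_failed" is a hypothetical flag used only for
+** illustration.
+**
+**     sqlitepager_ckpt_begin(pPager);
+**     ... sqlitepager_write() each page touched by the statement ...
+**     if( stmt_failed ){
+**       sqlitepager_ckpt_rollback(pPager);
+**     }else{
+**       sqlitepager_ckpt_commit(pPager);
+**     }
+*/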
+
+/*
+** Return the full pathname of the database file.
+*/
+const char *sqlitepager_filename(Pager *pPager){
+ return pPager->zFilename;
+}
+
+/*
+** Set the codec for this pager
+*/
+void sqlitepager_set_codec(
+ Pager *pPager,
+ void (*xCodec)(void*,void*,Pgno,int),
+ void *pCodecArg
+){
+ pPager->xCodec = xCodec;
+ pPager->pCodecArg = pCodecArg;
+}
+
+#ifdef SQLITE_TEST
+/*
+** Print a listing of all referenced pages and their ref count.
+*/
+void sqlitepager_refdump(Pager *pPager){
+ PgHdr *pPg;
+ for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){
+ if( pPg->nRef<=0 ) continue;
+ printf("PAGE %3d addr=0x%08x nRef=%d\n",
+ pPg->pgno, (int)PGHDR_TO_DATA(pPg), pPg->nRef);
+ }
+}
+#endif
diff --git a/usr/src/cmd/svc/configd/sqlite/src/pager.h b/usr/src/cmd/svc/configd/sqlite/src/pager.h
new file mode 100644
index 0000000000..7c22b950c1
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/pager.h
@@ -0,0 +1,110 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This header file defines the interface to the sqlite page cache
+** subsystem. The page cache subsystem reads and writes a file a page
+** at a time and provides a journal for rollback.
+**
+** @(#) $Id: pager.h,v 1.26 2004/02/11 02:18:07 drh Exp $
+*/
+
+/*
+** The size of one page
+**
+** You can change this value to any other (reasonable) value you want.
+** It need not be a power of two, though the interface to the disk
+** will likely be faster if it is.
+**
+** Experiments show that a page size of 1024 gives the best speed
+** for common usages. The speed differences for different sizes
+** such as 512, 2048, 4096, and so forth, are minimal. Note, however,
+** that changing the page size results in a completely incompatible
+** file format.
+*/
+#ifndef SQLITE_PAGE_SIZE
+#define SQLITE_PAGE_SIZE 1024
+#endif
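+
+/*
+** For example (this note is not part of the original sources), the page
+** size could be overridden at compile time with something like
+**
+**     cc -DSQLITE_PAGE_SIZE=4096 -c pager.c
+**
+** but databases created that way cannot be read by a build that uses the
+** default 1024-byte pages.
+*/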
+
+/*
+** Number of extra bytes of data allocated at the end of each page and
+** stored on disk but not used by the higher level btree layer. Changing
+** this value results in a completely incompatible file format.
+*/
+#ifndef SQLITE_PAGE_RESERVE
+#define SQLITE_PAGE_RESERVE 0
+#endif
+
+/*
+** The total number of usable bytes stored on disk for each page.
+** The usable bytes come at the beginning of the page and the reserve
+** bytes come at the end.
+*/
+#define SQLITE_USABLE_SIZE (SQLITE_PAGE_SIZE-SQLITE_PAGE_RESERVE)
+
+/*
+** Maximum number of pages in one database. (This is a limitation
+** imposed by the 4GB file size limit.)
+*/
+#define SQLITE_MAX_PAGE 1073741823
+
+/*
+** The type used to represent a page number. The first page in a file
+** is called page 1. 0 is used to represent "not a page".
+*/
+typedef unsigned int Pgno;
+
+/*
+** Each open file is managed by a separate instance of the "Pager" structure.
+*/
+typedef struct Pager Pager;
+
+/*
+** See source code comments for a detailed description of the following
+** routines:
+*/
+int sqlitepager_open(Pager **ppPager, const char *zFilename,
+ int nPage, int nExtra, int useJournal);
+void sqlitepager_set_destructor(Pager*, void(*)(void*));
+void sqlitepager_set_cachesize(Pager*, int);
+int sqlitepager_close(Pager *pPager);
+int sqlitepager_get(Pager *pPager, Pgno pgno, void **ppPage);
+void *sqlitepager_lookup(Pager *pPager, Pgno pgno);
+int sqlitepager_ref(void*);
+int sqlitepager_unref(void*);
+Pgno sqlitepager_pagenumber(void*);
+int sqlitepager_write(void*);
+int sqlitepager_iswriteable(void*);
+int sqlitepager_overwrite(Pager *pPager, Pgno pgno, void*);
+int sqlitepager_pagecount(Pager*);
+int sqlitepager_truncate(Pager*,Pgno);
+int sqlitepager_begin(void*);
+int sqlitepager_commit(Pager*);
+int sqlitepager_rollback(Pager*);
+int sqlitepager_isreadonly(Pager*);
+int sqlitepager_ckpt_begin(Pager*);
+int sqlitepager_ckpt_commit(Pager*);
+int sqlitepager_ckpt_rollback(Pager*);
+void sqlitepager_dont_rollback(void*);
+void sqlitepager_dont_write(Pager*, Pgno);
+int *sqlitepager_stats(Pager*);
+void sqlitepager_set_safety_level(Pager*,int);
+const char *sqlitepager_filename(Pager*);
+int sqlitepager_rename(Pager*, const char *zNewName);
+void sqlitepager_set_codec(Pager*,void(*)(void*,void*,Pgno,int),void*);
+
+#ifdef SQLITE_TEST
+void sqlitepager_refdump(Pager*);
+int pager_refinfo_enable;
+int journal_format;
+#endif
diff --git a/usr/src/cmd/svc/configd/sqlite/src/parse.y b/usr/src/cmd/svc/configd/sqlite/src/parse.y
new file mode 100644
index 0000000000..c3122e7280
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/parse.y
@@ -0,0 +1,900 @@
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains SQLite's grammar for SQL. Process this file
+** using the lemon parser generator to generate C code that runs
+** the parser. Lemon will also generate a header file containing
+** numeric codes for all of the tokens.
+**
+** @(#) $Id: parse.y,v 1.112 2004/02/22 18:40:57 drh Exp $
+*/
+%token_prefix TK_
+%token_type {Token}
+%default_type {Token}
+%extra_argument {Parse *pParse}
+%syntax_error {
+ if( pParse->zErrMsg==0 ){
+ if( TOKEN.z[0] ){
+ sqliteErrorMsg(pParse, "near \"%T\": syntax error", &TOKEN);
+ }else{
+ sqliteErrorMsg(pParse, "incomplete SQL statement");
+ }
+ }
+}
+%name sqliteParser
+%include {
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "sqliteInt.h"
+#include "parse.h"
+
+/*
+** An instance of this structure holds information about the
+** LIMIT clause of a SELECT statement.
+*/
+struct LimitVal {
+ int limit; /* The LIMIT value. -1 if there is no limit */
+ int offset; /* The OFFSET. 0 if there is none */
+};
+
+/*
+** An instance of the following structure describes the event of a
+** TRIGGER. "a" is the event type, one of TK_UPDATE, TK_INSERT,
+** TK_DELETE, or TK_INSTEAD. If the event is of the form
+**
+** UPDATE ON (a,b,c)
+**
+** Then the "b" IdList records the list "a,b,c".
+*/
+struct TrigEvent { int a; IdList * b; };
+
+} // end %include
+
+// These are extra tokens used by the lexer but never seen by the
+// parser. We put them in a rule so that the parser generator will
+// add them to the parse.h output file.
+//
+%nonassoc END_OF_FILE ILLEGAL SPACE UNCLOSED_STRING COMMENT FUNCTION
+ COLUMN AGG_FUNCTION.
+
+// Input is a single SQL command
+input ::= cmdlist.
+cmdlist ::= cmdlist ecmd.
+cmdlist ::= ecmd.
+ecmd ::= explain cmdx SEMI.
+ecmd ::= SEMI.
+cmdx ::= cmd. { sqliteExec(pParse); }
+explain ::= EXPLAIN. { sqliteBeginParse(pParse, 1); }
+explain ::= . { sqliteBeginParse(pParse, 0); }
+
+///////////////////// Begin and end transactions. ////////////////////////////
+//
+
+cmd ::= BEGIN trans_opt onconf(R). {sqliteBeginTransaction(pParse,R);}
+trans_opt ::= .
+trans_opt ::= TRANSACTION.
+trans_opt ::= TRANSACTION nm.
+cmd ::= COMMIT trans_opt. {sqliteCommitTransaction(pParse);}
+cmd ::= END trans_opt. {sqliteCommitTransaction(pParse);}
+cmd ::= ROLLBACK trans_opt. {sqliteRollbackTransaction(pParse);}
+
+///////////////////// The CREATE TABLE statement ////////////////////////////
+//
+cmd ::= create_table create_table_args.
+create_table ::= CREATE(X) temp(T) TABLE nm(Y). {
+ sqliteStartTable(pParse,&X,&Y,T,0);
+}
+%type temp {int}
+temp(A) ::= TEMP. {A = 1;}
+temp(A) ::= . {A = 0;}
+create_table_args ::= LP columnlist conslist_opt RP(X). {
+ sqliteEndTable(pParse,&X,0);
+}
+create_table_args ::= AS select(S). {
+ sqliteEndTable(pParse,0,S);
+ sqliteSelectDelete(S);
+}
+columnlist ::= columnlist COMMA column.
+columnlist ::= column.
+
+// About the only information used for a column is the name of the
+// column. The type is always just "text". But the code will accept
+// an elaborate typename. Perhaps someday we'll do something with it.
+//
+column ::= columnid type carglist.
+columnid ::= nm(X). {sqliteAddColumn(pParse,&X);}
+
+// An IDENTIFIER can be a generic identifier, or one of several
+// keywords. Any non-standard keyword can also be an identifier.
+//
+%type id {Token}
+id(A) ::= ID(X). {A = X;}
+
+// The following directive causes tokens ABORT, AFTER, ASC, etc. to
+// fall back to ID if they will not parse as their original value.
+// This obviates the need for the "id" nonterminal.
+//
+%fallback ID
+ ABORT AFTER ASC ATTACH BEFORE BEGIN CASCADE CLUSTER CONFLICT
+ COPY DATABASE DEFERRED DELIMITERS DESC DETACH EACH END EXPLAIN FAIL FOR
+ GLOB IGNORE IMMEDIATE INITIALLY INSTEAD LIKE MATCH KEY
+ OF OFFSET PRAGMA RAISE REPLACE RESTRICT ROW STATEMENT
+ TEMP TRIGGER VACUUM VIEW.
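+
+// For example (not part of the original grammar), because ABORT is in the
+// fallback list above, a statement such as
+//
+//     CREATE TABLE abort(x);
+//
+// parses with "abort" treated as an ordinary identifier.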
+
+// Define operator precedence early so that this is the first occurrence
+// of the operator tokens in the grammar. Keeping the operators together
+// causes them to be assigned integer values that are close together,
+// which keeps parser tables smaller.
+//
+%left OR.
+%left AND.
+%right NOT.
+%left EQ NE ISNULL NOTNULL IS LIKE GLOB BETWEEN IN.
+%left GT GE LT LE.
+%left BITAND BITOR LSHIFT RSHIFT.
+%left PLUS MINUS.
+%left STAR SLASH REM.
+%left CONCAT.
+%right UMINUS UPLUS BITNOT.
+
+// And "ids" is an identifier-or-string.
+//
+%type ids {Token}
+ids(A) ::= ID(X). {A = X;}
+ids(A) ::= STRING(X). {A = X;}
+
+// The name of a column or table can be any of the following:
+//
+%type nm {Token}
+nm(A) ::= ID(X). {A = X;}
+nm(A) ::= STRING(X). {A = X;}
+nm(A) ::= JOIN_KW(X). {A = X;}
+
+type ::= .
+type ::= typename(X). {sqliteAddColumnType(pParse,&X,&X);}
+type ::= typename(X) LP signed RP(Y). {sqliteAddColumnType(pParse,&X,&Y);}
+type ::= typename(X) LP signed COMMA signed RP(Y).
+ {sqliteAddColumnType(pParse,&X,&Y);}
+%type typename {Token}
+typename(A) ::= ids(X). {A = X;}
+typename(A) ::= typename(X) ids. {A = X;}
+%type signed {int}
+signed(A) ::= INTEGER(X). { A = atoi(X.z); }
+signed(A) ::= PLUS INTEGER(X). { A = atoi(X.z); }
+signed(A) ::= MINUS INTEGER(X). { A = -atoi(X.z); }
+carglist ::= carglist carg.
+carglist ::= .
+carg ::= CONSTRAINT nm ccons.
+carg ::= ccons.
+carg ::= DEFAULT STRING(X). {sqliteAddDefaultValue(pParse,&X,0);}
+carg ::= DEFAULT ID(X). {sqliteAddDefaultValue(pParse,&X,0);}
+carg ::= DEFAULT INTEGER(X). {sqliteAddDefaultValue(pParse,&X,0);}
+carg ::= DEFAULT PLUS INTEGER(X). {sqliteAddDefaultValue(pParse,&X,0);}
+carg ::= DEFAULT MINUS INTEGER(X). {sqliteAddDefaultValue(pParse,&X,1);}
+carg ::= DEFAULT FLOAT(X). {sqliteAddDefaultValue(pParse,&X,0);}
+carg ::= DEFAULT PLUS FLOAT(X). {sqliteAddDefaultValue(pParse,&X,0);}
+carg ::= DEFAULT MINUS FLOAT(X). {sqliteAddDefaultValue(pParse,&X,1);}
+carg ::= DEFAULT NULL.
+
+// In addition to the type name, we also care about the primary key and
+// UNIQUE constraints.
+//
+ccons ::= NULL onconf.
+ccons ::= NOT NULL onconf(R). {sqliteAddNotNull(pParse, R);}
+ccons ::= PRIMARY KEY sortorder onconf(R). {sqliteAddPrimaryKey(pParse,0,R);}
+ccons ::= UNIQUE onconf(R). {sqliteCreateIndex(pParse,0,0,0,R,0,0);}
+ccons ::= CHECK LP expr RP onconf.
+ccons ::= REFERENCES nm(T) idxlist_opt(TA) refargs(R).
+ {sqliteCreateForeignKey(pParse,0,&T,TA,R);}
+ccons ::= defer_subclause(D). {sqliteDeferForeignKey(pParse,D);}
+ccons ::= COLLATE id(C). {
+ sqliteAddCollateType(pParse, sqliteCollateType(C.z, C.n));
+}
+
+// The next group of rules parses the arguments to a REFERENCES clause
+// that determine if the referential integrity checking is deferred or
+// immediate and which determine what action to take if a ref-integ
+// check fails.
+//
+%type refargs {int}
+refargs(A) ::= . { A = OE_Restrict * 0x010101; }
+refargs(A) ::= refargs(X) refarg(Y). { A = (X & Y.mask) | Y.value; }
+%type refarg {struct {int value; int mask;}}
+refarg(A) ::= MATCH nm. { A.value = 0; A.mask = 0x000000; }
+refarg(A) ::= ON DELETE refact(X). { A.value = X; A.mask = 0x0000ff; }
+refarg(A) ::= ON UPDATE refact(X). { A.value = X<<8; A.mask = 0x00ff00; }
+refarg(A) ::= ON INSERT refact(X). { A.value = X<<16; A.mask = 0xff0000; }
+%type refact {int}
+refact(A) ::= SET NULL. { A = OE_SetNull; }
+refact(A) ::= SET DEFAULT. { A = OE_SetDflt; }
+refact(A) ::= CASCADE. { A = OE_Cascade; }
+refact(A) ::= RESTRICT. { A = OE_Restrict; }
+%type defer_subclause {int}
+defer_subclause(A) ::= NOT DEFERRABLE init_deferred_pred_opt(X). {A = X;}
+defer_subclause(A) ::= DEFERRABLE init_deferred_pred_opt(X). {A = X;}
+%type init_deferred_pred_opt {int}
+init_deferred_pred_opt(A) ::= . {A = 0;}
+init_deferred_pred_opt(A) ::= INITIALLY DEFERRED. {A = 1;}
+init_deferred_pred_opt(A) ::= INITIALLY IMMEDIATE. {A = 0;}
+
+// For the time being, the only constraint we care about is the primary
+// key and UNIQUE. Both create indices.
+//
+conslist_opt ::= .
+conslist_opt ::= COMMA conslist.
+conslist ::= conslist COMMA tcons.
+conslist ::= conslist tcons.
+conslist ::= tcons.
+tcons ::= CONSTRAINT nm.
+tcons ::= PRIMARY KEY LP idxlist(X) RP onconf(R).
+ {sqliteAddPrimaryKey(pParse,X,R);}
+tcons ::= UNIQUE LP idxlist(X) RP onconf(R).
+ {sqliteCreateIndex(pParse,0,0,X,R,0,0);}
+tcons ::= CHECK expr onconf.
+tcons ::= FOREIGN KEY LP idxlist(FA) RP
+ REFERENCES nm(T) idxlist_opt(TA) refargs(R) defer_subclause_opt(D). {
+ sqliteCreateForeignKey(pParse, FA, &T, TA, R);
+ sqliteDeferForeignKey(pParse, D);
+}
+%type defer_subclause_opt {int}
+defer_subclause_opt(A) ::= . {A = 0;}
+defer_subclause_opt(A) ::= defer_subclause(X). {A = X;}
+
+// The following is a non-standard extension that allows us to declare the
+// default behavior when there is a constraint conflict.
+//
+%type onconf {int}
+%type orconf {int}
+%type resolvetype {int}
+onconf(A) ::= . { A = OE_Default; }
+onconf(A) ::= ON CONFLICT resolvetype(X). { A = X; }
+orconf(A) ::= . { A = OE_Default; }
+orconf(A) ::= OR resolvetype(X). { A = X; }
+resolvetype(A) ::= ROLLBACK. { A = OE_Rollback; }
+resolvetype(A) ::= ABORT. { A = OE_Abort; }
+resolvetype(A) ::= FAIL. { A = OE_Fail; }
+resolvetype(A) ::= IGNORE. { A = OE_Ignore; }
+resolvetype(A) ::= REPLACE. { A = OE_Replace; }
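+
+// For illustration (not part of the original grammar), a column constraint
+// such as
+//
+//     CREATE TABLE t(a INTEGER PRIMARY KEY ON CONFLICT REPLACE);
+//
+// is parsed by the onconf/resolvetype rules above, yielding OE_Replace.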
+
+////////////////////////// The DROP TABLE /////////////////////////////////////
+//
+cmd ::= DROP TABLE nm(X). {sqliteDropTable(pParse,&X,0);}
+
+///////////////////// The CREATE VIEW statement /////////////////////////////
+//
+cmd ::= CREATE(X) temp(T) VIEW nm(Y) AS select(S). {
+ sqliteCreateView(pParse, &X, &Y, S, T);
+}
+cmd ::= DROP VIEW nm(X). {
+ sqliteDropTable(pParse, &X, 1);
+}
+
+//////////////////////// The SELECT statement /////////////////////////////////
+//
+cmd ::= select(X). {
+ sqliteSelect(pParse, X, SRT_Callback, 0, 0, 0, 0);
+ sqliteSelectDelete(X);
+}
+
+%type select {Select*}
+%destructor select {sqliteSelectDelete($$);}
+%type oneselect {Select*}
+%destructor oneselect {sqliteSelectDelete($$);}
+
+select(A) ::= oneselect(X). {A = X;}
+select(A) ::= select(X) multiselect_op(Y) oneselect(Z). {
+ if( Z ){
+ Z->op = Y;
+ Z->pPrior = X;
+ }
+ A = Z;
+}
+%type multiselect_op {int}
+multiselect_op(A) ::= UNION. {A = TK_UNION;}
+multiselect_op(A) ::= UNION ALL. {A = TK_ALL;}
+multiselect_op(A) ::= INTERSECT. {A = TK_INTERSECT;}
+multiselect_op(A) ::= EXCEPT. {A = TK_EXCEPT;}
+oneselect(A) ::= SELECT distinct(D) selcollist(W) from(X) where_opt(Y)
+ groupby_opt(P) having_opt(Q) orderby_opt(Z) limit_opt(L). {
+ A = sqliteSelectNew(W,X,Y,P,Q,Z,D,L.limit,L.offset);
+}
+
+// The "distinct" nonterminal is true (1) if the DISTINCT keyword is
+// present and false (0) if it is not.
+//
+%type distinct {int}
+distinct(A) ::= DISTINCT. {A = 1;}
+distinct(A) ::= ALL. {A = 0;}
+distinct(A) ::= . {A = 0;}
+
+// selcollist is a list of expressions that are to become the return
+// values of the SELECT statement. The "*" in statements like
+// "SELECT * FROM ..." is encoded as a special expression with an
+// opcode of TK_ALL.
+//
+%type selcollist {ExprList*}
+%destructor selcollist {sqliteExprListDelete($$);}
+%type sclp {ExprList*}
+%destructor sclp {sqliteExprListDelete($$);}
+sclp(A) ::= selcollist(X) COMMA. {A = X;}
+sclp(A) ::= . {A = 0;}
+selcollist(A) ::= sclp(P) expr(X) as(Y). {
+ A = sqliteExprListAppend(P,X,Y.n?&Y:0);
+}
+selcollist(A) ::= sclp(P) STAR. {
+ A = sqliteExprListAppend(P, sqliteExpr(TK_ALL, 0, 0, 0), 0);
+}
+selcollist(A) ::= sclp(P) nm(X) DOT STAR. {
+ Expr *pRight = sqliteExpr(TK_ALL, 0, 0, 0);
+ Expr *pLeft = sqliteExpr(TK_ID, 0, 0, &X);
+ A = sqliteExprListAppend(P, sqliteExpr(TK_DOT, pLeft, pRight, 0), 0);
+}
+
+// An optional "AS <id>" phrase that can follow one of the expressions that
+// define the result set, or one of the tables in the FROM clause.
+//
+%type as {Token}
+as(X) ::= AS nm(Y). { X = Y; }
+as(X) ::= ids(Y). { X = Y; }
+as(X) ::= . { X.n = 0; }
+
+
+%type seltablist {SrcList*}
+%destructor seltablist {sqliteSrcListDelete($$);}
+%type stl_prefix {SrcList*}
+%destructor stl_prefix {sqliteSrcListDelete($$);}
+%type from {SrcList*}
+%destructor from {sqliteSrcListDelete($$);}
+
+// A complete FROM clause.
+//
+from(A) ::= . {A = sqliteMalloc(sizeof(*A));}
+from(A) ::= FROM seltablist(X). {A = X;}
+
+// "seltablist" is a "Select Table List" - the content of the FROM clause
+// in a SELECT statement. "stl_prefix" is a prefix of this list.
+//
+stl_prefix(A) ::= seltablist(X) joinop(Y). {
+ A = X;
+ if( A && A->nSrc>0 ) A->a[A->nSrc-1].jointype = Y;
+}
+stl_prefix(A) ::= . {A = 0;}
+seltablist(A) ::= stl_prefix(X) nm(Y) dbnm(D) as(Z) on_opt(N) using_opt(U). {
+ A = sqliteSrcListAppend(X,&Y,&D);
+ if( Z.n ) sqliteSrcListAddAlias(A,&Z);
+ if( N ){
+ if( A && A->nSrc>1 ){ A->a[A->nSrc-2].pOn = N; }
+ else { sqliteExprDelete(N); }
+ }
+ if( U ){
+ if( A && A->nSrc>1 ){ A->a[A->nSrc-2].pUsing = U; }
+ else { sqliteIdListDelete(U); }
+ }
+}
+seltablist(A) ::= stl_prefix(X) LP seltablist_paren(S) RP
+ as(Z) on_opt(N) using_opt(U). {
+ A = sqliteSrcListAppend(X,0,0);
+ A->a[A->nSrc-1].pSelect = S;
+ if( Z.n ) sqliteSrcListAddAlias(A,&Z);
+ if( N ){
+ if( A && A->nSrc>1 ){ A->a[A->nSrc-2].pOn = N; }
+ else { sqliteExprDelete(N); }
+ }
+ if( U ){
+ if( A && A->nSrc>1 ){ A->a[A->nSrc-2].pUsing = U; }
+ else { sqliteIdListDelete(U); }
+ }
+}
+
+// A seltablist_paren nonterminal represents anything in a FROM that
+// is contained inside parentheses. This can be either a subquery or
+// a grouping of tables and subqueries.
+//
+%type seltablist_paren {Select*}
+%destructor seltablist_paren {sqliteSelectDelete($$);}
+seltablist_paren(A) ::= select(S). {A = S;}
+seltablist_paren(A) ::= seltablist(F). {
+ A = sqliteSelectNew(0,F,0,0,0,0,0,-1,0);
+}
+
+%type dbnm {Token}
+dbnm(A) ::= . {A.z=0; A.n=0;}
+dbnm(A) ::= DOT nm(X). {A = X;}
+
+%type joinop {int}
+%type joinop2 {int}
+joinop(X) ::= COMMA. { X = JT_INNER; }
+joinop(X) ::= JOIN. { X = JT_INNER; }
+joinop(X) ::= JOIN_KW(A) JOIN. { X = sqliteJoinType(pParse,&A,0,0); }
+joinop(X) ::= JOIN_KW(A) nm(B) JOIN. { X = sqliteJoinType(pParse,&A,&B,0); }
+joinop(X) ::= JOIN_KW(A) nm(B) nm(C) JOIN.
+ { X = sqliteJoinType(pParse,&A,&B,&C); }
+
+%type on_opt {Expr*}
+%destructor on_opt {sqliteExprDelete($$);}
+on_opt(N) ::= ON expr(E). {N = E;}
+on_opt(N) ::= . {N = 0;}
+
+%type using_opt {IdList*}
+%destructor using_opt {sqliteIdListDelete($$);}
+using_opt(U) ::= USING LP idxlist(L) RP. {U = L;}
+using_opt(U) ::= . {U = 0;}
+
+
+%type orderby_opt {ExprList*}
+%destructor orderby_opt {sqliteExprListDelete($$);}
+%type sortlist {ExprList*}
+%destructor sortlist {sqliteExprListDelete($$);}
+%type sortitem {Expr*}
+%destructor sortitem {sqliteExprDelete($$);}
+
+orderby_opt(A) ::= . {A = 0;}
+orderby_opt(A) ::= ORDER BY sortlist(X). {A = X;}
+sortlist(A) ::= sortlist(X) COMMA sortitem(Y) collate(C) sortorder(Z). {
+ A = sqliteExprListAppend(X,Y,0);
+ if( A ) A->a[A->nExpr-1].sortOrder = C+Z;
+}
+sortlist(A) ::= sortitem(Y) collate(C) sortorder(Z). {
+ A = sqliteExprListAppend(0,Y,0);
+ if( A ) A->a[0].sortOrder = C+Z;
+}
+sortitem(A) ::= expr(X). {A = X;}
+
+%type sortorder {int}
+%type collate {int}
+
+sortorder(A) ::= ASC. {A = SQLITE_SO_ASC;}
+sortorder(A) ::= DESC. {A = SQLITE_SO_DESC;}
+sortorder(A) ::= . {A = SQLITE_SO_ASC;}
+collate(C) ::= . {C = SQLITE_SO_UNK;}
+collate(C) ::= COLLATE id(X). {C = sqliteCollateType(X.z, X.n);}
+
+%type groupby_opt {ExprList*}
+%destructor groupby_opt {sqliteExprListDelete($$);}
+groupby_opt(A) ::= . {A = 0;}
+groupby_opt(A) ::= GROUP BY exprlist(X). {A = X;}
+
+%type having_opt {Expr*}
+%destructor having_opt {sqliteExprDelete($$);}
+having_opt(A) ::= . {A = 0;}
+having_opt(A) ::= HAVING expr(X). {A = X;}
+
+%type limit_opt {struct LimitVal}
+limit_opt(A) ::= . {A.limit = -1; A.offset = 0;}
+limit_opt(A) ::= LIMIT signed(X). {A.limit = X; A.offset = 0;}
+limit_opt(A) ::= LIMIT signed(X) OFFSET signed(Y).
+ {A.limit = X; A.offset = Y;}
+limit_opt(A) ::= LIMIT signed(X) COMMA signed(Y).
+ {A.limit = Y; A.offset = X;}
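+
+// For illustration (not part of the original grammar), the two spellings
+//
+//     SELECT * FROM t LIMIT 10 OFFSET 5;
+//     SELECT * FROM t LIMIT 5,10;
+//
+// are equivalent: in the comma form the first number is the offset and the
+// second is the limit.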
+
+/////////////////////////// The DELETE statement /////////////////////////////
+//
+cmd ::= DELETE FROM nm(X) dbnm(D) where_opt(Y). {
+ sqliteDeleteFrom(pParse, sqliteSrcListAppend(0,&X,&D), Y);
+}
+
+%type where_opt {Expr*}
+%destructor where_opt {sqliteExprDelete($$);}
+
+where_opt(A) ::= . {A = 0;}
+where_opt(A) ::= WHERE expr(X). {A = X;}
+
+%type setlist {ExprList*}
+%destructor setlist {sqliteExprListDelete($$);}
+
+////////////////////////// The UPDATE command ////////////////////////////////
+//
+cmd ::= UPDATE orconf(R) nm(X) dbnm(D) SET setlist(Y) where_opt(Z).
+ {sqliteUpdate(pParse,sqliteSrcListAppend(0,&X,&D),Y,Z,R);}
+
+setlist(A) ::= setlist(Z) COMMA nm(X) EQ expr(Y).
+ {A = sqliteExprListAppend(Z,Y,&X);}
+setlist(A) ::= nm(X) EQ expr(Y). {A = sqliteExprListAppend(0,Y,&X);}
+
+////////////////////////// The INSERT command /////////////////////////////////
+//
+cmd ::= insert_cmd(R) INTO nm(X) dbnm(D) inscollist_opt(F)
+ VALUES LP itemlist(Y) RP.
+ {sqliteInsert(pParse, sqliteSrcListAppend(0,&X,&D), Y, 0, F, R);}
+cmd ::= insert_cmd(R) INTO nm(X) dbnm(D) inscollist_opt(F) select(S).
+ {sqliteInsert(pParse, sqliteSrcListAppend(0,&X,&D), 0, S, F, R);}
+
+%type insert_cmd {int}
+insert_cmd(A) ::= INSERT orconf(R). {A = R;}
+insert_cmd(A) ::= REPLACE. {A = OE_Replace;}
+
+
+%type itemlist {ExprList*}
+%destructor itemlist {sqliteExprListDelete($$);}
+
+itemlist(A) ::= itemlist(X) COMMA expr(Y). {A = sqliteExprListAppend(X,Y,0);}
+itemlist(A) ::= expr(X). {A = sqliteExprListAppend(0,X,0);}
+
+%type inscollist_opt {IdList*}
+%destructor inscollist_opt {sqliteIdListDelete($$);}
+%type inscollist {IdList*}
+%destructor inscollist {sqliteIdListDelete($$);}
+
+inscollist_opt(A) ::= . {A = 0;}
+inscollist_opt(A) ::= LP inscollist(X) RP. {A = X;}
+inscollist(A) ::= inscollist(X) COMMA nm(Y). {A = sqliteIdListAppend(X,&Y);}
+inscollist(A) ::= nm(Y). {A = sqliteIdListAppend(0,&Y);}
+
+/////////////////////////// Expression Processing /////////////////////////////
+//
+
+%type expr {Expr*}
+%destructor expr {sqliteExprDelete($$);}
+
+expr(A) ::= LP(B) expr(X) RP(E). {A = X; sqliteExprSpan(A,&B,&E); }
+expr(A) ::= NULL(X). {A = sqliteExpr(TK_NULL, 0, 0, &X);}
+expr(A) ::= ID(X). {A = sqliteExpr(TK_ID, 0, 0, &X);}
+expr(A) ::= JOIN_KW(X). {A = sqliteExpr(TK_ID, 0, 0, &X);}
+expr(A) ::= nm(X) DOT nm(Y). {
+ Expr *temp1 = sqliteExpr(TK_ID, 0, 0, &X);
+ Expr *temp2 = sqliteExpr(TK_ID, 0, 0, &Y);
+ A = sqliteExpr(TK_DOT, temp1, temp2, 0);
+}
+expr(A) ::= nm(X) DOT nm(Y) DOT nm(Z). {
+ Expr *temp1 = sqliteExpr(TK_ID, 0, 0, &X);
+ Expr *temp2 = sqliteExpr(TK_ID, 0, 0, &Y);
+ Expr *temp3 = sqliteExpr(TK_ID, 0, 0, &Z);
+ Expr *temp4 = sqliteExpr(TK_DOT, temp2, temp3, 0);
+ A = sqliteExpr(TK_DOT, temp1, temp4, 0);
+}
+expr(A) ::= INTEGER(X). {A = sqliteExpr(TK_INTEGER, 0, 0, &X);}
+expr(A) ::= FLOAT(X). {A = sqliteExpr(TK_FLOAT, 0, 0, &X);}
+expr(A) ::= STRING(X). {A = sqliteExpr(TK_STRING, 0, 0, &X);}
+expr(A) ::= VARIABLE(X). {
+ A = sqliteExpr(TK_VARIABLE, 0, 0, &X);
+ if( A ) A->iTable = ++pParse->nVar;
+}
+expr(A) ::= ID(X) LP exprlist(Y) RP(E). {
+ A = sqliteExprFunction(Y, &X);
+ sqliteExprSpan(A,&X,&E);
+}
+expr(A) ::= ID(X) LP STAR RP(E). {
+ A = sqliteExprFunction(0, &X);
+ sqliteExprSpan(A,&X,&E);
+}
+expr(A) ::= expr(X) AND expr(Y). {A = sqliteExpr(TK_AND, X, Y, 0);}
+expr(A) ::= expr(X) OR expr(Y). {A = sqliteExpr(TK_OR, X, Y, 0);}
+expr(A) ::= expr(X) LT expr(Y). {A = sqliteExpr(TK_LT, X, Y, 0);}
+expr(A) ::= expr(X) GT expr(Y). {A = sqliteExpr(TK_GT, X, Y, 0);}
+expr(A) ::= expr(X) LE expr(Y). {A = sqliteExpr(TK_LE, X, Y, 0);}
+expr(A) ::= expr(X) GE expr(Y). {A = sqliteExpr(TK_GE, X, Y, 0);}
+expr(A) ::= expr(X) NE expr(Y). {A = sqliteExpr(TK_NE, X, Y, 0);}
+expr(A) ::= expr(X) EQ expr(Y). {A = sqliteExpr(TK_EQ, X, Y, 0);}
+expr(A) ::= expr(X) BITAND expr(Y). {A = sqliteExpr(TK_BITAND, X, Y, 0);}
+expr(A) ::= expr(X) BITOR expr(Y). {A = sqliteExpr(TK_BITOR, X, Y, 0);}
+expr(A) ::= expr(X) LSHIFT expr(Y). {A = sqliteExpr(TK_LSHIFT, X, Y, 0);}
+expr(A) ::= expr(X) RSHIFT expr(Y). {A = sqliteExpr(TK_RSHIFT, X, Y, 0);}
+expr(A) ::= expr(X) likeop(OP) expr(Y). [LIKE] {
+ ExprList *pList = sqliteExprListAppend(0, Y, 0);
+ pList = sqliteExprListAppend(pList, X, 0);
+ A = sqliteExprFunction(pList, 0);
+ if( A ) A->op = OP;
+ sqliteExprSpan(A, &X->span, &Y->span);
+}
+expr(A) ::= expr(X) NOT likeop(OP) expr(Y). [LIKE] {
+ ExprList *pList = sqliteExprListAppend(0, Y, 0);
+ pList = sqliteExprListAppend(pList, X, 0);
+ A = sqliteExprFunction(pList, 0);
+ if( A ) A->op = OP;
+ A = sqliteExpr(TK_NOT, A, 0, 0);
+ sqliteExprSpan(A,&X->span,&Y->span);
+}
+%type likeop {int}
+likeop(A) ::= LIKE. {A = TK_LIKE;}
+likeop(A) ::= GLOB. {A = TK_GLOB;}
+expr(A) ::= expr(X) PLUS expr(Y). {A = sqliteExpr(TK_PLUS, X, Y, 0);}
+expr(A) ::= expr(X) MINUS expr(Y). {A = sqliteExpr(TK_MINUS, X, Y, 0);}
+expr(A) ::= expr(X) STAR expr(Y). {A = sqliteExpr(TK_STAR, X, Y, 0);}
+expr(A) ::= expr(X) SLASH expr(Y). {A = sqliteExpr(TK_SLASH, X, Y, 0);}
+expr(A) ::= expr(X) REM expr(Y). {A = sqliteExpr(TK_REM, X, Y, 0);}
+expr(A) ::= expr(X) CONCAT expr(Y). {A = sqliteExpr(TK_CONCAT, X, Y, 0);}
+expr(A) ::= expr(X) ISNULL(E). {
+ A = sqliteExpr(TK_ISNULL, X, 0, 0);
+ sqliteExprSpan(A,&X->span,&E);
+}
+expr(A) ::= expr(X) IS NULL(E). {
+ A = sqliteExpr(TK_ISNULL, X, 0, 0);
+ sqliteExprSpan(A,&X->span,&E);
+}
+expr(A) ::= expr(X) NOTNULL(E). {
+ A = sqliteExpr(TK_NOTNULL, X, 0, 0);
+ sqliteExprSpan(A,&X->span,&E);
+}
+expr(A) ::= expr(X) NOT NULL(E). {
+ A = sqliteExpr(TK_NOTNULL, X, 0, 0);
+ sqliteExprSpan(A,&X->span,&E);
+}
+expr(A) ::= expr(X) IS NOT NULL(E). {
+ A = sqliteExpr(TK_NOTNULL, X, 0, 0);
+ sqliteExprSpan(A,&X->span,&E);
+}
+expr(A) ::= NOT(B) expr(X). {
+ A = sqliteExpr(TK_NOT, X, 0, 0);
+ sqliteExprSpan(A,&B,&X->span);
+}
+expr(A) ::= BITNOT(B) expr(X). {
+ A = sqliteExpr(TK_BITNOT, X, 0, 0);
+ sqliteExprSpan(A,&B,&X->span);
+}
+expr(A) ::= MINUS(B) expr(X). [UMINUS] {
+ A = sqliteExpr(TK_UMINUS, X, 0, 0);
+ sqliteExprSpan(A,&B,&X->span);
+}
+expr(A) ::= PLUS(B) expr(X). [UPLUS] {
+ A = sqliteExpr(TK_UPLUS, X, 0, 0);
+ sqliteExprSpan(A,&B,&X->span);
+}
+expr(A) ::= LP(B) select(X) RP(E). {
+ A = sqliteExpr(TK_SELECT, 0, 0, 0);
+ if( A ) A->pSelect = X;
+ sqliteExprSpan(A,&B,&E);
+}
+expr(A) ::= expr(W) BETWEEN expr(X) AND expr(Y). {
+ ExprList *pList = sqliteExprListAppend(0, X, 0);
+ pList = sqliteExprListAppend(pList, Y, 0);
+ A = sqliteExpr(TK_BETWEEN, W, 0, 0);
+ if( A ) A->pList = pList;
+ sqliteExprSpan(A,&W->span,&Y->span);
+}
+expr(A) ::= expr(W) NOT BETWEEN expr(X) AND expr(Y). {
+ ExprList *pList = sqliteExprListAppend(0, X, 0);
+ pList = sqliteExprListAppend(pList, Y, 0);
+ A = sqliteExpr(TK_BETWEEN, W, 0, 0);
+ if( A ) A->pList = pList;
+ A = sqliteExpr(TK_NOT, A, 0, 0);
+ sqliteExprSpan(A,&W->span,&Y->span);
+}
+expr(A) ::= expr(X) IN LP exprlist(Y) RP(E). {
+ A = sqliteExpr(TK_IN, X, 0, 0);
+ if( A ) A->pList = Y;
+ sqliteExprSpan(A,&X->span,&E);
+}
+expr(A) ::= expr(X) IN LP select(Y) RP(E). {
+ A = sqliteExpr(TK_IN, X, 0, 0);
+ if( A ) A->pSelect = Y;
+ sqliteExprSpan(A,&X->span,&E);
+}
+expr(A) ::= expr(X) NOT IN LP exprlist(Y) RP(E). {
+ A = sqliteExpr(TK_IN, X, 0, 0);
+ if( A ) A->pList = Y;
+ A = sqliteExpr(TK_NOT, A, 0, 0);
+ sqliteExprSpan(A,&X->span,&E);
+}
+expr(A) ::= expr(X) NOT IN LP select(Y) RP(E). {
+ A = sqliteExpr(TK_IN, X, 0, 0);
+ if( A ) A->pSelect = Y;
+ A = sqliteExpr(TK_NOT, A, 0, 0);
+ sqliteExprSpan(A,&X->span,&E);
+}
+expr(A) ::= expr(X) IN nm(Y) dbnm(D). {
+ SrcList *pSrc = sqliteSrcListAppend(0, &Y, &D);
+ A = sqliteExpr(TK_IN, X, 0, 0);
+ if( A ) A->pSelect = sqliteSelectNew(0,pSrc,0,0,0,0,0,-1,0);
+ sqliteExprSpan(A,&X->span,D.z?&D:&Y);
+}
+expr(A) ::= expr(X) NOT IN nm(Y) dbnm(D). {
+ SrcList *pSrc = sqliteSrcListAppend(0, &Y, &D);
+ A = sqliteExpr(TK_IN, X, 0, 0);
+ if( A ) A->pSelect = sqliteSelectNew(0,pSrc,0,0,0,0,0,-1,0);
+ A = sqliteExpr(TK_NOT, A, 0, 0);
+ sqliteExprSpan(A,&X->span,D.z?&D:&Y);
+}
+
+
+/* CASE expressions */
+expr(A) ::= CASE(C) case_operand(X) case_exprlist(Y) case_else(Z) END(E). {
+ A = sqliteExpr(TK_CASE, X, Z, 0);
+ if( A ) A->pList = Y;
+ sqliteExprSpan(A, &C, &E);
+}
+%type case_exprlist {ExprList*}
+%destructor case_exprlist {sqliteExprListDelete($$);}
+case_exprlist(A) ::= case_exprlist(X) WHEN expr(Y) THEN expr(Z). {
+ A = sqliteExprListAppend(X, Y, 0);
+ A = sqliteExprListAppend(A, Z, 0);
+}
+case_exprlist(A) ::= WHEN expr(Y) THEN expr(Z). {
+ A = sqliteExprListAppend(0, Y, 0);
+ A = sqliteExprListAppend(A, Z, 0);
+}
+%type case_else {Expr*}
+case_else(A) ::= ELSE expr(X). {A = X;}
+case_else(A) ::= . {A = 0;}
+%type case_operand {Expr*}
+case_operand(A) ::= expr(X). {A = X;}
+case_operand(A) ::= . {A = 0;}
+
+%type exprlist {ExprList*}
+%destructor exprlist {sqliteExprListDelete($$);}
+%type expritem {Expr*}
+%destructor expritem {sqliteExprDelete($$);}
+
+exprlist(A) ::= exprlist(X) COMMA expritem(Y).
+ {A = sqliteExprListAppend(X,Y,0);}
+exprlist(A) ::= expritem(X). {A = sqliteExprListAppend(0,X,0);}
+expritem(A) ::= expr(X). {A = X;}
+expritem(A) ::= . {A = 0;}
+
+///////////////////////////// The CREATE INDEX command ///////////////////////
+//
+cmd ::= CREATE(S) uniqueflag(U) INDEX nm(X)
+ ON nm(Y) dbnm(D) LP idxlist(Z) RP(E) onconf(R). {
+ SrcList *pSrc = sqliteSrcListAppend(0, &Y, &D);
+ if( U!=OE_None ) U = R;
+ if( U==OE_Default) U = OE_Abort;
+ sqliteCreateIndex(pParse, &X, pSrc, Z, U, &S, &E);
+}
+
+%type uniqueflag {int}
+uniqueflag(A) ::= UNIQUE. { A = OE_Abort; }
+uniqueflag(A) ::= . { A = OE_None; }
+
+%type idxlist {IdList*}
+%destructor idxlist {sqliteIdListDelete($$);}
+%type idxlist_opt {IdList*}
+%destructor idxlist_opt {sqliteIdListDelete($$);}
+%type idxitem {Token}
+
+idxlist_opt(A) ::= . {A = 0;}
+idxlist_opt(A) ::= LP idxlist(X) RP. {A = X;}
+idxlist(A) ::= idxlist(X) COMMA idxitem(Y). {A = sqliteIdListAppend(X,&Y);}
+idxlist(A) ::= idxitem(Y). {A = sqliteIdListAppend(0,&Y);}
+idxitem(A) ::= nm(X) sortorder. {A = X;}
+
+///////////////////////////// The DROP INDEX command /////////////////////////
+//
+
+cmd ::= DROP INDEX nm(X) dbnm(Y). {
+ sqliteDropIndex(pParse, sqliteSrcListAppend(0,&X,&Y));
+}
+
+
+///////////////////////////// The COPY command ///////////////////////////////
+//
+cmd ::= COPY orconf(R) nm(X) dbnm(D) FROM nm(Y) USING DELIMITERS STRING(Z).
+ {sqliteCopy(pParse,sqliteSrcListAppend(0,&X,&D),&Y,&Z,R);}
+cmd ::= COPY orconf(R) nm(X) dbnm(D) FROM nm(Y).
+ {sqliteCopy(pParse,sqliteSrcListAppend(0,&X,&D),&Y,0,R);}
+
+///////////////////////////// The VACUUM command /////////////////////////////
+//
+cmd ::= VACUUM. {sqliteVacuum(pParse,0);}
+cmd ::= VACUUM nm(X). {sqliteVacuum(pParse,&X);}
+
+///////////////////////////// The PRAGMA command /////////////////////////////
+//
+cmd ::= PRAGMA ids(X) EQ nm(Y). {sqlitePragma(pParse,&X,&Y,0);}
+cmd ::= PRAGMA ids(X) EQ ON(Y). {sqlitePragma(pParse,&X,&Y,0);}
+cmd ::= PRAGMA ids(X) EQ plus_num(Y). {sqlitePragma(pParse,&X,&Y,0);}
+cmd ::= PRAGMA ids(X) EQ minus_num(Y). {sqlitePragma(pParse,&X,&Y,1);}
+cmd ::= PRAGMA ids(X) LP nm(Y) RP. {sqlitePragma(pParse,&X,&Y,0);}
+cmd ::= PRAGMA ids(X). {sqlitePragma(pParse,&X,&X,0);}
+plus_num(A) ::= plus_opt number(X). {A = X;}
+minus_num(A) ::= MINUS number(X). {A = X;}
+number(A) ::= INTEGER(X). {A = X;}
+number(A) ::= FLOAT(X). {A = X;}
+plus_opt ::= PLUS.
+plus_opt ::= .
+
+//////////////////////////// The CREATE TRIGGER command /////////////////////
+
+cmd ::= CREATE(A) trigger_decl BEGIN trigger_cmd_list(S) END(Z). {
+ Token all;
+ all.z = A.z;
+ all.n = (Z.z - A.z) + Z.n;
+ sqliteFinishTrigger(pParse, S, &all);
+}
+
+trigger_decl ::= temp(T) TRIGGER nm(B) trigger_time(C) trigger_event(D)
+ ON nm(E) dbnm(DB) foreach_clause(F) when_clause(G). {
+ SrcList *pTab = sqliteSrcListAppend(0, &E, &DB);
+ sqliteBeginTrigger(pParse, &B, C, D.a, D.b, pTab, F, G, T);
+}
+
+%type trigger_time {int}
+trigger_time(A) ::= BEFORE. { A = TK_BEFORE; }
+trigger_time(A) ::= AFTER. { A = TK_AFTER; }
+trigger_time(A) ::= INSTEAD OF. { A = TK_INSTEAD;}
+trigger_time(A) ::= . { A = TK_BEFORE; }
+
+%type trigger_event {struct TrigEvent}
+%destructor trigger_event {sqliteIdListDelete($$.b);}
+trigger_event(A) ::= DELETE. { A.a = TK_DELETE; A.b = 0; }
+trigger_event(A) ::= INSERT. { A.a = TK_INSERT; A.b = 0; }
+trigger_event(A) ::= UPDATE. { A.a = TK_UPDATE; A.b = 0;}
+trigger_event(A) ::= UPDATE OF inscollist(X). {A.a = TK_UPDATE; A.b = X; }
+
+%type foreach_clause {int}
+foreach_clause(A) ::= . { A = TK_ROW; }
+foreach_clause(A) ::= FOR EACH ROW. { A = TK_ROW; }
+foreach_clause(A) ::= FOR EACH STATEMENT. { A = TK_STATEMENT; }
+
+%type when_clause {Expr *}
+when_clause(A) ::= . { A = 0; }
+when_clause(A) ::= WHEN expr(X). { A = X; }
+
+%type trigger_cmd_list {TriggerStep *}
+%destructor trigger_cmd_list {sqliteDeleteTriggerStep($$);}
+trigger_cmd_list(A) ::= trigger_cmd(X) SEMI trigger_cmd_list(Y). {
+ X->pNext = Y;
+ A = X;
+}
+trigger_cmd_list(A) ::= . { A = 0; }
+
+%type trigger_cmd {TriggerStep *}
+%destructor trigger_cmd {sqliteDeleteTriggerStep($$);}
+// UPDATE
+trigger_cmd(A) ::= UPDATE orconf(R) nm(X) SET setlist(Y) where_opt(Z).
+ { A = sqliteTriggerUpdateStep(&X, Y, Z, R); }
+
+// INSERT
+trigger_cmd(A) ::= insert_cmd(R) INTO nm(X) inscollist_opt(F)
+ VALUES LP itemlist(Y) RP.
+{A = sqliteTriggerInsertStep(&X, F, Y, 0, R);}
+
+trigger_cmd(A) ::= insert_cmd(R) INTO nm(X) inscollist_opt(F) select(S).
+ {A = sqliteTriggerInsertStep(&X, F, 0, S, R);}
+
+// DELETE
+trigger_cmd(A) ::= DELETE FROM nm(X) where_opt(Y).
+ {A = sqliteTriggerDeleteStep(&X, Y);}
+
+// SELECT
+trigger_cmd(A) ::= select(X). {A = sqliteTriggerSelectStep(X); }
+
+// The special RAISE expression that may occur in trigger programs
+expr(A) ::= RAISE(X) LP IGNORE RP(Y). {
+ A = sqliteExpr(TK_RAISE, 0, 0, 0);
+ A->iColumn = OE_Ignore;
+ sqliteExprSpan(A, &X, &Y);
+}
+expr(A) ::= RAISE(X) LP ROLLBACK COMMA nm(Z) RP(Y). {
+ A = sqliteExpr(TK_RAISE, 0, 0, &Z);
+ A->iColumn = OE_Rollback;
+ sqliteExprSpan(A, &X, &Y);
+}
+expr(A) ::= RAISE(X) LP ABORT COMMA nm(Z) RP(Y). {
+ A = sqliteExpr(TK_RAISE, 0, 0, &Z);
+ A->iColumn = OE_Abort;
+ sqliteExprSpan(A, &X, &Y);
+}
+expr(A) ::= RAISE(X) LP FAIL COMMA nm(Z) RP(Y). {
+ A = sqliteExpr(TK_RAISE, 0, 0, &Z);
+ A->iColumn = OE_Fail;
+ sqliteExprSpan(A, &X, &Y);
+}
+
+//////////////////////// DROP TRIGGER statement //////////////////////////////
+cmd ::= DROP TRIGGER nm(X) dbnm(D). {
+ sqliteDropTrigger(pParse,sqliteSrcListAppend(0,&X,&D));
+}
+
+//////////////////////// ATTACH DATABASE file AS name /////////////////////////
+cmd ::= ATTACH database_kw_opt ids(F) AS nm(D) key_opt(K). {
+ sqliteAttach(pParse, &F, &D, &K);
+}
+%type key_opt {Token}
+key_opt(A) ::= USING ids(X). { A = X; }
+key_opt(A) ::= . { A.z = 0; A.n = 0; }
+
+database_kw_opt ::= DATABASE.
+database_kw_opt ::= .
+
+//////////////////////// DETACH DATABASE name /////////////////////////////////
+cmd ::= DETACH database_kw_opt nm(D). {
+ sqliteDetach(pParse, &D);
+}
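The likeop rules above turn "x LIKE y" (or GLOB) into a function-style expression whose argument list holds the pattern Y first and the operand X second, and the NOT variants then wrap that finished node in a TK_NOT expression instead of using a dedicated operator. A minimal standalone sketch of that wrapping order follows; the Expr struct, token codes, and helper names are simplified stand-ins for illustration, not the real SQLite internals.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the parser's expression machinery. */
enum { TK_LIKE = 1, TK_NOT = 2 };

typedef struct Expr Expr;
struct Expr {
  int op;               /* TK_LIKE or TK_NOT */
  Expr *pLeft;          /* For TK_NOT: the wrapped expression */
  const char *azArg[2]; /* For TK_LIKE: pattern first, operand second */
};

static Expr *mkLike(const char *zOperand, const char *zPattern){
  Expr *p = calloc(1, sizeof(*p));
  if( p==0 ) exit(1);
  p->op = TK_LIKE;
  p->azArg[0] = zPattern;   /* the grammar appends Y, the pattern, first */
  p->azArg[1] = zOperand;   /* ...and X, the operand, second */
  return p;
}

static Expr *mkNot(Expr *pSub){
  Expr *p = calloc(1, sizeof(*p));
  if( p==0 ) exit(1);
  p->op = TK_NOT;
  p->pLeft = pSub;          /* NOT LIKE wraps the already-built LIKE node */
  return p;
}

int main(void){
  /* name NOT LIKE 'a%'  becomes  NOT( LIKE('a%', name) ) */
  Expr *p = mkNot(mkLike("name", "a%"));
  printf("outer=%d inner=%d args=(%s, %s)\n",
         p->op, p->pLeft->op, p->pLeft->azArg[0], p->pLeft->azArg[1]);
  free(p->pLeft);
  free(p);
  return 0;
}

The same wrap-in-TK_NOT pattern is reused for NOT BETWEEN and NOT IN in the rules above, so the code generator only ever has to negate a single node.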
diff --git a/usr/src/cmd/svc/configd/sqlite/src/pragma.c b/usr/src/cmd/svc/configd/sqlite/src/pragma.c
new file mode 100644
index 0000000000..2a944c2628
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/pragma.c
@@ -0,0 +1,715 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2003 April 6
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code used to implement the PRAGMA command.
+**
+** $Id: pragma.c,v 1.19 2004/04/23 17:04:45 drh Exp $
+*/
+#include "sqliteInt.h"
+#include <ctype.h>
+
+/*
+** Interpret the given string as a boolean value.
+*/
+static int getBoolean(const char *z){
+ static char *azTrue[] = { "yes", "on", "true" };
+ int i;
+ if( z[0]==0 ) return 0;
+ if( isdigit(z[0]) || (z[0]=='-' && isdigit(z[1])) ){
+ return atoi(z);
+ }
+ for(i=0; i<sizeof(azTrue)/sizeof(azTrue[0]); i++){
+ if( sqliteStrICmp(z,azTrue[i])==0 ) return 1;
+ }
+ return 0;
+}
+
+/*
+** Interpret the given string as a safety level. Return 0 for OFF,
+** 1 for ON or NORMAL and 2 for FULL. Return 1 for an empty or
+** unrecognized string argument.
+**
+** Note that the values returned are one less than the values that
+** should be passed into sqliteBtreeSetSafetyLevel(). This is done
+** to support legacy SQL code. The safety level used to be boolean
+** and older scripts may have used numbers 0 for OFF and 1 for ON.
+*/
+static int getSafetyLevel(char *z){
+ static const struct {
+ const char *zWord;
+ int val;
+ } aKey[] = {
+ { "no", 0 },
+ { "off", 0 },
+ { "false", 0 },
+ { "yes", 1 },
+ { "on", 1 },
+ { "true", 1 },
+ { "full", 2 },
+ };
+ int i;
+ if( z[0]==0 ) return 1;
+ if( isdigit(z[0]) || (z[0]=='-' && isdigit(z[1])) ){
+ return atoi(z);
+ }
+ for(i=0; i<sizeof(aKey)/sizeof(aKey[0]); i++){
+ if( sqliteStrICmp(z,aKey[i].zWord)==0 ) return aKey[i].val;
+ }
+ return 1;
+}
+
+/*
+** Interpret the given string as a temp db location. Return 1 for file
+** backed temporary databases, 2 for the Red-Black tree in-memory database
+** and 0 to use the compile-time default.
+*/
+static int getTempStore(const char *z){
+ if( z[0]>='0' && z[0]<='2' ){
+ return z[0] - '0';
+ }else if( sqliteStrICmp(z, "file")==0 ){
+ return 1;
+ }else if( sqliteStrICmp(z, "memory")==0 ){
+ return 2;
+ }else{
+ return 0;
+ }
+}
+
+/*
+** If the TEMP database is open, close it and mark the database schema
+** as needing reloading. This must be done when using the TEMP_STORE
+** or DEFAULT_TEMP_STORE pragmas.
+*/
+static int changeTempStorage(Parse *pParse, const char *zStorageType){
+ int ts = getTempStore(zStorageType);
+ sqlite *db = pParse->db;
+ if( db->temp_store==ts ) return SQLITE_OK;
+ if( db->aDb[1].pBt!=0 ){
+ if( db->flags & SQLITE_InTrans ){
+ sqliteErrorMsg(pParse, "temporary storage cannot be changed "
+ "from within a transaction");
+ return SQLITE_ERROR;
+ }
+ sqliteBtreeClose(db->aDb[1].pBt);
+ db->aDb[1].pBt = 0;
+ sqliteResetInternalSchema(db, 0);
+ }
+ db->temp_store = ts;
+ return SQLITE_OK;
+}
+
+/*
+** Check to see if zRight and zLeft refer to a pragma that queries
+** or changes one of the flags in db->flags. Return 1 if so and 0 if not.
+** Also, implement the pragma.
+*/
+static int flagPragma(Parse *pParse, const char *zLeft, const char *zRight){
+ static const struct {
+ const char *zName; /* Name of the pragma */
+ int mask; /* Mask for the db->flags value */
+ } aPragma[] = {
+ { "vdbe_trace", SQLITE_VdbeTrace },
+ { "full_column_names", SQLITE_FullColNames },
+ { "short_column_names", SQLITE_ShortColNames },
+ { "show_datatypes", SQLITE_ReportTypes },
+ { "count_changes", SQLITE_CountRows },
+ { "empty_result_callbacks", SQLITE_NullCallback },
+ };
+ int i;
+ for(i=0; i<sizeof(aPragma)/sizeof(aPragma[0]); i++){
+ if( sqliteStrICmp(zLeft, aPragma[i].zName)==0 ){
+ sqlite *db = pParse->db;
+ Vdbe *v;
+ if( strcmp(zLeft,zRight)==0 && (v = sqliteGetVdbe(pParse))!=0 ){
+ sqliteVdbeOp3(v, OP_ColumnName, 0, 1, aPragma[i].zName, P3_STATIC);
+ sqliteVdbeOp3(v, OP_ColumnName, 1, 0, "boolean", P3_STATIC);
+ sqliteVdbeCode(v, OP_Integer, (db->flags & aPragma[i].mask)!=0, 0,
+ OP_Callback, 1, 0,
+ 0);
+ }else if( getBoolean(zRight) ){
+ db->flags |= aPragma[i].mask;
+ }else{
+ db->flags &= ~aPragma[i].mask;
+ }
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+** Process a pragma statement.
+**
+** Pragmas are of this form:
+**
+** PRAGMA id = value
+**
+** The identifier might also be a string. The value is a string, an
+** identifier, or a number. If minusFlag is true, then the value is
+** a number that was preceded by a minus sign.
+*/
+void sqlitePragma(Parse *pParse, Token *pLeft, Token *pRight, int minusFlag){
+ char *zLeft = 0;
+ char *zRight = 0;
+ sqlite *db = pParse->db;
+ Vdbe *v = sqliteGetVdbe(pParse);
+ if( v==0 ) return;
+
+ zLeft = sqliteStrNDup(pLeft->z, pLeft->n);
+ sqliteDequote(zLeft);
+ if( minusFlag ){
+ zRight = 0;
+ sqliteSetNString(&zRight, "-", 1, pRight->z, pRight->n, 0);
+ }else{
+ zRight = sqliteStrNDup(pRight->z, pRight->n);
+ sqliteDequote(zRight);
+ }
+ if( sqliteAuthCheck(pParse, SQLITE_PRAGMA, zLeft, zRight, 0) ){
+ sqliteFree(zLeft);
+ sqliteFree(zRight);
+ return;
+ }
+
+ /*
+ ** PRAGMA default_cache_size
+ ** PRAGMA default_cache_size=N
+ **
+ ** The first form reports the current persistent setting for the
+ ** page cache size. The value returned is the maximum number of
+ ** pages in the page cache. The second form sets both the current
+ ** page cache size value and the persistent page cache size value
+ ** stored in the database file.
+ **
+ ** The default cache size is stored in meta-value 2 of page 1 of the
+ ** database file. The cache size is actually the absolute value of
+ ** this memory location. The sign of meta-value 2 determines the
+ ** synchronous setting. A negative value means synchronous is off
+ ** and a positive value means synchronous is on.
+ */
+ if( sqliteStrICmp(zLeft,"default_cache_size")==0 ){
+ static VdbeOpList getCacheSize[] = {
+ { OP_ReadCookie, 0, 2, 0},
+ { OP_AbsValue, 0, 0, 0},
+ { OP_Dup, 0, 0, 0},
+ { OP_Integer, 0, 0, 0},
+ { OP_Ne, 0, 6, 0},
+ { OP_Integer, 0, 0, 0}, /* 5 */
+ { OP_ColumnName, 0, 1, "cache_size"},
+ { OP_Callback, 1, 0, 0},
+ };
+ int addr;
+ if( pRight->z==pLeft->z ){
+ addr = sqliteVdbeAddOpList(v, ArraySize(getCacheSize), getCacheSize);
+ sqliteVdbeChangeP1(v, addr+5, MAX_PAGES);
+ }else{
+ int size = atoi(zRight);
+ if( size<0 ) size = -size;
+ sqliteBeginWriteOperation(pParse, 0, 0);
+ sqliteVdbeAddOp(v, OP_Integer, size, 0);
+ sqliteVdbeAddOp(v, OP_ReadCookie, 0, 2);
+ addr = sqliteVdbeAddOp(v, OP_Integer, 0, 0);
+ sqliteVdbeAddOp(v, OP_Ge, 0, addr+3);
+ sqliteVdbeAddOp(v, OP_Negative, 0, 0);
+ sqliteVdbeAddOp(v, OP_SetCookie, 0, 2);
+ sqliteEndWriteOperation(pParse);
+ db->cache_size = db->cache_size<0 ? -size : size;
+ sqliteBtreeSetCacheSize(db->aDb[0].pBt, db->cache_size);
+ }
+ }else
+
+ /*
+ ** PRAGMA cache_size
+ ** PRAGMA cache_size=N
+ **
+ ** The first form reports the current local setting for the
+ ** page cache size. The local setting can be different from
+ ** the persistent cache size value that is stored in the database
+ ** file itself. The value returned is the maximum number of
+ ** pages in the page cache. The second form sets the local
+ ** page cache size value. It does not change the persistent
+ ** cache size stored on the disk so the cache size will revert
+ ** to its default value when the database is closed and reopened.
+ ** N should be a positive integer.
+ */
+ if( sqliteStrICmp(zLeft,"cache_size")==0 ){
+ static VdbeOpList getCacheSize[] = {
+ { OP_ColumnName, 0, 1, "cache_size"},
+ { OP_Callback, 1, 0, 0},
+ };
+ if( pRight->z==pLeft->z ){
+ int size = db->cache_size;
+ if( size<0 ) size = -size;
+ sqliteVdbeAddOp(v, OP_Integer, size, 0);
+ sqliteVdbeAddOpList(v, ArraySize(getCacheSize), getCacheSize);
+ }else{
+ int size = atoi(zRight);
+ if( size<0 ) size = -size;
+ if( db->cache_size<0 ) size = -size;
+ db->cache_size = size;
+ sqliteBtreeSetCacheSize(db->aDb[0].pBt, db->cache_size);
+ }
+ }else
+
+ /*
+ ** PRAGMA default_synchronous
+ ** PRAGMA default_synchronous=ON|OFF|NORMAL|FULL
+ **
+ ** The first form returns the persistent value of the "synchronous" setting
+ ** that is stored in the database. This is the synchronous setting that
+ ** is used whenever the database is opened unless overridden by a separate
+ ** "synchronous" pragma. The second form changes the persistent and the
+ ** local synchronous setting to the value given.
+ **
+** If synchronous is OFF, SQLite does not attempt any fsync() system calls
+ ** to make sure data is committed to disk. Write operations are very fast,
+ ** but a power failure can leave the database in an inconsistent state.
+ ** If synchronous is ON or NORMAL, SQLite will do an fsync() system call to
+ ** make sure data is being written to disk. The risk of corruption due to
+ ** a power loss in this mode is negligible but non-zero. If synchronous
+ ** is FULL, extra fsync()s occur to reduce the risk of corruption to near
+ ** zero, but with a write performance penalty. The default mode is NORMAL.
+ */
+ if( sqliteStrICmp(zLeft,"default_synchronous")==0 ){
+ static VdbeOpList getSync[] = {
+ { OP_ColumnName, 0, 1, "synchronous"},
+ { OP_ReadCookie, 0, 3, 0},
+ { OP_Dup, 0, 0, 0},
+ { OP_If, 0, 0, 0}, /* 3 */
+ { OP_ReadCookie, 0, 2, 0},
+ { OP_Integer, 0, 0, 0},
+ { OP_Lt, 0, 5, 0},
+ { OP_AddImm, 1, 0, 0},
+ { OP_Callback, 1, 0, 0},
+ { OP_Halt, 0, 0, 0},
+ { OP_AddImm, -1, 0, 0}, /* 10 */
+ { OP_Callback, 1, 0, 0}
+ };
+ if( pRight->z==pLeft->z ){
+ int addr = sqliteVdbeAddOpList(v, ArraySize(getSync), getSync);
+ sqliteVdbeChangeP2(v, addr+3, addr+10);
+ }else{
+ int addr;
+ int size = db->cache_size;
+ if( size<0 ) size = -size;
+ sqliteBeginWriteOperation(pParse, 0, 0);
+ sqliteVdbeAddOp(v, OP_ReadCookie, 0, 2);
+ sqliteVdbeAddOp(v, OP_Dup, 0, 0);
+ addr = sqliteVdbeAddOp(v, OP_Integer, 0, 0);
+ sqliteVdbeAddOp(v, OP_Ne, 0, addr+3);
+ sqliteVdbeAddOp(v, OP_AddImm, MAX_PAGES, 0);
+ sqliteVdbeAddOp(v, OP_AbsValue, 0, 0);
+ db->safety_level = getSafetyLevel(zRight)+1;
+ if( db->safety_level==1 ){
+ sqliteVdbeAddOp(v, OP_Negative, 0, 0);
+ size = -size;
+ }
+ sqliteVdbeAddOp(v, OP_SetCookie, 0, 2);
+ sqliteVdbeAddOp(v, OP_Integer, db->safety_level, 0);
+ sqliteVdbeAddOp(v, OP_SetCookie, 0, 3);
+ sqliteEndWriteOperation(pParse);
+ db->cache_size = size;
+ sqliteBtreeSetCacheSize(db->aDb[0].pBt, db->cache_size);
+ sqliteBtreeSetSafetyLevel(db->aDb[0].pBt, db->safety_level);
+ }
+ }else
+
+ /*
+ ** PRAGMA synchronous
+ ** PRAGMA synchronous=OFF|ON|NORMAL|FULL
+ **
+ ** Return or set the local value of the synchronous flag. Changing
+ ** the local value does not make changes to the disk file and the
+ ** default value will be restored the next time the database is
+ ** opened.
+ */
+ if( sqliteStrICmp(zLeft,"synchronous")==0 ){
+ static VdbeOpList getSync[] = {
+ { OP_ColumnName, 0, 1, "synchronous"},
+ { OP_Callback, 1, 0, 0},
+ };
+ if( pRight->z==pLeft->z ){
+ sqliteVdbeAddOp(v, OP_Integer, db->safety_level-1, 0);
+ sqliteVdbeAddOpList(v, ArraySize(getSync), getSync);
+ }else{
+ int size = db->cache_size;
+ if( size<0 ) size = -size;
+ db->safety_level = getSafetyLevel(zRight)+1;
+ if( db->safety_level==1 ) size = -size;
+ db->cache_size = size;
+ sqliteBtreeSetCacheSize(db->aDb[0].pBt, db->cache_size);
+ sqliteBtreeSetSafetyLevel(db->aDb[0].pBt, db->safety_level);
+ }
+ }else
+
+#ifndef NDEBUG
+ if( sqliteStrICmp(zLeft, "trigger_overhead_test")==0 ){
+ if( getBoolean(zRight) ){
+ always_code_trigger_setup = 1;
+ }else{
+ always_code_trigger_setup = 0;
+ }
+ }else
+#endif
+
+ if( flagPragma(pParse, zLeft, zRight) ){
+ /* The flagPragma() call also generates any necessary code */
+ }else
+
+ if( sqliteStrICmp(zLeft, "table_info")==0 ){
+ Table *pTab;
+ pTab = sqliteFindTable(db, zRight, 0);
+ if( pTab ){
+ static VdbeOpList tableInfoPreface[] = {
+ { OP_ColumnName, 0, 0, "cid"},
+ { OP_ColumnName, 1, 0, "name"},
+ { OP_ColumnName, 2, 0, "type"},
+ { OP_ColumnName, 3, 0, "notnull"},
+ { OP_ColumnName, 4, 0, "dflt_value"},
+ { OP_ColumnName, 5, 1, "pk"},
+ };
+ int i;
+ sqliteVdbeAddOpList(v, ArraySize(tableInfoPreface), tableInfoPreface);
+ sqliteViewGetColumnNames(pParse, pTab);
+ for(i=0; i<pTab->nCol; i++){
+ sqliteVdbeAddOp(v, OP_Integer, i, 0);
+ sqliteVdbeOp3(v, OP_String, 0, 0, pTab->aCol[i].zName, 0);
+ sqliteVdbeOp3(v, OP_String, 0, 0,
+ pTab->aCol[i].zType ? pTab->aCol[i].zType : "numeric", 0);
+ sqliteVdbeAddOp(v, OP_Integer, pTab->aCol[i].notNull, 0);
+ sqliteVdbeOp3(v, OP_String, 0, 0,
+ pTab->aCol[i].zDflt, P3_STATIC);
+ sqliteVdbeAddOp(v, OP_Integer, pTab->aCol[i].isPrimKey, 0);
+ sqliteVdbeAddOp(v, OP_Callback, 6, 0);
+ }
+ }
+ }else
+
+ if( sqliteStrICmp(zLeft, "index_info")==0 ){
+ Index *pIdx;
+ Table *pTab;
+ pIdx = sqliteFindIndex(db, zRight, 0);
+ if( pIdx ){
+ static VdbeOpList tableInfoPreface[] = {
+ { OP_ColumnName, 0, 0, "seqno"},
+ { OP_ColumnName, 1, 0, "cid"},
+ { OP_ColumnName, 2, 1, "name"},
+ };
+ int i;
+ pTab = pIdx->pTable;
+ sqliteVdbeAddOpList(v, ArraySize(tableInfoPreface), tableInfoPreface);
+ for(i=0; i<pIdx->nColumn; i++){
+ int cnum = pIdx->aiColumn[i];
+ sqliteVdbeAddOp(v, OP_Integer, i, 0);
+ sqliteVdbeAddOp(v, OP_Integer, cnum, 0);
+ assert( pTab->nCol>cnum );
+ sqliteVdbeOp3(v, OP_String, 0, 0, pTab->aCol[cnum].zName, 0);
+ sqliteVdbeAddOp(v, OP_Callback, 3, 0);
+ }
+ }
+ }else
+
+ if( sqliteStrICmp(zLeft, "index_list")==0 ){
+ Index *pIdx;
+ Table *pTab;
+ pTab = sqliteFindTable(db, zRight, 0);
+ if( pTab ){
+ v = sqliteGetVdbe(pParse);
+ pIdx = pTab->pIndex;
+ }
+ if( pTab && pIdx ){
+ int i = 0;
+ static VdbeOpList indexListPreface[] = {
+ { OP_ColumnName, 0, 0, "seq"},
+ { OP_ColumnName, 1, 0, "name"},
+ { OP_ColumnName, 2, 1, "unique"},
+ };
+
+ sqliteVdbeAddOpList(v, ArraySize(indexListPreface), indexListPreface);
+ while(pIdx){
+ sqliteVdbeAddOp(v, OP_Integer, i, 0);
+ sqliteVdbeOp3(v, OP_String, 0, 0, pIdx->zName, 0);
+ sqliteVdbeAddOp(v, OP_Integer, pIdx->onError!=OE_None, 0);
+ sqliteVdbeAddOp(v, OP_Callback, 3, 0);
+ ++i;
+ pIdx = pIdx->pNext;
+ }
+ }
+ }else
+
+ if( sqliteStrICmp(zLeft, "foreign_key_list")==0 ){
+ FKey *pFK;
+ Table *pTab;
+ pTab = sqliteFindTable(db, zRight, 0);
+ if( pTab ){
+ v = sqliteGetVdbe(pParse);
+ pFK = pTab->pFKey;
+ }
+ if( pTab && pFK ){
+ int i = 0;
+ static VdbeOpList indexListPreface[] = {
+ { OP_ColumnName, 0, 0, "id"},
+ { OP_ColumnName, 1, 0, "seq"},
+ { OP_ColumnName, 2, 0, "table"},
+ { OP_ColumnName, 3, 0, "from"},
+ { OP_ColumnName, 4, 1, "to"},
+ };
+
+ sqliteVdbeAddOpList(v, ArraySize(indexListPreface), indexListPreface);
+ while(pFK){
+ int j;
+ for(j=0; j<pFK->nCol; j++){
+ sqliteVdbeAddOp(v, OP_Integer, i, 0);
+ sqliteVdbeAddOp(v, OP_Integer, j, 0);
+ sqliteVdbeOp3(v, OP_String, 0, 0, pFK->zTo, 0);
+ sqliteVdbeOp3(v, OP_String, 0, 0,
+ pTab->aCol[pFK->aCol[j].iFrom].zName, 0);
+ sqliteVdbeOp3(v, OP_String, 0, 0, pFK->aCol[j].zCol, 0);
+ sqliteVdbeAddOp(v, OP_Callback, 5, 0);
+ }
+ ++i;
+ pFK = pFK->pNextFrom;
+ }
+ }
+ }else
+
+ if( sqliteStrICmp(zLeft, "database_list")==0 ){
+ int i;
+ static VdbeOpList indexListPreface[] = {
+ { OP_ColumnName, 0, 0, "seq"},
+ { OP_ColumnName, 1, 0, "name"},
+ { OP_ColumnName, 2, 1, "file"},
+ };
+
+ sqliteVdbeAddOpList(v, ArraySize(indexListPreface), indexListPreface);
+ for(i=0; i<db->nDb; i++){
+ if( db->aDb[i].pBt==0 ) continue;
+ assert( db->aDb[i].zName!=0 );
+ sqliteVdbeAddOp(v, OP_Integer, i, 0);
+ sqliteVdbeOp3(v, OP_String, 0, 0, db->aDb[i].zName, 0);
+ sqliteVdbeOp3(v, OP_String, 0, 0,
+ sqliteBtreeGetFilename(db->aDb[i].pBt), 0);
+ sqliteVdbeAddOp(v, OP_Callback, 3, 0);
+ }
+ }else
+
+
+ /*
+ ** PRAGMA temp_store
+ ** PRAGMA temp_store = "default"|"memory"|"file"
+ **
+ ** Return or set the local value of the temp_store flag. Changing
+ ** the local value does not make changes to the disk file and the default
+ ** value will be restored the next time the database is opened.
+ **
+ ** Note that it is possible for the library compile-time options to
+ ** override this setting
+ */
+ if( sqliteStrICmp(zLeft, "temp_store")==0 ){
+ static VdbeOpList getTmpDbLoc[] = {
+ { OP_ColumnName, 0, 1, "temp_store"},
+ { OP_Callback, 1, 0, 0},
+ };
+ if( pRight->z==pLeft->z ){
+ sqliteVdbeAddOp(v, OP_Integer, db->temp_store, 0);
+ sqliteVdbeAddOpList(v, ArraySize(getTmpDbLoc), getTmpDbLoc);
+ }else{
+ changeTempStorage(pParse, zRight);
+ }
+ }else
+
+ /*
+ ** PRAGMA default_temp_store
+ ** PRAGMA default_temp_store = "default"|"memory"|"file"
+ **
+ ** Return or set the value of the persistent temp_store flag. Any
+ ** change does not take effect until the next time the database is
+ ** opened.
+ **
+ ** Note that it is possible for the library compile-time options to
+ ** override this setting
+ */
+ if( sqliteStrICmp(zLeft, "default_temp_store")==0 ){
+ static VdbeOpList getTmpDbLoc[] = {
+ { OP_ColumnName, 0, 1, "temp_store"},
+ { OP_ReadCookie, 0, 5, 0},
+ { OP_Callback, 1, 0, 0}};
+ if( pRight->z==pLeft->z ){
+ sqliteVdbeAddOpList(v, ArraySize(getTmpDbLoc), getTmpDbLoc);
+ }else{
+ sqliteBeginWriteOperation(pParse, 0, 0);
+ sqliteVdbeAddOp(v, OP_Integer, getTempStore(zRight), 0);
+ sqliteVdbeAddOp(v, OP_SetCookie, 0, 5);
+ sqliteEndWriteOperation(pParse);
+ }
+ }else
+
+#ifndef NDEBUG
+ if( sqliteStrICmp(zLeft, "parser_trace")==0 ){
+ extern void sqliteParserTrace(FILE*, char *);
+ if( getBoolean(zRight) ){
+ sqliteParserTrace(stdout, "parser: ");
+ }else{
+ sqliteParserTrace(0, 0);
+ }
+ }else
+#endif
+
+ if( sqliteStrICmp(zLeft, "integrity_check")==0 ){
+ int i, j, addr;
+
+ /* Code that initializes the integrity check program. Set the
+ ** error count to 0
+ */
+ static VdbeOpList initCode[] = {
+ { OP_Integer, 0, 0, 0},
+ { OP_MemStore, 0, 1, 0},
+ { OP_ColumnName, 0, 1, "integrity_check"},
+ };
+
+ /* Code to do a BTree integrity check on a single database file.
+ */
+ static VdbeOpList checkDb[] = {
+ { OP_SetInsert, 0, 0, "2"},
+ { OP_Integer, 0, 0, 0}, /* 1 */
+ { OP_OpenRead, 0, 2, 0},
+ { OP_Rewind, 0, 7, 0}, /* 3 */
+ { OP_Column, 0, 3, 0}, /* 4 */
+ { OP_SetInsert, 0, 0, 0},
+ { OP_Next, 0, 4, 0}, /* 6 */
+ { OP_IntegrityCk, 0, 0, 0}, /* 7 */
+ { OP_Dup, 0, 1, 0},
+ { OP_String, 0, 0, "ok"},
+ { OP_StrEq, 0, 12, 0}, /* 10 */
+ { OP_MemIncr, 0, 0, 0},
+ { OP_String, 0, 0, "*** in database "},
+ { OP_String, 0, 0, 0}, /* 13 */
+ { OP_String, 0, 0, " ***\n"},
+ { OP_Pull, 3, 0, 0},
+ { OP_Concat, 4, 1, 0},
+ { OP_Callback, 1, 0, 0},
+ };
+
+ /* Code that appears at the end of the integrity check. If no error
+ ** messages have been generated, output OK. Otherwise output the
+ ** error message
+ */
+ static VdbeOpList endCode[] = {
+ { OP_MemLoad, 0, 0, 0},
+ { OP_Integer, 0, 0, 0},
+ { OP_Ne, 0, 0, 0}, /* 2 */
+ { OP_String, 0, 0, "ok"},
+ { OP_Callback, 1, 0, 0},
+ };
+
+ /* Initialize the VDBE program */
+ sqliteVdbeAddOpList(v, ArraySize(initCode), initCode);
+
+ /* Do an integrity check on each database file */
+ for(i=0; i<db->nDb; i++){
+ HashElem *x;
+
+ /* Do an integrity check of the B-Tree
+ */
+ addr = sqliteVdbeAddOpList(v, ArraySize(checkDb), checkDb);
+ sqliteVdbeChangeP1(v, addr+1, i);
+ sqliteVdbeChangeP2(v, addr+3, addr+7);
+ sqliteVdbeChangeP2(v, addr+6, addr+4);
+ sqliteVdbeChangeP2(v, addr+7, i);
+ sqliteVdbeChangeP2(v, addr+10, addr+ArraySize(checkDb));
+ sqliteVdbeChangeP3(v, addr+13, db->aDb[i].zName, P3_STATIC);
+
+ /* Make sure all the indices are constructed correctly.
+ */
+ sqliteCodeVerifySchema(pParse, i);
+ for(x=sqliteHashFirst(&db->aDb[i].tblHash); x; x=sqliteHashNext(x)){
+ Table *pTab = sqliteHashData(x);
+ Index *pIdx;
+ int loopTop;
+
+ if( pTab->pIndex==0 ) continue;
+ sqliteVdbeAddOp(v, OP_Integer, i, 0);
+ sqliteVdbeOp3(v, OP_OpenRead, 1, pTab->tnum, pTab->zName, 0);
+ for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){
+ if( pIdx->tnum==0 ) continue;
+ sqliteVdbeAddOp(v, OP_Integer, pIdx->iDb, 0);
+ sqliteVdbeOp3(v, OP_OpenRead, j+2, pIdx->tnum, pIdx->zName, 0);
+ }
+ sqliteVdbeAddOp(v, OP_Integer, 0, 0);
+ sqliteVdbeAddOp(v, OP_MemStore, 1, 1);
+ loopTop = sqliteVdbeAddOp(v, OP_Rewind, 1, 0);
+ sqliteVdbeAddOp(v, OP_MemIncr, 1, 0);
+ for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){
+ int k, jmp2;
+ static VdbeOpList idxErr[] = {
+ { OP_MemIncr, 0, 0, 0},
+ { OP_String, 0, 0, "rowid "},
+ { OP_Recno, 1, 0, 0},
+ { OP_String, 0, 0, " missing from index "},
+ { OP_String, 0, 0, 0}, /* 4 */
+ { OP_Concat, 4, 0, 0},
+ { OP_Callback, 1, 0, 0},
+ };
+ sqliteVdbeAddOp(v, OP_Recno, 1, 0);
+ for(k=0; k<pIdx->nColumn; k++){
+ int idx = pIdx->aiColumn[k];
+ if( idx==pTab->iPKey ){
+ sqliteVdbeAddOp(v, OP_Recno, 1, 0);
+ }else{
+ sqliteVdbeAddOp(v, OP_Column, 1, idx);
+ }
+ }
+ sqliteVdbeAddOp(v, OP_MakeIdxKey, pIdx->nColumn, 0);
+ if( db->file_format>=4 ) sqliteAddIdxKeyType(v, pIdx);
+ jmp2 = sqliteVdbeAddOp(v, OP_Found, j+2, 0);
+ addr = sqliteVdbeAddOpList(v, ArraySize(idxErr), idxErr);
+ sqliteVdbeChangeP3(v, addr+4, pIdx->zName, P3_STATIC);
+ sqliteVdbeChangeP2(v, jmp2, sqliteVdbeCurrentAddr(v));
+ }
+ sqliteVdbeAddOp(v, OP_Next, 1, loopTop+1);
+ sqliteVdbeChangeP2(v, loopTop, sqliteVdbeCurrentAddr(v));
+ for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){
+ static VdbeOpList cntIdx[] = {
+ { OP_Integer, 0, 0, 0},
+ { OP_MemStore, 2, 1, 0},
+ { OP_Rewind, 0, 0, 0}, /* 2 */
+ { OP_MemIncr, 2, 0, 0},
+ { OP_Next, 0, 0, 0}, /* 4 */
+ { OP_MemLoad, 1, 0, 0},
+ { OP_MemLoad, 2, 0, 0},
+ { OP_Eq, 0, 0, 0}, /* 7 */
+ { OP_MemIncr, 0, 0, 0},
+ { OP_String, 0, 0, "wrong # of entries in index "},
+ { OP_String, 0, 0, 0}, /* 10 */
+ { OP_Concat, 2, 0, 0},
+ { OP_Callback, 1, 0, 0},
+ };
+ if( pIdx->tnum==0 ) continue;
+ addr = sqliteVdbeAddOpList(v, ArraySize(cntIdx), cntIdx);
+ sqliteVdbeChangeP1(v, addr+2, j+2);
+ sqliteVdbeChangeP2(v, addr+2, addr+5);
+ sqliteVdbeChangeP1(v, addr+4, j+2);
+ sqliteVdbeChangeP2(v, addr+4, addr+3);
+ sqliteVdbeChangeP2(v, addr+7, addr+ArraySize(cntIdx));
+ sqliteVdbeChangeP3(v, addr+10, pIdx->zName, P3_STATIC);
+ }
+ }
+ }
+ addr = sqliteVdbeAddOpList(v, ArraySize(endCode), endCode);
+ sqliteVdbeChangeP2(v, addr+2, addr+ArraySize(endCode));
+ }else
+
+ {}
+ sqliteFree(zLeft);
+ sqliteFree(zRight);
+}
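getBoolean() and getSafetyLevel() above share one lookup pattern: an argument that starts with a digit (or a minus sign followed by a digit) goes straight to atoi(), anything else is compared case-insensitively against a small keyword table, and a fixed default is returned when nothing matches. The sketch below is a self-contained rendering of that pattern; it uses the standard strcasecmp() in place of sqliteStrICmp(), an assumption made for portability rather than a use of the library routine.

#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <strings.h>    /* strcasecmp() */

/* Interpret a pragma value the way getBoolean() does: numbers are taken
** literally, keywords come from a small table, anything else is false. */
static int get_boolean(const char *z){
  static const char *azTrue[] = { "yes", "on", "true" };
  size_t i;
  if( z[0]==0 ) return 0;
  if( isdigit((unsigned char)z[0]) ||
      (z[0]=='-' && isdigit((unsigned char)z[1])) ){
    return atoi(z)!=0;
  }
  for(i=0; i<sizeof(azTrue)/sizeof(azTrue[0]); i++){
    if( strcasecmp(z, azTrue[i])==0 ) return 1;
  }
  return 0;
}

int main(void){
  const char *azSample[] = { "ON", "true", "0", "7", "nope", "" };
  size_t i;
  for(i=0; i<sizeof(azSample)/sizeof(azSample[0]); i++){
    printf("%-6s -> %d\n", azSample[i], get_boolean(azSample[i]));
  }
  return 0;
}

flagPragma() uses the same table-driven idea, only there the table maps pragma names to bit masks in db->flags, so a single loop both answers queries and applies updates.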
diff --git a/usr/src/cmd/svc/configd/sqlite/src/printf.c b/usr/src/cmd/svc/configd/sqlite/src/printf.c
new file mode 100644
index 0000000000..27bd2ac758
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/printf.c
@@ -0,0 +1,861 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** The "printf" code that follows dates from the 1980's. It is in
+** the public domain. The original comments are included here for
+** completeness. They are very out-of-date but might be useful as
+** an historical reference. Most of the "enhancements" have been backed
+** out so that the functionality is now the same as standard printf().
+**
+**************************************************************************
+**
+** The following module is an enhanced replacement for the "printf" subroutines
+** found in the standard C library. The following enhancements are
+** supported:
+**
+** + Additional functions. The standard set of "printf" functions
+** includes printf, fprintf, sprintf, vprintf, vfprintf, and
+** vsprintf. This module adds the following:
+**
+** * snprintf -- Works like sprintf, but has an extra argument
+** which is the size of the buffer written to.
+**
+** * mprintf -- Similar to sprintf. Writes output to memory
+** obtained from malloc.
+**
+** * xprintf -- Calls a function to dispose of output.
+**
+** * nprintf -- No output, but returns the number of characters
+** that would have been output by printf.
+**
+** * A v- version (ex: vsnprintf) of every function is also
+** supplied.
+**
+** + A few extensions to the formatting notation are supported:
+**
+** * The "=" flag (similar to "-") causes the output to be
+** centered in the appropriately sized field.
+**
+** * The %b field outputs an integer in binary notation.
+**
+** * The %c field now accepts a precision. The character output
+** is repeated by the number of times the precision specifies.
+**
+** * The %' field works like %c, but takes as its character the
+** next character of the format string, instead of the next
+** argument. For example, printf("%.78'-") prints 78 minus
+** signs, the same as printf("%.78c",'-').
+**
+** + When compiled using GCC on a SPARC, this version of printf is
+** faster than the library printf for SUN OS 4.1.
+**
+** + All functions are fully reentrant.
+**
+*/
+#include "sqliteInt.h"
+
+/*
+** Conversion types fall into various categories as defined by the
+** following enumeration.
+*/
+#define etRADIX 1 /* Integer types. %d, %x, %o, and so forth */
+#define etFLOAT 2 /* Floating point. %f */
+#define etEXP 3 /* Exponential notation. %e and %E */
+#define etGENERIC 4 /* Floating or exponential, depending on exponent. %g */
+#define etSIZE 5 /* Return number of characters processed so far. %n */
+#define etSTRING 6 /* Strings. %s */
+#define etDYNSTRING 7 /* Dynamically allocated strings. %z */
+#define etPERCENT 8 /* Percent symbol. %% */
+#define etCHARX 9 /* Characters. %c */
+#define etERROR 10 /* Used to indicate no such conversion type */
+/* The rest are extensions, not normally found in printf() */
+#define etCHARLIT 11 /* Literal characters. %' */
+#define etSQLESCAPE 12 /* Strings with '\'' doubled. %q */
+#define etSQLESCAPE2 13 /* Strings with '\'' doubled and enclosed in '',
+ NULL pointers replaced by SQL NULL. %Q */
+#define etTOKEN 14 /* a pointer to a Token structure */
+#define etSRCLIST 15 /* a pointer to a SrcList */
+
+
+/*
+** An "etByte" is an 8-bit unsigned value.
+*/
+typedef unsigned char etByte;
+
+/*
+** Each builtin conversion character (ex: the 'd' in "%d") is described
+** by an instance of the following structure
+*/
+typedef struct et_info { /* Information about each format field */
+ char fmttype; /* The format field code letter */
+ etByte base; /* The base for radix conversion */
+ etByte flags; /* One or more of FLAG_ constants below */
+ etByte type; /* Conversion paradigm */
+ char *charset; /* The character set for conversion */
+ char *prefix; /* Prefix on non-zero values in alt format */
+} et_info;
+
+/*
+** Allowed values for et_info.flags
+*/
+#define FLAG_SIGNED 1 /* True if the value to convert is signed */
+#define FLAG_INTERN 2 /* True if for internal use only */
+
+
+/*
+** The following table is searched linearly, so it is good to put the
+** most frequently used conversion types first.
+*/
+static et_info fmtinfo[] = {
+ { 'd', 10, 1, etRADIX, "0123456789", 0 },
+ { 's', 0, 0, etSTRING, 0, 0 },
+ { 'z', 0, 2, etDYNSTRING, 0, 0 },
+ { 'q', 0, 0, etSQLESCAPE, 0, 0 },
+ { 'Q', 0, 0, etSQLESCAPE2, 0, 0 },
+ { 'c', 0, 0, etCHARX, 0, 0 },
+ { 'o', 8, 0, etRADIX, "01234567", "0" },
+ { 'u', 10, 0, etRADIX, "0123456789", 0 },
+ { 'x', 16, 0, etRADIX, "0123456789abcdef", "x0" },
+ { 'X', 16, 0, etRADIX, "0123456789ABCDEF", "X0" },
+ { 'f', 0, 1, etFLOAT, 0, 0 },
+ { 'e', 0, 1, etEXP, "e", 0 },
+ { 'E', 0, 1, etEXP, "E", 0 },
+ { 'g', 0, 1, etGENERIC, "e", 0 },
+ { 'G', 0, 1, etGENERIC, "E", 0 },
+ { 'i', 10, 1, etRADIX, "0123456789", 0 },
+ { 'n', 0, 0, etSIZE, 0, 0 },
+ { '%', 0, 0, etPERCENT, 0, 0 },
+ { 'p', 10, 0, etRADIX, "0123456789", 0 },
+ { 'T', 0, 2, etTOKEN, 0, 0 },
+ { 'S', 0, 2, etSRCLIST, 0, 0 },
+};
+#define etNINFO (sizeof(fmtinfo)/sizeof(fmtinfo[0]))
+
+/*
+** If NOFLOATINGPOINT is defined, then none of the floating point
+** conversions will work.
+*/
+#ifndef etNOFLOATINGPOINT
+/*
+** "*val" is a double such that 0.1 <= *val < 10.0
+** Return the ascii code for the leading digit of *val, then
+** multiply "*val" by 10.0 to renormalize.
+**
+** Example:
+** input: *val = 3.14159
+** output: *val = 1.4159 function return = '3'
+**
+** The counter *cnt is incremented each time. After counter exceeds
+** 16 (the number of significant digits in a 64-bit float) '0' is
+** always returned.
+*/
+static int et_getdigit(LONGDOUBLE_TYPE *val, int *cnt){
+ int digit;
+ LONGDOUBLE_TYPE d;
+ if( (*cnt)++ >= 16 ) return '0';
+ digit = (int)*val;
+ d = digit;
+ digit += '0';
+ *val = (*val - d)*10.0;
+ return digit;
+}
+#endif
+
+#define etBUFSIZE 1000 /* Size of the output buffer */
+
+/*
+** The root program. All variations call this core.
+**
+** INPUTS:
+** func This is a pointer to a function taking three arguments
+** 1. A pointer to anything. Same as the "arg" parameter.
+** 2. A pointer to the list of characters to be output
+** (Note, this list is NOT null terminated.)
+** 3. An integer number of characters to be output.
+** (Note: This number might be zero.)
+**
+** arg This is the pointer to anything which will be passed as the
+** first argument to "func". Use it for whatever you like.
+**
+** fmt This is the format string, as in the usual print.
+**
+** ap This is a pointer to a list of arguments. Same as in
+** vfprintf.
+**
+** OUTPUTS:
+** The return value is the total number of characters sent to
+** the function "func". Returns -1 on an error.
+**
+** Note that the order in which automatic variables are declared below
+** seems to make a big difference in determining how fast this beast
+** will run.
+*/
+static int vxprintf(
+ void (*func)(void*,const char*,int), /* Consumer of text */
+ void *arg, /* First argument to the consumer */
+ int useExtended, /* Allow extended %-conversions */
+ const char *fmt, /* Format string */
+ va_list ap /* arguments */
+){
+ int c; /* Next character in the format string */
+ char *bufpt; /* Pointer to the conversion buffer */
+ int precision; /* Precision of the current field */
+ int length; /* Length of the field */
+ int idx; /* A general purpose loop counter */
+ int count; /* Total number of characters output */
+ int width; /* Width of the current field */
+ etByte flag_leftjustify; /* True if "-" flag is present */
+ etByte flag_plussign; /* True if "+" flag is present */
+ etByte flag_blanksign; /* True if " " flag is present */
+ etByte flag_alternateform; /* True if "#" flag is present */
+ etByte flag_zeropad; /* True if field width constant starts with zero */
+ etByte flag_long; /* True if "l" flag is present */
+ unsigned long longvalue; /* Value for integer types */
+ LONGDOUBLE_TYPE realvalue; /* Value for real types */
+ et_info *infop; /* Pointer to the appropriate info structure */
+ char buf[etBUFSIZE]; /* Conversion buffer */
+ char prefix; /* Prefix character. "+" or "-" or " " or '\0'. */
+ etByte errorflag = 0; /* True if an error is encountered */
+ etByte xtype; /* Conversion paradigm */
+ char *zExtra; /* Extra memory used for etTCLESCAPE conversions */
+ static char spaces[] = " ";
+#define etSPACESIZE (sizeof(spaces)-1)
+#ifndef etNOFLOATINGPOINT
+ int exp; /* exponent of real numbers */
+ double rounder; /* Used for rounding floating point values */
+ etByte flag_dp; /* True if decimal point should be shown */
+ etByte flag_rtz; /* True if trailing zeros should be removed */
+ etByte flag_exp; /* True to force display of the exponent */
+ int nsd; /* Number of significant digits returned */
+#endif
+
+ func(arg,"",0);
+ count = length = 0;
+ bufpt = 0;
+ for(; (c=(*fmt))!=0; ++fmt){
+ if( c!='%' ){
+ int amt;
+ bufpt = (char *)fmt;
+ amt = 1;
+ while( (c=(*++fmt))!='%' && c!=0 ) amt++;
+ (*func)(arg,bufpt,amt);
+ count += amt;
+ if( c==0 ) break;
+ }
+ if( (c=(*++fmt))==0 ){
+ errorflag = 1;
+ (*func)(arg,"%",1);
+ count++;
+ break;
+ }
+ /* Find out what flags are present */
+ flag_leftjustify = flag_plussign = flag_blanksign =
+ flag_alternateform = flag_zeropad = 0;
+ do{
+ switch( c ){
+ case '-': flag_leftjustify = 1; c = 0; break;
+ case '+': flag_plussign = 1; c = 0; break;
+ case ' ': flag_blanksign = 1; c = 0; break;
+ case '#': flag_alternateform = 1; c = 0; break;
+ case '0': flag_zeropad = 1; c = 0; break;
+ default: break;
+ }
+ }while( c==0 && (c=(*++fmt))!=0 );
+ /* Get the field width */
+ width = 0;
+ if( c=='*' ){
+ width = va_arg(ap,int);
+ if( width<0 ){
+ flag_leftjustify = 1;
+ width = -width;
+ }
+ c = *++fmt;
+ }else{
+ while( c>='0' && c<='9' ){
+ width = width*10 + c - '0';
+ c = *++fmt;
+ }
+ }
+ if( width > etBUFSIZE-10 ){
+ width = etBUFSIZE-10;
+ }
+ /* Get the precision */
+ if( c=='.' ){
+ precision = 0;
+ c = *++fmt;
+ if( c=='*' ){
+ precision = va_arg(ap,int);
+ if( precision<0 ) precision = -precision;
+ c = *++fmt;
+ }else{
+ while( c>='0' && c<='9' ){
+ precision = precision*10 + c - '0';
+ c = *++fmt;
+ }
+ }
+ /* Limit the precision to prevent overflowing buf[] during conversion */
+ if( precision>etBUFSIZE-40 ) precision = etBUFSIZE-40;
+ }else{
+ precision = -1;
+ }
+ /* Get the conversion type modifier */
+ if( c=='l' ){
+ flag_long = 1;
+ c = *++fmt;
+ }else{
+ flag_long = 0;
+ }
+ /* Fetch the info entry for the field */
+ infop = 0;
+ xtype = etERROR;
+ for(idx=0; idx<etNINFO; idx++){
+ if( c==fmtinfo[idx].fmttype ){
+ infop = &fmtinfo[idx];
+ if( useExtended || (infop->flags & FLAG_INTERN)==0 ){
+ xtype = infop->type;
+ }
+ break;
+ }
+ }
+ zExtra = 0;
+
+ /*
+ ** At this point, variables are initialized as follows:
+ **
+ ** flag_alternateform TRUE if a '#' is present.
+ ** flag_plussign TRUE if a '+' is present.
+ ** flag_leftjustify TRUE if a '-' is present or if the
+ ** field width was negative.
+ ** flag_zeropad TRUE if the width began with 0.
+ ** flag_long TRUE if the letter 'l' (ell) prefixed
+ ** the conversion character.
+ ** flag_blanksign TRUE if a ' ' is present.
+ ** width The specified field width. This is
+ ** always non-negative. Zero is the default.
+ ** precision The specified precision. The default
+ ** is -1.
+ ** xtype The class of the conversion.
+ ** infop Pointer to the appropriate info struct.
+ */
+ switch( xtype ){
+ case etRADIX:
+ if( flag_long ) longvalue = va_arg(ap,long);
+ else longvalue = va_arg(ap,int);
+#if 1
+ /* For the format %#x, the value zero is printed "0" not "0x0".
+ ** I think this is stupid. */
+ if( longvalue==0 ) flag_alternateform = 0;
+#else
+ /* More sensible: turn off the prefix for octal (to prevent "00"),
+ ** but leave the prefix for hex. */
+ if( longvalue==0 && infop->base==8 ) flag_alternateform = 0;
+#endif
+ if( infop->flags & FLAG_SIGNED ){
+ if( *(long*)&longvalue<0 ){
+ longvalue = -*(long*)&longvalue;
+ prefix = '-';
+ }else if( flag_plussign ) prefix = '+';
+ else if( flag_blanksign ) prefix = ' ';
+ else prefix = 0;
+ }else prefix = 0;
+ if( flag_zeropad && precision<width-(prefix!=0) ){
+ precision = width-(prefix!=0);
+ }
+ bufpt = &buf[etBUFSIZE-1];
+ {
+ register char *cset; /* Use registers for speed */
+ register int base;
+ cset = infop->charset;
+ base = infop->base;
+ do{ /* Convert to ascii */
+ *(--bufpt) = cset[longvalue%base];
+ longvalue = longvalue/base;
+ }while( longvalue>0 );
+ }
+ length = &buf[etBUFSIZE-1]-bufpt;
+ for(idx=precision-length; idx>0; idx--){
+ *(--bufpt) = '0'; /* Zero pad */
+ }
+ if( prefix ) *(--bufpt) = prefix; /* Add sign */
+ if( flag_alternateform && infop->prefix ){ /* Add "0" or "0x" */
+ char *pre, x;
+ pre = infop->prefix;
+ if( *bufpt!=pre[0] ){
+ for(pre=infop->prefix; (x=(*pre))!=0; pre++) *(--bufpt) = x;
+ }
+ }
+ length = &buf[etBUFSIZE-1]-bufpt;
+ break;
+ case etFLOAT:
+ case etEXP:
+ case etGENERIC:
+ realvalue = va_arg(ap,double);
+#ifndef etNOFLOATINGPOINT
+ if( precision<0 ) precision = 6; /* Set default precision */
+ if( precision>etBUFSIZE-10 ) precision = etBUFSIZE-10;
+ if( realvalue<0.0 ){
+ realvalue = -realvalue;
+ prefix = '-';
+ }else{
+ if( flag_plussign ) prefix = '+';
+ else if( flag_blanksign ) prefix = ' ';
+ else prefix = 0;
+ }
+ if( infop->type==etGENERIC && precision>0 ) precision--;
+ rounder = 0.0;
+#if 0
+ /* Rounding works like BSD when the constant 0.4999 is used. Weird! */
+ for(idx=precision, rounder=0.4999; idx>0; idx--, rounder*=0.1);
+#else
+ /* It makes more sense to use 0.5 */
+ for(idx=precision, rounder=0.5; idx>0; idx--, rounder*=0.1);
+#endif
+ if( infop->type==etFLOAT ) realvalue += rounder;
+ /* Normalize realvalue to within 10.0 > realvalue >= 1.0 */
+ exp = 0;
+ if( realvalue>0.0 ){
+ while( realvalue>=1e8 && exp<=350 ){ realvalue *= 1e-8; exp+=8; }
+ while( realvalue>=10.0 && exp<=350 ){ realvalue *= 0.1; exp++; }
+ while( realvalue<1e-8 && exp>=-350 ){ realvalue *= 1e8; exp-=8; }
+ while( realvalue<1.0 && exp>=-350 ){ realvalue *= 10.0; exp--; }
+ if( exp>350 || exp<-350 ){
+ bufpt = "NaN";
+ length = 3;
+ break;
+ }
+ }
+ bufpt = buf;
+ /*
+ ** If the field type is etGENERIC, then convert to either etEXP
+ ** or etFLOAT, as appropriate.
+ */
+ flag_exp = xtype==etEXP;
+ if( xtype!=etFLOAT ){
+ realvalue += rounder;
+ if( realvalue>=10.0 ){ realvalue *= 0.1; exp++; }
+ }
+ if( xtype==etGENERIC ){
+ flag_rtz = !flag_alternateform;
+ if( exp<-4 || exp>precision ){
+ xtype = etEXP;
+ }else{
+ precision = precision - exp;
+ xtype = etFLOAT;
+ }
+ }else{
+ flag_rtz = 0;
+ }
+ /*
+ ** The "exp+precision" test causes output to be of type etEXP if
+ ** the precision is too large to fit in buf[].
+ */
+ nsd = 0;
+ if( xtype==etFLOAT && exp+precision<etBUFSIZE-30 ){
+ flag_dp = (precision>0 || flag_alternateform);
+ if( prefix ) *(bufpt++) = prefix; /* Sign */
+ if( exp<0 ) *(bufpt++) = '0'; /* Digits before "." */
+ else for(; exp>=0; exp--) *(bufpt++) = et_getdigit(&realvalue,&nsd);
+ if( flag_dp ) *(bufpt++) = '.'; /* The decimal point */
+ for(exp++; exp<0 && precision>0; precision--, exp++){
+ *(bufpt++) = '0';
+ }
+ while( (precision--)>0 ) *(bufpt++) = et_getdigit(&realvalue,&nsd);
+ *(bufpt--) = 0; /* Null terminate */
+ if( flag_rtz && flag_dp ){ /* Remove trailing zeros and "." */
+ while( bufpt>=buf && *bufpt=='0' ) *(bufpt--) = 0;
+ if( bufpt>=buf && *bufpt=='.' ) *(bufpt--) = 0;
+ }
+ bufpt++; /* point to next free slot */
+ }else{ /* etEXP or etGENERIC */
+ flag_dp = (precision>0 || flag_alternateform);
+ if( prefix ) *(bufpt++) = prefix; /* Sign */
+ *(bufpt++) = et_getdigit(&realvalue,&nsd); /* First digit */
+ if( flag_dp ) *(bufpt++) = '.'; /* Decimal point */
+ while( (precision--)>0 ) *(bufpt++) = et_getdigit(&realvalue,&nsd);
+ bufpt--; /* point to last digit */
+ if( flag_rtz && flag_dp ){ /* Remove tail zeros */
+ while( bufpt>=buf && *bufpt=='0' ) *(bufpt--) = 0;
+ if( bufpt>=buf && *bufpt=='.' ) *(bufpt--) = 0;
+ }
+ bufpt++; /* point to next free slot */
+ if( exp || flag_exp ){
+ *(bufpt++) = infop->charset[0];
+ if( exp<0 ){ *(bufpt++) = '-'; exp = -exp; } /* sign of exp */
+ else { *(bufpt++) = '+'; }
+ if( exp>=100 ){
+ *(bufpt++) = (exp/100)+'0'; /* 100's digit */
+ exp %= 100;
+ }
+ *(bufpt++) = exp/10+'0'; /* 10's digit */
+ *(bufpt++) = exp%10+'0'; /* 1's digit */
+ }
+ }
+ /* The converted number is in buf[] and zero terminated. Output it.
+ ** Note that the number is in the usual order, not reversed as with
+ ** integer conversions. */
+ length = bufpt-buf;
+ bufpt = buf;
+
+ /* Special case: Add leading zeros if the flag_zeropad flag is
+ ** set and we are not left justified */
+ if( flag_zeropad && !flag_leftjustify && length < width){
+ int i;
+ int nPad = width - length;
+ for(i=width; i>=nPad; i--){
+ bufpt[i] = bufpt[i-nPad];
+ }
+ i = prefix!=0;
+ while( nPad-- ) bufpt[i++] = '0';
+ length = width;
+ }
+#endif
+ break;
+ case etSIZE:
+ *(va_arg(ap,int*)) = count;
+ length = width = 0;
+ break;
+ case etPERCENT:
+ buf[0] = '%';
+ bufpt = buf;
+ length = 1;
+ break;
+ case etCHARLIT:
+ case etCHARX:
+ c = buf[0] = (xtype==etCHARX ? va_arg(ap,int) : *++fmt);
+ if( precision>=0 ){
+ for(idx=1; idx<precision; idx++) buf[idx] = c;
+ length = precision;
+ }else{
+ length =1;
+ }
+ bufpt = buf;
+ break;
+ case etSTRING:
+ case etDYNSTRING:
+ bufpt = va_arg(ap,char*);
+ if( bufpt==0 ){
+ bufpt = "";
+ }else if( xtype==etDYNSTRING ){
+ zExtra = bufpt;
+ }
+ length = strlen(bufpt);
+ if( precision>=0 && precision<length ) length = precision;
+ break;
+ case etSQLESCAPE:
+ case etSQLESCAPE2:
+ {
+ int i, j, n, c, isnull;
+ char *arg = va_arg(ap,char*);
+ isnull = arg==0;
+ if( isnull ) arg = (xtype==etSQLESCAPE2 ? "NULL" : "(NULL)");
+ for(i=n=0; (c=arg[i])!=0; i++){
+ if( c=='\'' ) n++;
+ }
+ n += i + 1 + ((!isnull && xtype==etSQLESCAPE2) ? 2 : 0);
+ if( n>etBUFSIZE ){
+ bufpt = zExtra = sqliteMalloc( n );
+ if( bufpt==0 ) return -1;
+ }else{
+ bufpt = buf;
+ }
+ j = 0;
+ if( !isnull && xtype==etSQLESCAPE2 ) bufpt[j++] = '\'';
+ for(i=0; (c=arg[i])!=0; i++){
+ bufpt[j++] = c;
+ if( c=='\'' ) bufpt[j++] = c;
+ }
+ if( !isnull && xtype==etSQLESCAPE2 ) bufpt[j++] = '\'';
+ bufpt[j] = 0;
+ length = j;
+ if( precision>=0 && precision<length ) length = precision;
+ }
+ break;
+ case etTOKEN: {
+ Token *pToken = va_arg(ap, Token*);
+ (*func)(arg, pToken->z, pToken->n);
+ length = width = 0;
+ break;
+ }
+ case etSRCLIST: {
+ SrcList *pSrc = va_arg(ap, SrcList*);
+ int k = va_arg(ap, int);
+ struct SrcList_item *pItem = &pSrc->a[k];
+ assert( k>=0 && k<pSrc->nSrc );
+ if( pItem->zDatabase && pItem->zDatabase[0] ){
+ (*func)(arg, pItem->zDatabase, strlen(pItem->zDatabase));
+ (*func)(arg, ".", 1);
+ }
+ (*func)(arg, pItem->zName, strlen(pItem->zName));
+ length = width = 0;
+ break;
+ }
+ case etERROR:
+ buf[0] = '%';
+ buf[1] = c;
+ errorflag = 0;
+ idx = 1+(c!=0);
+ (*func)(arg,"%",idx);
+ count += idx;
+ if( c==0 ) fmt--;
+ break;
+ }/* End switch over the format type */
+ /*
+ ** The text of the conversion is pointed to by "bufpt" and is
+ ** "length" characters long. The field width is "width". Do
+ ** the output.
+ */
+ if( !flag_leftjustify ){
+ register int nspace;
+ nspace = width-length;
+ if( nspace>0 ){
+ count += nspace;
+ while( nspace>=etSPACESIZE ){
+ (*func)(arg,spaces,etSPACESIZE);
+ nspace -= etSPACESIZE;
+ }
+ if( nspace>0 ) (*func)(arg,spaces,nspace);
+ }
+ }
+ if( length>0 ){
+ (*func)(arg,bufpt,length);
+ count += length;
+ }
+ if( flag_leftjustify ){
+ register int nspace;
+ nspace = width-length;
+ if( nspace>0 ){
+ count += nspace;
+ while( nspace>=etSPACESIZE ){
+ (*func)(arg,spaces,etSPACESIZE);
+ nspace -= etSPACESIZE;
+ }
+ if( nspace>0 ) (*func)(arg,spaces,nspace);
+ }
+ }
+ if( zExtra ){
+ sqliteFree(zExtra);
+ }
+ }/* End for loop over the format string */
+ return errorflag ? -1 : count;
+} /* End of function */
+
+
+/* This structure is used to store state information about the
+** write to memory that is currently in progress.
+*/
+struct sgMprintf {
+ char *zBase; /* A base allocation */
+ char *zText; /* The string collected so far */
+ int nChar; /* Length of the string so far */
+ int nTotal; /* Output size if unconstrained */
+ int nAlloc; /* Amount of space allocated in zText */
+ void *(*xRealloc)(void*,int); /* Function used to realloc memory */
+};
+
+/*
+** This function implements the callback from vxprintf.
+**
+** This routine adds nNewChar characters of text in zNewText to
+** the sgMprintf structure pointed to by "arg".
+*/
+static void mout(void *arg, const char *zNewText, int nNewChar){
+ struct sgMprintf *pM = (struct sgMprintf*)arg;
+ pM->nTotal += nNewChar;
+ if( pM->nChar + nNewChar + 1 > pM->nAlloc ){
+ if( pM->xRealloc==0 ){
+ nNewChar = pM->nAlloc - pM->nChar - 1;
+ }else{
+ pM->nAlloc = pM->nChar + nNewChar*2 + 1;
+ if( pM->zText==pM->zBase ){
+ pM->zText = pM->xRealloc(0, pM->nAlloc);
+ if( pM->zText && pM->nChar ){
+ memcpy(pM->zText, pM->zBase, pM->nChar);
+ }
+ }else{
+ pM->zText = pM->xRealloc(pM->zText, pM->nAlloc);
+ }
+ }
+ }
+ if( pM->zText ){
+ if( nNewChar>0 ){
+ memcpy(&pM->zText[pM->nChar], zNewText, nNewChar);
+ pM->nChar += nNewChar;
+ }
+ pM->zText[pM->nChar] = 0;
+ }
+}
+
+/*
+** This routine is a wrapper around xprintf() that invokes mout() as
+** the consumer.
+*/
+static char *base_vprintf(
+ void *(*xRealloc)(void*,int), /* Routine to realloc memory. May be NULL */
+ int useInternal, /* Use internal %-conversions if true */
+ char *zInitBuf, /* Initially write here, before mallocing */
+ int nInitBuf, /* Size of zInitBuf[] */
+ const char *zFormat, /* format string */
+ va_list ap /* arguments */
+){
+ struct sgMprintf sM;
+ sM.zBase = sM.zText = zInitBuf;
+ sM.nChar = sM.nTotal = 0;
+ sM.nAlloc = nInitBuf;
+ sM.xRealloc = xRealloc;
+ vxprintf(mout, &sM, useInternal, zFormat, ap);
+ if( xRealloc ){
+ if( sM.zText==sM.zBase ){
+ sM.zText = xRealloc(0, sM.nChar+1);
+ memcpy(sM.zText, sM.zBase, sM.nChar+1);
+ }else if( sM.nAlloc>sM.nChar+10 ){
+ sM.zText = xRealloc(sM.zText, sM.nChar+1);
+ }
+ }
+ return sM.zText;
+}
+
+/*
+** Realloc that is a real function, not a macro.
+*/
+static void *printf_realloc(void *old, int size){
+ return sqliteRealloc(old,size);
+}
+
+/*
+** Print into memory obtained from sqliteMalloc(). Use the internal
+** %-conversion extensions.
+*/
+char *sqliteVMPrintf(const char *zFormat, va_list ap){
+ char zBase[1000];
+ return base_vprintf(printf_realloc, 1, zBase, sizeof(zBase), zFormat, ap);
+}
+
+/*
+** Print into memory obtained from sqliteMalloc(). Use the internal
+** %-conversion extensions.
+*/
+char *sqliteMPrintf(const char *zFormat, ...){
+ va_list ap;
+ char *z;
+ char zBase[1000];
+ va_start(ap, zFormat);
+ z = base_vprintf(printf_realloc, 1, zBase, sizeof(zBase), zFormat, ap);
+ va_end(ap);
+ return z;
+}
+
+/*
+** Print into memory obtained from malloc(). Do not use the internal
+** %-conversion extensions. This routine is for use by external users.
+*/
+char *sqlite_mprintf(const char *zFormat, ...){
+ va_list ap;
+ char *z;
+ char zBuf[200];
+
+ va_start(ap,zFormat);
+ z = base_vprintf((void*(*)(void*,int))realloc, 0,
+ zBuf, sizeof(zBuf), zFormat, ap);
+ va_end(ap);
+ return z;
+}
+
+/* This is the varargs version of sqlite_mprintf.
+*/
+char *sqlite_vmprintf(const char *zFormat, va_list ap){
+ char zBuf[200];
+ return base_vprintf((void*(*)(void*,int))realloc, 0,
+ zBuf, sizeof(zBuf), zFormat, ap);
+}
+
+/*
+** sqlite_snprintf() works like snprintf() except that it ignores the
+** current locale settings. This is important for SQLite because we
+** are not able to use a "," as the decimal point in place of "." as
+** specified by some locales.
+*/
+char *sqlite_snprintf(int n, char *zBuf, const char *zFormat, ...){
+ char *z;
+ va_list ap;
+
+ va_start(ap,zFormat);
+ z = base_vprintf(0, 0, zBuf, n, zFormat, ap);
+ va_end(ap);
+ return z;
+}
+
+/*
+** The following four routines implement the varargs versions of the
+** sqlite_exec() and sqlite_get_table() interfaces. See the sqlite.h
+** header file for a more detailed description of how these interfaces
+** work.
+**
+** These routines are all just simple wrappers.
+*/
+int sqlite_exec_printf(
+ sqlite *db, /* An open database */
+ const char *sqlFormat, /* printf-style format string for the SQL */
+ sqlite_callback xCallback, /* Callback function */
+ void *pArg, /* 1st argument to callback function */
+ char **errmsg, /* Error msg written here */
+ ... /* Arguments to the format string. */
+){
+ va_list ap;
+ int rc;
+
+ va_start(ap, errmsg);
+ rc = sqlite_exec_vprintf(db, sqlFormat, xCallback, pArg, errmsg, ap);
+ va_end(ap);
+ return rc;
+}
+int sqlite_exec_vprintf(
+ sqlite *db, /* An open database */
+ const char *sqlFormat, /* printf-style format string for the SQL */
+ sqlite_callback xCallback, /* Callback function */
+ void *pArg, /* 1st argument to callback function */
+ char **errmsg, /* Error msg written here */
+ va_list ap /* Arguments to the format string. */
+){
+ char *zSql;
+ int rc;
+
+ zSql = sqlite_vmprintf(sqlFormat, ap);
+ rc = sqlite_exec(db, zSql, xCallback, pArg, errmsg);
+ free(zSql);
+ return rc;
+}
+int sqlite_get_table_printf(
+ sqlite *db, /* An open database */
+ const char *sqlFormat, /* printf-style format string for the SQL */
+ char ***resultp, /* Result written to a char *[] that this points to */
+ int *nrow, /* Number of result rows written here */
+ int *ncol, /* Number of result columns written here */
+ char **errmsg, /* Error msg written here */
+ ... /* Arguments to the format string */
+){
+ va_list ap;
+ int rc;
+
+ va_start(ap, errmsg);
+ rc = sqlite_get_table_vprintf(db, sqlFormat, resultp, nrow, ncol, errmsg, ap);
+ va_end(ap);
+ return rc;
+}
+int sqlite_get_table_vprintf(
+ sqlite *db, /* An open database */
+ const char *sqlFormat, /* printf-style format string for the SQL */
+ char ***resultp, /* Result written to a char *[] that this points to */
+ int *nrow, /* Number of result rows written here */
+ int *ncolumn, /* Number of result columns written here */
+ char **errmsg, /* Error msg written here */
+ va_list ap /* Arguments to the format string */
+){
+ char *zSql;
+ int rc;
+
+ zSql = sqlite_vmprintf(sqlFormat, ap);
+ rc = sqlite_get_table(db, zSql, resultp, nrow, ncolumn, errmsg);
+ free(zSql);
+ return rc;
+}
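+
+/*
+** Editorial usage sketch for the wrappers above (not part of the
+** original source; the table and column names are hypothetical).
+** A caller might format an integer directly into the SQL text before
+** execution:
+**
+**     char *zErr = 0;
+**     int rc = sqlite_exec_printf(db,
+**                  "SELECT name FROM users WHERE id = %d",
+**                  0, 0, &zErr, 42);
+**
+** On failure, zErr holds an error message that the caller must release.
+*/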
diff --git a/usr/src/cmd/svc/configd/sqlite/src/random.c b/usr/src/cmd/svc/configd/sqlite/src/random.c
new file mode 100644
index 0000000000..e0fac930a6
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/random.c
@@ -0,0 +1,100 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code to implement a pseudo-random number
+** generator (PRNG) for SQLite.
+**
+** Random numbers are used by some of the database backends in order
+** to generate random integer keys for tables or random filenames.
+**
+** $Id: random.c,v 1.11 2004/02/11 09:46:33 drh Exp $
+*/
+#include "sqliteInt.h"
+#include "os.h"
+
+
+/*
+** Get a single 8-bit random value from the RC4 PRNG. The Mutex
+** must be held while executing this routine.
+**
+** Why not just use a library random generator like lrand48() for this?
+** Because the OP_NewRecno opcode in the VDBE depends on having a very
+** good source of random numbers. The lrand48() library function may
+** well be good enough. But maybe not. Or maybe lrand48() has some
+** subtle problems on some systems that could cause problems. It is hard
+** to know. To minimize the risk of problems due to bad lrand48()
+** implementations, SQLite uses this random number generator based
+** on RC4, which we know works very well.
+*/
+static int randomByte(){
+ unsigned char t;
+
+ /* All threads share a single random number generator.
+ ** This structure is the current state of the generator.
+ */
+ static struct {
+ unsigned char isInit; /* True if initialized */
+ unsigned char i, j; /* State variables */
+ unsigned char s[256]; /* State variables */
+ } prng;
+
+ /* Initialize the state of the random number generator once,
+ ** the first time this routine is called. The seed value does
+ ** not need to contain a lot of randomness since we are not
+ ** trying to do secure encryption or anything like that...
+ **
+ ** Nothing in this file or anywhere else in SQLite does any kind of
+ ** encryption. The RC4 algorithm is being used as a PRNG (pseudo-random
+ ** number generator) not as an encryption device.
+ */
+ if( !prng.isInit ){
+ int i;
+ char k[256];
+ prng.j = 0;
+ prng.i = 0;
+ sqliteOsRandomSeed(k);
+ for(i=0; i<256; i++){
+ prng.s[i] = i;
+ }
+ for(i=0; i<256; i++){
+ prng.j += prng.s[i] + k[i];
+ t = prng.s[prng.j];
+ prng.s[prng.j] = prng.s[i];
+ prng.s[i] = t;
+ }
+ prng.isInit = 1;
+ }
+
+ /* Generate and return a single random byte
+ */
+ prng.i++;
+ t = prng.s[prng.i];
+ prng.j += t;
+ prng.s[prng.i] = prng.s[prng.j];
+ prng.s[prng.j] = t;
+ t += prng.s[prng.i];
+ return prng.s[t];
+}
+
+/*
+** Return N random bytes.
+*/
+void sqliteRandomness(int N, void *pBuf){
+ unsigned char *zBuf = pBuf;
+ sqliteOsEnterMutex();
+ while( N-- ){
+ *(zBuf++) = randomByte();
+ }
+ sqliteOsLeaveMutex();
+}
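+
+/*
+** Editorial usage sketch (not part of the original source): internal
+** callers ask for however many bytes they need and interpret them as
+** they see fit, for example to obtain a random 32-bit value:
+**
+**     int r;
+**     sqliteRandomness(sizeof(r), &r);
+*/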
diff --git a/usr/src/cmd/svc/configd/sqlite/src/select.c b/usr/src/cmd/svc/configd/sqlite/src/select.c
new file mode 100644
index 0000000000..92acc32c98
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/select.c
@@ -0,0 +1,2437 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains C code routines that are called by the parser
+** to handle SELECT statements in SQLite.
+**
+** $Id: select.c,v 1.161.2.4 2004/07/20 01:45:49 drh Exp $
+*/
+#include "sqliteInt.h"
+
+
+/*
+** Allocate a new Select structure and return a pointer to that
+** structure.
+*/
+Select *sqliteSelectNew(
+ ExprList *pEList, /* which columns to include in the result */
+ SrcList *pSrc, /* the FROM clause -- which tables to scan */
+ Expr *pWhere, /* the WHERE clause */
+ ExprList *pGroupBy, /* the GROUP BY clause */
+ Expr *pHaving, /* the HAVING clause */
+ ExprList *pOrderBy, /* the ORDER BY clause */
+ int isDistinct, /* true if the DISTINCT keyword is present */
+ int nLimit, /* LIMIT value. -1 means not used */
+ int nOffset /* OFFSET value. 0 means no offset */
+){
+ Select *pNew;
+ pNew = sqliteMalloc( sizeof(*pNew) );
+ if( pNew==0 ){
+ sqliteExprListDelete(pEList);
+ sqliteSrcListDelete(pSrc);
+ sqliteExprDelete(pWhere);
+ sqliteExprListDelete(pGroupBy);
+ sqliteExprDelete(pHaving);
+ sqliteExprListDelete(pOrderBy);
+ }else{
+ if( pEList==0 ){
+ pEList = sqliteExprListAppend(0, sqliteExpr(TK_ALL,0,0,0), 0);
+ }
+ pNew->pEList = pEList;
+ pNew->pSrc = pSrc;
+ pNew->pWhere = pWhere;
+ pNew->pGroupBy = pGroupBy;
+ pNew->pHaving = pHaving;
+ pNew->pOrderBy = pOrderBy;
+ pNew->isDistinct = isDistinct;
+ pNew->op = TK_SELECT;
+ pNew->nLimit = nLimit;
+ pNew->nOffset = nOffset;
+ pNew->iLimit = -1;
+ pNew->iOffset = -1;
+ }
+ return pNew;
+}
+
+/*
+** Given 1 to 3 identifiers preceding the JOIN keyword, determine the
+** type of join. Return an integer constant that expresses that type
+** in terms of the following bit values:
+**
+** JT_INNER
+** JT_OUTER
+** JT_NATURAL
+** JT_LEFT
+** JT_RIGHT
+**
+** A full outer join is the combination of JT_LEFT and JT_RIGHT.
+**
+** If an illegal or unsupported join type is seen, then still return
+** a join type, but put an error in the pParse structure.
+*/
+int sqliteJoinType(Parse *pParse, Token *pA, Token *pB, Token *pC){
+ int jointype = 0;
+ Token *apAll[3];
+ Token *p;
+ static struct {
+ const char *zKeyword;
+ int nChar;
+ int code;
+ } keywords[] = {
+ { "natural", 7, JT_NATURAL },
+ { "left", 4, JT_LEFT|JT_OUTER },
+ { "right", 5, JT_RIGHT|JT_OUTER },
+ { "full", 4, JT_LEFT|JT_RIGHT|JT_OUTER },
+ { "outer", 5, JT_OUTER },
+ { "inner", 5, JT_INNER },
+ { "cross", 5, JT_INNER },
+ };
+ int i, j;
+ apAll[0] = pA;
+ apAll[1] = pB;
+ apAll[2] = pC;
+ for(i=0; i<3 && apAll[i]; i++){
+ p = apAll[i];
+ for(j=0; j<sizeof(keywords)/sizeof(keywords[0]); j++){
+ if( p->n==keywords[j].nChar
+ && sqliteStrNICmp(p->z, keywords[j].zKeyword, p->n)==0 ){
+ jointype |= keywords[j].code;
+ break;
+ }
+ }
+ if( j>=sizeof(keywords)/sizeof(keywords[0]) ){
+ jointype |= JT_ERROR;
+ break;
+ }
+ }
+ if(
+ (jointype & (JT_INNER|JT_OUTER))==(JT_INNER|JT_OUTER) ||
+ (jointype & JT_ERROR)!=0
+ ){
+ static Token dummy = { 0, 0 };
+ char *zSp1 = " ", *zSp2 = " ";
+ if( pB==0 ){ pB = &dummy; zSp1 = 0; }
+ if( pC==0 ){ pC = &dummy; zSp2 = 0; }
+ sqliteSetNString(&pParse->zErrMsg, "unknown or unsupported join type: ", 0,
+ pA->z, pA->n, zSp1, 1, pB->z, pB->n, zSp2, 1, pC->z, pC->n, 0);
+ pParse->nErr++;
+ jointype = JT_INNER;
+ }else if( jointype & JT_RIGHT ){
+ sqliteErrorMsg(pParse,
+ "RIGHT and FULL OUTER JOINs are not currently supported");
+ jointype = JT_INNER;
+ }
+ return jointype;
+}
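+
+/*
+** Editorial examples (not part of the original source): for the phrase
+** "NATURAL LEFT OUTER JOIN" the tokens pA, pB, pC are "NATURAL", "LEFT",
+** and "OUTER", and the value returned is JT_NATURAL|JT_LEFT|JT_OUTER.
+** For "CROSS JOIN" the single token "CROSS" maps to JT_INNER.
+*/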
+
+/*
+** Return the index of a column in a table. Return -1 if the column
+** is not contained in the table.
+*/
+static int columnIndex(Table *pTab, const char *zCol){
+ int i;
+ for(i=0; i<pTab->nCol; i++){
+ if( sqliteStrICmp(pTab->aCol[i].zName, zCol)==0 ) return i;
+ }
+ return -1;
+}
+
+/*
+** Add a term to the WHERE expression in *ppExpr that requires the
+** zCol column to be equal in the two tables pTab1 and pTab2.
+*/
+static void addWhereTerm(
+ const char *zCol, /* Name of the column */
+ const Table *pTab1, /* First table */
+ const Table *pTab2, /* Second table */
+ Expr **ppExpr /* Add the equality term to this expression */
+){
+ Token dummy;
+ Expr *pE1a, *pE1b, *pE1c;
+ Expr *pE2a, *pE2b, *pE2c;
+ Expr *pE;
+
+ dummy.z = zCol;
+ dummy.n = strlen(zCol);
+ dummy.dyn = 0;
+ pE1a = sqliteExpr(TK_ID, 0, 0, &dummy);
+ pE2a = sqliteExpr(TK_ID, 0, 0, &dummy);
+ dummy.z = pTab1->zName;
+ dummy.n = strlen(dummy.z);
+ pE1b = sqliteExpr(TK_ID, 0, 0, &dummy);
+ dummy.z = pTab2->zName;
+ dummy.n = strlen(dummy.z);
+ pE2b = sqliteExpr(TK_ID, 0, 0, &dummy);
+ pE1c = sqliteExpr(TK_DOT, pE1b, pE1a, 0);
+ pE2c = sqliteExpr(TK_DOT, pE2b, pE2a, 0);
+ pE = sqliteExpr(TK_EQ, pE1c, pE2c, 0);
+ ExprSetProperty(pE, EP_FromJoin);
+ if( *ppExpr ){
+ *ppExpr = sqliteExpr(TK_AND, *ppExpr, pE, 0);
+ }else{
+ *ppExpr = pE;
+ }
+}
+
+/*
+** Set the EP_FromJoin property on all terms of the given expression.
+**
+** The EP_FromJoin property is used on terms of an expression to tell
+** the LEFT OUTER JOIN processing logic that this term is part of the
+** join restriction specified in the ON or USING clause and not a part
+** of the more general WHERE clause. These terms are moved over to the
+** WHERE clause during join processing but we need to remember that they
+** originated in the ON or USING clause.
+*/
+static void setJoinExpr(Expr *p){
+ while( p ){
+ ExprSetProperty(p, EP_FromJoin);
+ setJoinExpr(p->pLeft);
+ p = p->pRight;
+ }
+}
+
+/*
+** This routine processes the join information for a SELECT statement.
+** ON and USING clauses are converted into extra terms of the WHERE clause.
+** NATURAL joins also create extra WHERE clause terms.
+**
+** This routine returns the number of errors encountered.
+*/
+static int sqliteProcessJoin(Parse *pParse, Select *p){
+ SrcList *pSrc;
+ int i, j;
+ pSrc = p->pSrc;
+ for(i=0; i<pSrc->nSrc-1; i++){
+ struct SrcList_item *pTerm = &pSrc->a[i];
+ struct SrcList_item *pOther = &pSrc->a[i+1];
+
+ if( pTerm->pTab==0 || pOther->pTab==0 ) continue;
+
+ /* When the NATURAL keyword is present, add WHERE clause terms for
+ ** every column that the two tables have in common.
+ */
+ if( pTerm->jointype & JT_NATURAL ){
+ Table *pTab;
+ if( pTerm->pOn || pTerm->pUsing ){
+ sqliteErrorMsg(pParse, "a NATURAL join may not have "
+ "an ON or USING clause", 0);
+ return 1;
+ }
+ pTab = pTerm->pTab;
+ for(j=0; j<pTab->nCol; j++){
+ if( columnIndex(pOther->pTab, pTab->aCol[j].zName)>=0 ){
+ addWhereTerm(pTab->aCol[j].zName, pTab, pOther->pTab, &p->pWhere);
+ }
+ }
+ }
+
+ /* Disallow both ON and USING clauses in the same join
+ */
+ if( pTerm->pOn && pTerm->pUsing ){
+ sqliteErrorMsg(pParse, "cannot have both ON and USING "
+ "clauses in the same join");
+ return 1;
+ }
+
+ /* Add the ON clause to the end of the WHERE clause, connected by
+ ** an AND operator.
+ */
+ if( pTerm->pOn ){
+ setJoinExpr(pTerm->pOn);
+ if( p->pWhere==0 ){
+ p->pWhere = pTerm->pOn;
+ }else{
+ p->pWhere = sqliteExpr(TK_AND, p->pWhere, pTerm->pOn, 0);
+ }
+ pTerm->pOn = 0;
+ }
+
+ /* Create extra terms on the WHERE clause for each column named
+ ** in the USING clause. Example: If the two tables to be joined are
+ ** A and B and the USING clause names X, Y, and Z, then add this
+ ** to the WHERE clause: A.X=B.X AND A.Y=B.Y AND A.Z=B.Z
+ ** Report an error if any column mentioned in the USING clause is
+ ** not contained in both tables to be joined.
+ */
+ if( pTerm->pUsing ){
+ IdList *pList;
+ int j;
+ assert( i<pSrc->nSrc-1 );
+ pList = pTerm->pUsing;
+ for(j=0; j<pList->nId; j++){
+ if( columnIndex(pTerm->pTab, pList->a[j].zName)<0 ||
+ columnIndex(pOther->pTab, pList->a[j].zName)<0 ){
+ sqliteErrorMsg(pParse, "cannot join using column %s - column "
+ "not present in both tables", pList->a[j].zName);
+ return 1;
+ }
+ addWhereTerm(pList->a[j].zName, pTerm->pTab, pOther->pTab, &p->pWhere);
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+** Delete the given Select structure and all of its substructures.
+*/
+void sqliteSelectDelete(Select *p){
+ if( p==0 ) return;
+ sqliteExprListDelete(p->pEList);
+ sqliteSrcListDelete(p->pSrc);
+ sqliteExprDelete(p->pWhere);
+ sqliteExprListDelete(p->pGroupBy);
+ sqliteExprDelete(p->pHaving);
+ sqliteExprListDelete(p->pOrderBy);
+ sqliteSelectDelete(p->pPrior);
+ sqliteFree(p->zSelect);
+ sqliteFree(p);
+}
+
+/*
+** Delete the aggregate information from the parse structure.
+*/
+static void sqliteAggregateInfoReset(Parse *pParse){
+ sqliteFree(pParse->aAgg);
+ pParse->aAgg = 0;
+ pParse->nAgg = 0;
+ pParse->useAgg = 0;
+}
+
+/*
+** Insert code into "v" that will push the record on the top of the
+** stack into the sorter.
+*/
+static void pushOntoSorter(Parse *pParse, Vdbe *v, ExprList *pOrderBy){
+ char *zSortOrder;
+ int i;
+ zSortOrder = sqliteMalloc( pOrderBy->nExpr + 1 );
+ if( zSortOrder==0 ) return;
+ for(i=0; i<pOrderBy->nExpr; i++){
+ int order = pOrderBy->a[i].sortOrder;
+ int type;
+ int c;
+ if( (order & SQLITE_SO_TYPEMASK)==SQLITE_SO_TEXT ){
+ type = SQLITE_SO_TEXT;
+ }else if( (order & SQLITE_SO_TYPEMASK)==SQLITE_SO_NUM ){
+ type = SQLITE_SO_NUM;
+ }else if( pParse->db->file_format>=4 ){
+ type = sqliteExprType(pOrderBy->a[i].pExpr);
+ }else{
+ type = SQLITE_SO_NUM;
+ }
+ if( (order & SQLITE_SO_DIRMASK)==SQLITE_SO_ASC ){
+ c = type==SQLITE_SO_TEXT ? 'A' : '+';
+ }else{
+ c = type==SQLITE_SO_TEXT ? 'D' : '-';
+ }
+ zSortOrder[i] = c;
+ sqliteExprCode(pParse, pOrderBy->a[i].pExpr);
+ }
+ zSortOrder[pOrderBy->nExpr] = 0;
+ sqliteVdbeOp3(v, OP_SortMakeKey, pOrderBy->nExpr, 0, zSortOrder, P3_DYNAMIC);
+ sqliteVdbeAddOp(v, OP_SortPut, 0, 0);
+}
+
+/*
+** This routine adds a P3 argument to the last VDBE opcode that was
+** inserted. The P3 argument added is a string suitable for the
+** OP_MakeKey or OP_MakeIdxKey opcodes. The string consists of
+** characters 't' or 'n' depending on whether or not the various
+** fields of the key to be generated should be treated as numeric
+** or as text. See the OP_MakeKey and OP_MakeIdxKey opcode
+** documentation for additional information about the P3 string.
+** See also the sqliteAddIdxKeyType() routine.
+*/
+void sqliteAddKeyType(Vdbe *v, ExprList *pEList){
+ int nColumn = pEList->nExpr;
+ char *zType = sqliteMalloc( nColumn+1 );
+ int i;
+ if( zType==0 ) return;
+ for(i=0; i<nColumn; i++){
+ zType[i] = sqliteExprType(pEList->a[i].pExpr)==SQLITE_SO_NUM ? 'n' : 't';
+ }
+ zType[i] = 0;
+ sqliteVdbeChangeP3(v, -1, zType, P3_DYNAMIC);
+}
+
+/*
+** Add code to implement the OFFSET and LIMIT
+*/
+static void codeLimiter(
+ Vdbe *v, /* Generate code into this VM */
+ Select *p, /* The SELECT statement being coded */
+ int iContinue, /* Jump here to skip the current record */
+ int iBreak, /* Jump here to end the loop */
+ int nPop /* Number of times to pop stack when jumping */
+){
+ if( p->iOffset>=0 ){
+ int addr = sqliteVdbeCurrentAddr(v) + 2;
+ if( nPop>0 ) addr++;
+ sqliteVdbeAddOp(v, OP_MemIncr, p->iOffset, addr);
+ if( nPop>0 ){
+ sqliteVdbeAddOp(v, OP_Pop, nPop, 0);
+ }
+ sqliteVdbeAddOp(v, OP_Goto, 0, iContinue);
+ }
+ if( p->iLimit>=0 ){
+ sqliteVdbeAddOp(v, OP_MemIncr, p->iLimit, iBreak);
+ }
+}
+
+/*
+** This routine generates the code for the inside of the inner loop
+** of a SELECT.
+**
+** If srcTab and nColumn are both zero, then the pEList expressions
+** are evaluated in order to get the data for this row. If nColumn>0
+** then data is pulled from srcTab and pEList is used only to get the
+** datatypes for each column.
+*/
+static int selectInnerLoop(
+ Parse *pParse, /* The parser context */
+ Select *p, /* The complete select statement being coded */
+ ExprList *pEList, /* List of values being extracted */
+ int srcTab, /* Pull data from this table */
+ int nColumn, /* Number of columns in the source table */
+ ExprList *pOrderBy, /* If not NULL, sort results using this key */
+ int distinct, /* If >=0, make sure results are distinct */
+ int eDest, /* How to dispose of the results */
+ int iParm, /* An argument to the disposal method */
+ int iContinue, /* Jump here to continue with next row */
+ int iBreak /* Jump here to break out of the inner loop */
+){
+ Vdbe *v = pParse->pVdbe;
+ int i;
+ int hasDistinct; /* True if the DISTINCT keyword is present */
+
+ if( v==0 ) return 0;
+ assert( pEList!=0 );
+
+ /* If there was a LIMIT clause on the SELECT statement, then do the check
+ ** to see if this row should be output.
+ */
+ hasDistinct = distinct>=0 && pEList && pEList->nExpr>0;
+ if( pOrderBy==0 && !hasDistinct ){
+ codeLimiter(v, p, iContinue, iBreak, 0);
+ }
+
+ /* Pull the requested columns.
+ */
+ if( nColumn>0 ){
+ for(i=0; i<nColumn; i++){
+ sqliteVdbeAddOp(v, OP_Column, srcTab, i);
+ }
+ }else{
+ nColumn = pEList->nExpr;
+ for(i=0; i<pEList->nExpr; i++){
+ sqliteExprCode(pParse, pEList->a[i].pExpr);
+ }
+ }
+
+ /* If the DISTINCT keyword was present on the SELECT statement
+ ** and this row has been seen before, then do not make this row
+ ** part of the result.
+ */
+ if( hasDistinct ){
+#if NULL_ALWAYS_DISTINCT
+ sqliteVdbeAddOp(v, OP_IsNull, -pEList->nExpr, sqliteVdbeCurrentAddr(v)+7);
+#endif
+ sqliteVdbeAddOp(v, OP_MakeKey, pEList->nExpr, 1);
+ if( pParse->db->file_format>=4 ) sqliteAddKeyType(v, pEList);
+ sqliteVdbeAddOp(v, OP_Distinct, distinct, sqliteVdbeCurrentAddr(v)+3);
+ sqliteVdbeAddOp(v, OP_Pop, pEList->nExpr+1, 0);
+ sqliteVdbeAddOp(v, OP_Goto, 0, iContinue);
+ sqliteVdbeAddOp(v, OP_String, 0, 0);
+ sqliteVdbeAddOp(v, OP_PutStrKey, distinct, 0);
+ if( pOrderBy==0 ){
+ codeLimiter(v, p, iContinue, iBreak, nColumn);
+ }
+ }
+
+ switch( eDest ){
+ /* In this mode, write each query result to the key of the temporary
+ ** table iParm.
+ */
+ case SRT_Union: {
+ sqliteVdbeAddOp(v, OP_MakeRecord, nColumn, NULL_ALWAYS_DISTINCT);
+ sqliteVdbeAddOp(v, OP_String, 0, 0);
+ sqliteVdbeAddOp(v, OP_PutStrKey, iParm, 0);
+ break;
+ }
+
+ /* Store the result as data using a unique key.
+ */
+ case SRT_Table:
+ case SRT_TempTable: {
+ sqliteVdbeAddOp(v, OP_MakeRecord, nColumn, 0);
+ if( pOrderBy ){
+ pushOntoSorter(pParse, v, pOrderBy);
+ }else{
+ sqliteVdbeAddOp(v, OP_NewRecno, iParm, 0);
+ sqliteVdbeAddOp(v, OP_Pull, 1, 0);
+ sqliteVdbeAddOp(v, OP_PutIntKey, iParm, 0);
+ }
+ break;
+ }
+
+ /* Construct a record from the query result, but instead of
+ ** saving that record, use it as a key to delete elements from
+ ** the temporary table iParm.
+ */
+ case SRT_Except: {
+ int addr;
+ addr = sqliteVdbeAddOp(v, OP_MakeRecord, nColumn, NULL_ALWAYS_DISTINCT);
+ sqliteVdbeAddOp(v, OP_NotFound, iParm, addr+3);
+ sqliteVdbeAddOp(v, OP_Delete, iParm, 0);
+ break;
+ }
+
+ /* If we are creating a set for an "expr IN (SELECT ...)" construct,
+ ** then there should be a single item on the stack. Write this
+ ** item into the set table with bogus data.
+ */
+ case SRT_Set: {
+ int addr1 = sqliteVdbeCurrentAddr(v);
+ int addr2;
+ assert( nColumn==1 );
+ sqliteVdbeAddOp(v, OP_NotNull, -1, addr1+3);
+ sqliteVdbeAddOp(v, OP_Pop, 1, 0);
+ addr2 = sqliteVdbeAddOp(v, OP_Goto, 0, 0);
+ if( pOrderBy ){
+ pushOntoSorter(pParse, v, pOrderBy);
+ }else{
+ sqliteVdbeAddOp(v, OP_String, 0, 0);
+ sqliteVdbeAddOp(v, OP_PutStrKey, iParm, 0);
+ }
+ sqliteVdbeChangeP2(v, addr2, sqliteVdbeCurrentAddr(v));
+ break;
+ }
+
+ /* If this is a scalar select that is part of an expression, then
+ ** store the results in the appropriate memory cell and break out
+ ** of the scan loop.
+ */
+ case SRT_Mem: {
+ assert( nColumn==1 );
+ if( pOrderBy ){
+ pushOntoSorter(pParse, v, pOrderBy);
+ }else{
+ sqliteVdbeAddOp(v, OP_MemStore, iParm, 1);
+ sqliteVdbeAddOp(v, OP_Goto, 0, iBreak);
+ }
+ break;
+ }
+
+ /* Send the data to the callback function.
+ */
+ case SRT_Callback:
+ case SRT_Sorter: {
+ if( pOrderBy ){
+ sqliteVdbeAddOp(v, OP_SortMakeRec, nColumn, 0);
+ pushOntoSorter(pParse, v, pOrderBy);
+ }else{
+ assert( eDest==SRT_Callback );
+ sqliteVdbeAddOp(v, OP_Callback, nColumn, 0);
+ }
+ break;
+ }
+
+ /* Invoke a subroutine to handle the results. The subroutine itself
+ ** is responsible for popping the results off of the stack.
+ */
+ case SRT_Subroutine: {
+ if( pOrderBy ){
+ sqliteVdbeAddOp(v, OP_MakeRecord, nColumn, 0);
+ pushOntoSorter(pParse, v, pOrderBy);
+ }else{
+ sqliteVdbeAddOp(v, OP_Gosub, 0, iParm);
+ }
+ break;
+ }
+
+ /* Discard the results. This is used for SELECT statements inside
+ ** the body of a TRIGGER. The purpose of such selects is to call
+ ** user-defined functions that have side effects. We do not care
+ ** about the actual results of the select.
+ */
+ default: {
+ assert( eDest==SRT_Discard );
+ sqliteVdbeAddOp(v, OP_Pop, nColumn, 0);
+ break;
+ }
+ }
+ return 0;
+}
+
+/*
+** If the inner loop was generated using a non-null pOrderBy argument,
+** then the results were placed in a sorter. After the loop is terminated
+** we need to run the sorter and output the results. The following
+** routine generates the code needed to do that.
+*/
+static void generateSortTail(
+ Select *p, /* The SELECT statement */
+ Vdbe *v, /* Generate code into this VDBE */
+ int nColumn, /* Number of columns of data */
+ int eDest, /* Write the sorted results here */
+ int iParm /* Optional parameter associated with eDest */
+){
+ int end1 = sqliteVdbeMakeLabel(v);
+ int end2 = sqliteVdbeMakeLabel(v);
+ int addr;
+ if( eDest==SRT_Sorter ) return;
+ sqliteVdbeAddOp(v, OP_Sort, 0, 0);
+ addr = sqliteVdbeAddOp(v, OP_SortNext, 0, end1);
+ codeLimiter(v, p, addr, end2, 1);
+ switch( eDest ){
+ case SRT_Callback: {
+ sqliteVdbeAddOp(v, OP_SortCallback, nColumn, 0);
+ break;
+ }
+ case SRT_Table:
+ case SRT_TempTable: {
+ sqliteVdbeAddOp(v, OP_NewRecno, iParm, 0);
+ sqliteVdbeAddOp(v, OP_Pull, 1, 0);
+ sqliteVdbeAddOp(v, OP_PutIntKey, iParm, 0);
+ break;
+ }
+ case SRT_Set: {
+ assert( nColumn==1 );
+ sqliteVdbeAddOp(v, OP_NotNull, -1, sqliteVdbeCurrentAddr(v)+3);
+ sqliteVdbeAddOp(v, OP_Pop, 1, 0);
+ sqliteVdbeAddOp(v, OP_Goto, 0, sqliteVdbeCurrentAddr(v)+3);
+ sqliteVdbeAddOp(v, OP_String, 0, 0);
+ sqliteVdbeAddOp(v, OP_PutStrKey, iParm, 0);
+ break;
+ }
+ case SRT_Mem: {
+ assert( nColumn==1 );
+ sqliteVdbeAddOp(v, OP_MemStore, iParm, 1);
+ sqliteVdbeAddOp(v, OP_Goto, 0, end1);
+ break;
+ }
+ case SRT_Subroutine: {
+ int i;
+ for(i=0; i<nColumn; i++){
+ sqliteVdbeAddOp(v, OP_Column, -1-i, i);
+ }
+ sqliteVdbeAddOp(v, OP_Gosub, 0, iParm);
+ sqliteVdbeAddOp(v, OP_Pop, 1, 0);
+ break;
+ }
+ default: {
+ /* Do nothing */
+ break;
+ }
+ }
+ sqliteVdbeAddOp(v, OP_Goto, 0, addr);
+ sqliteVdbeResolveLabel(v, end2);
+ sqliteVdbeAddOp(v, OP_Pop, 1, 0);
+ sqliteVdbeResolveLabel(v, end1);
+ sqliteVdbeAddOp(v, OP_SortReset, 0, 0);
+}
+
+/*
+** Generate code that will tell the VDBE the datatypes of
+** columns in the result set.
+**
+** This routine only generates code if the "PRAGMA show_datatypes=on"
+** has been executed. The datatypes are reported out in the azCol
+** parameter to the callback function. The first N azCol[] entries
+** are the names of the columns, and the second N entries are the
+** datatypes for the columns.
+**
+** The "datatype" for a result that is a column of a type is the
+** datatype definition extracted from the CREATE TABLE statement.
+** The datatype for an expression is either TEXT or NUMERIC. The
+** datatype for a ROWID field is INTEGER.
+*/
+static void generateColumnTypes(
+ Parse *pParse, /* Parser context */
+ SrcList *pTabList, /* List of tables */
+ ExprList *pEList /* Expressions defining the result set */
+){
+ Vdbe *v = pParse->pVdbe;
+ int i, j;
+ for(i=0; i<pEList->nExpr; i++){
+ Expr *p = pEList->a[i].pExpr;
+ char *zType = 0;
+ if( p==0 ) continue;
+ if( p->op==TK_COLUMN && pTabList ){
+ Table *pTab;
+ int iCol = p->iColumn;
+ for(j=0; j<pTabList->nSrc && pTabList->a[j].iCursor!=p->iTable; j++){}
+ assert( j<pTabList->nSrc );
+ pTab = pTabList->a[j].pTab;
+ if( iCol<0 ) iCol = pTab->iPKey;
+ assert( iCol==-1 || (iCol>=0 && iCol<pTab->nCol) );
+ if( iCol<0 ){
+ zType = "INTEGER";
+ }else{
+ zType = pTab->aCol[iCol].zType;
+ }
+ }else{
+ if( sqliteExprType(p)==SQLITE_SO_TEXT ){
+ zType = "TEXT";
+ }else{
+ zType = "NUMERIC";
+ }
+ }
+ sqliteVdbeOp3(v, OP_ColumnName, i + pEList->nExpr, 0, zType, 0);
+ }
+}
+
+/*
+** Generate code that will tell the VDBE the names of columns
+** in the result set. This information is used to provide the
+** azCol[] values in the callback.
+*/
+static void generateColumnNames(
+ Parse *pParse, /* Parser context */
+ SrcList *pTabList, /* List of tables */
+ ExprList *pEList /* Expressions defining the result set */
+){
+ Vdbe *v = pParse->pVdbe;
+ int i, j;
+ sqlite *db = pParse->db;
+ int fullNames, shortNames;
+
+ assert( v!=0 );
+ if( pParse->colNamesSet || v==0 || sqlite_malloc_failed ) return;
+ pParse->colNamesSet = 1;
+ fullNames = (db->flags & SQLITE_FullColNames)!=0;
+ shortNames = (db->flags & SQLITE_ShortColNames)!=0;
+ for(i=0; i<pEList->nExpr; i++){
+ Expr *p;
+ int p2 = i==pEList->nExpr-1;
+ p = pEList->a[i].pExpr;
+ if( p==0 ) continue;
+ if( pEList->a[i].zName ){
+ char *zName = pEList->a[i].zName;
+ sqliteVdbeOp3(v, OP_ColumnName, i, p2, zName, 0);
+ continue;
+ }
+ if( p->op==TK_COLUMN && pTabList ){
+ Table *pTab;
+ char *zCol;
+ int iCol = p->iColumn;
+ for(j=0; j<pTabList->nSrc && pTabList->a[j].iCursor!=p->iTable; j++){}
+ assert( j<pTabList->nSrc );
+ pTab = pTabList->a[j].pTab;
+ if( iCol<0 ) iCol = pTab->iPKey;
+ assert( iCol==-1 || (iCol>=0 && iCol<pTab->nCol) );
+ if( iCol<0 ){
+ zCol = "_ROWID_";
+ }else{
+ zCol = pTab->aCol[iCol].zName;
+ }
+ if( !shortNames && !fullNames && p->span.z && p->span.z[0] ){
+ int addr = sqliteVdbeOp3(v,OP_ColumnName, i, p2, p->span.z, p->span.n);
+ sqliteVdbeCompressSpace(v, addr);
+ }else if( fullNames || (!shortNames && pTabList->nSrc>1) ){
+ char *zName = 0;
+ char *zTab;
+
+ zTab = pTabList->a[j].zAlias;
+ if( fullNames || zTab==0 ) zTab = pTab->zName;
+ sqliteSetString(&zName, zTab, ".", zCol, 0);
+ sqliteVdbeOp3(v, OP_ColumnName, i, p2, zName, P3_DYNAMIC);
+ }else{
+ sqliteVdbeOp3(v, OP_ColumnName, i, p2, zCol, 0);
+ }
+ }else if( p->span.z && p->span.z[0] ){
+ int addr = sqliteVdbeOp3(v,OP_ColumnName, i, p2, p->span.z, p->span.n);
+ sqliteVdbeCompressSpace(v, addr);
+ }else{
+ char zName[30];
+ assert( p->op!=TK_COLUMN || pTabList==0 );
+ sprintf(zName, "column%d", i+1);
+ sqliteVdbeOp3(v, OP_ColumnName, i, p2, zName, 0);
+ }
+ }
+}
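+
+/*
+** Editorial example (not part of the original source; the table name is
+** hypothetical).  For
+**
+**     SELECT t1.a AS x, b, a+1 FROM t1
+**
+** the names reported to the callback are "x" (an explicit alias always
+** wins), "b" (or "t1.b" when SQLITE_FullColNames is set), and the span
+** text "a+1" for the expression, with "column3" used only if no span
+** text is available.
+*/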
+
+/*
+** Name of the connection operator, used for error messages.
+*/
+static const char *selectOpName(int id){
+ char *z;
+ switch( id ){
+ case TK_ALL: z = "UNION ALL"; break;
+ case TK_INTERSECT: z = "INTERSECT"; break;
+ case TK_EXCEPT: z = "EXCEPT"; break;
+ default: z = "UNION"; break;
+ }
+ return z;
+}
+
+/*
+** Forward declaration
+*/
+static int fillInColumnList(Parse*, Select*);
+
+/*
+** Given a SELECT statement, generate a Table structure that describes
+** the result set of that SELECT.
+*/
+Table *sqliteResultSetOfSelect(Parse *pParse, char *zTabName, Select *pSelect){
+ Table *pTab;
+ int i, j;
+ ExprList *pEList;
+ Column *aCol;
+
+ if( fillInColumnList(pParse, pSelect) ){
+ return 0;
+ }
+ pTab = sqliteMalloc( sizeof(Table) );
+ if( pTab==0 ){
+ return 0;
+ }
+ pTab->zName = zTabName ? sqliteStrDup(zTabName) : 0;
+ pEList = pSelect->pEList;
+ pTab->nCol = pEList->nExpr;
+ assert( pTab->nCol>0 );
+ pTab->aCol = aCol = sqliteMalloc( sizeof(pTab->aCol[0])*pTab->nCol );
+ for(i=0; i<pTab->nCol; i++){
+ Expr *p, *pR;
+ if( pEList->a[i].zName ){
+ aCol[i].zName = sqliteStrDup(pEList->a[i].zName);
+ }else if( (p=pEList->a[i].pExpr)->op==TK_DOT
+ && (pR=p->pRight)!=0 && pR->token.z && pR->token.z[0] ){
+ int cnt;
+ sqliteSetNString(&aCol[i].zName, pR->token.z, pR->token.n, 0);
+ for(j=cnt=0; j<i; j++){
+ if( sqliteStrICmp(aCol[j].zName, aCol[i].zName)==0 ){
+ int n;
+ char zBuf[30];
+ sprintf(zBuf,"_%d",++cnt);
+ n = strlen(zBuf);
+ sqliteSetNString(&aCol[i].zName, pR->token.z, pR->token.n, zBuf, n,0);
+ j = -1;
+ }
+ }
+ }else if( p->span.z && p->span.z[0] ){
+ sqliteSetNString(&pTab->aCol[i].zName, p->span.z, p->span.n, 0);
+ }else{
+ char zBuf[30];
+ sprintf(zBuf, "column%d", i+1);
+ aCol[i].zName = sqliteStrDup(zBuf);
+ }
+ sqliteDequote(aCol[i].zName);
+ }
+ pTab->iPKey = -1;
+ return pTab;
+}
+
+/*
+** For the given SELECT statement, do three things.
+**
+** (1) Fill in the pTabList->a[].pTab fields in the SrcList that
+** defines the set of tables that should be scanned. For views,
+** fill pTabList->a[].pSelect with a copy of the SELECT statement
+** that implements the view. A copy is made of the view's SELECT
+** statement so that we can freely modify or delete that statement
+** without worrying about messing up the persistent representation
+** of the view.
+**
+** (2) Add terms to the WHERE clause to accommodate the NATURAL keyword
+** on joins and the ON and USING clause of joins.
+**
+** (3) Scan the list of columns in the result set (pEList) looking
+** for instances of the "*" operator or the TABLE.* operator.
+** If found, expand each "*" to be every column in every table
+** and TABLE.* to be every column in TABLE.
+**
+** Return 0 on success. If there are problems, leave an error message
+** in pParse and return non-zero.
+*/
+static int fillInColumnList(Parse *pParse, Select *p){
+ int i, j, k, rc;
+ SrcList *pTabList;
+ ExprList *pEList;
+ Table *pTab;
+
+ if( p==0 || p->pSrc==0 ) return 1;
+ pTabList = p->pSrc;
+ pEList = p->pEList;
+
+ /* Look up every table in the table list.
+ */
+ for(i=0; i<pTabList->nSrc; i++){
+ if( pTabList->a[i].pTab ){
+ /* This routine has run before! No need to continue */
+ return 0;
+ }
+ if( pTabList->a[i].zName==0 ){
+ /* A sub-query in the FROM clause of a SELECT */
+ assert( pTabList->a[i].pSelect!=0 );
+ if( pTabList->a[i].zAlias==0 ){
+ char zFakeName[60];
+ sprintf(zFakeName, "sqlite_subquery_%p_",
+ (void*)pTabList->a[i].pSelect);
+ sqliteSetString(&pTabList->a[i].zAlias, zFakeName, 0);
+ }
+ pTabList->a[i].pTab = pTab =
+ sqliteResultSetOfSelect(pParse, pTabList->a[i].zAlias,
+ pTabList->a[i].pSelect);
+ if( pTab==0 ){
+ return 1;
+ }
+ /* The isTransient flag indicates that the Table structure has been
+ ** dynamically allocated and may be freed at any time. In other words,
+ ** pTab is not pointing to a persistent table structure that defines
+ ** part of the schema. */
+ pTab->isTransient = 1;
+ }else{
+ /* An ordinary table or view name in the FROM clause */
+ pTabList->a[i].pTab = pTab =
+ sqliteLocateTable(pParse,pTabList->a[i].zName,pTabList->a[i].zDatabase);
+ if( pTab==0 ){
+ return 1;
+ }
+ if( pTab->pSelect ){
+ /* We reach here if the named table is really a view */
+ if( sqliteViewGetColumnNames(pParse, pTab) ){
+ return 1;
+ }
+ /* If pTabList->a[i].pSelect!=0 it means we are dealing with a
+ ** view within a view. The SELECT structure has already been
+ ** copied by the outer view so we can skip the copy step here
+ ** in the inner view.
+ */
+ if( pTabList->a[i].pSelect==0 ){
+ pTabList->a[i].pSelect = sqliteSelectDup(pTab->pSelect);
+ }
+ }
+ }
+ }
+
+ /* Process NATURAL keywords, and ON and USING clauses of joins.
+ */
+ if( sqliteProcessJoin(pParse, p) ) return 1;
+
+ /* For every "*" that occurs in the column list, insert the names of
+ ** all columns in all tables. And for every TABLE.* insert the names
+ ** of all columns in TABLE. The parser inserted a special expression
+ ** with the TK_ALL operator for each "*" that it found in the column list.
+ ** The following code just has to locate the TK_ALL expressions and expand
+ ** each one to the list of all columns in all tables.
+ **
+ ** The first loop just checks to see if there are any "*" operators
+ ** that need expanding.
+ */
+ for(k=0; k<pEList->nExpr; k++){
+ Expr *pE = pEList->a[k].pExpr;
+ if( pE->op==TK_ALL ) break;
+ if( pE->op==TK_DOT && pE->pRight && pE->pRight->op==TK_ALL
+ && pE->pLeft && pE->pLeft->op==TK_ID ) break;
+ }
+ rc = 0;
+ if( k<pEList->nExpr ){
+ /*
+ ** If we get here it means the result set contains one or more "*"
+ ** operators that need to be expanded. Loop through each expression
+ ** in the result set and expand them one by one.
+ */
+ struct ExprList_item *a = pEList->a;
+ ExprList *pNew = 0;
+ for(k=0; k<pEList->nExpr; k++){
+ Expr *pE = a[k].pExpr;
+ if( pE->op!=TK_ALL &&
+ (pE->op!=TK_DOT || pE->pRight==0 || pE->pRight->op!=TK_ALL) ){
+ /* This particular expression does not need to be expanded.
+ */
+ pNew = sqliteExprListAppend(pNew, a[k].pExpr, 0);
+ pNew->a[pNew->nExpr-1].zName = a[k].zName;
+ a[k].pExpr = 0;
+ a[k].zName = 0;
+ }else{
+ /* This expression is a "*" or a "TABLE.*" and needs to be
+ ** expanded. */
+ int tableSeen = 0; /* Set to 1 when TABLE matches */
+ char *zTName; /* text of name of TABLE */
+ if( pE->op==TK_DOT && pE->pLeft ){
+ zTName = sqliteTableNameFromToken(&pE->pLeft->token);
+ }else{
+ zTName = 0;
+ }
+ for(i=0; i<pTabList->nSrc; i++){
+ Table *pTab = pTabList->a[i].pTab;
+ char *zTabName = pTabList->a[i].zAlias;
+ if( zTabName==0 || zTabName[0]==0 ){
+ zTabName = pTab->zName;
+ }
+ if( zTName && (zTabName==0 || zTabName[0]==0 ||
+ sqliteStrICmp(zTName, zTabName)!=0) ){
+ continue;
+ }
+ tableSeen = 1;
+ for(j=0; j<pTab->nCol; j++){
+ Expr *pExpr, *pLeft, *pRight;
+ char *zName = pTab->aCol[j].zName;
+
+ if( i>0 && (pTabList->a[i-1].jointype & JT_NATURAL)!=0 &&
+ columnIndex(pTabList->a[i-1].pTab, zName)>=0 ){
+ /* In a NATURAL join, omit the join columns from the
+ ** table on the right */
+ continue;
+ }
+ if( i>0 && sqliteIdListIndex(pTabList->a[i-1].pUsing, zName)>=0 ){
+ /* In a join with a USING clause, omit columns in the
+ ** using clause from the table on the right. */
+ continue;
+ }
+ pRight = sqliteExpr(TK_ID, 0, 0, 0);
+ if( pRight==0 ) break;
+ pRight->token.z = zName;
+ pRight->token.n = strlen(zName);
+ pRight->token.dyn = 0;
+ if( zTabName && pTabList->nSrc>1 ){
+ pLeft = sqliteExpr(TK_ID, 0, 0, 0);
+ pExpr = sqliteExpr(TK_DOT, pLeft, pRight, 0);
+ if( pExpr==0 ) break;
+ pLeft->token.z = zTabName;
+ pLeft->token.n = strlen(zTabName);
+ pLeft->token.dyn = 0;
+ sqliteSetString((char**)&pExpr->span.z, zTabName, ".", zName, 0);
+ pExpr->span.n = strlen(pExpr->span.z);
+ pExpr->span.dyn = 1;
+ pExpr->token.z = 0;
+ pExpr->token.n = 0;
+ pExpr->token.dyn = 0;
+ }else{
+ pExpr = pRight;
+ pExpr->span = pExpr->token;
+ }
+ pNew = sqliteExprListAppend(pNew, pExpr, 0);
+ }
+ }
+ if( !tableSeen ){
+ if( zTName ){
+ sqliteErrorMsg(pParse, "no such table: %s", zTName);
+ }else{
+ sqliteErrorMsg(pParse, "no tables specified");
+ }
+ rc = 1;
+ }
+ sqliteFree(zTName);
+ }
+ }
+ sqliteExprListDelete(pEList);
+ p->pEList = pNew;
+ }
+ return rc;
+}
+
+/*
+** This routine recursively unlinks the Select.pSrc.a[].pTab pointers
+** in a select structure. It just sets the pointers to NULL. This
+** routine is recursive in the sense that if the Select.pSrc.a[].pSelect
+** pointer is not NULL, this routine is called recursively on that pointer.
+**
+** This routine is called on the Select structure that defines a
+** VIEW in order to undo any bindings to tables. This is necessary
+** because those tables might be DROPed by a subsequent SQL command.
+** If the bindings are not removed, then the Select.pSrc->a[].pTab field
+** will be left pointing to a deallocated Table structure after the
+** DROP and a coredump will occur the next time the VIEW is used.
+*/
+void sqliteSelectUnbind(Select *p){
+ int i;
+ SrcList *pSrc;
+ Table *pTab;
+ if( p==0 ) return;
+ pSrc = p->pSrc;
+ for(i=0; i<pSrc->nSrc; i++){
+ if( (pTab = pSrc->a[i].pTab)!=0 ){
+ if( pTab->isTransient ){
+ sqliteDeleteTable(0, pTab);
+ }
+ pSrc->a[i].pTab = 0;
+ if( pSrc->a[i].pSelect ){
+ sqliteSelectUnbind(pSrc->a[i].pSelect);
+ }
+ }
+ }
+}
+
+/*
+** This routine associates entries in an ORDER BY expression list with
+** columns in a result. For each ORDER BY expression, the opcode of
+** the top-level node is changed to TK_COLUMN and the iColumn value of
+** the top-level node is filled in with the column number and the iTable
+** value of the top-level node is filled in with the iTable parameter.
+**
+** If there are prior SELECT clauses, they are processed first. A match
+** in an earlier SELECT takes precedence over a later SELECT.
+**
+** Any entry that does not match is flagged as an error. The number
+** of errors is returned.
+**
+** This routine does NOT correctly initialize the Expr.dataType field
+** of the ORDER BY expressions. The multiSelectSortOrder() routine
+** must be called to do that after the individual select statements
+** have all been analyzed. This routine is unable to compute Expr.dataType
+** because it must be called before the individual select statements
+** have been analyzed.
+*/
+static int matchOrderbyToColumn(
+ Parse *pParse, /* A place to leave error messages */
+ Select *pSelect, /* Match to result columns of this SELECT */
+ ExprList *pOrderBy, /* The ORDER BY values to match against columns */
+ int iTable, /* Insert this value in iTable */
+ int mustComplete /* If TRUE all ORDER BYs must match */
+){
+ int nErr = 0;
+ int i, j;
+ ExprList *pEList;
+
+ if( pSelect==0 || pOrderBy==0 ) return 1;
+ if( mustComplete ){
+ for(i=0; i<pOrderBy->nExpr; i++){ pOrderBy->a[i].done = 0; }
+ }
+ if( fillInColumnList(pParse, pSelect) ){
+ return 1;
+ }
+ if( pSelect->pPrior ){
+ if( matchOrderbyToColumn(pParse, pSelect->pPrior, pOrderBy, iTable, 0) ){
+ return 1;
+ }
+ }
+ pEList = pSelect->pEList;
+ for(i=0; i<pOrderBy->nExpr; i++){
+ Expr *pE = pOrderBy->a[i].pExpr;
+ int iCol = -1;
+ if( pOrderBy->a[i].done ) continue;
+ if( sqliteExprIsInteger(pE, &iCol) ){
+ if( iCol<=0 || iCol>pEList->nExpr ){
+ sqliteErrorMsg(pParse,
+ "ORDER BY position %d should be between 1 and %d",
+ iCol, pEList->nExpr);
+ nErr++;
+ break;
+ }
+ if( !mustComplete ) continue;
+ iCol--;
+ }
+ for(j=0; iCol<0 && j<pEList->nExpr; j++){
+ if( pEList->a[j].zName && (pE->op==TK_ID || pE->op==TK_STRING) ){
+ char *zName, *zLabel;
+ zName = pEList->a[j].zName;
+ assert( pE->token.z );
+ zLabel = sqliteStrNDup(pE->token.z, pE->token.n);
+ sqliteDequote(zLabel);
+ if( sqliteStrICmp(zName, zLabel)==0 ){
+ iCol = j;
+ }
+ sqliteFree(zLabel);
+ }
+ if( iCol<0 && sqliteExprCompare(pE, pEList->a[j].pExpr) ){
+ iCol = j;
+ }
+ }
+ if( iCol>=0 ){
+ pE->op = TK_COLUMN;
+ pE->iColumn = iCol;
+ pE->iTable = iTable;
+ pOrderBy->a[i].done = 1;
+ }
+ if( iCol<0 && mustComplete ){
+ sqliteErrorMsg(pParse,
+ "ORDER BY term number %d does not match any result column", i+1);
+ nErr++;
+ break;
+ }
+ }
+ return nErr;
+}
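+
+/*
+** Editorial example (not part of the original source; table names are
+** hypothetical).  In
+**
+**     SELECT a AS n FROM t1 UNION SELECT x FROM t2 ORDER BY n, 1
+**
+** the term "n" matches the alias in the left-most SELECT (prior SELECTs
+** are searched first) and the term "1" resolves to the first result
+** column; both are rewritten as TK_COLUMN references into the temporary
+** table that holds the compound result.
+*/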
+
+/*
+** Get a VDBE for the given parser context. Create a new one if necessary.
+** If an error occurs, return NULL and leave a message in pParse.
+*/
+Vdbe *sqliteGetVdbe(Parse *pParse){
+ Vdbe *v = pParse->pVdbe;
+ if( v==0 ){
+ v = pParse->pVdbe = sqliteVdbeCreate(pParse->db);
+ }
+ return v;
+}
+
+/*
+** This routine sets the Expr.dataType field on all elements of
+** the pOrderBy expression list. The pOrderBy list will have been
+** set up by matchOrderbyToColumn(). Hence each expression has
+** a TK_COLUMN as its root node. The Expr.iColumn refers to a
+** column in the result set. The datatype is set to SQLITE_SO_TEXT
+** if the corresponding column in p and every SELECT to the left of
+** p has a datatype of SQLITE_SO_TEXT. If the corresponding column
+** in p or any of the left SELECTs is SQLITE_SO_NUM, then the datatype
+** of the order-by expression is set to SQLITE_SO_NUM.
+**
+** Examples:
+**
+** CREATE TABLE one(a INTEGER, b TEXT);
+** CREATE TABLE two(c VARCHAR(5), d FLOAT);
+**
+** SELECT b, b FROM one UNION SELECT d, c FROM two ORDER BY 1, 2;
+**
+** The primary sort key will use SQLITE_SO_NUM because the "d" in
+** the second SELECT is numeric. The 1st column of the first SELECT
+** is text but that does not matter because a numeric type always
+** overrides a text type.
+**
+** The secondary key will use the SQLITE_SO_TEXT sort order because
+** both the (second) "b" in the first SELECT and the "c" in the second
+** SELECT have a datatype of text.
+*/
+static void multiSelectSortOrder(Select *p, ExprList *pOrderBy){
+ int i;
+ ExprList *pEList;
+ if( pOrderBy==0 ) return;
+ if( p==0 ){
+ for(i=0; i<pOrderBy->nExpr; i++){
+ pOrderBy->a[i].pExpr->dataType = SQLITE_SO_TEXT;
+ }
+ return;
+ }
+ multiSelectSortOrder(p->pPrior, pOrderBy);
+ pEList = p->pEList;
+ for(i=0; i<pOrderBy->nExpr; i++){
+ Expr *pE = pOrderBy->a[i].pExpr;
+ if( pE->dataType==SQLITE_SO_NUM ) continue;
+ assert( pE->iColumn>=0 );
+ if( pEList->nExpr>pE->iColumn ){
+ pE->dataType = sqliteExprType(pEList->a[pE->iColumn].pExpr);
+ }
+ }
+}
+
+/*
+** Compute the iLimit and iOffset fields of the SELECT based on the
+** nLimit and nOffset fields. nLimit and nOffset hold the integers
+** that appear in the original SQL statement after the LIMIT and OFFSET
+** keywords, or -1 and 0 if those keywords are omitted.
+** iLimit and iOffset are the integer memory register numbers for
+** counters used to compute the limit and offset. If there is no
+** limit and/or offset, then iLimit and iOffset are negative.
+**
+** This routine changes the values of iLimit and iOffset only if
+** a limit or offset is defined by nLimit and nOffset. iLimit and
+** iOffset should have been preset to appropriate default values
+** (usually but not always -1) prior to calling this routine.
+** Only if nLimit>=0 or nOffset>0 do the limit registers get
+** redefined. The UNION ALL operator uses this property to force
+** the reuse of the same limit and offset registers across multiple
+** SELECT statements.
+*/
+static void computeLimitRegisters(Parse *pParse, Select *p){
+ /*
+ ** If the comparison is p->nLimit>0 then "LIMIT 0" shows
+ ** all rows. It is the same as no limit. If the comparison is
+ ** p->nLimit>=0 then "LIMIT 0" shows no rows at all.
+ ** "LIMIT -1" always shows all rows. There is some
+ ** controversy about what the correct behavior should be.
+ ** The current implementation interprets "LIMIT 0" to mean
+ ** no rows.
+ */
+ if( p->nLimit>=0 ){
+ int iMem = pParse->nMem++;
+ Vdbe *v = sqliteGetVdbe(pParse);
+ if( v==0 ) return;
+ sqliteVdbeAddOp(v, OP_Integer, -p->nLimit, 0);
+ sqliteVdbeAddOp(v, OP_MemStore, iMem, 1);
+ p->iLimit = iMem;
+ }
+ if( p->nOffset>0 ){
+ int iMem = pParse->nMem++;
+ Vdbe *v = sqliteGetVdbe(pParse);
+ if( v==0 ) return;
+ sqliteVdbeAddOp(v, OP_Integer, -p->nOffset, 0);
+ sqliteVdbeAddOp(v, OP_MemStore, iMem, 1);
+ p->iOffset = iMem;
+ }
+}
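+
+/*
+** Editorial note (not part of the original source): for a query ending
+** in "LIMIT 10 OFFSET 5" the parser stores nLimit=10 and nOffset=5.
+** This routine then loads -10 and -5 into two freshly allocated memory
+** cells and records their indices in iLimit and iOffset; codeLimiter()
+** later uses OP_MemIncr on those counters to skip the first 5 rows and
+** to stop once 10 rows have been produced.
+*/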
+
+/*
+** This routine is called to process a query that is really the union
+** or intersection of two or more separate queries.
+**
+** "p" points to the right-most of the two queries. the query on the
+** left is p->pPrior. The left query could also be a compound query
+** in which case this routine will be called recursively.
+**
+** The results of the total query are to be written into a destination
+** of type eDest with parameter iParm.
+**
+** Example 1: Consider a three-way compound SQL statement.
+**
+** SELECT a FROM t1 UNION SELECT b FROM t2 UNION SELECT c FROM t3
+**
+** This statement is parsed up as follows:
+**
+** SELECT c FROM t3
+** |
+** `-----> SELECT b FROM t2
+** |
+** `------> SELECT a FROM t1
+**
+** The arrows in the diagram above represent the Select.pPrior pointer.
+** So if this routine is called with p equal to the t3 query, then
+** pPrior will be the t2 query. p->op will be TK_UNION in this case.
+**
+** Notice that because of the way SQLite parses compound SELECTs, the
+** individual selects always group from left to right.
+*/
+static int multiSelect(Parse *pParse, Select *p, int eDest, int iParm){
+ int rc; /* Success code from a subroutine */
+ Select *pPrior; /* Another SELECT immediately to our left */
+ Vdbe *v; /* Generate code to this VDBE */
+
+ /* Make sure there is no ORDER BY or LIMIT clause on prior SELECTs. Only
+ ** the last SELECT in the series may have an ORDER BY or LIMIT.
+ */
+ if( p==0 || p->pPrior==0 ) return 1;
+ pPrior = p->pPrior;
+ if( pPrior->pOrderBy ){
+ sqliteErrorMsg(pParse,"ORDER BY clause should come after %s not before",
+ selectOpName(p->op));
+ return 1;
+ }
+ if( pPrior->nLimit>=0 || pPrior->nOffset>0 ){
+ sqliteErrorMsg(pParse,"LIMIT clause should come after %s not before",
+ selectOpName(p->op));
+ return 1;
+ }
+
+ /* Make sure we have a valid query engine. If not, create a new one.
+ */
+ v = sqliteGetVdbe(pParse);
+ if( v==0 ) return 1;
+
+ /* Create the destination temporary table if necessary
+ */
+ if( eDest==SRT_TempTable ){
+ sqliteVdbeAddOp(v, OP_OpenTemp, iParm, 0);
+ eDest = SRT_Table;
+ }
+
+ /* Generate code for the left and right SELECT statements.
+ */
+ switch( p->op ){
+ case TK_ALL: {
+ if( p->pOrderBy==0 ){
+ pPrior->nLimit = p->nLimit;
+ pPrior->nOffset = p->nOffset;
+ rc = sqliteSelect(pParse, pPrior, eDest, iParm, 0, 0, 0);
+ if( rc ) return rc;
+ p->pPrior = 0;
+ p->iLimit = pPrior->iLimit;
+ p->iOffset = pPrior->iOffset;
+ p->nLimit = -1;
+ p->nOffset = 0;
+ rc = sqliteSelect(pParse, p, eDest, iParm, 0, 0, 0);
+ p->pPrior = pPrior;
+ if( rc ) return rc;
+ break;
+ }
+ /* For UNION ALL ... ORDER BY fall through to the next case */
+ }
+ case TK_EXCEPT:
+ case TK_UNION: {
+ int unionTab; /* Cursor number of the temporary table holding result */
+ int op; /* One of the SRT_ operations to apply to self */
+ int priorOp; /* The SRT_ operation to apply to prior selects */
+ int nLimit, nOffset; /* Saved values of p->nLimit and p->nOffset */
+ ExprList *pOrderBy; /* The ORDER BY clause for the right SELECT */
+
+ priorOp = p->op==TK_ALL ? SRT_Table : SRT_Union;
+ if( eDest==priorOp && p->pOrderBy==0 && p->nLimit<0 && p->nOffset==0 ){
+ /* We can reuse a temporary table generated by a SELECT to our
+ ** right.
+ */
+ unionTab = iParm;
+ }else{
+ /* We will need to create our own temporary table to hold the
+ ** intermediate results.
+ */
+ unionTab = pParse->nTab++;
+ if( p->pOrderBy
+ && matchOrderbyToColumn(pParse, p, p->pOrderBy, unionTab, 1) ){
+ return 1;
+ }
+ if( p->op!=TK_ALL ){
+ sqliteVdbeAddOp(v, OP_OpenTemp, unionTab, 1);
+ sqliteVdbeAddOp(v, OP_KeyAsData, unionTab, 1);
+ }else{
+ sqliteVdbeAddOp(v, OP_OpenTemp, unionTab, 0);
+ }
+ }
+
+ /* Code the SELECT statements to our left
+ */
+ rc = sqliteSelect(pParse, pPrior, priorOp, unionTab, 0, 0, 0);
+ if( rc ) return rc;
+
+ /* Code the current SELECT statement
+ */
+ switch( p->op ){
+ case TK_EXCEPT: op = SRT_Except; break;
+ case TK_UNION: op = SRT_Union; break;
+ case TK_ALL: op = SRT_Table; break;
+ }
+ p->pPrior = 0;
+ pOrderBy = p->pOrderBy;
+ p->pOrderBy = 0;
+ nLimit = p->nLimit;
+ p->nLimit = -1;
+ nOffset = p->nOffset;
+ p->nOffset = 0;
+ rc = sqliteSelect(pParse, p, op, unionTab, 0, 0, 0);
+ p->pPrior = pPrior;
+ p->pOrderBy = pOrderBy;
+ p->nLimit = nLimit;
+ p->nOffset = nOffset;
+ if( rc ) return rc;
+
+ /* Convert the data in the temporary table into whatever form
+ ** it is that we currently need.
+ */
+ if( eDest!=priorOp || unionTab!=iParm ){
+ int iCont, iBreak, iStart;
+ assert( p->pEList );
+ if( eDest==SRT_Callback ){
+ generateColumnNames(pParse, 0, p->pEList);
+ generateColumnTypes(pParse, p->pSrc, p->pEList);
+ }
+ iBreak = sqliteVdbeMakeLabel(v);
+ iCont = sqliteVdbeMakeLabel(v);
+ sqliteVdbeAddOp(v, OP_Rewind, unionTab, iBreak);
+ computeLimitRegisters(pParse, p);
+ iStart = sqliteVdbeCurrentAddr(v);
+ multiSelectSortOrder(p, p->pOrderBy);
+ rc = selectInnerLoop(pParse, p, p->pEList, unionTab, p->pEList->nExpr,
+ p->pOrderBy, -1, eDest, iParm,
+ iCont, iBreak);
+ if( rc ) return 1;
+ sqliteVdbeResolveLabel(v, iCont);
+ sqliteVdbeAddOp(v, OP_Next, unionTab, iStart);
+ sqliteVdbeResolveLabel(v, iBreak);
+ sqliteVdbeAddOp(v, OP_Close, unionTab, 0);
+ if( p->pOrderBy ){
+ generateSortTail(p, v, p->pEList->nExpr, eDest, iParm);
+ }
+ }
+ break;
+ }
+ case TK_INTERSECT: {
+ int tab1, tab2;
+ int iCont, iBreak, iStart;
+ int nLimit, nOffset;
+
+ /* INTERSECT is different from the others since it requires
+ ** two temporary tables. Hence it has its own case. Begin
+ ** by allocating the tables we will need.
+ */
+ tab1 = pParse->nTab++;
+ tab2 = pParse->nTab++;
+ if( p->pOrderBy && matchOrderbyToColumn(pParse,p,p->pOrderBy,tab1,1) ){
+ return 1;
+ }
+ sqliteVdbeAddOp(v, OP_OpenTemp, tab1, 1);
+ sqliteVdbeAddOp(v, OP_KeyAsData, tab1, 1);
+
+ /* Code the SELECTs to our left into temporary table "tab1".
+ */
+ rc = sqliteSelect(pParse, pPrior, SRT_Union, tab1, 0, 0, 0);
+ if( rc ) return rc;
+
+ /* Code the current SELECT into temporary table "tab2"
+ */
+ sqliteVdbeAddOp(v, OP_OpenTemp, tab2, 1);
+ sqliteVdbeAddOp(v, OP_KeyAsData, tab2, 1);
+ p->pPrior = 0;
+ nLimit = p->nLimit;
+ p->nLimit = -1;
+ nOffset = p->nOffset;
+ p->nOffset = 0;
+ rc = sqliteSelect(pParse, p, SRT_Union, tab2, 0, 0, 0);
+ p->pPrior = pPrior;
+ p->nLimit = nLimit;
+ p->nOffset = nOffset;
+ if( rc ) return rc;
+
+ /* Generate code to take the intersection of the two temporary
+ ** tables.
+ */
+ assert( p->pEList );
+ if( eDest==SRT_Callback ){
+ generateColumnNames(pParse, 0, p->pEList);
+ generateColumnTypes(pParse, p->pSrc, p->pEList);
+ }
+ iBreak = sqliteVdbeMakeLabel(v);
+ iCont = sqliteVdbeMakeLabel(v);
+ sqliteVdbeAddOp(v, OP_Rewind, tab1, iBreak);
+ computeLimitRegisters(pParse, p);
+ iStart = sqliteVdbeAddOp(v, OP_FullKey, tab1, 0);
+ sqliteVdbeAddOp(v, OP_NotFound, tab2, iCont);
+ multiSelectSortOrder(p, p->pOrderBy);
+ rc = selectInnerLoop(pParse, p, p->pEList, tab1, p->pEList->nExpr,
+ p->pOrderBy, -1, eDest, iParm,
+ iCont, iBreak);
+ if( rc ) return 1;
+ sqliteVdbeResolveLabel(v, iCont);
+ sqliteVdbeAddOp(v, OP_Next, tab1, iStart);
+ sqliteVdbeResolveLabel(v, iBreak);
+ sqliteVdbeAddOp(v, OP_Close, tab2, 0);
+ sqliteVdbeAddOp(v, OP_Close, tab1, 0);
+ if( p->pOrderBy ){
+ generateSortTail(p, v, p->pEList->nExpr, eDest, iParm);
+ }
+ break;
+ }
+ }
+ assert( p->pEList && pPrior->pEList );
+ if( p->pEList->nExpr!=pPrior->pEList->nExpr ){
+ sqliteErrorMsg(pParse, "SELECTs to the left and right of %s"
+ " do not have the same number of result columns", selectOpName(p->op));
+ return 1;
+ }
+ return 0;
+}
+
+/*
+** Scan through the expression pExpr. Replace every reference to
+** a column in table number iTable with a copy of the iColumn-th
+** entry in pEList. (But leave references to the ROWID column
+** unchanged.)
+**
+** This routine is part of the flattening procedure. A subquery
+** whose result set is defined by pEList appears as an entry in the
+** FROM clause of a SELECT such that the VDBE cursor assigned to that
+** FROM clause entry is iTable. This routine makes the necessary
+** changes to pExpr so that it refers directly to the source table
+** of the subquery rather than to the result set of the subquery.
+*/
+static void substExprList(ExprList*,int,ExprList*); /* Forward Decl */
+static void substExpr(Expr *pExpr, int iTable, ExprList *pEList){
+ if( pExpr==0 ) return;
+ if( pExpr->op==TK_COLUMN && pExpr->iTable==iTable ){
+ if( pExpr->iColumn<0 ){
+ pExpr->op = TK_NULL;
+ }else{
+ Expr *pNew;
+ assert( pEList!=0 && pExpr->iColumn<pEList->nExpr );
+ assert( pExpr->pLeft==0 && pExpr->pRight==0 && pExpr->pList==0 );
+ pNew = pEList->a[pExpr->iColumn].pExpr;
+ assert( pNew!=0 );
+ pExpr->op = pNew->op;
+ pExpr->dataType = pNew->dataType;
+ assert( pExpr->pLeft==0 );
+ pExpr->pLeft = sqliteExprDup(pNew->pLeft);
+ assert( pExpr->pRight==0 );
+ pExpr->pRight = sqliteExprDup(pNew->pRight);
+ assert( pExpr->pList==0 );
+ pExpr->pList = sqliteExprListDup(pNew->pList);
+ pExpr->iTable = pNew->iTable;
+ pExpr->iColumn = pNew->iColumn;
+ pExpr->iAgg = pNew->iAgg;
+ sqliteTokenCopy(&pExpr->token, &pNew->token);
+ sqliteTokenCopy(&pExpr->span, &pNew->span);
+ }
+ }else{
+ substExpr(pExpr->pLeft, iTable, pEList);
+ substExpr(pExpr->pRight, iTable, pEList);
+ substExprList(pExpr->pList, iTable, pEList);
+ }
+}
+static void
+substExprList(ExprList *pList, int iTable, ExprList *pEList){
+ int i;
+ if( pList==0 ) return;
+ for(i=0; i<pList->nExpr; i++){
+ substExpr(pList->a[i].pExpr, iTable, pEList);
+ }
+}
+
+/*
+** This routine attempts to flatten subqueries in order to speed
+** execution. It returns 1 if it makes changes and 0 if no flattening
+** occurs.
+**
+** To understand the concept of flattening, consider the following
+** query:
+**
+** SELECT a FROM (SELECT x+y AS a FROM t1 WHERE z<100) WHERE a>5
+**
+** The default way of implementing this query is to execute the
+** subquery first and store the results in a temporary table, then
+** run the outer query on that temporary table. This requires two
+** passes over the data. Furthermore, because the temporary table
+** has no indices, the WHERE clause on the outer query cannot be
+** optimized.
+**
+** This routine attempts to rewrite queries such as the above into
+** a single flat select, like this:
+**
+** SELECT x+y AS a FROM t1 WHERE z<100 AND a>5
+**
+** The code generated for this simplification gives the same result
+** but only has to scan the data once. And because indices might
+** exist on the table t1, a complete scan of the data might be
+** avoided.
+**
+** Flattening is only attempted if all of the following are true:
+**
+** (1) The subquery and the outer query do not both use aggregates.
+**
+** (2) The subquery is not an aggregate or the outer query is not a join.
+**
+** (3) The subquery is not the right operand of a left outer join, or
+** the subquery is not itself a join. (Ticket #306)
+**
+** (4) The subquery is not DISTINCT or the outer query is not a join.
+**
+** (5) The subquery is not DISTINCT or the outer query does not use
+** aggregates.
+**
+** (6) The subquery does not use aggregates or the outer query is not
+** DISTINCT.
+**
+** (7) The subquery has a FROM clause.
+**
+** (8) The subquery does not use LIMIT or the outer query is not a join.
+**
+** (9) The subquery does not use LIMIT or the outer query does not use
+** aggregates.
+**
+** (10) The subquery does not use aggregates or the outer query does not
+** use LIMIT.
+**
+** (11) The subquery and the outer query do not both have ORDER BY clauses.
+**
+** (12) The subquery is not the right term of a LEFT OUTER JOIN or the
+** subquery has no WHERE clause. (added by ticket #350)
+**
+** In this routine, the "p" parameter is a pointer to the outer query.
+** The subquery is p->pSrc->a[iFrom]. isAgg is true if the outer query
+** uses aggregates and subqueryIsAgg is true if the subquery uses aggregates.
+**
+** If flattening is not attempted, this routine is a no-op and returns 0.
+** If flattening is attempted this routine returns 1.
+**
+** All of the expression analysis must occur on both the outer query and
+** the subquery before this routine runs.
+*/
+static int flattenSubquery(
+ Parse *pParse, /* The parsing context */
+ Select *p, /* The parent or outer SELECT statement */
+ int iFrom, /* Index in p->pSrc->a[] of the inner subquery */
+ int isAgg, /* True if outer SELECT uses aggregate functions */
+ int subqueryIsAgg /* True if the subquery uses aggregate functions */
+){
+ Select *pSub; /* The inner query or "subquery" */
+ SrcList *pSrc; /* The FROM clause of the outer query */
+ SrcList *pSubSrc; /* The FROM clause of the subquery */
+ ExprList *pList; /* The result set of the outer query */
+ int iParent; /* VDBE cursor number of the pSub result set temp table */
+ int i;
+ Expr *pWhere;
+
+ /* Check to see if flattening is permitted. Return 0 if not.
+ */
+ if( p==0 ) return 0;
+ pSrc = p->pSrc;
+ assert( pSrc && iFrom>=0 && iFrom<pSrc->nSrc );
+ pSub = pSrc->a[iFrom].pSelect;
+ assert( pSub!=0 );
+ if( isAgg && subqueryIsAgg ) return 0;
+ if( subqueryIsAgg && pSrc->nSrc>1 ) return 0;
+ pSubSrc = pSub->pSrc;
+ assert( pSubSrc );
+ if( pSubSrc->nSrc==0 ) return 0;
+ if( (pSub->isDistinct || pSub->nLimit>=0) && (pSrc->nSrc>1 || isAgg) ){
+ return 0;
+ }
+ if( (p->isDistinct || p->nLimit>=0) && subqueryIsAgg ) return 0;
+ if( p->pOrderBy && pSub->pOrderBy ) return 0;
+
+ /* Restriction 3: If the subquery is a join, make sure the subquery is
+ ** not used as the right operand of an outer join. Examples of why this
+ ** is not allowed:
+ **
+ ** t1 LEFT OUTER JOIN (t2 JOIN t3)
+ **
+ ** If we flatten the above, we would get
+ **
+ ** (t1 LEFT OUTER JOIN t2) JOIN t3
+ **
+ ** which is not at all the same thing.
+ */
+ if( pSubSrc->nSrc>1 && iFrom>0 && (pSrc->a[iFrom-1].jointype & JT_OUTER)!=0 ){
+ return 0;
+ }
+
+ /* Restriction 12: If the subquery is the right operand of a left outer
+ ** join, make sure the subquery has no WHERE clause.
+ ** An example of why this is not allowed:
+ **
+ ** t1 LEFT OUTER JOIN (SELECT * FROM t2 WHERE t2.x>0)
+ **
+ ** If we flatten the above, we would get
+ **
+ ** (t1 LEFT OUTER JOIN t2) WHERE t2.x>0
+ **
+ ** But the t2.x>0 test will always fail on a NULL row of t2, which
+ ** effectively converts the OUTER JOIN into an INNER JOIN.
+ */
+ if( iFrom>0 && (pSrc->a[iFrom-1].jointype & JT_OUTER)!=0
+ && pSub->pWhere!=0 ){
+ return 0;
+ }
+
+ /* If we reach this point, it means flattening is permitted for the
+ ** iFrom-th entry of the FROM clause in the outer query.
+ */
+
+ /* Move all of the FROM elements of the subquery into the
+ ** FROM clause of the outer query. Before doing this, remember
+ ** the cursor number for the original outer query FROM element in
+ ** iParent. The iParent cursor will never be used. Subsequent code
+ ** will scan expressions looking for iParent references and replace
+ ** those references with expressions that resolve to the subquery FROM
+ ** elements we are now copying in.
+ */
+ iParent = pSrc->a[iFrom].iCursor;
+ {
+ int nSubSrc = pSubSrc->nSrc;
+ int jointype = pSrc->a[iFrom].jointype;
+
+ if( pSrc->a[iFrom].pTab && pSrc->a[iFrom].pTab->isTransient ){
+ sqliteDeleteTable(0, pSrc->a[iFrom].pTab);
+ }
+ sqliteFree(pSrc->a[iFrom].zDatabase);
+ sqliteFree(pSrc->a[iFrom].zName);
+ sqliteFree(pSrc->a[iFrom].zAlias);
+ if( nSubSrc>1 ){
+ int extra = nSubSrc - 1;
+ for(i=1; i<nSubSrc; i++){
+ pSrc = sqliteSrcListAppend(pSrc, 0, 0);
+ }
+ p->pSrc = pSrc;
+ for(i=pSrc->nSrc-1; i-extra>=iFrom; i--){
+ pSrc->a[i] = pSrc->a[i-extra];
+ }
+ }
+ for(i=0; i<nSubSrc; i++){
+ pSrc->a[i+iFrom] = pSubSrc->a[i];
+ memset(&pSubSrc->a[i], 0, sizeof(pSubSrc->a[i]));
+ }
+ pSrc->a[iFrom+nSubSrc-1].jointype = jointype;
+ }
+
+ /* Now begin substituting subquery result set expressions for
+ ** references to the iParent in the outer query.
+ **
+ ** Example:
+ **
+ ** SELECT a+5, b*10 FROM (SELECT x*3 AS a, y+10 AS b FROM t1) WHERE a>b;
+ ** \ \_____________ subquery __________/ /
+ ** \_____________________ outer query ______________________________/
+ **
+ ** We look at every expression in the outer query and every place we see
+ ** "a" we substitute "x*3" and every place we see "b" we substitute "y+10".
+ */
+ substExprList(p->pEList, iParent, pSub->pEList);
+ pList = p->pEList;
+ for(i=0; i<pList->nExpr; i++){
+ Expr *pExpr;
+ if( pList->a[i].zName==0 && (pExpr = pList->a[i].pExpr)->span.z!=0 ){
+ pList->a[i].zName = sqliteStrNDup(pExpr->span.z, pExpr->span.n);
+ }
+ }
+ if( isAgg ){
+ substExprList(p->pGroupBy, iParent, pSub->pEList);
+ substExpr(p->pHaving, iParent, pSub->pEList);
+ }
+ if( pSub->pOrderBy ){
+ assert( p->pOrderBy==0 );
+ p->pOrderBy = pSub->pOrderBy;
+ pSub->pOrderBy = 0;
+ }else if( p->pOrderBy ){
+ substExprList(p->pOrderBy, iParent, pSub->pEList);
+ }
+ if( pSub->pWhere ){
+ pWhere = sqliteExprDup(pSub->pWhere);
+ }else{
+ pWhere = 0;
+ }
+ if( subqueryIsAgg ){
+ assert( p->pHaving==0 );
+ p->pHaving = p->pWhere;
+ p->pWhere = pWhere;
+ substExpr(p->pHaving, iParent, pSub->pEList);
+ if( pSub->pHaving ){
+ Expr *pHaving = sqliteExprDup(pSub->pHaving);
+ if( p->pHaving ){
+ p->pHaving = sqliteExpr(TK_AND, p->pHaving, pHaving, 0);
+ }else{
+ p->pHaving = pHaving;
+ }
+ }
+ assert( p->pGroupBy==0 );
+ p->pGroupBy = sqliteExprListDup(pSub->pGroupBy);
+ }else if( p->pWhere==0 ){
+ p->pWhere = pWhere;
+ }else{
+ substExpr(p->pWhere, iParent, pSub->pEList);
+ if( pWhere ){
+ p->pWhere = sqliteExpr(TK_AND, p->pWhere, pWhere, 0);
+ }
+ }
+
+ /* The flattened query is distinct if either the inner or the
+ ** outer query is distinct.
+ */
+ p->isDistinct = p->isDistinct || pSub->isDistinct;
+
+ /* Transfer the limit expression from the subquery to the outer
+ ** query.
+ */
+ if( pSub->nLimit>=0 ){
+ if( p->nLimit<0 ){
+ p->nLimit = pSub->nLimit;
+ }else if( p->nLimit+p->nOffset > pSub->nLimit+pSub->nOffset ){
+ p->nLimit = pSub->nLimit + pSub->nOffset - p->nOffset;
+ }
+ }
+ p->nOffset += pSub->nOffset;
+
+ /* Finally, delete what is left of the subquery and return
+ ** success.
+ */
+ sqliteSelectDelete(pSub);
+ return 1;
+}
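+
+/*
+** A rough illustration of the transformation above (the table and column
+** names here are hypothetical): a query such as
+**
+** SELECT a+1 FROM (SELECT x AS a FROM t2 WHERE x>5);
+**
+** would, assuming none of the restrictions listed above apply, be rewritten
+** approximately as
+**
+** SELECT x+1 FROM t2 WHERE x>5;
+**
+** so that the temporary table for the subquery is never created.
+*/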
+
+/*
+** Analyze the SELECT statement passed in as an argument to see if it
+** is a simple min() or max() query. If it is and this query can be
+** satisfied using a single seek to the beginning or end of an index,
+** then generate the code for this SELECT and return 1. If this is not a
+** simple min() or max() query, then return 0.
+**
+** A simple min() or max() query looks like this:
+**
+** SELECT min(a) FROM table;
+** SELECT max(a) FROM table;
+**
+** The query may have only a single table in its FROM argument. There
+** can be no GROUP BY or HAVING or WHERE clauses. The result set must
+** be the min() or max() of a single column of the table. The column
+** in the min() or max() function must be indexed.
+**
+** The parameters to this routine are the same as for sqliteSelect().
+** See the header comment on that routine for additional information.
+*/
+static int simpleMinMaxQuery(Parse *pParse, Select *p, int eDest, int iParm){
+ Expr *pExpr;
+ int iCol;
+ Table *pTab;
+ Index *pIdx;
+ int base;
+ Vdbe *v;
+ int seekOp;
+ int cont;
+ ExprList *pEList, *pList, eList;
+ struct ExprList_item eListItem;
+ SrcList *pSrc;
+
+
+ /* Check to see if this query is a simple min() or max() query. Return
+ ** zero if it is not.
+ */
+ if( p->pGroupBy || p->pHaving || p->pWhere ) return 0;
+ pSrc = p->pSrc;
+ if( pSrc->nSrc!=1 ) return 0;
+ pEList = p->pEList;
+ if( pEList->nExpr!=1 ) return 0;
+ pExpr = pEList->a[0].pExpr;
+ if( pExpr->op!=TK_AGG_FUNCTION ) return 0;
+ pList = pExpr->pList;
+ if( pList==0 || pList->nExpr!=1 ) return 0;
+ if( pExpr->token.n!=3 ) return 0;
+ if( sqliteStrNICmp(pExpr->token.z,"min",3)==0 ){
+ seekOp = OP_Rewind;
+ }else if( sqliteStrNICmp(pExpr->token.z,"max",3)==0 ){
+ seekOp = OP_Last;
+ }else{
+ return 0;
+ }
+ pExpr = pList->a[0].pExpr;
+ if( pExpr->op!=TK_COLUMN ) return 0;
+ iCol = pExpr->iColumn;
+ pTab = pSrc->a[0].pTab;
+
+ /* If we get to here, it means the query is of the correct form.
+ ** Check to make sure we have an index and make pIdx point to the
+ ** appropriate index. If the min() or max() is on an INTEGER PRIMARY
+ ** key column, no index is necessary so set pIdx to NULL. If no
+ ** usable index is found, return 0.
+ */
+ if( iCol<0 ){
+ pIdx = 0;
+ }else{
+ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+ assert( pIdx->nColumn>=1 );
+ if( pIdx->aiColumn[0]==iCol ) break;
+ }
+ if( pIdx==0 ) return 0;
+ }
+
+ /* Identify column types if we will be using the callback. This
+ ** step is skipped if the output is going to a table or a memory cell.
+ ** The column names have already been generated in the calling function.
+ */
+ v = sqliteGetVdbe(pParse);
+ if( v==0 ) return 0;
+ if( eDest==SRT_Callback ){
+ generateColumnTypes(pParse, p->pSrc, p->pEList);
+ }
+
+ /* If the output is destined for a temporary table, open that table.
+ */
+ if( eDest==SRT_TempTable ){
+ sqliteVdbeAddOp(v, OP_OpenTemp, iParm, 0);
+ }
+
+ /* Generating code to find the min or the max. Basically all we have
+ ** to do is find the first or the last entry in the chosen index. If
+ ** the min() or max() is on the INTEGER PRIMARY KEY, then find the first
+ ** or last entry in the main table.
+ */
+ sqliteCodeVerifySchema(pParse, pTab->iDb);
+ base = pSrc->a[0].iCursor;
+ computeLimitRegisters(pParse, p);
+ if( pSrc->a[0].pSelect==0 ){
+ sqliteVdbeAddOp(v, OP_Integer, pTab->iDb, 0);
+ sqliteVdbeOp3(v, OP_OpenRead, base, pTab->tnum, pTab->zName, 0);
+ }
+ cont = sqliteVdbeMakeLabel(v);
+ if( pIdx==0 ){
+ sqliteVdbeAddOp(v, seekOp, base, 0);
+ }else{
+ sqliteVdbeAddOp(v, OP_Integer, pIdx->iDb, 0);
+ sqliteVdbeOp3(v, OP_OpenRead, base+1, pIdx->tnum, pIdx->zName, P3_STATIC);
+ if( seekOp==OP_Rewind ){
+ sqliteVdbeAddOp(v, OP_String, 0, 0);
+ sqliteVdbeAddOp(v, OP_MakeKey, 1, 0);
+ sqliteVdbeAddOp(v, OP_IncrKey, 0, 0);
+ seekOp = OP_MoveTo;
+ }
+ sqliteVdbeAddOp(v, seekOp, base+1, 0);
+ sqliteVdbeAddOp(v, OP_IdxRecno, base+1, 0);
+ sqliteVdbeAddOp(v, OP_Close, base+1, 0);
+ sqliteVdbeAddOp(v, OP_MoveTo, base, 0);
+ }
+ eList.nExpr = 1;
+ memset(&eListItem, 0, sizeof(eListItem));
+ eList.a = &eListItem;
+ eList.a[0].pExpr = pExpr;
+ selectInnerLoop(pParse, p, &eList, 0, 0, 0, -1, eDest, iParm, cont, cont);
+ sqliteVdbeResolveLabel(v, cont);
+ sqliteVdbeAddOp(v, OP_Close, base, 0);
+
+ return 1;
+}
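+
+/*
+** For illustration only (hypothetical table t1 with an indexed column x):
+** "SELECT min(x) FROM t1" can be handled by the routine above with a
+** single seek, while "SELECT min(x) FROM t1 WHERE x>0" cannot, because
+** any WHERE clause makes the routine return 0 and fall back to the
+** general code generator.
+*/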
+
+/*
+** Generate code for the given SELECT statement.
+**
+** The results are distributed in various ways depending on the
+** value of eDest and iParm.
+**
+** eDest Value Result
+** ------------ -------------------------------------------
+** SRT_Callback Invoke the callback for each row of the result.
+**
+** SRT_Mem Store first result in memory cell iParm
+**
+** SRT_Set Store results as keys of a table with cursor iParm
+**
+** SRT_Union Store results as a key in a temporary table iParm
+**
+** SRT_Except Remove results from the temporary table iParm.
+**
+** SRT_Table Store results in temporary table iParm
+**
+** The table above is incomplete. Additional eDest values have been added
+** since this comment was written. See the selectInnerLoop() function for
+** a complete listing of the allowed values of eDest and their meanings.
+**
+** This routine returns the number of errors. If any errors are
+** encountered, then an appropriate error message is left in
+** pParse->zErrMsg.
+**
+** This routine does NOT free the Select structure passed in. The
+** calling function needs to do that.
+**
+** The pParent, parentTab, and *pParentAgg fields are filled in if this
+** SELECT is a subquery. This routine may try to combine this SELECT
+** with its parent to form a single flat query. In so doing, it might
+** change the parent query from a non-aggregate to an aggregate query.
+** For that reason, the pParentAgg flag is passed as a pointer, so it
+** can be changed.
+**
+** Example 1: The meaning of the pParent parameter.
+**
+** SELECT * FROM t1 JOIN (SELECT x, count(*) FROM t2) JOIN t3;
+** \ \_______ subquery _______/ /
+** \ /
+** \____________________ outer query ___________________/
+**
+** This routine is called for the outer query first. For that call,
+** pParent will be NULL. During the processing of the outer query, this
+** routine is called recursively to handle the subquery. For the recursive
+** call, pParent will point to the outer query. Because the subquery is
+** the second element in a three-way join, the parentTab parameter will
+** be 1 (the 2nd value of a 0-indexed array.)
+*/
+int sqliteSelect(
+ Parse *pParse, /* The parser context */
+ Select *p, /* The SELECT statement being coded. */
+ int eDest, /* How to dispose of the results */
+ int iParm, /* A parameter used by the eDest disposal method */
+ Select *pParent, /* Another SELECT for which this is a sub-query */
+ int parentTab, /* Index in pParent->pSrc of this query */
+ int *pParentAgg /* True if pParent uses aggregate functions */
+){
+ int i;
+ WhereInfo *pWInfo;
+ Vdbe *v;
+ int isAgg = 0; /* True for select lists like "count(*)" */
+ ExprList *pEList; /* List of columns to extract. */
+ SrcList *pTabList; /* List of tables to select from */
+ Expr *pWhere; /* The WHERE clause. May be NULL */
+ ExprList *pOrderBy; /* The ORDER BY clause. May be NULL */
+ ExprList *pGroupBy; /* The GROUP BY clause. May be NULL */
+ Expr *pHaving; /* The HAVING clause. May be NULL */
+ int isDistinct; /* True if the DISTINCT keyword is present */
+ int distinct; /* Table to use for the distinct set */
+ int rc = 1; /* Value to return from this function */
+
+ if( sqlite_malloc_failed || pParse->nErr || p==0 ) return 1;
+ if( sqliteAuthCheck(pParse, SQLITE_SELECT, 0, 0, 0) ) return 1;
+
+ /* If there is a sequence of queries, do the earlier ones first.
+ */
+ if( p->pPrior ){
+ return multiSelect(pParse, p, eDest, iParm);
+ }
+
+ /* Make local copies of the parameters for this query.
+ */
+ pTabList = p->pSrc;
+ pWhere = p->pWhere;
+ pOrderBy = p->pOrderBy;
+ pGroupBy = p->pGroupBy;
+ pHaving = p->pHaving;
+ isDistinct = p->isDistinct;
+
+ /* Allocate VDBE cursors for each table in the FROM clause
+ */
+ sqliteSrcListAssignCursors(pParse, pTabList);
+
+ /*
+ ** Do not even attempt to generate any code if we have already seen
+ ** errors before this routine starts.
+ */
+ if( pParse->nErr>0 ) goto select_end;
+
+ /* Expand any "*" terms in the result set. (For example the "*" in
+ ** "SELECT * FROM t1") The fillInColumnlist() routine also does some
+ ** other housekeeping - see the header comment for details.
+ */
+ if( fillInColumnList(pParse, p) ){
+ goto select_end;
+ }
+ pWhere = p->pWhere;
+ pEList = p->pEList;
+ if( pEList==0 ) goto select_end;
+
+ /* If writing to memory or generating a set,
+ ** only a single column may be output.
+ */
+ if( (eDest==SRT_Mem || eDest==SRT_Set) && pEList->nExpr>1 ){
+ sqliteErrorMsg(pParse, "only a single result allowed for "
+ "a SELECT that is part of an expression");
+ goto select_end;
+ }
+
+ /* ORDER BY is ignored for some destinations.
+ */
+ switch( eDest ){
+ case SRT_Union:
+ case SRT_Except:
+ case SRT_Discard:
+ pOrderBy = 0;
+ break;
+ default:
+ break;
+ }
+
+ /* At this point, we should have allocated all the cursors that we
+ ** need to handle subqueries and temporary tables.
+ **
+ ** Resolve the column names and do a semantics check on all the expressions.
+ */
+ for(i=0; i<pEList->nExpr; i++){
+ if( sqliteExprResolveIds(pParse, pTabList, 0, pEList->a[i].pExpr) ){
+ goto select_end;
+ }
+ if( sqliteExprCheck(pParse, pEList->a[i].pExpr, 1, &isAgg) ){
+ goto select_end;
+ }
+ }
+ if( pWhere ){
+ if( sqliteExprResolveIds(pParse, pTabList, pEList, pWhere) ){
+ goto select_end;
+ }
+ if( sqliteExprCheck(pParse, pWhere, 0, 0) ){
+ goto select_end;
+ }
+ }
+ if( pHaving ){
+ if( pGroupBy==0 ){
+ sqliteErrorMsg(pParse, "a GROUP BY clause is required before HAVING");
+ goto select_end;
+ }
+ if( sqliteExprResolveIds(pParse, pTabList, pEList, pHaving) ){
+ goto select_end;
+ }
+ if( sqliteExprCheck(pParse, pHaving, 1, &isAgg) ){
+ goto select_end;
+ }
+ }
+ if( pOrderBy ){
+ for(i=0; i<pOrderBy->nExpr; i++){
+ int iCol;
+ Expr *pE = pOrderBy->a[i].pExpr;
+ if( sqliteExprIsInteger(pE, &iCol) && iCol>0 && iCol<=pEList->nExpr ){
+ sqliteExprDelete(pE);
+ pE = pOrderBy->a[i].pExpr = sqliteExprDup(pEList->a[iCol-1].pExpr);
+ }
+ if( sqliteExprResolveIds(pParse, pTabList, pEList, pE) ){
+ goto select_end;
+ }
+ if( sqliteExprCheck(pParse, pE, isAgg, 0) ){
+ goto select_end;
+ }
+ if( sqliteExprIsConstant(pE) ){
+ if( sqliteExprIsInteger(pE, &iCol)==0 ){
+ sqliteErrorMsg(pParse,
+ "ORDER BY terms must not be non-integer constants");
+ goto select_end;
+ }else if( iCol<=0 || iCol>pEList->nExpr ){
+ sqliteErrorMsg(pParse,
+ "ORDER BY column number %d out of range - should be "
+ "between 1 and %d", iCol, pEList->nExpr);
+ goto select_end;
+ }
+ }
+ }
+ }
+ if( pGroupBy ){
+ for(i=0; i<pGroupBy->nExpr; i++){
+ int iCol;
+ Expr *pE = pGroupBy->a[i].pExpr;
+ if( sqliteExprIsInteger(pE, &iCol) && iCol>0 && iCol<=pEList->nExpr ){
+ sqliteExprDelete(pE);
+ pE = pGroupBy->a[i].pExpr = sqliteExprDup(pEList->a[iCol-1].pExpr);
+ }
+ if( sqliteExprResolveIds(pParse, pTabList, pEList, pE) ){
+ goto select_end;
+ }
+ if( sqliteExprCheck(pParse, pE, isAgg, 0) ){
+ goto select_end;
+ }
+ if( sqliteExprIsConstant(pE) ){
+ if( sqliteExprIsInteger(pE, &iCol)==0 ){
+ sqliteErrorMsg(pParse,
+ "GROUP BY terms must not be non-integer constants");
+ goto select_end;
+ }else if( iCol<=0 || iCol>pEList->nExpr ){
+ sqliteErrorMsg(pParse,
+ "GROUP BY column number %d out of range - should be "
+ "between 1 and %d", iCol, pEList->nExpr);
+ goto select_end;
+ }
+ }
+ }
+ }
+
+ /* Begin generating code.
+ */
+ v = sqliteGetVdbe(pParse);
+ if( v==0 ) goto select_end;
+
+ /* Identify column names if we will be using them in a callback. This
+ ** step is skipped if the output is going to some other destination.
+ */
+ if( eDest==SRT_Callback ){
+ generateColumnNames(pParse, pTabList, pEList);
+ }
+
+ /* Generate code for all sub-queries in the FROM clause
+ */
+ for(i=0; i<pTabList->nSrc; i++){
+ const char *zSavedAuthContext;
+ int needRestoreContext;
+
+ if( pTabList->a[i].pSelect==0 ) continue;
+ if( pTabList->a[i].zName!=0 ){
+ zSavedAuthContext = pParse->zAuthContext;
+ pParse->zAuthContext = pTabList->a[i].zName;
+ needRestoreContext = 1;
+ }else{
+ needRestoreContext = 0;
+ }
+ sqliteSelect(pParse, pTabList->a[i].pSelect, SRT_TempTable,
+ pTabList->a[i].iCursor, p, i, &isAgg);
+ if( needRestoreContext ){
+ pParse->zAuthContext = zSavedAuthContext;
+ }
+ pTabList = p->pSrc;
+ pWhere = p->pWhere;
+ if( eDest!=SRT_Union && eDest!=SRT_Except && eDest!=SRT_Discard ){
+ pOrderBy = p->pOrderBy;
+ }
+ pGroupBy = p->pGroupBy;
+ pHaving = p->pHaving;
+ isDistinct = p->isDistinct;
+ }
+
+ /* Check for the special case of a min() or max() function by itself
+ ** in the result set.
+ */
+ if( simpleMinMaxQuery(pParse, p, eDest, iParm) ){
+ rc = 0;
+ goto select_end;
+ }
+
+ /* Check to see if this is a subquery that can be "flattened" into its parent.
+ ** If flattening is a possibility, do so and return immediately.
+ */
+ if( pParent && pParentAgg &&
+ flattenSubquery(pParse, pParent, parentTab, *pParentAgg, isAgg) ){
+ if( isAgg ) *pParentAgg = 1;
+ return rc;
+ }
+
+ /* Set the limiter.
+ */
+ computeLimitRegisters(pParse, p);
+
+ /* Identify column types if we will be using a callback. This
+ ** step is skipped if the output is going to a destination other
+ ** than a callback.
+ **
+ ** We have to do this separately from the creation of column names
+ ** above because if the pTabList contains views then they will not
+ ** have been resolved and we will not know the column types until
+ ** now.
+ */
+ if( eDest==SRT_Callback ){
+ generateColumnTypes(pParse, pTabList, pEList);
+ }
+
+ /* If the output is destined for a temporary table, open that table.
+ */
+ if( eDest==SRT_TempTable ){
+ sqliteVdbeAddOp(v, OP_OpenTemp, iParm, 0);
+ }
+
+ /* Do an analysis of aggregate expressions.
+ */
+ sqliteAggregateInfoReset(pParse);
+ if( isAgg || pGroupBy ){
+ assert( pParse->nAgg==0 );
+ isAgg = 1;
+ for(i=0; i<pEList->nExpr; i++){
+ if( sqliteExprAnalyzeAggregates(pParse, pEList->a[i].pExpr) ){
+ goto select_end;
+ }
+ }
+ if( pGroupBy ){
+ for(i=0; i<pGroupBy->nExpr; i++){
+ if( sqliteExprAnalyzeAggregates(pParse, pGroupBy->a[i].pExpr) ){
+ goto select_end;
+ }
+ }
+ }
+ if( pHaving && sqliteExprAnalyzeAggregates(pParse, pHaving) ){
+ goto select_end;
+ }
+ if( pOrderBy ){
+ for(i=0; i<pOrderBy->nExpr; i++){
+ if( sqliteExprAnalyzeAggregates(pParse, pOrderBy->a[i].pExpr) ){
+ goto select_end;
+ }
+ }
+ }
+ }
+
+ /* Reset the aggregator
+ */
+ if( isAgg ){
+ sqliteVdbeAddOp(v, OP_AggReset, 0, pParse->nAgg);
+ for(i=0; i<pParse->nAgg; i++){
+ FuncDef *pFunc;
+ if( (pFunc = pParse->aAgg[i].pFunc)!=0 && pFunc->xFinalize!=0 ){
+ sqliteVdbeOp3(v, OP_AggInit, 0, i, (char*)pFunc, P3_POINTER);
+ }
+ }
+ if( pGroupBy==0 ){
+ sqliteVdbeAddOp(v, OP_String, 0, 0);
+ sqliteVdbeAddOp(v, OP_AggFocus, 0, 0);
+ }
+ }
+
+ /* Initialize the memory cell to NULL
+ */
+ if( eDest==SRT_Mem ){
+ sqliteVdbeAddOp(v, OP_String, 0, 0);
+ sqliteVdbeAddOp(v, OP_MemStore, iParm, 1);
+ }
+
+ /* Open a temporary table to use for the distinct set.
+ */
+ if( isDistinct ){
+ distinct = pParse->nTab++;
+ sqliteVdbeAddOp(v, OP_OpenTemp, distinct, 1);
+ }else{
+ distinct = -1;
+ }
+
+ /* Begin the database scan
+ */
+ pWInfo = sqliteWhereBegin(pParse, pTabList, pWhere, 0,
+ pGroupBy ? 0 : &pOrderBy);
+ if( pWInfo==0 ) goto select_end;
+
+ /* Use the standard inner loop if we are not dealing with
+ ** aggregates
+ */
+ if( !isAgg ){
+ if( selectInnerLoop(pParse, p, pEList, 0, 0, pOrderBy, distinct, eDest,
+ iParm, pWInfo->iContinue, pWInfo->iBreak) ){
+ goto select_end;
+ }
+ }
+
+ /* If we are dealing with aggregates, then do the special aggregate
+ ** processing.
+ */
+ else{
+ AggExpr *pAgg;
+ if( pGroupBy ){
+ int lbl1;
+ for(i=0; i<pGroupBy->nExpr; i++){
+ sqliteExprCode(pParse, pGroupBy->a[i].pExpr);
+ }
+ sqliteVdbeAddOp(v, OP_MakeKey, pGroupBy->nExpr, 0);
+ if( pParse->db->file_format>=4 ) sqliteAddKeyType(v, pGroupBy);
+ lbl1 = sqliteVdbeMakeLabel(v);
+ sqliteVdbeAddOp(v, OP_AggFocus, 0, lbl1);
+ for(i=0, pAgg=pParse->aAgg; i<pParse->nAgg; i++, pAgg++){
+ if( pAgg->isAgg ) continue;
+ sqliteExprCode(pParse, pAgg->pExpr);
+ sqliteVdbeAddOp(v, OP_AggSet, 0, i);
+ }
+ sqliteVdbeResolveLabel(v, lbl1);
+ }
+ for(i=0, pAgg=pParse->aAgg; i<pParse->nAgg; i++, pAgg++){
+ Expr *pE;
+ int nExpr;
+ FuncDef *pDef;
+ if( !pAgg->isAgg ) continue;
+ assert( pAgg->pFunc!=0 );
+ assert( pAgg->pFunc->xStep!=0 );
+ pDef = pAgg->pFunc;
+ pE = pAgg->pExpr;
+ assert( pE!=0 );
+ assert( pE->op==TK_AGG_FUNCTION );
+ nExpr = sqliteExprCodeExprList(pParse, pE->pList, pDef->includeTypes);
+ sqliteVdbeAddOp(v, OP_Integer, i, 0);
+ sqliteVdbeOp3(v, OP_AggFunc, 0, nExpr, (char*)pDef, P3_POINTER);
+ }
+ }
+
+ /* End the database scan loop.
+ */
+ sqliteWhereEnd(pWInfo);
+
+ /* If we are processing aggregates, we need to set up a second loop
+ ** over all of the aggregate values and process them.
+ */
+ if( isAgg ){
+ int endagg = sqliteVdbeMakeLabel(v);
+ int startagg;
+ startagg = sqliteVdbeAddOp(v, OP_AggNext, 0, endagg);
+ pParse->useAgg = 1;
+ if( pHaving ){
+ sqliteExprIfFalse(pParse, pHaving, startagg, 1);
+ }
+ if( selectInnerLoop(pParse, p, pEList, 0, 0, pOrderBy, distinct, eDest,
+ iParm, startagg, endagg) ){
+ goto select_end;
+ }
+ sqliteVdbeAddOp(v, OP_Goto, 0, startagg);
+ sqliteVdbeResolveLabel(v, endagg);
+ sqliteVdbeAddOp(v, OP_Noop, 0, 0);
+ pParse->useAgg = 0;
+ }
+
+ /* If there is an ORDER BY clause, then we need to sort the results
+ ** and send them to the callback one by one.
+ */
+ if( pOrderBy ){
+ generateSortTail(p, v, pEList->nExpr, eDest, iParm);
+ }
+
+ /* If this was a subquery, we have now converted the subquery into a
+ ** temporary table. So delete the subquery structure from the parent
+ ** to prevent this subquery from being evaluated again and to force the
+ ** use of the temporary table.
+ */
+ if( pParent ){
+ assert( pParent->pSrc->nSrc>parentTab );
+ assert( pParent->pSrc->a[parentTab].pSelect==p );
+ sqliteSelectDelete(p);
+ pParent->pSrc->a[parentTab].pSelect = 0;
+ }
+
+ /* The SELECT was successfully coded. Set the return code to 0
+ ** to indicate no errors.
+ */
+ rc = 0;
+
+ /* Control jumps to here if an error is encountered above, or upon
+ ** successful coding of the SELECT.
+ */
+select_end:
+ sqliteAggregateInfoReset(pParse);
+ return rc;
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/src/shell.c b/usr/src/cmd/svc/configd/sqlite/src/shell.c
new file mode 100644
index 0000000000..fe1291e08c
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/shell.c
@@ -0,0 +1,1364 @@
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code to implement the "sqlite" command line
+** utility for accessing SQLite databases.
+**
+** $Id: shell.c,v 1.93 2004/03/17 23:42:13 drh Exp $
+*/
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include "sqlite.h"
+#include "sqlite-misc.h" /* SUNW addition */
+#include <ctype.h>
+
+#if !defined(_WIN32) && !defined(WIN32) && !defined(__MACOS__)
+# include <signal.h>
+# include <pwd.h>
+# include <unistd.h>
+# include <sys/types.h>
+#endif
+
+#ifdef __MACOS__
+# include <console.h>
+# include <signal.h>
+# include <unistd.h>
+# include <extras.h>
+# include <Files.h>
+# include <Folders.h>
+#endif
+
+#if defined(HAVE_READLINE) && HAVE_READLINE==1
+# include <readline/readline.h>
+# include <readline/history.h>
+#else
+# define readline(p) local_getline(p,stdin)
+# define add_history(X)
+# define read_history(X)
+# define write_history(X)
+# define stifle_history(X)
+#endif
+
+/* Make sure isatty() has a prototype.
+*/
+extern int isatty();
+
+/*
+** The following is the open SQLite database. We make a pointer
+** to this database a static variable so that it can be accessed
+** by the SIGINT handler to interrupt database processing.
+*/
+static sqlite *db = 0;
+
+/*
+** True if an interrupt (Control-C) has been received.
+*/
+static int seenInterrupt = 0;
+
+/*
+** This is the name of our program. It is set in main(), used
+** in a number of other places, mostly for error messages.
+*/
+static char *Argv0;
+
+/*
+** Prompt strings. Initialized in main. Settable with
+** .prompt main continue
+*/
+static char mainPrompt[20]; /* First line prompt. default: "sqlite> "*/
+static char continuePrompt[20]; /* Continuation prompt. default: " ...> " */
+
+
+/*
+** Determines if a string is a number or not.
+*/
+extern int sqliteIsNumber(const char*);
+
+/*
+** This routine reads a line of text from standard input, stores
+** the text in memory obtained from malloc() and returns a pointer
+** to the text. NULL is returned at end of file, or if malloc()
+** fails.
+**
+** The interface is like "readline" but no command-line editing
+** is done.
+*/
+static char *local_getline(char *zPrompt, FILE *in){
+ char *zLine;
+ int nLine;
+ int n;
+ int eol;
+
+ if( zPrompt && *zPrompt ){
+ printf("%s",zPrompt);
+ fflush(stdout);
+ }
+ nLine = 100;
+ zLine = malloc( nLine );
+ if( zLine==0 ) return 0;
+ n = 0;
+ eol = 0;
+ while( !eol ){
+ if( n+100>nLine ){
+ nLine = nLine*2 + 100;
+ zLine = realloc(zLine, nLine);
+ if( zLine==0 ) return 0;
+ }
+ if( fgets(&zLine[n], nLine - n, in)==0 ){
+ if( n==0 ){
+ free(zLine);
+ return 0;
+ }
+ zLine[n] = 0;
+ eol = 1;
+ break;
+ }
+ while( zLine[n] ){ n++; }
+ if( n>0 && zLine[n-1]=='\n' ){
+ n--;
+ zLine[n] = 0;
+ eol = 1;
+ }
+ }
+ zLine = realloc( zLine, n+1 );
+ return zLine;
+}
+
+/*
+** Retrieve a single line of input text. If "in" is not NULL, the text is
+** coming from a file or device, so use "local_getline" and issue no
+** prompt. If "in" is NULL, input is interactive: issue a prompt and
+** attempt to use "readline" for command-line editing.
+**
+** zPrior is a string of prior text retrieved. If not the empty
+** string, then issue a continuation prompt.
+*/
+static char *one_input_line(const char *zPrior, FILE *in){
+ char *zPrompt;
+ char *zResult;
+ if( in!=0 ){
+ return local_getline(0, in);
+ }
+ if( zPrior && zPrior[0] ){
+ zPrompt = continuePrompt;
+ }else{
+ zPrompt = mainPrompt;
+ }
+ zResult = readline(zPrompt);
+ if( zResult ) add_history(zResult);
+ return zResult;
+}
+
+struct previous_mode_data {
+ int valid; /* Is there legit data in here? */
+ int mode;
+ int showHeader;
+ int colWidth[100];
+};
+/*
+** A pointer to an instance of this structure is passed from
+** the main program to the callback. This is used to communicate
+** state and mode information.
+*/
+struct callback_data {
+ sqlite *db; /* The database */
+ int echoOn; /* True to echo input commands */
+ int cnt; /* Number of records displayed so far */
+ FILE *out; /* Write results here */
+ int mode; /* An output mode setting */
+ int showHeader; /* True to show column names in List or Column mode */
+ char *zDestTable; /* Name of destination table when MODE_Insert */
+ char separator[20]; /* Separator character for MODE_List */
+ int colWidth[100]; /* Requested width of each column when in column mode*/
+ int actualWidth[100]; /* Actual width of each column */
+ char nullvalue[20]; /* The text to print when a NULL comes back from
+ ** the database */
+ struct previous_mode_data explainPrev;
+ /* Holds the mode information just before
+ ** .explain ON */
+ char outfile[FILENAME_MAX]; /* Filename for *out */
+ const char *zDbFilename; /* name of the database file */
+ char *zKey; /* Encryption key */
+};
+
+/*
+** These are the allowed modes.
+*/
+#define MODE_Line 0 /* One column per line. Blank line between records */
+#define MODE_Column 1 /* One record per line in neat columns */
+#define MODE_List 2 /* One record per line with a separator */
+#define MODE_Semi 3 /* Same as MODE_List but append ";" to each line */
+#define MODE_Html 4 /* Generate an XHTML table */
+#define MODE_Insert 5 /* Generate SQL "insert" statements */
+#define MODE_NUM_OF 6 /* The number of modes (not a mode itself) */
+
+char *modeDescr[MODE_NUM_OF] = {
+ "line",
+ "column",
+ "list",
+ "semi",
+ "html",
+ "insert"
+};
+
+/*
+** Number of elements in an array
+*/
+#define ArraySize(X) (sizeof(X)/sizeof(X[0]))
+
+/*
+** Output the given string as a quoted string using SQL quoting conventions.
+*/
+static void output_quoted_string(FILE *out, const char *z){
+ int i;
+ int nSingle = 0;
+ for(i=0; z[i]; i++){
+ if( z[i]=='\'' ) nSingle++;
+ }
+ if( nSingle==0 ){
+ fprintf(out,"'%s'",z);
+ }else{
+ fprintf(out,"'");
+ while( *z ){
+ for(i=0; z[i] && z[i]!='\''; i++){}
+ if( i==0 ){
+ fprintf(out,"''");
+ z++;
+ }else if( z[i]=='\'' ){
+ fprintf(out,"%.*s''",i,z);
+ z += i+1;
+ }else{
+ fprintf(out,"%s",z);
+ break;
+ }
+ }
+ fprintf(out,"'");
+ }
+}
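+
+/*
+** For example, the (hypothetical) input string   it's "ok"   would be
+** emitted as
+**
+** 'it''s "ok"'
+**
+** since embedded single quotes are doubled and everything else is
+** copied through unchanged.
+*/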
+
+/*
+** Output the given string with characters that are special to
+** HTML escaped.
+*/
+static void output_html_string(FILE *out, const char *z){
+ int i;
+ while( *z ){
+ for(i=0; z[i] && z[i]!='<' && z[i]!='&'; i++){}
+ if( i>0 ){
+ fprintf(out,"%.*s",i,z);
+ }
+ if( z[i]=='<' ){
+ fprintf(out,"&lt;");
+ }else if( z[i]=='&' ){
+ fprintf(out,"&amp;");
+ }else{
+ break;
+ }
+ z += i + 1;
+ }
+}
+
+/*
+** This routine runs when the user presses Ctrl-C
+*/
+static void interrupt_handler(int NotUsed){
+ seenInterrupt = 1;
+ if( db ) sqlite_interrupt(db);
+}
+
+/*
+** This is the callback routine that the SQLite library
+** invokes for each row of a query result.
+*/
+static int callback(void *pArg, int nArg, char **azArg, char **azCol){
+ int i;
+ struct callback_data *p = (struct callback_data*)pArg;
+ switch( p->mode ){
+ case MODE_Line: {
+ int w = 5;
+ if( azArg==0 ) break;
+ for(i=0; i<nArg; i++){
+ int len = strlen(azCol[i]);
+ if( len>w ) w = len;
+ }
+ if( p->cnt++>0 ) fprintf(p->out,"\n");
+ for(i=0; i<nArg; i++){
+ fprintf(p->out,"%*s = %s\n", w, azCol[i],
+ azArg[i] ? azArg[i] : p->nullvalue);
+ }
+ break;
+ }
+ case MODE_Column: {
+ if( p->cnt++==0 ){
+ for(i=0; i<nArg; i++){
+ int w, n;
+ if( i<ArraySize(p->colWidth) ){
+ w = p->colWidth[i];
+ }else{
+ w = 0;
+ }
+ if( w<=0 ){
+ w = strlen(azCol[i] ? azCol[i] : "");
+ if( w<10 ) w = 10;
+ n = strlen(azArg && azArg[i] ? azArg[i] : p->nullvalue);
+ if( w<n ) w = n;
+ }
+ if( i<ArraySize(p->actualWidth) ){
+ p->actualWidth[i] = w;
+ }
+ if( p->showHeader ){
+ fprintf(p->out,"%-*.*s%s",w,w,azCol[i], i==nArg-1 ? "\n": " ");
+ }
+ }
+ if( p->showHeader ){
+ for(i=0; i<nArg; i++){
+ int w;
+ if( i<ArraySize(p->actualWidth) ){
+ w = p->actualWidth[i];
+ }else{
+ w = 10;
+ }
+ fprintf(p->out,"%-*.*s%s",w,w,"-----------------------------------"
+ "----------------------------------------------------------",
+ i==nArg-1 ? "\n": " ");
+ }
+ }
+ }
+ if( azArg==0 ) break;
+ for(i=0; i<nArg; i++){
+ int w;
+ if( i<ArraySize(p->actualWidth) ){
+ w = p->actualWidth[i];
+ }else{
+ w = 10;
+ }
+ fprintf(p->out,"%-*.*s%s",w,w,
+ azArg[i] ? azArg[i] : p->nullvalue, i==nArg-1 ? "\n": " ");
+ }
+ break;
+ }
+ case MODE_Semi:
+ case MODE_List: {
+ if( p->cnt++==0 && p->showHeader ){
+ for(i=0; i<nArg; i++){
+ fprintf(p->out,"%s%s",azCol[i], i==nArg-1 ? "\n" : p->separator);
+ }
+ }
+ if( azArg==0 ) break;
+ for(i=0; i<nArg; i++){
+ char *z = azArg[i];
+ if( z==0 ) z = p->nullvalue;
+ fprintf(p->out, "%s", z);
+ if( i<nArg-1 ){
+ fprintf(p->out, "%s", p->separator);
+ }else if( p->mode==MODE_Semi ){
+ fprintf(p->out, ";\n");
+ }else{
+ fprintf(p->out, "\n");
+ }
+ }
+ break;
+ }
+ case MODE_Html: {
+ if( p->cnt++==0 && p->showHeader ){
+ fprintf(p->out,"<TR>");
+ for(i=0; i<nArg; i++){
+ fprintf(p->out,"<TH>%s</TH>",azCol[i]);
+ }
+ fprintf(p->out,"</TR>\n");
+ }
+ if( azArg==0 ) break;
+ fprintf(p->out,"<TR>");
+ for(i=0; i<nArg; i++){
+ fprintf(p->out,"<TD>");
+ output_html_string(p->out, azArg[i] ? azArg[i] : p->nullvalue);
+ fprintf(p->out,"</TD>\n");
+ }
+ fprintf(p->out,"</TR>\n");
+ break;
+ }
+ case MODE_Insert: {
+ if( azArg==0 ) break;
+ fprintf(p->out,"INSERT INTO %s VALUES(",p->zDestTable);
+ for(i=0; i<nArg; i++){
+ char *zSep = i>0 ? ",": "";
+ if( azArg[i]==0 ){
+ fprintf(p->out,"%sNULL",zSep);
+ }else if( sqliteIsNumber(azArg[i]) ){
+ fprintf(p->out,"%s%s",zSep, azArg[i]);
+ }else{
+ if( zSep[0] ) fprintf(p->out,"%s",zSep);
+ output_quoted_string(p->out, azArg[i]);
+ }
+ }
+ fprintf(p->out,");\n");
+ break;
+ }
+ }
+ return 0;
+}
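+
+/*
+** As an example of MODE_Insert output (the table name is hypothetical):
+** if zDestTable is "t1" and a row arrives with the values 3, NULL and
+** it's, the callback above writes
+**
+** INSERT INTO t1 VALUES(3,NULL,'it''s');
+**
+** Numbers and NULLs are emitted bare; other text is quoted by
+** output_quoted_string().
+*/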
+
+/*
+** Set the destination table field of the callback_data structure to
+** the name of the table given. Escape any quote characters in the
+** table name.
+*/
+static void set_table_name(struct callback_data *p, const char *zName){
+ int i, n;
+ int needQuote;
+ char *z;
+
+ if( p->zDestTable ){
+ free(p->zDestTable);
+ p->zDestTable = 0;
+ }
+ if( zName==0 ) return;
+ needQuote = !isalpha(*zName) && *zName!='_';
+ for(i=n=0; zName[i]; i++, n++){
+ if( !isalnum(zName[i]) && zName[i]!='_' ){
+ needQuote = 1;
+ if( zName[i]=='\'' ) n++;
+ }
+ }
+ if( needQuote ) n += 2;
+ z = p->zDestTable = malloc( n+1 );
+ if( z==0 ){
+ fprintf(stderr,"Out of memory!\n");
+ exit(1);
+ }
+ n = 0;
+ if( needQuote ) z[n++] = '\'';
+ for(i=0; zName[i]; i++){
+ z[n++] = zName[i];
+ if( zName[i]=='\'' ) z[n++] = '\'';
+ }
+ if( needQuote ) z[n++] = '\'';
+ z[n] = 0;
+}
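+
+/*
+** For example (names are hypothetical): set_table_name(p, "t1") stores
+** the name unchanged, set_table_name(p, "my table") stores 'my table',
+** and set_table_name(p, "it's") stores 'it''s', so the stored name can be
+** pasted directly into the INSERT statements generated in MODE_Insert.
+*/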
+
+/*
+** This is a different callback routine used for dumping the database.
+** Each row received by this callback consists of a table name,
+** the table type ("index" or "table") and SQL to create the table.
+** This routine should print text sufficient to recreate the table.
+*/
+static int dump_callback(void *pArg, int nArg, char **azArg, char **azCol){
+ struct callback_data *p = (struct callback_data *)pArg;
+ if( nArg!=3 ) return 1;
+ fprintf(p->out, "%s;\n", azArg[2]);
+ if( strcmp(azArg[1],"table")==0 ){
+ struct callback_data d2;
+ d2 = *p;
+ d2.mode = MODE_Insert;
+ d2.zDestTable = 0;
+ set_table_name(&d2, azArg[0]);
+ sqlite_exec_printf(p->db,
+ "SELECT * FROM '%q'",
+ callback, &d2, 0, azArg[0]
+ );
+ set_table_name(&d2, 0);
+ }
+ return 0;
+}
+
+/*
+** Text of a help message
+*/
+static char zHelp[] =
+ ".databases List names and files of attached databases\n"
+ ".dump ?TABLE? ... Dump the database in a text format\n"
+ ".echo ON|OFF Turn command echo on or off\n"
+ ".exit Exit this program\n"
+ ".explain ON|OFF Turn output mode suitable for EXPLAIN on or off.\n"
+ ".header(s) ON|OFF Turn display of headers on or off\n"
+ ".help Show this message\n"
+ ".indices TABLE Show names of all indices on TABLE\n"
+ ".mode MODE Set mode to one of \"line(s)\", \"column(s)\", \n"
+ " \"insert\", \"list\", or \"html\"\n"
+ ".mode insert TABLE Generate SQL insert statements for TABLE\n"
+ ".nullvalue STRING Print STRING instead of nothing for NULL data\n"
+ ".output FILENAME Send output to FILENAME\n"
+ ".output stdout Send output to the screen\n"
+ ".prompt MAIN CONTINUE Replace the standard prompts\n"
+ ".quit Exit this program\n"
+ ".read FILENAME Execute SQL in FILENAME\n"
+#ifdef SQLITE_HAS_CODEC
+ ".rekey OLD NEW NEW Change the encryption key\n"
+#endif
+ ".schema ?TABLE? Show the CREATE statements\n"
+ ".separator STRING Change separator string for \"list\" mode\n"
+ ".show Show the current values for various settings\n"
+ ".tables ?PATTERN? List names of tables matching a pattern\n"
+ ".timeout MS Try opening locked tables for MS milliseconds\n"
+ ".width NUM NUM ... Set column widths for \"column\" mode\n"
+;
+
+/* Forward reference */
+static void process_input(struct callback_data *p, FILE *in);
+
+/*
+** Make sure the database is open. If it is not, then open it. If
+** the database fails to open, print an error message and exit.
+*/
+static void open_db(struct callback_data *p){
+ if( p->db==0 ){
+ char *zErrMsg = 0;
+#ifdef SQLITE_HAS_CODEC
+ int n = p->zKey ? strlen(p->zKey) : 0;
+ db = p->db = sqlite_open_encrypted(p->zDbFilename, p->zKey, n, 0, &zErrMsg);
+#else
+ db = p->db = sqlite_open(p->zDbFilename, 0, &zErrMsg);
+#endif
+ if( p->db==0 ){
+ if( zErrMsg ){
+ fprintf(stderr,"Unable to open database \"%s\": %s\n",
+ p->zDbFilename, zErrMsg);
+ }else{
+ fprintf(stderr,"Unable to open database %s\n", p->zDbFilename);
+ }
+ exit(1);
+ }
+ }
+}
+
+/*
+** If an input line begins with "." then invoke this routine to
+** process that line.
+**
+** Return 1 to exit and 0 to continue.
+*/
+static int do_meta_command(char *zLine, struct callback_data *p){
+ int i = 1;
+ int nArg = 0;
+ int n, c;
+ int rc = 0;
+ char *azArg[50];
+
+ /* Parse the input line into tokens.
+ */
+ while( zLine[i] && nArg<ArraySize(azArg) ){
+ while( isspace(zLine[i]) ){ i++; }
+ if( zLine[i]==0 ) break;
+ if( zLine[i]=='\'' || zLine[i]=='"' ){
+ int delim = zLine[i++];
+ azArg[nArg++] = &zLine[i];
+ while( zLine[i] && zLine[i]!=delim ){ i++; }
+ if( zLine[i]==delim ){
+ zLine[i++] = 0;
+ }
+ }else{
+ azArg[nArg++] = &zLine[i];
+ while( zLine[i] && !isspace(zLine[i]) ){ i++; }
+ if( zLine[i] ) zLine[i++] = 0;
+ }
+ }
+
+ /* Process the input line.
+ */
+ if( nArg==0 ) return rc;
+ n = strlen(azArg[0]);
+ c = azArg[0][0];
+ if( c=='d' && n>1 && strncmp(azArg[0], "databases", n)==0 ){
+ struct callback_data data;
+ char *zErrMsg = 0;
+ open_db(p);
+ memcpy(&data, p, sizeof(data));
+ data.showHeader = 1;
+ data.mode = MODE_Column;
+ data.colWidth[0] = 3;
+ data.colWidth[1] = 15;
+ data.colWidth[2] = 58;
+ sqlite_exec(p->db, "PRAGMA database_list; ", callback, &data, &zErrMsg);
+ if( zErrMsg ){
+ fprintf(stderr,"Error: %s\n", zErrMsg);
+ sqlite_freemem(zErrMsg);
+ }
+ }else
+
+ if( c=='d' && strncmp(azArg[0], "dump", n)==0 ){
+ char *zErrMsg = 0;
+ open_db(p);
+ fprintf(p->out, "BEGIN TRANSACTION;\n");
+ if( nArg==1 ){
+ sqlite_exec(p->db,
+ "SELECT name, type, sql FROM sqlite_master "
+ "WHERE type!='meta' AND sql NOT NULL "
+ "ORDER BY substr(type,2,1), name",
+ dump_callback, p, &zErrMsg
+ );
+ }else{
+ int i;
+ for(i=1; i<nArg && zErrMsg==0; i++){
+ sqlite_exec_printf(p->db,
+ "SELECT name, type, sql FROM sqlite_master "
+ "WHERE tbl_name LIKE '%q' AND type!='meta' AND sql NOT NULL "
+ "ORDER BY substr(type,2,1), name",
+ dump_callback, p, &zErrMsg, azArg[i]
+ );
+ }
+ }
+ if( zErrMsg ){
+ fprintf(stderr,"Error: %s\n", zErrMsg);
+ sqlite_freemem(zErrMsg);
+ }else{
+ fprintf(p->out, "COMMIT;\n");
+ }
+ }else
+
+ if( c=='e' && strncmp(azArg[0], "echo", n)==0 && nArg>1 ){
+ int j;
+ char *z = azArg[1];
+ int val = atoi(azArg[1]);
+ for(j=0; z[j]; j++){
+ if( isupper(z[j]) ) z[j] = tolower(z[j]);
+ }
+ if( strcmp(z,"on")==0 ){
+ val = 1;
+ }else if( strcmp(z,"yes")==0 ){
+ val = 1;
+ }
+ p->echoOn = val;
+ }else
+
+ if( c=='e' && strncmp(azArg[0], "exit", n)==0 ){
+ rc = 1;
+ }else
+
+ if( c=='e' && strncmp(azArg[0], "explain", n)==0 ){
+ int j;
+ char *z = nArg>=2 ? azArg[1] : "1";
+ int val = atoi(z);
+ for(j=0; z[j]; j++){
+ if( isupper(z[j]) ) z[j] = tolower(z[j]);
+ }
+ if( strcmp(z,"on")==0 ){
+ val = 1;
+ }else if( strcmp(z,"yes")==0 ){
+ val = 1;
+ }
+ if(val == 1) {
+ if(!p->explainPrev.valid) {
+ p->explainPrev.valid = 1;
+ p->explainPrev.mode = p->mode;
+ p->explainPrev.showHeader = p->showHeader;
+ memcpy(p->explainPrev.colWidth,p->colWidth,sizeof(p->colWidth));
+ }
+ /* We could put this code under the !p->explainPrev.valid
+ ** condition so that it does not execute if we are already in
+ ** explain mode. However, always executing it allows us an easy
+ ** way to reset to explain mode in case the user previously
+ ** did an .explain followed by a .width, .mode or .header
+ ** command.
+ */
+ p->mode = MODE_Column;
+ p->showHeader = 1;
+ memset(p->colWidth,0,sizeof(p->colWidth));
+ p->colWidth[0] = 4;
+ p->colWidth[1] = 12;
+ p->colWidth[2] = 10;
+ p->colWidth[3] = 10;
+ p->colWidth[4] = 35;
+ }else if (p->explainPrev.valid) {
+ p->explainPrev.valid = 0;
+ p->mode = p->explainPrev.mode;
+ p->showHeader = p->explainPrev.showHeader;
+ memcpy(p->colWidth,p->explainPrev.colWidth,sizeof(p->colWidth));
+ }
+ }else
+
+ if( c=='h' && (strncmp(azArg[0], "header", n)==0
+ ||
+ strncmp(azArg[0], "headers", n)==0 )&& nArg>1 ){
+ int j;
+ char *z = azArg[1];
+ int val = atoi(azArg[1]);
+ for(j=0; z[j]; j++){
+ if( isupper(z[j]) ) z[j] = tolower(z[j]);
+ }
+ if( strcmp(z,"on")==0 ){
+ val = 1;
+ }else if( strcmp(z,"yes")==0 ){
+ val = 1;
+ }
+ p->showHeader = val;
+ }else
+
+ if( c=='h' && strncmp(azArg[0], "help", n)==0 ){
+ fprintf(stderr,zHelp);
+ }else
+
+ if( c=='i' && strncmp(azArg[0], "indices", n)==0 && nArg>1 ){
+ struct callback_data data;
+ char *zErrMsg = 0;
+ open_db(p);
+ memcpy(&data, p, sizeof(data));
+ data.showHeader = 0;
+ data.mode = MODE_List;
+ sqlite_exec_printf(p->db,
+ "SELECT name FROM sqlite_master "
+ "WHERE type='index' AND tbl_name LIKE '%q' "
+ "UNION ALL "
+ "SELECT name FROM sqlite_temp_master "
+ "WHERE type='index' AND tbl_name LIKE '%q' "
+ "ORDER BY 1",
+ callback, &data, &zErrMsg, azArg[1], azArg[1]
+ );
+ if( zErrMsg ){
+ fprintf(stderr,"Error: %s\n", zErrMsg);
+ sqlite_freemem(zErrMsg);
+ }
+ }else
+
+ if( c=='m' && strncmp(azArg[0], "mode", n)==0 && nArg>=2 ){
+ int n2 = strlen(azArg[1]);
+ if( strncmp(azArg[1],"line",n2)==0
+ ||
+ strncmp(azArg[1],"lines",n2)==0 ){
+ p->mode = MODE_Line;
+ }else if( strncmp(azArg[1],"column",n2)==0
+ ||
+ strncmp(azArg[1],"columns",n2)==0 ){
+ p->mode = MODE_Column;
+ }else if( strncmp(azArg[1],"list",n2)==0 ){
+ p->mode = MODE_List;
+ }else if( strncmp(azArg[1],"html",n2)==0 ){
+ p->mode = MODE_Html;
+ }else if( strncmp(azArg[1],"insert",n2)==0 ){
+ p->mode = MODE_Insert;
+ if( nArg>=3 ){
+ set_table_name(p, azArg[2]);
+ }else{
+ set_table_name(p, "table");
+ }
+ }else {
+ fprintf(stderr,"mode should be on of: column html insert line list\n");
+ }
+ }else
+
+ if( c=='n' && strncmp(azArg[0], "nullvalue", n)==0 && nArg==2 ) {
+ sprintf(p->nullvalue, "%.*s", (int)ArraySize(p->nullvalue)-1, azArg[1]);
+ }else
+
+ if( c=='o' && strncmp(azArg[0], "output", n)==0 && nArg==2 ){
+ if( p->out!=stdout ){
+ fclose(p->out);
+ }
+ if( strcmp(azArg[1],"stdout")==0 ){
+ p->out = stdout;
+ strcpy(p->outfile,"stdout");
+ }else{
+ p->out = fopen(azArg[1], "wb");
+ if( p->out==0 ){
+ fprintf(stderr,"can't write to \"%s\"\n", azArg[1]);
+ p->out = stdout;
+ } else {
+ strcpy(p->outfile,azArg[1]);
+ }
+ }
+ }else
+
+ if( c=='p' && strncmp(azArg[0], "prompt", n)==0 && (nArg==2 || nArg==3)){
+ if( nArg >= 2) {
+ strncpy(mainPrompt,azArg[1],(int)ArraySize(mainPrompt)-1);
+ }
+ if( nArg >= 3) {
+ strncpy(continuePrompt,azArg[2],(int)ArraySize(continuePrompt)-1);
+ }
+ }else
+
+ if( c=='q' && strncmp(azArg[0], "quit", n)==0 ){
+ rc = 1;
+ }else
+
+ if( c=='r' && strncmp(azArg[0], "read", n)==0 && nArg==2 ){
+ FILE *alt = fopen(azArg[1], "rb");
+ if( alt==0 ){
+ fprintf(stderr,"can't open \"%s\"\n", azArg[1]);
+ }else{
+ process_input(p, alt);
+ fclose(alt);
+ }
+ }else
+
+#ifdef SQLITE_HAS_CODEC
+ if( c=='r' && strncmp(azArg[0],"rekey", n)==0 && nArg==4 ){
+ char *zOld = p->zKey;
+ if( zOld==0 ) zOld = "";
+ if( strcmp(azArg[1],zOld) ){
+ fprintf(stderr,"old key is incorrect\n");
+ }else if( strcmp(azArg[2], azArg[3]) ){
+ fprintf(stderr,"2nd copy of new key does not match the 1st\n");
+ }else{
+ sqlite_freemem(p->zKey);
+ p->zKey = sqlite_mprintf("%s", azArg[2]);
+ sqlite_rekey(p->db, p->zKey, strlen(p->zKey));
+ }
+ }else
+#endif
+
+ if( c=='s' && strncmp(azArg[0], "schema", n)==0 ){
+ struct callback_data data;
+ char *zErrMsg = 0;
+ open_db(p);
+ memcpy(&data, p, sizeof(data));
+ data.showHeader = 0;
+ data.mode = MODE_Semi;
+ if( nArg>1 ){
+ extern int sqliteStrICmp(const char*,const char*);
+ if( sqliteStrICmp(azArg[1],"sqlite_master")==0 ){
+ char *new_argv[2], *new_colv[2];
+ new_argv[0] = "CREATE TABLE sqlite_master (\n"
+ " type text,\n"
+ " name text,\n"
+ " tbl_name text,\n"
+ " rootpage integer,\n"
+ " sql text\n"
+ ")";
+ new_argv[1] = 0;
+ new_colv[0] = "sql";
+ new_colv[1] = 0;
+ callback(&data, 1, new_argv, new_colv);
+ }else if( sqliteStrICmp(azArg[1],"sqlite_temp_master")==0 ){
+ char *new_argv[2], *new_colv[2];
+ new_argv[0] = "CREATE TEMP TABLE sqlite_temp_master (\n"
+ " type text,\n"
+ " name text,\n"
+ " tbl_name text,\n"
+ " rootpage integer,\n"
+ " sql text\n"
+ ")";
+ new_argv[1] = 0;
+ new_colv[0] = "sql";
+ new_colv[1] = 0;
+ callback(&data, 1, new_argv, new_colv);
+ }else{
+ sqlite_exec_printf(p->db,
+ "SELECT sql FROM "
+ " (SELECT * FROM sqlite_master UNION ALL"
+ " SELECT * FROM sqlite_temp_master) "
+ "WHERE tbl_name LIKE '%q' AND type!='meta' AND sql NOTNULL "
+ "ORDER BY substr(type,2,1), name",
+ callback, &data, &zErrMsg, azArg[1]);
+ }
+ }else{
+ sqlite_exec(p->db,
+ "SELECT sql FROM "
+ " (SELECT * FROM sqlite_master UNION ALL"
+ " SELECT * FROM sqlite_temp_master) "
+ "WHERE type!='meta' AND sql NOTNULL "
+ "ORDER BY substr(type,2,1), name",
+ callback, &data, &zErrMsg
+ );
+ }
+ if( zErrMsg ){
+ fprintf(stderr,"Error: %s\n", zErrMsg);
+ sqlite_freemem(zErrMsg);
+ }
+ }else
+
+ if( c=='s' && strncmp(azArg[0], "separator", n)==0 && nArg==2 ){
+ sprintf(p->separator, "%.*s", (int)ArraySize(p->separator)-1, azArg[1]);
+ }else
+
+ if( c=='s' && strncmp(azArg[0], "show", n)==0){
+ int i;
+ fprintf(p->out,"%9.9s: %s\n","echo", p->echoOn ? "on" : "off");
+ fprintf(p->out,"%9.9s: %s\n","explain", p->explainPrev.valid ? "on" :"off");
+ fprintf(p->out,"%9.9s: %s\n","headers", p->showHeader ? "on" : "off");
+ fprintf(p->out,"%9.9s: %s\n","mode", modeDescr[p->mode]);
+ fprintf(p->out,"%9.9s: %s\n","nullvalue", p->nullvalue);
+ fprintf(p->out,"%9.9s: %s\n","output",
+ strlen(p->outfile) ? p->outfile : "stdout");
+ fprintf(p->out,"%9.9s: %s\n","separator", p->separator);
+ fprintf(p->out,"%9.9s: ","width");
+ for (i=0;i<(int)ArraySize(p->colWidth) && p->colWidth[i] != 0;i++) {
+ fprintf(p->out,"%d ",p->colWidth[i]);
+ }
+ fprintf(p->out,"\n\n");
+ }else
+
+ if( c=='t' && n>1 && strncmp(azArg[0], "tables", n)==0 ){
+ char **azResult;
+ int nRow, rc;
+ char *zErrMsg;
+ open_db(p);
+ if( nArg==1 ){
+ rc = sqlite_get_table(p->db,
+ "SELECT name FROM sqlite_master "
+ "WHERE type IN ('table','view') "
+ "UNION ALL "
+ "SELECT name FROM sqlite_temp_master "
+ "WHERE type IN ('table','view') "
+ "ORDER BY 1",
+ &azResult, &nRow, 0, &zErrMsg
+ );
+ }else{
+ rc = sqlite_get_table_printf(p->db,
+ "SELECT name FROM sqlite_master "
+ "WHERE type IN ('table','view') AND name LIKE '%%%q%%' "
+ "UNION ALL "
+ "SELECT name FROM sqlite_temp_master "
+ "WHERE type IN ('table','view') AND name LIKE '%%%q%%' "
+ "ORDER BY 1",
+ &azResult, &nRow, 0, &zErrMsg, azArg[1], azArg[1]
+ );
+ }
+ if( zErrMsg ){
+ fprintf(stderr,"Error: %s\n", zErrMsg);
+ sqlite_freemem(zErrMsg);
+ }
+ if( rc==SQLITE_OK ){
+ int len, maxlen = 0;
+ int i, j;
+ int nPrintCol, nPrintRow;
+ for(i=1; i<=nRow; i++){
+ if( azResult[i]==0 ) continue;
+ len = strlen(azResult[i]);
+ if( len>maxlen ) maxlen = len;
+ }
+ nPrintCol = 80/(maxlen+2);
+ if( nPrintCol<1 ) nPrintCol = 1;
+ nPrintRow = (nRow + nPrintCol - 1)/nPrintCol;
+ for(i=0; i<nPrintRow; i++){
+ for(j=i+1; j<=nRow; j+=nPrintRow){
+ char *zSp = j<=nPrintRow ? "" : " ";
+ printf("%s%-*s", zSp, maxlen, azResult[j] ? azResult[j] : "");
+ }
+ printf("\n");
+ }
+ }
+ sqlite_free_table(azResult);
+ }else
+
+ if( c=='t' && n>1 && strncmp(azArg[0], "timeout", n)==0 && nArg>=2 ){
+ open_db(p);
+ sqlite_busy_timeout(p->db, atoi(azArg[1]));
+ }else
+
+ if( c=='w' && strncmp(azArg[0], "width", n)==0 ){
+ int j;
+ for(j=1; j<nArg && j<ArraySize(p->colWidth); j++){
+ p->colWidth[j-1] = atoi(azArg[j]);
+ }
+ }else
+
+ {
+ fprintf(stderr, "unknown command or invalid arguments: "
+ " \"%s\". Enter \".help\" for help\n", azArg[0]);
+ }
+
+ return rc;
+}
+
+/*
+** Return TRUE if the last non-whitespace character in z[] is a semicolon.
+** z[] is N characters long.
+*/
+static int _ends_with_semicolon(const char *z, int N){
+ while( N>0 && isspace(z[N-1]) ){ N--; }
+ return N>0 && z[N-1]==';';
+}
+
+/*
+** Test to see if a line consists entirely of whitespace.
+*/
+static int _all_whitespace(const char *z){
+ for(; *z; z++){
+ if( isspace(*z) ) continue;
+ if( *z=='/' && z[1]=='*' ){
+ z += 2;
+ while( *z && (*z!='*' || z[1]!='/') ){ z++; }
+ if( *z==0 ) return 0;
+ z++;
+ continue;
+ }
+ if( *z=='-' && z[1]=='-' ){
+ z += 2;
+ while( *z && *z!='\n' ){ z++; }
+ if( *z==0 ) return 1;
+ continue;
+ }
+ return 0;
+ }
+ return 1;
+}
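+
+/*
+** For illustration: a line containing only an SQL comment, such as
+** "  -- just a note", is reported as all-whitespace (returns 1), while a
+** line starting an unterminated C-style comment, such as "/* pending",
+** is not (returns 0), so the text is carried forward as pending SQL.
+*/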
+
+/*
+** Return TRUE if the line typed in is an SQL command terminator other
+** than a semi-colon. The SQL Server style "go" command is understood
+** as is the Oracle "/".
+*/
+static int _is_command_terminator(const char *zLine){
+ extern int sqliteStrNICmp(const char*,const char*,int);
+ while( isspace(*zLine) ){ zLine++; };
+ if( zLine[0]=='/' && _all_whitespace(&zLine[1]) ) return 1; /* Oracle */
+ if( sqliteStrNICmp(zLine,"go",2)==0 && _all_whitespace(&zLine[2]) ){
+ return 1; /* SQL Server */
+ }
+ return 0;
+}
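+
+/*
+** For example, an input line consisting only of "go" or only of "/"
+** (optionally preceded by whitespace) is recognized here, and
+** process_input() below then treats it exactly like a line containing
+** a lone ";".
+*/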
+
+/*
+** Read input from *in and process it. If *in==0 then input
+** is interactive - the user is typing it in. Otherwise, input
+** is coming from a file or device. A prompt is issued and history
+** is saved only if input is interactive. An interrupt signal will
+** cause this routine to exit immediately, unless input is interactive.
+*/
+static void process_input(struct callback_data *p, FILE *in){
+ char *zLine;
+ char *zSql = 0;
+ int nSql = 0;
+ char *zErrMsg;
+ int rc;
+ while( fflush(p->out), (zLine = one_input_line(zSql, in))!=0 ){
+ if( seenInterrupt ){
+ if( in!=0 ) break;
+ seenInterrupt = 0;
+ }
+ if( p->echoOn ) printf("%s\n", zLine);
+ if( (zSql==0 || zSql[0]==0) && _all_whitespace(zLine) ) continue;
+ if( zLine && zLine[0]=='.' && nSql==0 ){
+ int rc = do_meta_command(zLine, p);
+ free(zLine);
+ if( rc ) break;
+ continue;
+ }
+ if( _is_command_terminator(zLine) ){
+ strcpy(zLine,";");
+ }
+ if( zSql==0 ){
+ int i;
+ for(i=0; zLine[i] && isspace(zLine[i]); i++){}
+ if( zLine[i]!=0 ){
+ nSql = strlen(zLine);
+ zSql = malloc( nSql+1 );
+ strcpy(zSql, zLine);
+ }
+ }else{
+ int len = strlen(zLine);
+ zSql = realloc( zSql, nSql + len + 2 );
+ if( zSql==0 ){
+ fprintf(stderr,"%s: out of memory!\n", Argv0);
+ exit(1);
+ }
+ strcpy(&zSql[nSql++], "\n");
+ strcpy(&zSql[nSql], zLine);
+ nSql += len;
+ }
+ free(zLine);
+ if( zSql && _ends_with_semicolon(zSql, nSql) && sqlite_complete(zSql) ){
+ p->cnt = 0;
+ open_db(p);
+ rc = sqlite_exec(p->db, zSql, callback, p, &zErrMsg);
+ if( rc || zErrMsg ){
+ if( in!=0 && !p->echoOn ) printf("%s\n",zSql);
+ if( zErrMsg!=0 ){
+ printf("SQL error: %s\n", zErrMsg);
+ sqlite_freemem(zErrMsg);
+ zErrMsg = 0;
+ }else{
+ printf("SQL error: %s\n", sqlite_error_string(rc));
+ }
+ }
+ free(zSql);
+ zSql = 0;
+ nSql = 0;
+ }
+ }
+ if( zSql ){
+ if( !_all_whitespace(zSql) ) printf("Incomplete SQL: %s\n", zSql);
+ free(zSql);
+ }
+}
+
+/*
+** Return a pathname which is the user's home directory. A
+** 0 return indicates an error of some kind. Space to hold the
+** resulting string is obtained from malloc(). The calling
+** function should free the result.
+*/
+static char *find_home_dir(void){
+ char *home_dir = NULL;
+
+#if !defined(_WIN32) && !defined(WIN32) && !defined(__MACOS__)
+ struct passwd *pwent;
+ uid_t uid = getuid();
+ if( (pwent=getpwuid(uid)) != NULL) {
+ home_dir = pwent->pw_dir;
+ }
+#endif
+
+#ifdef __MACOS__
+ char home_path[_MAX_PATH+1];
+ home_dir = getcwd(home_path, _MAX_PATH);
+#endif
+
+ if (!home_dir) {
+ home_dir = getenv("HOME");
+ if (!home_dir) {
+ home_dir = getenv("HOMEPATH"); /* Windows? */
+ }
+ }
+
+#if defined(_WIN32) || defined(WIN32)
+ if (!home_dir) {
+ home_dir = "c:";
+ }
+#endif
+
+ if( home_dir ){
+ char *z = malloc( strlen(home_dir)+1 );
+ if( z ) strcpy(z, home_dir);
+ home_dir = z;
+ }
+
+ return home_dir;
+}
+
+/*
+** Read input from the file given by sqliterc_override, or, if that
+** parameter is NULL, take input from ~/.sqliterc.
+*/
+static void process_sqliterc(
+ struct callback_data *p, /* Configuration data */
+ const char *sqliterc_override /* Name of config file. NULL to use default */
+){
+ char *home_dir = NULL;
+ const char *sqliterc = sqliterc_override;
+ char *zBuf;
+ FILE *in = NULL;
+
+ if (sqliterc == NULL) {
+ home_dir = find_home_dir();
+ if( home_dir==0 ){
+ fprintf(stderr,"%s: cannot locate your home directory!\n", Argv0);
+ return;
+ }
+ zBuf = malloc(strlen(home_dir) + 15);
+ if( zBuf==0 ){
+ fprintf(stderr,"%s: out of memory!\n", Argv0);
+ exit(1);
+ }
+ sprintf(zBuf,"%s/.sqliterc",home_dir);
+ free(home_dir);
+ sqliterc = (const char*)zBuf;
+ }
+ in = fopen(sqliterc,"rb");
+ if( in ){
+ if( isatty(fileno(stdout)) ){
+ printf("Loading resources from %s\n",sqliterc);
+ }
+ process_input(p,in);
+ fclose(in);
+ }
+ return;
+}
+
+/*
+** Show available command line options
+*/
+static const char zOptions[] =
+ " -init filename read/process named file\n"
+ " -echo print commands before execution\n"
+ " -[no]header turn headers on or off\n"
+ " -column set output mode to 'column'\n"
+ " -html set output mode to HTML\n"
+#ifdef SQLITE_HAS_CODEC
+ " -key KEY encryption key\n"
+#endif
+ " -line set output mode to 'line'\n"
+ " -list set output mode to 'list'\n"
+ " -separator 'x' set output field separator (|)\n"
+ " -nullvalue 'text' set text string for NULL values\n"
+ " -version show SQLite version\n"
+ " -help show this text, also show dot-commands\n"
+;
+static void usage(int showDetail){
+ fprintf(stderr, "Usage: %s [OPTIONS] FILENAME [SQL]\n", Argv0);
+ if( showDetail ){
+ fprintf(stderr, "Options are:\n%s", zOptions);
+ }else{
+ fprintf(stderr, "Use the -help option for additional information\n");
+ }
+ exit(1);
+}
+
+/*
+** Initialize the state information in data
+*/
+void main_init(struct callback_data *data) {
+ memset(data, 0, sizeof(*data));
+ data->mode = MODE_List;
+ strcpy(data->separator,"|");
+ data->showHeader = 0;
+ strcpy(mainPrompt,"sqlite> ");
+ strcpy(continuePrompt," ...> ");
+}
+
+int main(int argc, char **argv){
+ char *zErrMsg = 0;
+ struct callback_data data;
+ const char *zInitFile = 0;
+ char *zFirstCmd = 0;
+ int i;
+ extern int sqliteOsFileExists(const char*);
+
+ sqlite_temp_directory = "/etc/svc/volatile"; /* SUNW addition */
+
+#ifdef __MACOS__
+ argc = ccommand(&argv);
+#endif
+
+ Argv0 = argv[0];
+ main_init(&data);
+
+ /* Make sure we have a valid signal handler early, before anything
+ ** else is done.
+ */
+#ifdef SIGINT
+ signal(SIGINT, interrupt_handler);
+#endif
+
+ /* Do an initial pass through the command-line arguments to locate
+ ** the name of the database file, the name of the initialization file,
+ ** and the first command to execute.
+ */
+ for(i=1; i<argc-1; i++){
+ if( argv[i][0]!='-' ) break;
+ if( strcmp(argv[i],"-separator")==0 || strcmp(argv[i],"-nullvalue")==0 ){
+ i++;
+ }else if( strcmp(argv[i],"-init")==0 ){
+ i++;
+ zInitFile = argv[i];
+ }else if( strcmp(argv[i],"-key")==0 ){
+ i++;
+ data.zKey = sqlite_mprintf("%s",argv[i]);
+ }
+ }
+ if( i<argc ){
+ data.zDbFilename = argv[i++];
+ }else{
+ data.zDbFilename = ":memory:";
+ }
+ if( i<argc ){
+ zFirstCmd = argv[i++];
+ }
+ data.out = stdout;
+
+ /* Go ahead and open the database file if it already exists. If the
+ ** file does not exist, delay opening it. This prevents empty database
+ ** files from being created if a user mistypes the database name argument
+ ** to the sqlite command-line tool.
+ */
+ if( sqliteOsFileExists(data.zDbFilename) ){
+ open_db(&data);
+ }
+
+ /* Process the initialization file if there is one. If no -init option
+ ** is given on the command line, look for a file named ~/.sqliterc and
+ ** try to process it.
+ */
+ process_sqliterc(&data,zInitFile);
+
+ /* Make a second pass through the command-line arguments and set
+ ** options. This second pass is delayed until after the initialization
+ ** file is processed so that the command-line arguments will override
+ ** settings in the initialization file.
+ */
+ for(i=1; i<argc && argv[i][0]=='-'; i++){
+ char *z = argv[i];
+ if( strcmp(z,"-init")==0 || strcmp(z,"-key")==0 ){
+ i++;
+ }else if( strcmp(z,"-html")==0 ){
+ data.mode = MODE_Html;
+ }else if( strcmp(z,"-list")==0 ){
+ data.mode = MODE_List;
+ }else if( strcmp(z,"-line")==0 ){
+ data.mode = MODE_Line;
+ }else if( strcmp(z,"-column")==0 ){
+ data.mode = MODE_Column;
+ }else if( strcmp(z,"-separator")==0 ){
+ i++;
+ sprintf(data.separator,"%.*s",(int)sizeof(data.separator)-1,argv[i]);
+ }else if( strcmp(z,"-nullvalue")==0 ){
+ i++;
+ sprintf(data.nullvalue,"%.*s",(int)sizeof(data.nullvalue)-1,argv[i]);
+ }else if( strcmp(z,"-header")==0 ){
+ data.showHeader = 1;
+ }else if( strcmp(z,"-noheader")==0 ){
+ data.showHeader = 0;
+ }else if( strcmp(z,"-echo")==0 ){
+ data.echoOn = 1;
+ }else if( strcmp(z,"-version")==0 ){
+ printf("%s\n", sqlite_version);
+ return 1;
+ }else if( strcmp(z,"-help")==0 ){
+ usage(1);
+ }else{
+ fprintf(stderr,"%s: unknown option: %s\n", Argv0, z);
+ fprintf(stderr,"Use -help for a list of options.\n");
+ return 1;
+ }
+ }
+
+ if( zFirstCmd ){
+ /* Run just the command that follows the database name
+ */
+ if( zFirstCmd[0]=='.' ){
+ do_meta_command(zFirstCmd, &data);
+ exit(0);
+ }else{
+ int rc;
+ open_db(&data);
+ rc = sqlite_exec(data.db, zFirstCmd, callback, &data, &zErrMsg);
+ if( rc!=0 && zErrMsg!=0 ){
+ fprintf(stderr,"SQL error: %s\n", zErrMsg);
+ exit(1);
+ }
+ }
+ }else{
+ /* Run commands received from standard input
+ */
+ if( isatty(fileno(stdout)) && isatty(fileno(stdin)) ){
+ char *zHome;
+ char *zHistory = 0;
+ printf(
+ "SQLite version %s\n"
+ "Enter \".help\" for instructions\n",
+ sqlite_version
+ );
+ zHome = find_home_dir();
+ if( zHome && (zHistory = malloc(strlen(zHome)+20))!=0 ){
+ sprintf(zHistory,"%s/.sqlite_history", zHome);
+ }
+ if( zHistory ) read_history(zHistory);
+ process_input(&data, 0);
+ if( zHistory ){
+ stifle_history(100);
+ write_history(zHistory);
+ }
+ }else{
+ process_input(&data, stdin);
+ }
+ }
+ set_table_name(&data, 0);
+ if( db ) sqlite_close(db);
+ return 0;
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/src/sqlite.h.in b/usr/src/cmd/svc/configd/sqlite/src/sqlite.h.in
new file mode 100644
index 0000000000..d7ab189b43
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/sqlite.h.in
@@ -0,0 +1,871 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This header file defines the interface that the SQLite library
+** presents to client programs.
+**
+** @(#) $Id: sqlite.h.in,v 1.60 2004/03/14 22:12:35 drh Exp $
+*/
+#ifndef _SQLITE_H_
+#define _SQLITE_H_
+#include <stdarg.h> /* Needed for the definition of va_list */
+
+/*
+** Make sure we can call this stuff from C++.
+*/
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+** The version of the SQLite library.
+*/
+#define SQLITE_VERSION "--VERS--"
+
+/*
+** The version string is also compiled into the library so that a program
+** can check to make sure that the lib*.a file and the *.h file are from
+** the same version.
+*/
+extern const char sqlite_version[];
+
+/*
+** The SQLITE_UTF8 macro is defined if the library expects to see
+** UTF-8 encoded data. The SQLITE_ISO8859 macro is defined if the
+** iso8859 encoding should be used.
+*/
+#define SQLITE_--ENCODING-- 1
+
+/*
+** The following constant holds one of two strings, "UTF-8" or "iso8859",
+** depending on which character encoding the SQLite library expects to
+** see. The character encoding makes a difference for the LIKE and GLOB
+** operators and for the LENGTH() and SUBSTR() functions.
+*/
+extern const char sqlite_encoding[];
+
+/*
+** Each open sqlite database is represented by an instance of the
+** following opaque structure.
+*/
+typedef struct sqlite sqlite;
+
+/*
+** A function to open a new sqlite database.
+**
+** If the database does not exist and mode indicates write
+** permission, then a new database is created. If the database
+** does not exist and mode does not indicate write permission,
+** then the open fails, an error message is generated (if errmsg!=0)
+** and the function returns 0.
+**
+** If mode does not indicate user write permission, then the
+** database is opened read-only.
+**
+** The Truth: As currently implemented, all databases are opened
+** for writing all the time. Maybe someday we will provide the
+** ability to open a database readonly. The mode parameter is
+** provided in anticipation of that enhancement.
+*/
+sqlite *sqlite_open(const char *filename, int mode, char **errmsg);
+
+/*
+** A function to close the database.
+**
+** Call this function with a pointer to a structure that was previously
+** returned from sqlite_open() and the corresponding database will be closed.
+*/
+void sqlite_close(sqlite *);
+
+/*
+** The type for a callback function.
+*/
+typedef int (*sqlite_callback)(void*,int,char**, char**);
+
+/*
+** A function that executes one or more statements of SQL.
+**
+** If one or more of the SQL statements are queries, then
+** the callback function specified by the 3rd parameter is
+** invoked once for each row of the query result. This callback
+** should normally return 0. If the callback returns a non-zero
+** value then the query is aborted, all subsequent SQL statements
+** are skipped and the sqlite_exec() function returns SQLITE_ABORT.
+**
+** The 4th parameter is an arbitrary pointer that is passed
+** to the callback function as its first parameter.
+**
+** The 2nd parameter to the callback function is the number of
+** columns in the query result. The 3rd parameter to the callback
+** is an array of strings holding the values for each column.
+** The 4th parameter to the callback is an array of strings holding
+** the names of each column.
+**
+** The callback function may be NULL, even for queries. A NULL
+** callback is not an error. It just means that no callback
+** will be invoked.
+**
+** If an error occurs while parsing or evaluating the SQL (but
+** not while executing the callback) then an appropriate error
+** message is written into memory obtained from malloc() and
+** *errmsg is made to point to that message. The calling function
+** is responsible for freeing the memory that holds the error
+** message. Use sqlite_freemem() for this. If errmsg==NULL,
+** then no error message is ever written.
+**
+** The return value is SQLITE_OK if there are no errors and
+** some other return code if there is an error. The particular
+** return value depends on the type of error.
+**
+** If the query could not be executed because a database file is
+** locked or busy, then this function returns SQLITE_BUSY. (This
+** behavior can be modified somewhat using the sqlite_busy_handler()
+** and sqlite_busy_timeout() functions below.)
+*/
+int sqlite_exec(
+ sqlite*, /* An open database */
+ const char *sql, /* SQL to be executed */
+ sqlite_callback, /* Callback function */
+ void *, /* 1st argument to callback function */
+ char **errmsg /* Error msg written here */
+);
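+
+/*
+** A minimal usage sketch (not part of the interface proper): open a
+** database, run a query through a callback, and close.  The file name
+** "test.db" and table "t1" are illustrative assumptions.  The callback
+** returns 0 so the query runs to completion; a non-zero return would
+** abort it as described above, and the error message is released with
+** sqlite_freemem() as described above.
+**
+**    static int print_row(void *pArg, int nCol, char **azVal, char **azColName){
+**      int i;
+**      for(i=0; i<nCol; i++){
+**        printf("%s = %s\n", azColName[i], azVal[i] ? azVal[i] : "NULL");
+**      }
+**      return 0;
+**    }
+**
+**    char *zErr = 0;
+**    sqlite *db = sqlite_open("test.db", 0666, &zErr);
+**    if( db ){
+**      if( sqlite_exec(db, "SELECT * FROM t1", print_row, 0, &zErr)!=SQLITE_OK ){
+**        fprintf(stderr, "SQL error: %s\n", zErr);
+**        sqlite_freemem(zErr);
+**      }
+**      sqlite_close(db);
+**    }
+*/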
+
+/*
+** Return values for sqlite_exec() and sqlite_step()
+*/
+#define SQLITE_OK 0 /* Successful result */
+#define SQLITE_ERROR 1 /* SQL error or missing database */
+#define SQLITE_INTERNAL 2 /* An internal logic error in SQLite */
+#define SQLITE_PERM 3 /* Access permission denied */
+#define SQLITE_ABORT 4 /* Callback routine requested an abort */
+#define SQLITE_BUSY 5 /* The database file is locked */
+#define SQLITE_LOCKED 6 /* A table in the database is locked */
+#define SQLITE_NOMEM 7 /* A malloc() failed */
+#define SQLITE_READONLY 8 /* Attempt to write a readonly database */
+#define SQLITE_INTERRUPT 9 /* Operation terminated by sqlite_interrupt() */
+#define SQLITE_IOERR 10 /* Some kind of disk I/O error occurred */
+#define SQLITE_CORRUPT 11 /* The database disk image is malformed */
+#define SQLITE_NOTFOUND 12 /* (Internal Only) Table or record not found */
+#define SQLITE_FULL 13 /* Insertion failed because database is full */
+#define SQLITE_CANTOPEN 14 /* Unable to open the database file */
+#define SQLITE_PROTOCOL 15 /* Database lock protocol error */
+#define SQLITE_EMPTY 16 /* (Internal Only) Database table is empty */
+#define SQLITE_SCHEMA 17 /* The database schema changed */
+#define SQLITE_TOOBIG 18 /* Too much data for one row of a table */
+#define SQLITE_CONSTRAINT 19 /* Abort due to constraint violation */
+#define SQLITE_MISMATCH 20 /* Data type mismatch */
+#define SQLITE_MISUSE 21 /* Library used incorrectly */
+#define SQLITE_NOLFS 22 /* Uses OS features not supported on host */
+#define SQLITE_AUTH 23 /* Authorization denied */
+#define SQLITE_FORMAT 24 /* Auxiliary database format error */
+#define SQLITE_RANGE 25 /* 2nd parameter to sqlite_bind out of range */
+#define SQLITE_NOTADB 26 /* File opened that is not a database file */
+#define SQLITE_ROW 100 /* sqlite_step() has another row ready */
+#define SQLITE_DONE 101 /* sqlite_step() has finished executing */
+
+/*
+** Each entry in an SQLite table has a unique integer key. (The key is
+** the value of the INTEGER PRIMARY KEY column if there is such a column,
+** otherwise the key is generated at random. The unique key is always
+** available as the ROWID, OID, or _ROWID_ column.) The following routine
+** returns the integer key of the most recent insert in the database.
+**
+** This function is similar to the mysql_insert_id() function from MySQL.
+*/
+int sqlite_last_insert_rowid(sqlite*);
+
+/*
+** This function returns the number of database rows that were changed
+** (or inserted or deleted) by the most recently called sqlite_exec().
+**
+** All changes are counted, even if they were later undone by a
+** ROLLBACK or ABORT. Except, changes associated with creating and
+** dropping tables are not counted.
+**
+** If a callback invokes sqlite_exec() recursively, then the changes
+** in the inner, recursive call are counted together with the changes
+** in the outer call.
+**
+** SQLite implements the command "DELETE FROM table" without a WHERE clause
+** by dropping and recreating the table. (This is much faster than going
+** through and deleting individual elements from the table.) Because of
+** this optimization, the change count for "DELETE FROM table" will be
+** zero regardless of the number of elements that were originally in the
+** table. To get an accurate count of the number of rows deleted, use
+** "DELETE FROM table WHERE 1" instead.
+*/
+int sqlite_changes(sqlite*);
+
+/*
+** This function returns the number of database rows that were changed
+** by the last INSERT, UPDATE, or DELETE statement executed by sqlite_exec(),
+** or by the last VM to run to completion. The change count is not updated
+** by SQL statements other than INSERT, UPDATE or DELETE.
+**
+** Changes are counted, even if they are later undone by a ROLLBACK or
+** ABORT. Changes associated with trigger programs that execute as a
+** result of the INSERT, UPDATE, or DELETE statement are not counted.
+**
+** If a callback invokes sqlite_exec() recursively, then the changes
+** in the inner, recursive call are counted together with the changes
+** in the outer call.
+**
+** SQLite implements the command "DELETE FROM table" without a WHERE clause
+** by dropping and recreating the table. (This is much faster than going
+** through and deleting individual elements from the table.) Because of
+** this optimization, the change count for "DELETE FROM table" will be
+** zero regardless of the number of elements that were originally in the
+** table. To get an accurate count of the number of rows deleted, use
+** "DELETE FROM table WHERE 1" instead.
+**
+******* THIS IS AN EXPERIMENTAL API AND IS SUBJECT TO CHANGE ******
+*/
+int sqlite_last_statement_changes(sqlite*);
+
+/* If the parameter to this routine is one of the return value constants
+** defined above, then this routine returns a constant text string which
+** describes (in English) the meaning of the return value.
+*/
+const char *sqlite_error_string(int);
+#define sqliteErrStr sqlite_error_string /* Legacy. Do not use in new code. */
+
+/* This function causes any pending database operation to abort and
+** return at its earliest opportunity. This routine is typically
+** called in response to a user action such as pressing "Cancel"
+** or Ctrl-C where the user wants a long query operation to halt
+** immediately.
+*/
+void sqlite_interrupt(sqlite*);
+
+
+/* This function returns true if the given input string comprises
+** one or more complete SQL statements.
+**
+** The algorithm is simple. If the last token other than spaces
+** and comments is a semicolon, then return true.  Otherwise return
+** false.
+*/
+int sqlite_complete(const char *sql);
+
+/*
+** This routine identifies a callback function that is invoked
+** whenever an attempt is made to open a database table that is
+** currently locked by another process or thread. If the busy callback
+** is NULL, then sqlite_exec() returns SQLITE_BUSY immediately if
+** it finds a locked table. If the busy callback is not NULL, then
+** sqlite_exec() invokes the callback with three arguments. The
+** second argument is the name of the locked table and the third
+** argument is the number of times the table has been busy. If the
+** busy callback returns 0, then sqlite_exec() immediately returns
+** SQLITE_BUSY. If the callback returns non-zero, then sqlite_exec()
+** tries to open the table again and the cycle repeats.
+**
+** The default busy callback is NULL.
+**
+** Sqlite is re-entrant, so the busy handler may start a new query.
+** (It is not clear why anyone would ever want to do this, but it
+** is allowed, in theory.) But the busy handler may not close the
+** database. Closing the database from a busy handler will delete
+** data structures out from under the executing query and will
+** probably result in a coredump.
+*/
+void sqlite_busy_handler(sqlite*, int(*)(void*,const char*,int), void*);
+
+/*
+** This routine sets a busy handler that sleeps for a while when a
+** table is locked. The handler will sleep multiple times until
+** at least "ms" milliseconds of sleeping have been done.  After
+** "ms" milliseconds of sleeping, the handler returns 0 which
+** causes sqlite_exec() to return SQLITE_BUSY.
+**
+** Calling this routine with an argument less than or equal to zero
+** turns off all busy handlers.
+*/
+void sqlite_busy_timeout(sqlite*, int ms);
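+
+/*
+** For example, to retry locked tables for up to two seconds before
+** sqlite_exec() gives up and returns SQLITE_BUSY (the 2000ms figure is
+** just an illustrative choice):
+**
+**    sqlite_busy_timeout(db, 2000);
+*/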
+
+/*
+** This next routine is really just a wrapper around sqlite_exec().
+** Instead of invoking a user-supplied callback for each row of the
+** result, this routine remembers each row of the result in memory
+** obtained from malloc(), then returns all of the result after the
+** query has finished.
+**
+** As an example, suppose the query result were this table:
+**
+** Name | Age
+** -----------------------
+** Alice | 43
+** Bob | 28
+** Cindy | 21
+**
+** If the 3rd argument were &azResult then after the function returns
+** azResult will contain the following data:
+**
+** azResult[0] = "Name";
+** azResult[1] = "Age";
+** azResult[2] = "Alice";
+** azResult[3] = "43";
+** azResult[4] = "Bob";
+** azResult[5] = "28";
+** azResult[6] = "Cindy";
+** azResult[7] = "21";
+**
+** Notice that there is an extra row of data containing the column
+** headers. But the *nrow return value is still 3. *ncolumn is
+** set to 2. In general, the number of values inserted into azResult
+** will be ((*nrow) + 1)*(*ncolumn).
+**
+** After the calling function has finished using the result, it should
+** pass the result data pointer to sqlite_free_table() in order to
+** release the memory that was malloc-ed. Because of the way the
+** malloc() happens, the calling function must not try to call
+** free() directly.  Only sqlite_free_table() is able to release
+** the memory properly and safely.
+**
+** The return value of this routine is the same as from sqlite_exec().
+*/
+int sqlite_get_table(
+ sqlite*, /* An open database */
+ const char *sql, /* SQL to be executed */
+ char ***resultp, /* Result written to a char *[] that this points to */
+ int *nrow, /* Number of result rows written here */
+ int *ncolumn, /* Number of result columns written here */
+ char **errmsg /* Error msg written here */
+);
+
+/*
+** Call this routine to free the memory that sqlite_get_table() allocated.
+*/
+void sqlite_free_table(char **result);
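+
+/*
+** A sketch of the usual sqlite_get_table() calling pattern, reusing the
+** Name/Age example above (the table name "people" is an assumption).
+** Row 0 of azResult holds the column headers, so the data for row i and
+** column j lives at azResult[i*nCol + j] for i in 1..nRow:
+**
+**    char **azResult;
+**    char *zErr = 0;
+**    int nRow, nCol, i, j;
+**    if( sqlite_get_table(db, "SELECT Name, Age FROM people",
+**                         &azResult, &nRow, &nCol, &zErr)==SQLITE_OK ){
+**      for(i=1; i<=nRow; i++){
+**        for(j=0; j<nCol; j++){
+**          printf("%s=%s  ", azResult[j], azResult[i*nCol+j]);
+**        }
+**        printf("\n");
+**      }
+**      sqlite_free_table(azResult);
+**    }else{
+**      fprintf(stderr, "error: %s\n", zErr);
+**      sqlite_freemem(zErr);
+**    }
+*/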
+
+/*
+** The following routines are wrappers around sqlite_exec() and
+** sqlite_get_table(). The only difference between the routines that
+** follow and the originals is that the second argument to the
+** routines that follow is really a printf()-style format
+** string describing the SQL to be executed. Arguments to the format
+** string appear at the end of the argument list.
+**
+** All of the usual printf formatting options apply. In addition, there
+** is a "%q" option. %q works like %s in that it substitutes a null-terminated
+** string from the argument list. But %q also doubles every '\'' character.
+** %q is designed for use inside a string literal. By doubling each '\''
+** character it escapes that character and allows it to be inserted into
+** the string.
+**
+** For example, suppose some string variable contains text as follows:
+**
+** char *zText = "It's a happy day!";
+**
+** We can use this text in an SQL statement as follows:
+**
+**     sqlite_exec_printf(db, "INSERT INTO table1 VALUES('%q')",
+** callback1, 0, 0, zText);
+**
+** Because the %q format string is used, the '\'' character in zText
+** is escaped and the SQL generated is as follows:
+**
+** INSERT INTO table1 VALUES('It''s a happy day!')
+**
+** This is correct. Had we used %s instead of %q, the generated SQL
+** would have looked like this:
+**
+** INSERT INTO table1 VALUES('It's a happy day!');
+**
+** This second example is an SQL syntax error. As a general rule you
+** should always use %q instead of %s when inserting text into a string
+** literal.
+*/
+int sqlite_exec_printf(
+ sqlite*, /* An open database */
+ const char *sqlFormat, /* printf-style format string for the SQL */
+ sqlite_callback, /* Callback function */
+ void *, /* 1st argument to callback function */
+ char **errmsg, /* Error msg written here */
+ ... /* Arguments to the format string. */
+);
+int sqlite_exec_vprintf(
+ sqlite*, /* An open database */
+ const char *sqlFormat, /* printf-style format string for the SQL */
+ sqlite_callback, /* Callback function */
+ void *, /* 1st argument to callback function */
+ char **errmsg, /* Error msg written here */
+ va_list ap /* Arguments to the format string. */
+);
+int sqlite_get_table_printf(
+ sqlite*, /* An open database */
+ const char *sqlFormat, /* printf-style format string for the SQL */
+ char ***resultp, /* Result written to a char *[] that this points to */
+ int *nrow, /* Number of result rows written here */
+ int *ncolumn, /* Number of result columns written here */
+ char **errmsg, /* Error msg written here */
+ ... /* Arguments to the format string */
+);
+int sqlite_get_table_vprintf(
+ sqlite*, /* An open database */
+ const char *sqlFormat, /* printf-style format string for the SQL */
+ char ***resultp, /* Result written to a char *[] that this points to */
+ int *nrow, /* Number of result rows written here */
+ int *ncolumn, /* Number of result columns written here */
+ char **errmsg, /* Error msg written here */
+ va_list ap /* Arguments to the format string */
+);
+char *sqlite_mprintf(const char*,...);
+char *sqlite_vmprintf(const char*, va_list);
+
+/*
+** Windows systems should call this routine to free memory that
+** is returned in the errmsg parameter of sqlite_open() when
+** SQLite is a DLL. For some reason, it does not work to call free()
+** directly.
+*/
+void sqlite_freemem(void *p);
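+
+/*
+** sqlite_mprintf() is also convenient for building SQL with %q.  The
+** returned string is obtained from malloc(); in this sketch it is
+** released with sqlite_freemem(), assumed here to pair with
+** sqlite_mprintf().  The table name "t1" is illustrative and zText is
+** the string variable from the %q example above:
+**
+**    char *zSql = sqlite_mprintf("INSERT INTO t1 VALUES('%q')", zText);
+**    sqlite_exec(db, zSql, 0, 0, 0);
+**    sqlite_freemem(zSql);
+*/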
+
+/*
+** Windows systems need functions to call to return the sqlite_version
+** and sqlite_encoding strings.
+*/
+const char *sqlite_libversion(void);
+const char *sqlite_libencoding(void);
+
+/*
+** A pointer to the following structure is used to communicate with
+** the implementations of user-defined functions.
+*/
+typedef struct sqlite_func sqlite_func;
+
+/*
+** Use the following routines to create new user-defined functions. See
+** the documentation for details.
+*/
+int sqlite_create_function(
+ sqlite*, /* Database where the new function is registered */
+ const char *zName, /* Name of the new function */
+ int nArg, /* Number of arguments. -1 means any number */
+ void (*xFunc)(sqlite_func*,int,const char**), /* C code to implement */
+ void *pUserData /* Available via the sqlite_user_data() call */
+);
+int sqlite_create_aggregate(
+ sqlite*, /* Database where the new function is registered */
+ const char *zName, /* Name of the function */
+ int nArg, /* Number of arguments */
+ void (*xStep)(sqlite_func*,int,const char**), /* Called for each row */
+ void (*xFinalize)(sqlite_func*), /* Called once to get final result */
+ void *pUserData /* Available via the sqlite_user_data() call */
+);
+
+/*
+** Use the following routine to define the datatype returned by a
+** user-defined function. The second argument can be one of the
+** constants SQLITE_NUMERIC, SQLITE_TEXT, or SQLITE_ARGS, or it
+** can be an integer greater than or equal to zero. When the datatype
+** parameter is non-negative, the type of the result will be the
+** same as the datatype-th argument. If datatype==SQLITE_NUMERIC
+** then the result is always numeric. If datatype==SQLITE_TEXT then
+** the result is always text. If datatype==SQLITE_ARGS then the result
+** is numeric if any argument is numeric and is text otherwise.
+*/
+int sqlite_function_type(
+  sqlite *db, /* The database where the function is registered */
+ const char *zName, /* Name of the function */
+ int datatype /* The datatype for this function */
+);
+#define SQLITE_NUMERIC (-1)
+#define SQLITE_TEXT (-2)
+#define SQLITE_ARGS (-3)
+
+/*
+** The user function implementations call one of the following four routines
+** in order to return their results. The first parameter to each of these
+** routines is a copy of the first argument to xFunc() or xFinalize().
+** The second parameter to these routines is the result to be returned.
+** A NULL can be passed as the second parameter to sqlite_set_result_string()
+** in order to return a NULL result.
+**
+** The 3rd argument to _string and _error is the number of characters to
+** take from the string. If this argument is negative, then all characters
+** up to and including the first '\000' are used.
+**
+** The sqlite_set_result_string() function allocates a buffer to hold the
+** result and returns a pointer to this buffer. The calling routine
+** (that is, the implementation of a user function) can alter the content
+** of this buffer if desired.
+*/
+char *sqlite_set_result_string(sqlite_func*,const char*,int);
+void sqlite_set_result_int(sqlite_func*,int);
+void sqlite_set_result_double(sqlite_func*,double);
+void sqlite_set_result_error(sqlite_func*,const char*,int);
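+
+/*
+** A sketch of a trivial user-defined scalar function built from these
+** routines.  The function name "twice" is an illustrative choice; it
+** returns NULL for a NULL argument and otherwise doubles the integer
+** value of its single argument:
+**
+**    static void twiceFunc(sqlite_func *ctx, int argc, const char **argv){
+**      if( argc<1 || argv[0]==0 ){
+**        sqlite_set_result_string(ctx, 0, 0);
+**      }else{
+**        sqlite_set_result_int(ctx, 2*atoi(argv[0]));
+**      }
+**    }
+**
+**    sqlite_create_function(db, "twice", 1, twiceFunc, 0);
+**    sqlite_function_type(db, "twice", SQLITE_NUMERIC);
+*/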
+
+/*
+** The pUserData parameter to the sqlite_create_function() and
+** sqlite_create_aggregate() routines used to register user functions
+** is available to the implementation of the function using this
+** call.
+*/
+void *sqlite_user_data(sqlite_func*);
+
+/*
+** Aggregate functions use the following routine to allocate
+** a structure for storing their state. The first time this routine
+** is called for a particular aggregate, a new structure of size nBytes
+** is allocated, zeroed, and returned. On subsequent calls (for the
+** same aggregate instance) the same buffer is returned. The implementation
+** of the aggregate can use the returned buffer to accumulate data.
+**
+** The buffer allocated is freed automatically by SQLite.
+*/
+void *sqlite_aggregate_context(sqlite_func*, int nBytes);
+
+/*
+** The next routine returns the number of calls to xStep for a particular
+** aggregate function instance. The current call to xStep counts so this
+** routine always returns at least 1.
+*/
+int sqlite_aggregate_count(sqlite_func*);
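+
+/*
+** A sketch of a small aggregate built on sqlite_aggregate_context(): a
+** COUNT-like function (named "mycount" purely for illustration) that
+** counts the rows fed to its xStep callback:
+**
+**    static void countStep(sqlite_func *ctx, int argc, const char **argv){
+**      int *pCount = (int*)sqlite_aggregate_context(ctx, sizeof(int));
+**      if( pCount ) (*pCount)++;
+**    }
+**    static void countFinalize(sqlite_func *ctx){
+**      int *pCount = (int*)sqlite_aggregate_context(ctx, sizeof(int));
+**      sqlite_set_result_int(ctx, pCount ? *pCount : 0);
+**    }
+**
+**    sqlite_create_aggregate(db, "mycount", 1, countStep, countFinalize, 0);
+*/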
+
+/*
+** This routine registers a callback with the SQLite library. The
+** callback is invoked (at compile-time, not at run-time) for each
+** attempt to access a column of a table in the database. The callback
+** returns SQLITE_OK if access is allowed, SQLITE_DENY if the entire
+** SQL statement should be aborted with an error and SQLITE_IGNORE
+** if the column should be treated as a NULL value.
+*/
+int sqlite_set_authorizer(
+ sqlite*,
+ int (*xAuth)(void*,int,const char*,const char*,const char*,const char*),
+ void *pUserData
+);
+
+/*
+** The second parameter to the access authorization function above will
+** be one of the values below. These values signify what kind of operation
+** is to be authorized. The 3rd and 4th parameters to the authorization
+** function will be parameters or NULL depending on which of the following
+** codes is used as the second parameter. The 5th parameter is the name
+** of the database ("main", "temp", etc.) if applicable. The 6th parameter
+** is the name of the inner-most trigger or view that is responsible for
+** the access attempt or NULL if this access attempt is directly from
+** input SQL code.
+**
+** Arg-3 Arg-4
+*/
+#define SQLITE_COPY 0 /* Table Name File Name */
+#define SQLITE_CREATE_INDEX 1 /* Index Name Table Name */
+#define SQLITE_CREATE_TABLE 2 /* Table Name NULL */
+#define SQLITE_CREATE_TEMP_INDEX 3 /* Index Name Table Name */
+#define SQLITE_CREATE_TEMP_TABLE 4 /* Table Name NULL */
+#define SQLITE_CREATE_TEMP_TRIGGER 5 /* Trigger Name Table Name */
+#define SQLITE_CREATE_TEMP_VIEW 6 /* View Name NULL */
+#define SQLITE_CREATE_TRIGGER 7 /* Trigger Name Table Name */
+#define SQLITE_CREATE_VIEW 8 /* View Name NULL */
+#define SQLITE_DELETE 9 /* Table Name NULL */
+#define SQLITE_DROP_INDEX 10 /* Index Name Table Name */
+#define SQLITE_DROP_TABLE 11 /* Table Name NULL */
+#define SQLITE_DROP_TEMP_INDEX 12 /* Index Name Table Name */
+#define SQLITE_DROP_TEMP_TABLE 13 /* Table Name NULL */
+#define SQLITE_DROP_TEMP_TRIGGER 14 /* Trigger Name Table Name */
+#define SQLITE_DROP_TEMP_VIEW 15 /* View Name NULL */
+#define SQLITE_DROP_TRIGGER 16 /* Trigger Name Table Name */
+#define SQLITE_DROP_VIEW 17 /* View Name NULL */
+#define SQLITE_INSERT 18 /* Table Name NULL */
+#define SQLITE_PRAGMA 19 /* Pragma Name 1st arg or NULL */
+#define SQLITE_READ 20 /* Table Name Column Name */
+#define SQLITE_SELECT 21 /* NULL NULL */
+#define SQLITE_TRANSACTION 22 /* NULL NULL */
+#define SQLITE_UPDATE 23 /* Table Name Column Name */
+#define SQLITE_ATTACH 24 /* Filename NULL */
+#define SQLITE_DETACH 25 /* Database Name NULL */
+
+
+/*
+** The return value of the authorization function should be one of the
+** following constants:
+*/
+/* #define SQLITE_OK 0 // Allow access (This is actually defined above) */
+#define SQLITE_DENY 1 /* Abort the SQL statement with an error */
+#define SQLITE_IGNORE 2 /* Don't allow access, but don't generate an error */
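+
+/*
+** A sketch of an authorizer that hides one column by returning
+** SQLITE_IGNORE for reads of it, so the column comes back as NULL.
+** The "users" table and "password" column names are assumptions made
+** for the example:
+**
+**    static int auth(void *pArg, int code, const char *zArg1,
+**                    const char *zArg2, const char *zDb, const char *zTrig){
+**      if( code==SQLITE_READ && zArg1 && zArg2
+**       && strcmp(zArg1,"users")==0 && strcmp(zArg2,"password")==0 ){
+**        return SQLITE_IGNORE;
+**      }
+**      return SQLITE_OK;
+**    }
+**
+**    sqlite_set_authorizer(db, auth, 0);
+*/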
+
+/*
+** Register a function that is called at every invocation of sqlite_exec()
+** or sqlite_compile(). This function can be used (for example) to generate
+** a log file of all SQL executed against a database.
+*/
+void *sqlite_trace(sqlite*, void(*xTrace)(void*,const char*), void*);
+
+/*** The Callback-Free API
+**
+** The following routines implement a new way to access SQLite that does not
+** involve the use of callbacks.
+**
+** An sqlite_vm is an opaque object that represents a single SQL statement
+** that is ready to be executed.
+*/
+typedef struct sqlite_vm sqlite_vm;
+
+/*
+** To execute an SQLite query without the use of callbacks, you first have
+** to compile the SQL using this routine. The 1st parameter "db" is a pointer
+** to an sqlite object obtained from sqlite_open(). The 2nd parameter
+** "zSql" is the text of the SQL to be compiled. The remaining parameters
+** are all outputs.
+**
+** *pzTail is made to point to the first character past the end of the first
+** SQL statement in zSql. This routine only compiles the first statement
+** in zSql, so *pzTail is left pointing to what remains uncompiled.
+**
+** *ppVm is left pointing to a "virtual machine" that can be used to execute
+** the compiled statement. Or if there is an error, *ppVm may be set to NULL.
+** If the input text contained no SQL (if the input is an empty string or
+** a comment) then *ppVm is set to NULL.
+**
+** If any errors are detected during compilation, an error message is written
+** into space obtained from malloc() and *pzErrMsg is made to point to that
+** error message. The calling routine is responsible for freeing the text
+** of this message when it has finished with it. Use sqlite_freemem() to
+** free the message. pzErrMsg may be NULL in which case no error message
+** will be generated.
+**
+** On success, SQLITE_OK is returned.  Otherwise an error code is returned.
+*/
+int sqlite_compile(
+ sqlite *db, /* The open database */
+ const char *zSql, /* SQL statement to be compiled */
+ const char **pzTail, /* OUT: uncompiled tail of zSql */
+ sqlite_vm **ppVm, /* OUT: the virtual machine to execute zSql */
+ char **pzErrmsg /* OUT: Error message. */
+);
+
+/*
+** After an SQL statement has been compiled, it is handed to this routine
+** to be executed. This routine executes the statement as far as it can
+** go then returns. The return value will be one of SQLITE_DONE,
+** SQLITE_ERROR, SQLITE_BUSY, SQLITE_ROW, or SQLITE_MISUSE.
+**
+** SQLITE_DONE means that the execution of the SQL statement is complete
+** and no errors have occurred.  sqlite_step() should not be called again
+** for the same virtual machine. *pN is set to the number of columns in
+** the result set and *pazColName is set to an array of strings that
+** describe the column names and datatypes. The name of the i-th column
+** is (*pazColName)[i] and the datatype of the i-th column is
+** (*pazColName)[i+*pN]. *pazValue is set to NULL.
+**
+** SQLITE_ERROR means that the virtual machine encountered a run-time
+** error. sqlite_step() should not be called again for the same
+** virtual machine. *pN is set to 0 and *pazColName and *pazValue are set
+** to NULL. Use sqlite_finalize() to obtain the specific error code
+** and the error message text for the error.
+**
+** SQLITE_BUSY means that an attempt to open the database failed because
+** another thread or process is holding a lock. The calling routine
+** can try again to open the database by calling sqlite_step() again.
+** The return code will only be SQLITE_BUSY if no busy handler is registered
+** using the sqlite_busy_handler() or sqlite_busy_timeout() routines. If
+** a busy handler callback has been registered but returns 0, then this
+** routine will return SQLITE_ERROR and sqlite_finalize() will return
+** SQLITE_BUSY when it is called.
+**
+** SQLITE_ROW means that a single row of the result is now available.
+** The data is contained in *pazValue. The value of the i-th column is
+** (*pazValue)[i].  *pN and *pazColName are set as described in SQLITE_DONE.
+** Invoke sqlite_step() again to advance to the next row.
+**
+** SQLITE_MISUSE is returned if sqlite_step() is called incorrectly.
+** For example, if you call sqlite_step() after the virtual machine
+** has halted (after a prior call to sqlite_step() has returned SQLITE_DONE)
+** or if you call sqlite_step() with an incorrectly initialized virtual
+** machine or a virtual machine that has been deleted or that is associated
+** with an sqlite structure that has been closed.
+*/
+int sqlite_step(
+ sqlite_vm *pVm, /* The virtual machine to execute */
+ int *pN, /* OUT: Number of columns in result */
+ const char ***pazValue, /* OUT: Column data */
+ const char ***pazColName /* OUT: Column names and datatypes */
+);
+
+/*
+** This routine is called to delete a virtual machine after it has finished
+** executing. The return value is the result code. SQLITE_OK is returned
+** if the statement executed successfully and some other value is returned if
+** there was any kind of error. If an error occurred and pzErrMsg is not
+** NULL, then an error message is written into memory obtained from malloc()
+** and *pzErrMsg is made to point to that error message. The calling routine
+** should use sqlite_freemem() to delete this message when it has finished
+** with it.
+**
+** This routine can be called at any point during the execution of the
+** virtual machine. If the virtual machine has not completed execution
+** when this routine is called, that is like encountering an error or
+** an interrupt. (See sqlite_interrupt().) Incomplete updates may be
+** rolled back and transactions cancelled, depending on the circumstances,
+** and the result code returned will be SQLITE_ABORT.
+*/
+int sqlite_finalize(sqlite_vm*, char **pzErrMsg);
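+
+/*
+** Putting the callback-free API together, a sketch of the usual
+** compile/step/finalize loop (error handling abbreviated; "t1" is an
+** assumed table name):
+**
+**    sqlite_vm *pVm;
+**    const char *zTail;
+**    const char **azValue, **azColName;
+**    char *zErr = 0;
+**    int nCol, i;
+**
+**    if( sqlite_compile(db, "SELECT * FROM t1", &zTail, &pVm, &zErr)==SQLITE_OK ){
+**      while( sqlite_step(pVm, &nCol, &azValue, &azColName)==SQLITE_ROW ){
+**        for(i=0; i<nCol; i++){
+**          printf("%s=%s  ", azColName[i], azValue[i] ? azValue[i] : "NULL");
+**        }
+**        printf("\n");
+**      }
+**      sqlite_finalize(pVm, &zErr);
+**    }
+**    if( zErr ){ fprintf(stderr, "%s\n", zErr); sqlite_freemem(zErr); }
+*/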
+
+/*
+** This routine deletes the virtual machine, writes any error message to
+** *pzErrMsg and returns an SQLite return code in the same way as the
+** sqlite_finalize() function.
+**
+** Additionally, if ppVm is not NULL, *ppVm is left pointing to a new virtual
+** machine loaded with the compiled version of the original query ready for
+** execution.
+**
+** If sqlite_reset() returns SQLITE_SCHEMA, then *ppVm is set to NULL.
+**
+******* THIS IS AN EXPERIMENTAL API AND IS SUBJECT TO CHANGE ******
+*/
+int sqlite_reset(sqlite_vm*, char **pzErrMsg);
+
+/*
+** The SQL handed to sqlite_compile() may contain variables that
+** are represented in the SQL text by a question mark ('?').  This routine
+** is used to assign values to those variables.
+**
+** The first parameter is a virtual machine obtained from sqlite_compile().
+** The 2nd "idx" parameter determines which variable in the SQL statement
+** to bind the value to. The left most '?' is 1. The 3rd parameter is
+** the value to assign to that variable. The 4th parameter is the number
+** of bytes in the value, including the terminating \000 for strings.
+** Finally, the 5th "copy" parameter is TRUE if SQLite should make its
+** own private copy of this value, or false if the space that the 3rd
+** parameter points to will be unchanging and can be used directly by
+** SQLite.
+**
+** Unbound variables are treated as having a value of NULL. To explicitly
+** set a variable to NULL, call this routine with the 3rd parameter as a
+** NULL pointer.
+**
+** If the 4th "len" parameter is -1, then strlen() is used to find the
+** length.
+**
+** This routine can only be called immediately after sqlite_compile()
+** or sqlite_reset() and before any calls to sqlite_step().
+**
+******* THIS IS AN EXPERIMENTAL API AND IS SUBJECT TO CHANGE ******
+*/
+int sqlite_bind(sqlite_vm*, int idx, const char *value, int len, int copy);
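+
+/*
+** For example, after compiling the (assumed) statement
+** "INSERT INTO t1 VALUES(?)", a string can be attached to the single
+** '?' before stepping; a len of -1 means use strlen(), and a copy of 1
+** asks SQLite to make its own private copy of the value:
+**
+**    sqlite_bind(pVm, 1, "hello", -1, 1);
+*/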
+
+/*
+** This routine configures a callback function - the progress callback - that
+** is invoked periodically during long running calls to sqlite_exec(),
+** sqlite_step() and sqlite_get_table(). An example use for this API is to keep
+** a GUI updated during a large query.
+**
+** The progress callback is invoked once for every N virtual machine opcodes,
+** where N is the second argument to this function. The progress callback
+** itself is identified by the third argument to this function. The fourth
+** argument to this function is a void pointer passed to the progress callback
+** function each time it is invoked.
+**
+** If a call to sqlite_exec(), sqlite_step() or sqlite_get_table() results
+** in less than N opcodes being executed, then the progress callback is not
+** invoked.
+**
+** Calling this routine overwrites any previously installed progress callback.
+** To remove the progress callback altogether, pass NULL as the third
+** argument to this function.
+**
+** If the progress callback returns a result other than 0, then the current
+** query is immediately terminated and any database changes rolled back. If the
+** query was part of a larger transaction, then the transaction is not rolled
+** back and remains active. The sqlite_exec() call returns SQLITE_ABORT.
+**
+******* THIS IS AN EXPERIMENTAL API AND IS SUBJECT TO CHANGE ******
+*/
+void sqlite_progress_handler(sqlite*, int, int(*)(void*), void*);
+
+/*
+** Register a callback function to be invoked whenever a new transaction
+** is committed.  The pArg argument is passed through to the
+** callback.  If the callback function returns non-zero, then the commit
+** is converted into a rollback.
+**
+** If another function was previously registered, its pArg value is returned.
+** Otherwise NULL is returned.
+**
+** Registering a NULL function disables the callback.
+**
+******* THIS IS AN EXPERIMENTAL API AND IS SUBJECT TO CHANGE ******
+*/
+void *sqlite_commit_hook(sqlite*, int(*)(void*), void*);
+
+/*
+** Open an encrypted SQLite database. If pKey==0 or nKey==0, this routine
+** is the same as sqlite_open().
+**
+** The code to implement this API is not available in the public release
+** of SQLite.
+*/
+sqlite *sqlite_open_encrypted(
+ const char *zFilename, /* Name of the encrypted database */
+ const void *pKey, /* Pointer to the key */
+ int nKey, /* Number of bytes in the key */
+ int *pErrcode, /* Write error code here */
+ char **pzErrmsg /* Write error message here */
+);
+
+/*
+** Change the key on an open database. If the current database is not
+** encrypted, this routine will encrypt it. If pNew==0 or nNew==0, the
+** database is decrypted.
+**
+** The code to implement this API is not available in the public release
+** of SQLite.
+*/
+int sqlite_rekey(
+ sqlite *db, /* Database to be rekeyed */
+ const void *pKey, int nKey /* The new key */
+);
+
+/*
+** Encode a binary buffer "in" of size n bytes so that it contains
+** no instances of characters '\'' or '\000'. The output is
+** null-terminated and can be used as a string value in an INSERT
+** or UPDATE statement. Use sqlite_decode_binary() to convert the
+** string back into its original binary.
+**
+** The result is written into a preallocated output buffer "out".
+** "out" must be able to hold at least 2 +(257*n)/254 bytes.
+** In other words, the output will be expanded by as much as 3
+** bytes for every 254 bytes of input plus 2 bytes of fixed overhead.
+** (This is approximately 2 + 1.0118*n or about a 1.2% size increase.)
+**
+** The return value is the number of characters in the encoded
+** string, excluding the "\000" terminator.
+**
+** If out==NULL then no output is generated but the routine still returns
+** the number of characters that would have been generated if out had
+** not been NULL.
+*/
+int sqlite_encode_binary(const unsigned char *in, int n, unsigned char *out);
+
+/*
+** Decode the string "in" into binary data and write it into "out".
+** This routine reverses the encoding created by sqlite_encode_binary().
+** The output will always be a few bytes less than the input. The number
+** of bytes of output is returned. If the input is not a well-formed
+** encoding, -1 is returned.
+**
+** The "in" and "out" parameters may point to the same buffer in order
+** to decode a string in place.
+*/
+int sqlite_decode_binary(const unsigned char *in, unsigned char *out);
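+
+/*
+** A sketch of a round trip through the encoder, sizing the output buffer
+** with the 2 + (257*n)/254 bound given above (plus one byte of slack for
+** integer-division rounding):
+**
+**    unsigned char aIn[200];
+**    unsigned char aEnc[2 + (257*sizeof(aIn))/254 + 1];
+**    unsigned char aOut[sizeof(aIn)];
+**    int nEnc, nOut;
+**
+**    nEnc = sqlite_encode_binary(aIn, sizeof(aIn), aEnc);
+**    nOut = sqlite_decode_binary(aEnc, aOut);
+*/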
+
+#ifdef __cplusplus
+} /* End of the 'extern "C"' block */
+#endif
+
+#endif /* _SQLITE_H_ */
diff --git a/usr/src/cmd/svc/configd/sqlite/src/sqliteInt.h b/usr/src/cmd/svc/configd/sqlite/src/sqliteInt.h
new file mode 100644
index 0000000000..046f008356
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/sqliteInt.h
@@ -0,0 +1,1273 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** Internal interface definitions for SQLite.
+**
+** @(#) $Id: sqliteInt.h,v 1.220.2.1 2004/07/15 13:37:05 drh Exp $
+*/
+#include "config.h"
+#include "sqlite.h"
+#include "hash.h"
+#include "parse.h"
+#include "btree.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+/*
+** The maximum number of in-memory pages to use for the main database
+** table and for temporary tables.
+*/
+#define MAX_PAGES 2000
+#define TEMP_PAGES 500
+
+/*
+** If the following macro is set to 1, then NULL values are considered
+** distinct for the SELECT DISTINCT statement and for UNION or EXCEPT
+** compound queries. No other SQL database engine (among those tested)
+** works this way except for OCELOT. But the SQL92 spec implies that
+** this is how things should work.
+**
+** If the following macro is set to 0, then NULLs are indistinct for
+** SELECT DISTINCT and for UNION.
+*/
+#define NULL_ALWAYS_DISTINCT 0
+
+/*
+** If the following macro is set to 1, then NULL values are considered
+** distinct when determining whether or not two entries are the same
+** in a UNIQUE index. This is the way PostgreSQL, Oracle, DB2, MySQL,
+** OCELOT, and Firebird all work. The SQL92 spec explicitly says this
+** is the way things are supposed to work.
+**
+** If the following macro is set to 0, then NULLs are indistinct for
+** a UNIQUE index. In this mode, you can only have a single NULL entry
+** for a column declared UNIQUE. This is the way Informix and SQL Server
+** work.
+*/
+#define NULL_DISTINCT_FOR_UNIQUE 1
+
+/*
+** The maximum number of attached databases. This must be at least 2
+** in order to support the main database file (0) and the file used to
+** hold temporary tables (1). And it must be less than 256 because
+** an unsigned character is used to store the database index.
+*/
+#define MAX_ATTACHED 10
+
+/*
+** The next macro is used to determine where TEMP tables and indices
+** are stored. Possible values:
+**
+**     0    Always use a temporary file
+** 1 Use a file unless overridden by "PRAGMA temp_store"
+** 2 Use memory unless overridden by "PRAGMA temp_store"
+** 3 Always use memory
+*/
+#ifndef TEMP_STORE
+# define TEMP_STORE 1
+#endif
+
+/*
+** When building SQLite for embedded systems where memory is scarce,
+** you can define one or more of the following macros to omit extra
+** features of the library and thus keep the size of the library to
+** a minimum.
+*/
+/* #define SQLITE_OMIT_AUTHORIZATION 1 */
+/* #define SQLITE_OMIT_INMEMORYDB 1 */
+/* #define SQLITE_OMIT_VACUUM 1 */
+/* #define SQLITE_OMIT_DATETIME_FUNCS 1 */
+/* #define SQLITE_OMIT_PROGRESS_CALLBACK 1 */
+
+/*
+** Integers of known sizes. These typedefs might change for architectures
+** where the sizes vary.  Preprocessor macros are available so that the
+** types can be conveniently redefined at compile-time.  Like this:
+**
+** cc '-DUINTPTR_TYPE=long long int' ...
+*/
+#ifndef UINT32_TYPE
+# define UINT32_TYPE unsigned int
+#endif
+#ifndef UINT16_TYPE
+# define UINT16_TYPE unsigned short int
+#endif
+#ifndef INT16_TYPE
+# define INT16_TYPE short int
+#endif
+#ifndef UINT8_TYPE
+# define UINT8_TYPE unsigned char
+#endif
+#ifndef INT8_TYPE
+# define INT8_TYPE signed char
+#endif
+#ifndef INTPTR_TYPE
+# if SQLITE_PTR_SZ==4
+# define INTPTR_TYPE int
+# else
+# define INTPTR_TYPE long long
+# endif
+#endif
+typedef UINT32_TYPE u32; /* 4-byte unsigned integer */
+typedef UINT16_TYPE u16; /* 2-byte unsigned integer */
+typedef INT16_TYPE i16; /* 2-byte signed integer */
+typedef UINT8_TYPE u8; /* 1-byte unsigned integer */
+typedef UINT8_TYPE i8; /* 1-byte signed integer */
+typedef INTPTR_TYPE ptr; /* Big enough to hold a pointer */
+typedef unsigned INTPTR_TYPE uptr; /* Big enough to hold a pointer */
+
+/*
+** Defer sourcing vdbe.h until after the "u8" typedef is defined.
+*/
+#include "vdbe.h"
+
+/*
+** Most C compilers these days recognize "long double", don't they?
+** Just in case we encounter one that does not, we will create a macro
+** for long double so that it can be easily changed to just "double".
+*/
+#ifndef LONGDOUBLE_TYPE
+# define LONGDOUBLE_TYPE long double
+#endif
+
+/*
+** This macro casts a pointer to an integer. Useful for doing
+** pointer arithmetic.
+*/
+#define Addr(X) ((uptr)X)
+
+/*
+** The maximum number of bytes of data that can be put into a single
+** row of a single table. The upper bound on this limit is 16777215
+** bytes (or 16MB-1). We have arbitrarily set the limit to just 1MB
+** here because the overflow page chain is inefficient for really big
+** records and we want to discourage people from thinking that
+** multi-megabyte records are OK. If your needs are different, you can
+** change this define and recompile to increase or decrease the record
+** size.
+**
+** The 16777198 is computed as follows: 238 bytes of payload on the
+** original pages plus 16448 overflow pages each holding 1020 bytes of
+** data.
+*/
+#define MAX_BYTES_PER_ROW 1048576
+/* #define MAX_BYTES_PER_ROW 16777198 */
+
+/*
+** If memory allocation problems are found, recompile with
+**
+** -DMEMORY_DEBUG=1
+**
+** to enable some sanity checking on malloc() and free(). To
+** check for memory leaks, recompile with
+**
+** -DMEMORY_DEBUG=2
+**
+** and a line of text will be written to standard error for
+** each malloc() and free(). This output can be analyzed
+** by an AWK script to determine if there are any leaks.
+*/
+#ifdef MEMORY_DEBUG
+# define sqliteMalloc(X) sqliteMalloc_(X,1,__FILE__,__LINE__)
+# define sqliteMallocRaw(X) sqliteMalloc_(X,0,__FILE__,__LINE__)
+# define sqliteFree(X) sqliteFree_(X,__FILE__,__LINE__)
+# define sqliteRealloc(X,Y) sqliteRealloc_(X,Y,__FILE__,__LINE__)
+# define sqliteStrDup(X) sqliteStrDup_(X,__FILE__,__LINE__)
+# define sqliteStrNDup(X,Y) sqliteStrNDup_(X,Y,__FILE__,__LINE__)
+ void sqliteStrRealloc(char**);
+#else
+# define sqliteRealloc_(X,Y) sqliteRealloc(X,Y)
+# define sqliteStrRealloc(X)
+#endif
+
+/*
+** This variable gets set if malloc() ever fails. After it gets set,
+** the SQLite library shuts down permanently.
+*/
+extern int sqlite_malloc_failed;
+
+/*
+** The following global variables are used for testing and debugging
+** only. They only work if MEMORY_DEBUG is defined.
+*/
+#ifdef MEMORY_DEBUG
+extern int sqlite_nMalloc; /* Number of sqliteMalloc() calls */
+extern int sqlite_nFree; /* Number of sqliteFree() calls */
+extern int sqlite_iMallocFail; /* Fail sqliteMalloc() after this many calls */
+#endif
+
+/*
+** Name of the master database table. The master database table
+** is a special table that holds the names and attributes of all
+** user tables and indices.
+*/
+#define MASTER_NAME "sqlite_master"
+#define TEMP_MASTER_NAME "sqlite_temp_master"
+
+/*
+** The name of the schema table.
+*/
+#define SCHEMA_TABLE(x) (x?TEMP_MASTER_NAME:MASTER_NAME)
+
+/*
+** A convenience macro that returns the number of elements in
+** an array.
+*/
+#define ArraySize(X) (sizeof(X)/sizeof(X[0]))
+
+/*
+** Forward references to structures
+*/
+typedef struct Column Column;
+typedef struct Table Table;
+typedef struct Index Index;
+typedef struct Instruction Instruction;
+typedef struct Expr Expr;
+typedef struct ExprList ExprList;
+typedef struct Parse Parse;
+typedef struct Token Token;
+typedef struct IdList IdList;
+typedef struct SrcList SrcList;
+typedef struct WhereInfo WhereInfo;
+typedef struct WhereLevel WhereLevel;
+typedef struct Select Select;
+typedef struct AggExpr AggExpr;
+typedef struct FuncDef FuncDef;
+typedef struct Trigger Trigger;
+typedef struct TriggerStep TriggerStep;
+typedef struct TriggerStack TriggerStack;
+typedef struct FKey FKey;
+typedef struct Db Db;
+typedef struct AuthContext AuthContext;
+
+/*
+** Each database file to be accessed by the system is an instance
+** of the following structure. There are normally two of these structures
+** in the sqlite.aDb[] array. aDb[0] is the main database file and
+** aDb[1] is the database file used to hold temporary tables. Additional
+** databases may be attached.
+*/
+struct Db {
+ char *zName; /* Name of this database */
+ Btree *pBt; /* The B*Tree structure for this database file */
+ int schema_cookie; /* Database schema version number for this file */
+ Hash tblHash; /* All tables indexed by name */
+ Hash idxHash; /* All (named) indices indexed by name */
+ Hash trigHash; /* All triggers indexed by name */
+ Hash aFKey; /* Foreign keys indexed by to-table */
+ u8 inTrans; /* 0: not writable. 1: Transaction. 2: Checkpoint */
+ u16 flags; /* Flags associated with this database */
+ void *pAux; /* Auxiliary data. Usually NULL */
+ void (*xFreeAux)(void*); /* Routine to free pAux */
+};
+
+/*
+** These macros can be used to test, set, or clear bits in the
+** Db.flags field.
+*/
+#define DbHasProperty(D,I,P) (((D)->aDb[I].flags&(P))==(P))
+#define DbHasAnyProperty(D,I,P) (((D)->aDb[I].flags&(P))!=0)
+#define DbSetProperty(D,I,P) (D)->aDb[I].flags|=(P)
+#define DbClearProperty(D,I,P) (D)->aDb[I].flags&=~(P)
+
+/*
+** Allowed values for the DB.flags field.
+**
+** The DB_Locked flag is set when the first OP_Transaction or OP_Checkpoint
+** opcode is emitted for a database.  This prevents multiple occurrences
+** of those opcodes for the same database in the same program. Similarly,
+** the DB_Cookie flag is set when the OP_VerifyCookie opcode is emitted,
+** and prevents duplicate OP_VerifyCookies from taking up space and slowing
+** down execution.
+**
+** The DB_SchemaLoaded flag is set after the database schema has been
+** read into internal hash tables.
+**
+** DB_UnresetViews means that one or more views have column names that
+** have been filled out. If the schema changes, these column names might
+** change and so the view will need to be reset.
+*/
+#define DB_Locked 0x0001 /* OP_Transaction opcode has been emitted */
+#define DB_Cookie 0x0002 /* OP_VerifyCookie opcode has been emitted */
+#define DB_SchemaLoaded 0x0004 /* The schema has been loaded */
+#define DB_UnresetViews 0x0008 /* Some views have defined column names */
+
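+/*
+** For example, internal code records that a schema has been read, and
+** later tests for it, like this (a usage sketch; index 0 is the main
+** database and index 1 is the TEMP database):
+**
+**    DbSetProperty(db, 0, DB_SchemaLoaded);
+**    if( !DbHasProperty(db, 1, DB_SchemaLoaded) ){ ... }
+*/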
+
+/*
+** Each database is an instance of the following structure.
+**
+** The sqlite.file_format is initialized by the database file
+** and helps determine how the data in the database file is
+** represented. This field allows newer versions of the library
+** to read and write older databases. The various file formats
+** are as follows:
+**
+** file_format==1 Version 2.1.0.
+** file_format==2 Version 2.2.0. Add support for INTEGER PRIMARY KEY.
+** file_format==3 Version 2.6.0. Fix empty-string index bug.
+** file_format==4 Version 2.7.0. Add support for separate numeric and
+** text datatypes.
+**
+** The sqlite.temp_store determines where temporary database files
+** are stored. If 1, then a file is created to hold those tables. If
+** 2, then they are held in memory. 0 means use the default value in
+** the TEMP_STORE macro.
+**
+** The sqlite.lastRowid records the last insert rowid generated by an
+** insert statement. Inserts on views do not affect its value. Each
+** trigger has its own context, so that lastRowid can be updated inside
+** triggers as usual. The previous value will be restored once the trigger
+** exits. Upon entering a before or instead of trigger, lastRowid is no
+** longer (since after version 2.8.12) reset to -1.
+**
+** The sqlite.nChange does not count changes within triggers and keeps no
+** context. It is reset at start of sqlite_exec.
+** The sqlite.lsChange represents the number of changes made by the last
+** insert, update, or delete statement. It remains constant throughout the
+** length of a statement and is then updated by OP_SetCounts. It keeps a
+** context stack just like lastRowid so that the count of changes
+** within a trigger is not seen outside the trigger. Changes to views do not
+** affect the value of lsChange.
+** The sqlite.csChange keeps track of the number of current changes (since
+** the last statement) and is used to update sqlite_lsChange.
+*/
+struct sqlite {
+ int nDb; /* Number of backends currently in use */
+ Db *aDb; /* All backends */
+ Db aDbStatic[2]; /* Static space for the 2 default backends */
+  int flags; /* Miscellaneous flags. See below */
+ u8 file_format; /* What file format version is this database? */
+ u8 safety_level; /* How aggressive at synching data to disk */
+ u8 want_to_close; /* Close after all VDBEs are deallocated */
+ u8 temp_store; /* 1=file, 2=memory, 0=compile-time default */
+ u8 onError; /* Default conflict algorithm */
+ int next_cookie; /* Next value of aDb[0].schema_cookie */
+ int cache_size; /* Number of pages to use in the cache */
+ int nTable; /* Number of tables in the database */
+ void *pBusyArg; /* 1st Argument to the busy callback */
+ int (*xBusyCallback)(void *,const char*,int); /* The busy callback */
+ void *pCommitArg; /* Argument to xCommitCallback() */
+ int (*xCommitCallback)(void*);/* Invoked at every commit. */
+ Hash aFunc; /* All functions that can be in SQL exprs */
+ int lastRowid; /* ROWID of most recent insert (see above) */
+ int priorNewRowid; /* Last randomly generated ROWID */
+  int magic; /* Magic number used to detect library misuse */
+ int nChange; /* Number of rows changed (see above) */
+ int lsChange; /* Last statement change count (see above) */
+ int csChange; /* Current statement change count (see above) */
+ struct sqliteInitInfo { /* Information used during initialization */
+    int iDb; /* Which backend is being initialized */
+ int newTnum; /* Rootpage of table being initialized */
+ u8 busy; /* TRUE if currently initializing */
+ } init;
+ struct Vdbe *pVdbe; /* List of active virtual machines */
+ void (*xTrace)(void*,const char*); /* Trace function */
+ void *pTraceArg; /* Argument to the trace function */
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ int (*xAuth)(void*,int,const char*,const char*,const char*,const char*);
+ /* Access authorization function */
+ void *pAuthArg; /* 1st argument to the access auth function */
+#endif
+#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
+ int (*xProgress)(void *); /* The progress callback */
+ void *pProgressArg; /* Argument to the progress callback */
+ int nProgressOps; /* Number of opcodes for progress callback */
+#endif
+};
+
+/*
+** Possible values for the sqlite.flags and or Db.flags fields.
+**
+** On sqlite.flags, the SQLITE_InTrans value means that we have
+** executed a BEGIN. On Db.flags, SQLITE_InTrans means a statement
+** transaction is active on that particular database file.
+*/
+#define SQLITE_VdbeTrace 0x00000001 /* True to trace VDBE execution */
+#define SQLITE_Initialized 0x00000002 /* True after initialization */
+#define SQLITE_Interrupt 0x00000004 /* Cancel current operation */
+#define SQLITE_InTrans 0x00000008 /* True if in a transaction */
+#define SQLITE_InternChanges 0x00000010 /* Uncommitted Hash table changes */
+#define SQLITE_FullColNames 0x00000020 /* Show full column names on SELECT */
+#define SQLITE_ShortColNames 0x00000040 /* Show short columns names */
+#define SQLITE_CountRows 0x00000080 /* Count rows changed by INSERT, */
+ /* DELETE, or UPDATE and return */
+ /* the count using a callback. */
+#define SQLITE_NullCallback 0x00000100 /* Invoke the callback once if the */
+ /* result set is empty */
+#define SQLITE_ReportTypes 0x00000200 /* Include information on datatypes */
+ /* in 4th argument of callback */
+
+/*
+** Possible values for the sqlite.magic field.
+** The numbers are obtained at random and have no special meaning, other
+** than being distinct from one another.
+*/
+#define SQLITE_MAGIC_OPEN 0xa029a697 /* Database is open */
+#define SQLITE_MAGIC_CLOSED 0x9f3c2d33 /* Database is closed */
+#define SQLITE_MAGIC_BUSY 0xf03b7906 /* Database currently in use */
+#define SQLITE_MAGIC_ERROR 0xb5357930 /* An SQLITE_MISUSE error occurred */
+
+/*
+** Each SQL function is defined by an instance of the following
+** structure. A pointer to this structure is stored in the sqlite.aFunc
+** hash table. When multiple functions have the same name, the hash table
+** points to a linked list of these structures.
+*/
+struct FuncDef {
+ void (*xFunc)(sqlite_func*,int,const char**); /* Regular function */
+ void (*xStep)(sqlite_func*,int,const char**); /* Aggregate function step */
+  void (*xFinalize)(sqlite_func*); /* Aggregate function finalizer */
+ signed char nArg; /* Number of arguments. -1 means unlimited */
+ signed char dataType; /* Arg that determines datatype. -1=NUMERIC, */
+ /* -2=TEXT. -3=SQLITE_ARGS */
+ u8 includeTypes; /* Add datatypes to args of xFunc and xStep */
+ void *pUserData; /* User data parameter */
+ FuncDef *pNext; /* Next function with same name */
+};
+
+/*
+** Information about each column of an SQL table is held in an instance
+** of this structure.
+*/
+struct Column {
+ char *zName; /* Name of this column */
+ char *zDflt; /* Default value of this column */
+ char *zType; /* Data type for this column */
+ u8 notNull; /* True if there is a NOT NULL constraint */
+ u8 isPrimKey; /* True if this column is part of the PRIMARY KEY */
+ u8 sortOrder; /* Some combination of SQLITE_SO_... values */
+ u8 dottedName; /* True if zName contains a "." character */
+};
+
+/*
+** The allowed sort orders.
+**
+** The TEXT and NUM values use bits that do not overlap with DESC and ASC.
+** That way the two can be combined into a single number.
+*/
+#define SQLITE_SO_UNK 0 /* Use the default collating type. (SCT_NUM) */
+#define SQLITE_SO_TEXT 2 /* Sort using memcmp() */
+#define SQLITE_SO_NUM 4 /* Sort using sqliteCompare() */
+#define SQLITE_SO_TYPEMASK 6 /* Mask to extract the collating sequence */
+#define SQLITE_SO_ASC 0 /* Sort in ascending order */
+#define SQLITE_SO_DESC 1 /* Sort in descending order */
+#define SQLITE_SO_DIRMASK 1 /* Mask to extract the sort direction */
+
+/*
+** Each SQL table is represented in memory by an instance of the
+** following structure.
+**
+** Table.zName is the name of the table. The case of the original
+** CREATE TABLE statement is stored, but case is not significant for
+** comparisons.
+**
+** Table.nCol is the number of columns in this table. Table.aCol is a
+** pointer to an array of Column structures, one for each column.
+**
+** If the table has an INTEGER PRIMARY KEY, then Table.iPKey is the index of
+** the column that is that key. Otherwise Table.iPKey is negative. Note
+** that the datatype of the PRIMARY KEY must be INTEGER for this field to
+** be set. An INTEGER PRIMARY KEY is used as the rowid for each row of
+** the table. If a table has no INTEGER PRIMARY KEY, then a random rowid
+** is generated for each row of the table. Table.hasPrimKey is true if
+** the table has any PRIMARY KEY, INTEGER or otherwise.
+**
+** Table.tnum is the page number for the root BTree page of the table in the
+** database file. Table.iDb is the index of the database table backend
+** in sqlite.aDb[]. 0 is for the main database and 1 is for the file that
+** holds temporary tables and indices. If Table.isTransient
+** is true, then the table is stored in a file that is automatically deleted
+** when the VDBE cursor to the table is closed. In this case Table.tnum
+** refers to the VDBE cursor number that holds the table open, not to the root
+** page number. Transient tables are used to hold the results of a
+** sub-query that appears instead of a real table name in the FROM clause
+** of a SELECT statement.
+*/
+struct Table {
+ char *zName; /* Name of the table */
+ int nCol; /* Number of columns in this table */
+ Column *aCol; /* Information about each column */
+ int iPKey; /* If not less than 0, use aCol[iPKey] as the primary key */
+ Index *pIndex; /* List of SQL indexes on this table. */
+ int tnum; /* Root BTree node for this table (see note above) */
+ Select *pSelect; /* NULL for tables. Points to definition if a view. */
+ u8 readOnly; /* True if this table should not be written by the user */
+ u8 iDb; /* Index into sqlite.aDb[] of the backend for this table */
+ u8 isTransient; /* True if automatically deleted when VDBE finishes */
+ u8 hasPrimKey; /* True if there exists a primary key */
+ u8 keyConf; /* What to do in case of uniqueness conflict on iPKey */
+ Trigger *pTrigger; /* List of SQL triggers on this table */
+ FKey *pFKey; /* Linked list of all foreign keys in this table */
+};
+
+/*
+** Each foreign key constraint is an instance of the following structure.
+**
+** A foreign key is associated with two tables. The "from" table is
+** the table that contains the REFERENCES clause that creates the foreign
+** key. The "to" table is the table that is named in the REFERENCES clause.
+** Consider this example:
+**
+** CREATE TABLE ex1(
+** a INTEGER PRIMARY KEY,
+** b INTEGER CONSTRAINT fk1 REFERENCES ex2(x)
+** );
+**
+** For foreign key "fk1", the from-table is "ex1" and the to-table is "ex2".
+**
+** Each REFERENCES clause generates an instance of the following structure
+** which is attached to the from-table. The to-table need not exist when
+** the from-table is created. The existence of the to-table is not checked
+** until an attempt is made to insert data into the from-table.
+**
+** The sqlite.aFKey hash table stores pointers to this structure
+** given the name of a to-table. For each to-table, all foreign keys
+** associated with that table are on a linked list using the FKey.pNextTo
+** field.
+*/
+struct FKey {
+ Table *pFrom; /* The table that contains the REFERENCES clause */
+ FKey *pNextFrom; /* Next foreign key in pFrom */
+ char *zTo; /* Name of table that the key points to */
+ FKey *pNextTo; /* Next foreign key that points to zTo */
+ int nCol; /* Number of columns in this key */
+ struct sColMap { /* Mapping of columns in pFrom to columns in zTo */
+ int iFrom; /* Index of column in pFrom */
+ char *zCol; /* Name of column in zTo. If 0 use PRIMARY KEY */
+ } *aCol; /* One entry for each of nCol columns */
+ u8 isDeferred; /* True if constraint checking is deferred till COMMIT */
+ u8 updateConf; /* How to resolve conflicts that occur on UPDATE */
+ u8 deleteConf; /* How to resolve conflicts that occur on DELETE */
+ u8 insertConf; /* How to resolve conflicts that occur on INSERT */
+};
+
+/*
+** SQLite supports many different ways to resolve a constraint
+** error. ROLLBACK processing means that a constraint violation
+** causes the operation in progress to fail and the current transaction
+** to be rolled back. ABORT processing means the operation in progress
+** fails and any prior changes from that one operation are backed out,
+** but the transaction is not rolled back. FAIL processing means that
+** the operation in progress stops and returns an error code. But prior
+** changes due to the same operation are not backed out and no rollback
+** occurs. IGNORE means that the particular row that caused the constraint
+** error is not inserted or updated. Processing continues and no error
+** is returned. REPLACE means that preexisting database rows that caused
+** a UNIQUE constraint violation are removed so that the new insert or
+** update can proceed. Processing continues and no error is reported.
+**
+** RESTRICT, SETNULL, and CASCADE actions apply only to foreign keys.
+** RESTRICT is the same as ABORT for IMMEDIATE foreign keys and the
+** same as ROLLBACK for DEFERRED keys. SETNULL means that the foreign
+** key is set to NULL. CASCADE means that a DELETE or UPDATE of the
+** referenced table row is propagated into the row that holds the
+** foreign key.
+**
+** The following symbolic values are used to record which type
+** of action to take.
+*/
+#define OE_None 0 /* There is no constraint to check */
+#define OE_Rollback 1 /* Fail the operation and rollback the transaction */
+#define OE_Abort 2 /* Back out changes but do not roll back the transaction */
+#define OE_Fail 3 /* Stop the operation but leave all prior changes */
+#define OE_Ignore 4 /* Ignore the error. Do not do the INSERT or UPDATE */
+#define OE_Replace 5 /* Delete existing record, then do INSERT or UPDATE */
+
+#define OE_Restrict 6 /* OE_Abort for IMMEDIATE, OE_Rollback for DEFERRED */
+#define OE_SetNull 7 /* Set the foreign key value to NULL */
+#define OE_SetDflt 8 /* Set the foreign key value to its default */
+#define OE_Cascade 9 /* Cascade the changes */
+
+#define OE_Default 99 /* Do whatever the default action is */
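+
+/*
+** Illustrative example (not part of the original comments): the parser maps
+** conflict-resolution keywords in the SQL text onto the OE_* codes above.
+** A sketch of how such clauses might be written:
+**
+**     CREATE TABLE t(a INTEGER UNIQUE ON CONFLICT IGNORE);  -- OE_Ignore
+**     INSERT OR REPLACE INTO t VALUES(1);                    -- OE_Replace
+**     UPDATE OR ROLLBACK t SET a=2;                          -- OE_Rollback
+**
+** When no clause is given, the statement carries OE_Default and the
+** constraint's own default action applies.
+*/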
+
+/*
+** Each SQL index is represented in memory by an
+** instance of the following structure.
+**
+** The columns of the table that are to be indexed are described
+** by the aiColumn[] field of this structure. For example, suppose
+** we have the following table and index:
+**
+** CREATE TABLE Ex1(c1 int, c2 int, c3 text);
+** CREATE INDEX Ex2 ON Ex1(c3,c1);
+**
+** In the Table structure describing Ex1, nCol==3 because there are
+** three columns in the table. In the Index structure describing
+** Ex2, nColumn==2 since 2 of the 3 columns of Ex1 are indexed.
+** The value of aiColumn is {2, 0}. aiColumn[0]==2 because the
+** first column to be indexed (c3) has an index of 2 in Ex1.aCol[].
+** The second column to be indexed (c1) has an index of 0 in
+** Ex1.aCol[], hence Ex2.aiColumn[1]==0.
+**
+** The Index.onError field determines whether or not the indexed columns
+** must be unique and what to do if they are not. When Index.onError=OE_None,
+** it means this is not a unique index. Otherwise it is a unique index
+** and the value of Index.onError indicates which conflict resolution
+** algorithm to employ whenever an attempt is made to insert a non-unique
+** element.
+*/
+struct Index {
+ char *zName; /* Name of this index */
+ int nColumn; /* Number of columns in the table used by this index */
+ int *aiColumn; /* Which columns are used by this index. 1st is 0 */
+ Table *pTable; /* The SQL table being indexed */
+ int tnum; /* Page containing root of this index in database file */
+ u8 onError; /* OE_Abort, OE_Ignore, OE_Replace, or OE_None */
+ u8 autoIndex; /* True if is automatically created (ex: by UNIQUE) */
+ u8 iDb; /* Index in sqlite.aDb[] of where this index is stored */
+ Index *pNext; /* The next index associated with the same table */
+};
+
+/*
+** Each token coming out of the lexer is an instance of
+** this structure. Tokens are also used as part of an expression.
+**
+** Note if Token.z==0 then Token.dyn and Token.n are undefined and
+** may contain random values. Do not make any assumptions about Token.dyn
+** and Token.n when Token.z==0.
+*/
+struct Token {
+ const char *z; /* Text of the token. Not NULL-terminated! */
+ unsigned dyn : 1; /* True for malloced memory, false for static */
+ unsigned n : 31; /* Number of characters in this token */
+};
+
+/*
+** Each node of an expression in the parse tree is an instance
+** of this structure.
+**
+** Expr.op is the opcode. The integer parser token codes are reused
+** as opcodes here. For example, the parser defines TK_GE to be an integer
+** code representing the ">=" operator. This same integer code is reused
+** to represent the greater-than-or-equal-to operator in the expression
+** tree.
+**
+** Expr.pRight and Expr.pLeft are subexpressions. Expr.pList is a list
+** of arguments if the expression is a function.
+**
+** Expr.token is the operator token for this node. For some expressions
+** that have subexpressions, Expr.token can be the complete text that gave
+** rise to the Expr. In the latter case, the token is marked as being
+** a compound token.
+**
+** An expression of the form ID or ID.ID refers to a column in a table.
+** For such expressions, Expr.op is set to TK_COLUMN and Expr.iTable is
+** the integer cursor number of a VDBE cursor pointing to that table and
+** Expr.iColumn is the column number for the specific column. If the
+** expression is used as a result in an aggregate SELECT, then the
+** value is also stored in the Expr.iAgg column in the aggregate so that
+** it can be accessed after all aggregates are computed.
+**
+** If the expression is a function, then Expr.iTable is an integer code
+** representing which function. If the expression is an unbound variable
+** marker (a question mark character '?' in the original SQL) then the
+** Expr.iTable holds the index number for that variable.
+**
+** The Expr.pSelect field points to a SELECT statement. The SELECT might
+** be the right operand of an IN operator. Or, if a scalar SELECT appears
+** in an expression the opcode is TK_SELECT and Expr.pSelect is the only
+** operand.
+*/
+struct Expr {
+ u8 op; /* Operation performed by this node */
+ u8 dataType; /* Either SQLITE_SO_TEXT or SQLITE_SO_NUM */
+ u8 iDb; /* Database referenced by this expression */
+ u8 flags; /* Various flags. See below */
+ Expr *pLeft, *pRight; /* Left and right subnodes */
+ ExprList *pList; /* A list of expressions used as function arguments
+ ** or in "<expr> IN (<expr-list>)" */
+ Token token; /* An operand token */
+ Token span; /* Complete text of the expression */
+ int iTable, iColumn; /* When op==TK_COLUMN, then this expr node means the
+ ** iColumn-th field of the iTable-th table. */
+ int iAgg; /* When op==TK_COLUMN and pParse->useAgg==TRUE, pull
+ ** result from the iAgg-th element of the aggregator */
+ Select *pSelect; /* When the expression is a sub-select. Also the
+ ** right side of "<expr> IN (<select>)" */
+};
+
+/*
+** The following are the meanings of bits in the Expr.flags field.
+*/
+#define EP_FromJoin 0x0001 /* Originated in ON or USING clause of a join */
+
+/*
+** These macros can be used to test, set, or clear bits in the
+** Expr.flags field.
+*/
+#define ExprHasProperty(E,P) (((E)->flags&(P))==(P))
+#define ExprHasAnyProperty(E,P) (((E)->flags&(P))!=0)
+#define ExprSetProperty(E,P) (E)->flags|=(P)
+#define ExprClearProperty(E,P) (E)->flags&=~(P)
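+
+/*
+** A minimal usage sketch (illustrative only, assuming a valid Expr *pExpr):
+**
+**     ExprSetProperty(pExpr, EP_FromJoin);
+**     assert( ExprHasProperty(pExpr, EP_FromJoin) );
+**     ExprClearProperty(pExpr, EP_FromJoin);
+**     assert( !ExprHasAnyProperty(pExpr, EP_FromJoin) );
+*/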
+
+/*
+** A list of expressions. Each expression may optionally have a
+** name. An expr/name combination can be used in several ways, such
+** as the list of "expr AS ID" fields following a "SELECT" or in the
+** list of "ID = expr" items in an UPDATE. A list of expressions can
+** also be used as the argument to a function, in which case the a.zName
+** field is not used.
+*/
+struct ExprList {
+ int nExpr; /* Number of expressions on the list */
+ int nAlloc; /* Number of entries allocated below */
+ struct ExprList_item {
+ Expr *pExpr; /* The list of expressions */
+ char *zName; /* Token associated with this expression */
+ u8 sortOrder; /* 1 for DESC or 0 for ASC */
+ u8 isAgg; /* True if this is an aggregate like count(*) */
+ u8 done; /* A flag to indicate when processing is finished */
+ } *a; /* One entry for each expression */
+};
+
+/*
+** An instance of this structure can hold a simple list of identifiers,
+** such as the list "a,b,c" in the following statements:
+**
+** INSERT INTO t(a,b,c) VALUES ...;
+** CREATE INDEX idx ON t(a,b,c);
+** CREATE TRIGGER trig BEFORE UPDATE ON t(a,b,c) ...;
+**
+** The IdList.a.idx field is used when the IdList represents the list of
+** column names after a table name in an INSERT statement. In the statement
+**
+** INSERT INTO t(a,b,c) ...
+**
+** If "a" is the k-th column of table "t", then IdList.a[0].idx==k.
+*/
+struct IdList {
+ int nId; /* Number of identifiers on the list */
+ int nAlloc; /* Number of entries allocated for a[] below */
+ struct IdList_item {
+ char *zName; /* Name of the identifier */
+ int idx; /* Index in some Table.aCol[] of a column named zName */
+ } *a;
+};
+
+/*
+** The following structure describes the FROM clause of a SELECT statement.
+** Each table or subquery in the FROM clause is a separate element of
+** the SrcList.a[] array.
+**
+** With the addition of multiple database support, the following structure
+** can also be used to describe a particular table such as the table that
+** is modified by an INSERT, DELETE, or UPDATE statement. In standard SQL,
+** such a table must be a simple name: ID. But in SQLite, the table can
+** now be identified by a database name, a dot, then the table name: ID.ID.
+*/
+struct SrcList {
+ i16 nSrc; /* Number of tables or subqueries in the FROM clause */
+ i16 nAlloc; /* Number of entries allocated in a[] below */
+ struct SrcList_item {
+ char *zDatabase; /* Name of database holding this table */
+ char *zName; /* Name of the table */
+ char *zAlias; /* The "B" part of an "A AS B" phrase. zName is the "A" */
+ Table *pTab; /* An SQL table corresponding to zName */
+ Select *pSelect; /* A SELECT statement used in place of a table name */
+ int jointype; /* Type of join between this table and the next */
+ int iCursor; /* The VDBE cursor number used to access this table */
+ Expr *pOn; /* The ON clause of a join */
+ IdList *pUsing; /* The USING clause of a join */
+ } a[1]; /* One entry for each identifier on the list */
+};
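+
+/*
+** Illustrative example (not part of the original comments): after
+**
+**     ATTACH DATABASE 'aux.db' AS aux1;
+**
+** a statement such as
+**
+**     DELETE FROM aux1.t1 WHERE x>10;
+**
+** produces a one-element SrcList whose entry has zDatabase=="aux1" and
+** zName=="t1".
+*/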
+
+/*
+** Permitted values of the SrcList.a.jointype field
+*/
+#define JT_INNER 0x0001 /* Any kind of inner or cross join */
+#define JT_NATURAL 0x0002 /* True for a "natural" join */
+#define JT_LEFT 0x0004 /* Left outer join */
+#define JT_RIGHT 0x0008 /* Right outer join */
+#define JT_OUTER 0x0010 /* The "OUTER" keyword is present */
+#define JT_ERROR 0x0020 /* unknown or unsupported join type */
+
+/*
+** For each nested loop in a WHERE clause implementation, the WhereInfo
+** structure contains a single instance of this structure. This structure
+** is intended to be private to the where.c module and should not be
+** accessed or modified by other modules.
+*/
+struct WhereLevel {
+ int iMem; /* Memory cell used by this level */
+ Index *pIdx; /* Index used */
+ int iCur; /* Cursor number used for this index */
+ int score; /* How well this index scored */
+ int brk; /* Jump here to break out of the loop */
+ int cont; /* Jump here to continue with the next loop cycle */
+ int op, p1, p2; /* Opcode used to terminate the loop */
+ int iLeftJoin; /* Memory cell used to implement LEFT OUTER JOIN */
+ int top; /* First instruction of interior of the loop */
+ int inOp, inP1, inP2;/* Opcode used to implement an IN operator */
+ int bRev; /* Do the scan in the reverse direction */
+};
+
+/*
+** The WHERE clause processing routine has two halves. The
+** first part does the start of the WHERE loop and the second
+** half does the tail of the WHERE loop. An instance of
+** this structure is returned by the first half and passed
+** into the second half to give some continuity.
+*/
+struct WhereInfo {
+ Parse *pParse;
+ SrcList *pTabList; /* List of tables in the join */
+ int iContinue; /* Jump here to continue with next record */
+ int iBreak; /* Jump here to break out of the loop */
+ int nLevel; /* Number of nested loops */
+ int savedNTab; /* Value of pParse->nTab before WhereBegin() */
+ int peakNTab; /* Value of pParse->nTab after WhereBegin() */
+ WhereLevel a[1]; /* Information about each nested loop in the WHERE */
+};
+
+/*
+** An instance of the following structure contains all information
+** needed to generate code for a single SELECT statement.
+**
+** The zSelect field is used when the Select structure must be persistent.
+** Normally, the expression tree points to tokens in the original input
+** string that encodes the select. But if the Select structure must live
+** longer than its input string (for example when it is used to describe
+** a VIEW) we have to make a copy of the input string so that the nodes
+** of the expression tree will have something to point to. zSelect is used
+** to hold that copy.
+**
+** nLimit is set to -1 if there is no LIMIT clause. nOffset is set to 0.
+** If there is a LIMIT clause, the parser sets nLimit to the value of the
+** limit and nOffset to the value of the offset (or 0 if there is no
+** offset). But later on, nLimit and nOffset become the memory locations
+** in the VDBE that record the limit and offset counters.
+*/
+struct Select {
+ ExprList *pEList; /* The fields of the result */
+ u8 op; /* One of: TK_UNION TK_ALL TK_INTERSECT TK_EXCEPT */
+ u8 isDistinct; /* True if the DISTINCT keyword is present */
+ SrcList *pSrc; /* The FROM clause */
+ Expr *pWhere; /* The WHERE clause */
+ ExprList *pGroupBy; /* The GROUP BY clause */
+ Expr *pHaving; /* The HAVING clause */
+ ExprList *pOrderBy; /* The ORDER BY clause */
+ Select *pPrior; /* Prior select in a compound select statement */
+ int nLimit, nOffset; /* LIMIT and OFFSET values. -1 means not used */
+ int iLimit, iOffset; /* Memory registers holding LIMIT & OFFSET counters */
+ char *zSelect; /* Complete text of the SELECT command */
+};
+
+/*
+** The results of a select can be distributed in several ways.
+*/
+#define SRT_Callback 1 /* Invoke a callback with each row of result */
+#define SRT_Mem 2 /* Store result in a memory cell */
+#define SRT_Set 3 /* Store result as unique keys in a table */
+#define SRT_Union 5 /* Store result as keys in a table */
+#define SRT_Except 6 /* Remove result from a UNION table */
+#define SRT_Table 7 /* Store result as data with a unique key */
+#define SRT_TempTable 8 /* Store result in a transient table */
+#define SRT_Discard 9 /* Do not save the results anywhere */
+#define SRT_Sorter 10 /* Store results in the sorter */
+#define SRT_Subroutine 11 /* Call a subroutine to handle results */
+
+/*
+** When a SELECT uses aggregate functions (like "count(*)" or "avg(f1)")
+** we have to do some additional analysis of expressions. An instance
+** of the following structure holds information about a single subexpression
+** somewhere in the SELECT statement. An array of these structures holds
+** all the information we need to generate code for aggregate
+** expressions.
+**
+** Note that when analyzing a SELECT containing aggregates, both
+** non-aggregate field variables and aggregate functions are stored
+** in the AggExpr array of the Parser structure.
+**
+** The pExpr field points to an expression that is part of either the
+** field list, the GROUP BY clause, the HAVING clause or the ORDER BY
+** clause. The expression will be freed when those clauses are cleaned
+** up. Do not try to delete the expression attached to AggExpr.pExpr.
+**
+** If AggExpr.pExpr==0, that means the expression is "count(*)".
+*/
+struct AggExpr {
+ int isAgg; /* if TRUE contains an aggregate function */
+ Expr *pExpr; /* The expression */
+ FuncDef *pFunc; /* Information about the aggregate function */
+};
+
+/*
+** An SQL parser context. A copy of this structure is passed through
+** the parser and down into all the parser action routines in order to
+** carry around information that is global to the entire parse.
+*/
+struct Parse {
+ sqlite *db; /* The main database structure */
+ int rc; /* Return code from execution */
+ char *zErrMsg; /* An error message */
+ Token sErrToken; /* The token at which the error occurred */
+ Token sFirstToken; /* The first token parsed */
+ Token sLastToken; /* The last token parsed */
+ const char *zTail; /* All SQL text past the last semicolon parsed */
+ Table *pNewTable; /* A table being constructed by CREATE TABLE */
+ Vdbe *pVdbe; /* An engine for executing database bytecode */
+ u8 colNamesSet; /* TRUE after OP_ColumnName has been issued to pVdbe */
+ u8 explain; /* True if the EXPLAIN flag is found on the query */
+ u8 nameClash; /* A permanent table name clashes with temp table name */
+ u8 useAgg; /* If true, extract field values from the aggregator
+ ** while generating expressions. Normally false */
+ int nErr; /* Number of errors seen */
+ int nTab; /* Number of previously allocated VDBE cursors */
+ int nMem; /* Number of memory cells used so far */
+ int nSet; /* Number of sets used so far */
+ int nAgg; /* Number of aggregate expressions */
+ int nVar; /* Number of '?' variables seen in the SQL so far */
+ AggExpr *aAgg; /* An array of aggregate expressions */
+ const char *zAuthContext; /* The 6th parameter to db->xAuth callbacks */
+ Trigger *pNewTrigger; /* Trigger under construction by a CREATE TRIGGER */
+ TriggerStack *trigStack; /* Trigger actions being coded */
+};
+
+/*
+** An instance of the following structure can be declared on a stack and used
+** to save the Parse.zAuthContext value so that it can be restored later.
+*/
+struct AuthContext {
+ const char *zAuthContext; /* Put saved Parse.zAuthContext here */
+ Parse *pParse; /* The Parse structure */
+};
+
+/*
+** Bitfield flags for P2 value in OP_PutIntKey and OP_Delete
+*/
+#define OPFLAG_NCHANGE 1 /* Set to update db->nChange */
+#define OPFLAG_LASTROWID 2 /* Set to update db->lastRowid */
+#define OPFLAG_CSCHANGE 4 /* Set to update db->csChange */
+
+/*
+ * Each trigger present in the database schema is stored as an instance of
+ * struct Trigger.
+ *
+ * Pointers to instances of struct Trigger are stored in two ways.
+ * 1. In the "trigHash" hash table (part of the sqlite* that represents the
+ * database). This allows Trigger structures to be retrieved by name.
+ * 2. All triggers associated with a single table form a linked list, using the
+ * pNext member of struct Trigger. A pointer to the first element of the
+ * linked list is stored as the "pTrigger" member of the associated
+ * struct Table.
+ *
+ * The "step_list" member points to the first element of a linked list
+ * containing the SQL statements specified as the trigger program.
+ */
+struct Trigger {
+ char *name; /* The name of the trigger */
+ char *table; /* The table or view to which the trigger applies */
+ u8 iDb; /* Database containing this trigger */
+ u8 iTabDb; /* Database containing Trigger.table */
+ u8 op; /* One of TK_DELETE, TK_UPDATE, TK_INSERT */
+ u8 tr_tm; /* One of TK_BEFORE, TK_AFTER */
+ Expr *pWhen; /* The WHEN clause of the expression (may be NULL) */
+ IdList *pColumns; /* If this is an UPDATE OF <column-list> trigger,
+ the <column-list> is stored here */
+ int foreach; /* One of TK_ROW or TK_STATEMENT */
+ Token nameToken; /* Token containing zName. Used during parsing only */
+
+ TriggerStep *step_list; /* Linked list of trigger program steps */
+ Trigger *pNext; /* Next trigger associated with the table */
+};
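+
+/*
+ * Illustrative example (not part of the original comments): the statement
+ *
+ *     CREATE TRIGGER au AFTER UPDATE OF b ON t1 FOR EACH ROW
+ *     WHEN new.b>old.b BEGIN UPDATE t2 SET x=x+1; END;
+ *
+ * would yield a Trigger with name=="au", table=="t1", op==TK_UPDATE,
+ * tr_tm==TK_AFTER, foreach==TK_ROW, pColumns holding "b", pWhen holding
+ * the WHEN expression, and step_list containing one UPDATE TriggerStep.
+ */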
+
+/*
+ * An instance of struct TriggerStep is used to store a single SQL statement
+ * that is a part of a trigger-program.
+ *
+ * Instances of struct TriggerStep are stored in a singly linked list (linked
+ * using the "pNext" member) referenced by the "step_list" member of the
+ * associated struct Trigger instance. The first element of the linked list is
+ * the first step of the trigger-program.
+ *
+ * The "op" member indicates whether this is a "DELETE", "INSERT", "UPDATE" or
+ * "SELECT" statement. The meanings of the other members is determined by the
+ * value of "op" as follows:
+ *
+ * (op == TK_INSERT)
+ * orconf -> stores the ON CONFLICT algorithm
+ * pSelect -> If this is an INSERT INTO ... SELECT ... statement, then
+ * this stores a pointer to the SELECT statement. Otherwise NULL.
+ * target -> A token holding the name of the table to insert into.
+ * pExprList -> If this is an INSERT INTO ... VALUES ... statement, then
+ * this stores values to be inserted. Otherwise NULL.
+ * pIdList -> If this is an INSERT INTO ... (<column-names>) VALUES ...
+ * statement, then this stores the column-names to be
+ * inserted into.
+ *
+ * (op == TK_DELETE)
+ * target -> A token holding the name of the table to delete from.
+ * pWhere -> The WHERE clause of the DELETE statement if one is specified.
+ * Otherwise NULL.
+ *
+ * (op == TK_UPDATE)
+ * target -> A token holding the name of the table to update rows of.
+ * pWhere -> The WHERE clause of the UPDATE statement if one is specified.
+ * Otherwise NULL.
+ * pExprList -> A list of the columns to update and the expressions to update
+ * them to. See sqliteUpdate() documentation of "pChanges"
+ * argument.
+ *
+ */
+struct TriggerStep {
+ int op; /* One of TK_DELETE, TK_UPDATE, TK_INSERT, TK_SELECT */
+ int orconf; /* OE_Rollback etc. */
+ Trigger *pTrig; /* The trigger that this step is a part of */
+
+ Select *pSelect; /* Valid for SELECT and sometimes
+ INSERT steps (when pExprList == 0) */
+ Token target; /* Valid for DELETE, UPDATE, INSERT steps */
+ Expr *pWhere; /* Valid for DELETE, UPDATE steps */
+ ExprList *pExprList; /* Valid for UPDATE statements and sometimes
+ INSERT steps (when pSelect == 0) */
+ IdList *pIdList; /* Valid for INSERT statements only */
+
+ TriggerStep * pNext; /* Next in the link-list */
+};
+
+/*
+ * An instance of struct TriggerStack stores information required during code
+ * generation of a single trigger program. While the trigger program is being
+ * coded, its associated TriggerStack instance is pointed to by the
+ * "pTriggerStack" member of the Parse structure.
+ *
+ * The pTab member points to the table that triggers are being coded on. The
+ * newIdx member contains the index of the vdbe cursor that points at the temp
+ * table that stores the new.* references. If new.* references are not valid
+ * for the trigger being coded (for example an ON DELETE trigger), then newIdx
+ * is set to -1. The oldIdx member is analogous to newIdx, for old.* references.
+ *
+ * The ON CONFLICT policy to be used for the trigger program steps is stored
+ * as the orconf member. If this is OE_Default, then the ON CONFLICT clause
+ * specified for individual trigger steps is used.
+ *
+ * struct TriggerStack has a "pNext" member, to allow linked lists to be
+ * constructed. When coding nested triggers (triggers fired by other triggers)
+ * each nested trigger stores its parent trigger's TriggerStack as the "pNext"
+ * pointer. Once the nested trigger has been coded, the pNext value is restored
+ * to the pTriggerStack member of the Parse structure and coding of the parent
+ * trigger continues.
+ *
+ * Before a nested trigger is coded, the linked list pointed to by the
+ * pTriggerStack is scanned to ensure that the trigger is not about to be coded
+ * recursively. If this condition is detected, the nested trigger is not coded.
+ */
+struct TriggerStack {
+ Table *pTab; /* Table that triggers are currently being coded on */
+ int newIdx; /* Index of vdbe cursor to "new" temp table */
+ int oldIdx; /* Index of vdbe cursor to "old" temp table */
+ int orconf; /* Current orconf policy */
+ int ignoreJump; /* where to jump to for a RAISE(IGNORE) */
+ Trigger *pTrigger; /* The trigger currently being coded */
+ TriggerStack *pNext; /* Next trigger down on the trigger stack */
+};
+
+/*
+** The following structure contains information used by the sqliteFix...
+** routines as they walk the parse tree to make database references
+** explicit.
+*/
+typedef struct DbFixer DbFixer;
+struct DbFixer {
+ Parse *pParse; /* The parsing context. Error messages written here */
+ const char *zDb; /* Make sure all objects are contained in this database */
+ const char *zType; /* Type of the container - used for error messages */
+ const Token *pName; /* Name of the container - used for error messages */
+};
+
+/*
+ * This global flag is set for performance testing of triggers. When it is set
+ * SQLite will perform the overhead of building new and old trigger references
+ * even when no triggers exist
+ */
+extern int always_code_trigger_setup;
+
+/*
+** Internal function prototypes
+*/
+int sqliteStrICmp(const char *, const char *);
+int sqliteStrNICmp(const char *, const char *, int);
+int sqliteHashNoCase(const char *, int);
+int sqliteIsNumber(const char*);
+int sqliteCompare(const char *, const char *);
+int sqliteSortCompare(const char *, const char *);
+void sqliteRealToSortable(double r, char *);
+#ifdef MEMORY_DEBUG
+ void *sqliteMalloc_(int,int,char*,int);
+ void sqliteFree_(void*,char*,int);
+ void *sqliteRealloc_(void*,int,char*,int);
+ char *sqliteStrDup_(const char*,char*,int);
+ char *sqliteStrNDup_(const char*, int,char*,int);
+ void sqliteCheckMemory(void*,int);
+#else
+ void *sqliteMalloc(int);
+ void *sqliteMallocRaw(int);
+ void sqliteFree(void*);
+ void *sqliteRealloc(void*,int);
+ char *sqliteStrDup(const char*);
+ char *sqliteStrNDup(const char*, int);
+# define sqliteCheckMemory(a,b)
+#endif
+char *sqliteMPrintf(const char*, ...);
+char *sqliteVMPrintf(const char*, va_list);
+void sqliteSetString(char **, const char *, ...);
+void sqliteSetNString(char **, ...);
+void sqliteErrorMsg(Parse*, const char*, ...);
+void sqliteDequote(char*);
+int sqliteKeywordCode(const char*, int);
+int sqliteRunParser(Parse*, const char*, char **);
+void sqliteExec(Parse*);
+Expr *sqliteExpr(int, Expr*, Expr*, Token*);
+void sqliteExprSpan(Expr*,Token*,Token*);
+Expr *sqliteExprFunction(ExprList*, Token*);
+void sqliteExprDelete(Expr*);
+ExprList *sqliteExprListAppend(ExprList*,Expr*,Token*);
+void sqliteExprListDelete(ExprList*);
+int sqliteInit(sqlite*, char**);
+void sqlitePragma(Parse*,Token*,Token*,int);
+void sqliteResetInternalSchema(sqlite*, int);
+void sqliteBeginParse(Parse*,int);
+void sqliteRollbackInternalChanges(sqlite*);
+void sqliteCommitInternalChanges(sqlite*);
+Table *sqliteResultSetOfSelect(Parse*,char*,Select*);
+void sqliteOpenMasterTable(Vdbe *v, int);
+void sqliteStartTable(Parse*,Token*,Token*,int,int);
+void sqliteAddColumn(Parse*,Token*);
+void sqliteAddNotNull(Parse*, int);
+void sqliteAddPrimaryKey(Parse*, IdList*, int);
+void sqliteAddColumnType(Parse*,Token*,Token*);
+void sqliteAddDefaultValue(Parse*,Token*,int);
+int sqliteCollateType(const char*, int);
+void sqliteAddCollateType(Parse*, int);
+void sqliteEndTable(Parse*,Token*,Select*);
+void sqliteCreateView(Parse*,Token*,Token*,Select*,int);
+int sqliteViewGetColumnNames(Parse*,Table*);
+void sqliteDropTable(Parse*, Token*, int);
+void sqliteDeleteTable(sqlite*, Table*);
+void sqliteInsert(Parse*, SrcList*, ExprList*, Select*, IdList*, int);
+IdList *sqliteIdListAppend(IdList*, Token*);
+int sqliteIdListIndex(IdList*,const char*);
+SrcList *sqliteSrcListAppend(SrcList*, Token*, Token*);
+void sqliteSrcListAddAlias(SrcList*, Token*);
+void sqliteSrcListAssignCursors(Parse*, SrcList*);
+void sqliteIdListDelete(IdList*);
+void sqliteSrcListDelete(SrcList*);
+void sqliteCreateIndex(Parse*,Token*,SrcList*,IdList*,int,Token*,Token*);
+void sqliteDropIndex(Parse*, SrcList*);
+void sqliteAddKeyType(Vdbe*, ExprList*);
+void sqliteAddIdxKeyType(Vdbe*, Index*);
+int sqliteSelect(Parse*, Select*, int, int, Select*, int, int*);
+Select *sqliteSelectNew(ExprList*,SrcList*,Expr*,ExprList*,Expr*,ExprList*,
+ int,int,int);
+void sqliteSelectDelete(Select*);
+void sqliteSelectUnbind(Select*);
+Table *sqliteSrcListLookup(Parse*, SrcList*);
+int sqliteIsReadOnly(Parse*, Table*, int);
+void sqliteDeleteFrom(Parse*, SrcList*, Expr*);
+void sqliteUpdate(Parse*, SrcList*, ExprList*, Expr*, int);
+WhereInfo *sqliteWhereBegin(Parse*, SrcList*, Expr*, int, ExprList**);
+void sqliteWhereEnd(WhereInfo*);
+void sqliteExprCode(Parse*, Expr*);
+int sqliteExprCodeExprList(Parse*, ExprList*, int);
+void sqliteExprIfTrue(Parse*, Expr*, int, int);
+void sqliteExprIfFalse(Parse*, Expr*, int, int);
+Table *sqliteFindTable(sqlite*,const char*, const char*);
+Table *sqliteLocateTable(Parse*,const char*, const char*);
+Index *sqliteFindIndex(sqlite*,const char*, const char*);
+void sqliteUnlinkAndDeleteIndex(sqlite*,Index*);
+void sqliteCopy(Parse*, SrcList*, Token*, Token*, int);
+void sqliteVacuum(Parse*, Token*);
+int sqliteRunVacuum(char**, sqlite*);
+int sqliteGlobCompare(const unsigned char*,const unsigned char*);
+int sqliteLikeCompare(const unsigned char*,const unsigned char*);
+char *sqliteTableNameFromToken(Token*);
+int sqliteExprCheck(Parse*, Expr*, int, int*);
+int sqliteExprType(Expr*);
+int sqliteExprCompare(Expr*, Expr*);
+int sqliteFuncId(Token*);
+int sqliteExprResolveIds(Parse*, SrcList*, ExprList*, Expr*);
+int sqliteExprAnalyzeAggregates(Parse*, Expr*);
+Vdbe *sqliteGetVdbe(Parse*);
+void sqliteRandomness(int, void*);
+void sqliteRollbackAll(sqlite*);
+void sqliteCodeVerifySchema(Parse*, int);
+void sqliteBeginTransaction(Parse*, int);
+void sqliteCommitTransaction(Parse*);
+void sqliteRollbackTransaction(Parse*);
+int sqliteExprIsConstant(Expr*);
+int sqliteExprIsInteger(Expr*, int*);
+int sqliteIsRowid(const char*);
+void sqliteGenerateRowDelete(sqlite*, Vdbe*, Table*, int, int);
+void sqliteGenerateRowIndexDelete(sqlite*, Vdbe*, Table*, int, char*);
+void sqliteGenerateConstraintChecks(Parse*,Table*,int,char*,int,int,int,int);
+void sqliteCompleteInsertion(Parse*, Table*, int, char*, int, int, int);
+int sqliteOpenTableAndIndices(Parse*, Table*, int);
+void sqliteBeginWriteOperation(Parse*, int, int);
+void sqliteEndWriteOperation(Parse*);
+Expr *sqliteExprDup(Expr*);
+void sqliteTokenCopy(Token*, Token*);
+ExprList *sqliteExprListDup(ExprList*);
+SrcList *sqliteSrcListDup(SrcList*);
+IdList *sqliteIdListDup(IdList*);
+Select *sqliteSelectDup(Select*);
+FuncDef *sqliteFindFunction(sqlite*,const char*,int,int,int);
+void sqliteRegisterBuiltinFunctions(sqlite*);
+void sqliteRegisterDateTimeFunctions(sqlite*);
+int sqliteSafetyOn(sqlite*);
+int sqliteSafetyOff(sqlite*);
+int sqliteSafetyCheck(sqlite*);
+void sqliteChangeCookie(sqlite*, Vdbe*);
+void sqliteBeginTrigger(Parse*, Token*,int,int,IdList*,SrcList*,int,Expr*,int);
+void sqliteFinishTrigger(Parse*, TriggerStep*, Token*);
+void sqliteDropTrigger(Parse*, SrcList*);
+void sqliteDropTriggerPtr(Parse*, Trigger*, int);
+int sqliteTriggersExist(Parse* , Trigger* , int , int , int, ExprList*);
+int sqliteCodeRowTrigger(Parse*, int, ExprList*, int, Table *, int, int,
+ int, int);
+void sqliteViewTriggers(Parse*, Table*, Expr*, int, ExprList*);
+void sqliteDeleteTriggerStep(TriggerStep*);
+TriggerStep *sqliteTriggerSelectStep(Select*);
+TriggerStep *sqliteTriggerInsertStep(Token*, IdList*, ExprList*, Select*, int);
+TriggerStep *sqliteTriggerUpdateStep(Token*, ExprList*, Expr*, int);
+TriggerStep *sqliteTriggerDeleteStep(Token*, Expr*);
+void sqliteDeleteTrigger(Trigger*);
+int sqliteJoinType(Parse*, Token*, Token*, Token*);
+void sqliteCreateForeignKey(Parse*, IdList*, Token*, IdList*, int);
+void sqliteDeferForeignKey(Parse*, int);
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ void sqliteAuthRead(Parse*,Expr*,SrcList*);
+ int sqliteAuthCheck(Parse*,int, const char*, const char*, const char*);
+ void sqliteAuthContextPush(Parse*, AuthContext*, const char*);
+ void sqliteAuthContextPop(AuthContext*);
+#else
+# define sqliteAuthRead(a,b,c)
+# define sqliteAuthCheck(a,b,c,d,e) SQLITE_OK
+# define sqliteAuthContextPush(a,b,c)
+# define sqliteAuthContextPop(a) ((void)(a))
+#endif
+void sqliteAttach(Parse*, Token*, Token*, Token*);
+void sqliteDetach(Parse*, Token*);
+int sqliteBtreeFactory(const sqlite *db, const char *zFilename,
+ int mode, int nPg, Btree **ppBtree);
+int sqliteFixInit(DbFixer*, Parse*, int, const char*, const Token*);
+int sqliteFixSrcList(DbFixer*, SrcList*);
+int sqliteFixSelect(DbFixer*, Select*);
+int sqliteFixExpr(DbFixer*, Expr*);
+int sqliteFixExprList(DbFixer*, ExprList*);
+int sqliteFixTriggerStep(DbFixer*, TriggerStep*);
+double sqliteAtoF(const char *z, const char **);
+char *sqlite_snprintf(int,char*,const char*,...);
+int sqliteFitsIn32Bits(const char *);
diff --git a/usr/src/cmd/svc/configd/sqlite/src/table.c b/usr/src/cmd/svc/configd/sqlite/src/table.c
new file mode 100644
index 0000000000..1cbbcb3b14
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/table.c
@@ -0,0 +1,206 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains the sqlite_get_table() and sqlite_free_table()
+** interface routines. These are just wrappers around the main
+** interface routine of sqlite_exec().
+**
+** These routines are in a separate file so that they will not be linked
+** if they are not used.
+*/
+#include <stdlib.h>
+#include <string.h>
+#include "sqliteInt.h"
+
+/*
+** This structure is used to pass data from sqlite_get_table() through
+** to the callback function it uses to build the result.
+*/
+typedef struct TabResult {
+ char **azResult;
+ char *zErrMsg;
+ int nResult;
+ int nAlloc;
+ int nRow;
+ int nColumn;
+ long nData;
+ int rc;
+} TabResult;
+
+/*
+** This routine is called once for each row in the result table. Its job
+** is to fill in the TabResult structure appropriately, allocating new
+** memory as necessary.
+*/
+static int sqlite_get_table_cb(void *pArg, int nCol, char **argv, char **colv){
+ TabResult *p = (TabResult*)pArg;
+ int need;
+ int i;
+ char *z;
+
+ /* Make sure there is enough space in p->azResult to hold everything
+ ** we need to remember from this invocation of the callback.
+ */
+ if( p->nRow==0 && argv!=0 ){
+ need = nCol*2;
+ }else{
+ need = nCol;
+ }
+ if( p->nData + need >= p->nAlloc ){
+ char **azNew;
+ p->nAlloc = p->nAlloc*2 + need + 1;
+ azNew = realloc( p->azResult, sizeof(char*)*p->nAlloc );
+ if( azNew==0 ){
+ p->rc = SQLITE_NOMEM;
+ return 1;
+ }
+ p->azResult = azNew;
+ }
+
+ /* If this is the first row, then generate an extra row containing
+ ** the names of all columns.
+ */
+ if( p->nRow==0 ){
+ p->nColumn = nCol;
+ for(i=0; i<nCol; i++){
+ if( colv[i]==0 ){
+ z = 0;
+ }else{
+ z = malloc( strlen(colv[i])+1 );
+ if( z==0 ){
+ p->rc = SQLITE_NOMEM;
+ return 1;
+ }
+ strcpy(z, colv[i]);
+ }
+ p->azResult[p->nData++] = z;
+ }
+ }else if( p->nColumn!=nCol ){
+ sqliteSetString(&p->zErrMsg,
+ "sqlite_get_table() called with two or more incompatible queries",
+ (char*)0);
+ p->rc = SQLITE_ERROR;
+ return 1;
+ }
+
+ /* Copy over the row data
+ */
+ if( argv!=0 ){
+ for(i=0; i<nCol; i++){
+ if( argv[i]==0 ){
+ z = 0;
+ }else{
+ z = malloc( strlen(argv[i])+1 );
+ if( z==0 ){
+ p->rc = SQLITE_NOMEM;
+ return 1;
+ }
+ strcpy(z, argv[i]);
+ }
+ p->azResult[p->nData++] = z;
+ }
+ p->nRow++;
+ }
+ return 0;
+}
+
+/*
+** Query the database. But instead of invoking a callback for each row,
+** obtain space from malloc() to hold the entire result and return the
+** whole table at the conclusion of the call.
+**
+** The result that is written to ***pazResult is held in memory obtained
+** from malloc(). But the caller cannot free this memory directly.
+** Instead, the entire table should be passed to sqlite_free_table() when
+** the calling procedure is finished using it.
+*/
+int sqlite_get_table(
+ sqlite *db, /* The database on which the SQL executes */
+ const char *zSql, /* The SQL to be executed */
+ char ***pazResult, /* Write the result table here */
+ int *pnRow, /* Write the number of rows in the result here */
+ int *pnColumn, /* Write the number of columns of result here */
+ char **pzErrMsg /* Write error messages here */
+){
+ int rc;
+ TabResult res;
+ if( pazResult==0 ){ return SQLITE_ERROR; }
+ *pazResult = 0;
+ if( pnColumn ) *pnColumn = 0;
+ if( pnRow ) *pnRow = 0;
+ res.zErrMsg = 0;
+ res.nResult = 0;
+ res.nRow = 0;
+ res.nColumn = 0;
+ res.nData = 1;
+ res.nAlloc = 20;
+ res.rc = SQLITE_OK;
+ res.azResult = malloc( sizeof(char*)*res.nAlloc );
+ if( res.azResult==0 ){
+ return SQLITE_NOMEM;
+ }
+ res.azResult[0] = 0;
+ rc = sqlite_exec(db, zSql, sqlite_get_table_cb, &res, pzErrMsg);
+ if( res.azResult ){
+ res.azResult[0] = (char*)res.nData;
+ }
+ if( rc==SQLITE_ABORT ){
+ sqlite_free_table(&res.azResult[1]);
+ if( res.zErrMsg ){
+ if( pzErrMsg ){
+ free(*pzErrMsg);
+ *pzErrMsg = res.zErrMsg;
+ sqliteStrRealloc(pzErrMsg);
+ }else{
+ sqliteFree(res.zErrMsg);
+ }
+ }
+ return res.rc;
+ }
+ sqliteFree(res.zErrMsg);
+ if( rc!=SQLITE_OK ){
+ sqlite_free_table(&res.azResult[1]);
+ return rc;
+ }
+ if( res.nAlloc>res.nData ){
+ char **azNew;
+ azNew = realloc( res.azResult, sizeof(char*)*(res.nData+1) );
+ if( azNew==0 ){
+ sqlite_free_table(&res.azResult[1]);
+ return SQLITE_NOMEM;
+ }
+ res.nAlloc = res.nData+1;
+ res.azResult = azNew;
+ }
+ *pazResult = &res.azResult[1];
+ if( pnColumn ) *pnColumn = res.nColumn;
+ if( pnRow ) *pnRow = res.nRow;
+ return rc;
+}
+
+/*
+** This routine frees the space the sqlite_get_table() malloced.
+*/
+void sqlite_free_table(
+ char **azResult /* Result returned from sqlite_get_table() */
+){
+ if( azResult ){
+ int i, n;
+ azResult--;
+ if( azResult==0 ) return;
+ n = (int)(long)azResult[0];
+ for(i=1; i<n; i++){ if( azResult[i] ) free(azResult[i]); }
+ free(azResult);
+ }
+}
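+
+/*
+** Illustrative usage sketch (not part of the original source). A caller
+** might use these two routines as follows; the handle "db" and the table
+** "t1" are assumed to exist, and <stdio.h> is assumed for printf():
+**
+**     char **azResult;
+**     char *zErr = 0;
+**     int nRow, nCol, i, j, rc;
+**
+**     rc = sqlite_get_table(db, "SELECT a, b FROM t1", &azResult,
+**                           &nRow, &nCol, &zErr);
+**     if( rc==SQLITE_OK ){
+**       for(i=1; i<=nRow; i++){
+**         for(j=0; j<nCol; j++){
+**           printf("%s ", azResult[i*nCol+j] ? azResult[i*nCol+j] : "NULL");
+**         }
+**         printf("\n");
+**       }
+**     }
+**     sqlite_free_table(azResult);
+**     if( zErr ) sqlite_freemem(zErr);
+**
+** Entries 0 through nCol-1 of azResult hold the column names; the data
+** rows follow, nCol entries per row.
+*/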
diff --git a/usr/src/cmd/svc/configd/sqlite/src/tclsqlite.c b/usr/src/cmd/svc/configd/sqlite/src/tclsqlite.c
new file mode 100644
index 0000000000..85d2029b53
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/tclsqlite.c
@@ -0,0 +1,1296 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** A TCL Interface to SQLite
+**
+** $Id: tclsqlite.c,v 1.59.2.1 2004/06/19 11:57:40 drh Exp $
+*/
+#ifndef NO_TCL /* Omit this whole file if TCL is unavailable */
+
+#include "sqliteInt.h"
+#include "tcl.h"
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+/*
+** If TCL uses UTF-8 and SQLite is configured to use iso8859, then we
+** have to do a translation when going between the two. Set the
+** UTF_TRANSLATION_NEEDED macro to indicate that we need to do
+** this translation.
+*/
+#if defined(TCL_UTF_MAX) && !defined(SQLITE_UTF8)
+# define UTF_TRANSLATION_NEEDED 1
+#endif
+
+/*
+** New SQL functions can be created as TCL scripts. Each such function
+** is described by an instance of the following structure.
+*/
+typedef struct SqlFunc SqlFunc;
+struct SqlFunc {
+ Tcl_Interp *interp; /* The TCL interpreter used to execute the function */
+ char *zScript; /* The script to be run */
+ SqlFunc *pNext; /* Next function on the list of them all */
+};
+
+/*
+** There is one instance of this structure for each SQLite database
+** that has been opened by the SQLite TCL interface.
+*/
+typedef struct SqliteDb SqliteDb;
+struct SqliteDb {
+ sqlite *db; /* The "real" database structure */
+ Tcl_Interp *interp; /* The interpreter used for this database */
+ char *zBusy; /* The busy callback routine */
+ char *zCommit; /* The commit hook callback routine */
+ char *zTrace; /* The trace callback routine */
+ char *zProgress; /* The progress callback routine */
+ char *zAuth; /* The authorization callback routine */
+ SqlFunc *pFunc; /* List of SQL functions */
+ int rc; /* Return code of most recent sqlite_exec() */
+};
+
+/*
+** An instance of this structure passes information through the sqlite
+** logic from the original TCL command into the callback routine.
+*/
+typedef struct CallbackData CallbackData;
+struct CallbackData {
+ Tcl_Interp *interp; /* The TCL interpreter */
+ char *zArray; /* The array into which data is written */
+ Tcl_Obj *pCode; /* The code to execute for each row */
+ int once; /* Set for first callback only */
+ int tcl_rc; /* Return code from TCL script */
+ int nColName; /* Number of entries in the azColName[] array */
+ char **azColName; /* Column names translated to UTF-8 */
+};
+
+#ifdef UTF_TRANSLATION_NEEDED
+/*
+** Called for each row of the result.
+**
+** This version is used when TCL expects UTF-8 data but the database
+** uses the ISO8859 format. A translation must occur from ISO8859 into
+** UTF-8.
+*/
+static int DbEvalCallback(
+ void *clientData, /* An instance of CallbackData */
+ int nCol, /* Number of columns in the result */
+ char ** azCol, /* Data for each column */
+ char ** azN /* Name for each column */
+){
+ CallbackData *cbData = (CallbackData*)clientData;
+ int i, rc;
+ Tcl_DString dCol;
+ Tcl_DStringInit(&dCol);
+ if( cbData->azColName==0 ){
+ assert( cbData->once );
+ cbData->once = 0;
+ if( cbData->zArray[0] ){
+ Tcl_SetVar2(cbData->interp, cbData->zArray, "*", "", 0);
+ }
+ cbData->azColName = malloc( nCol*sizeof(char*) );
+ if( cbData->azColName==0 ){ return 1; }
+ cbData->nColName = nCol;
+ for(i=0; i<nCol; i++){
+ Tcl_ExternalToUtfDString(NULL, azN[i], -1, &dCol);
+ cbData->azColName[i] = malloc( Tcl_DStringLength(&dCol) + 1 );
+ if( cbData->azColName[i] ){
+ strcpy(cbData->azColName[i], Tcl_DStringValue(&dCol));
+ }else{
+ return 1;
+ }
+ if( cbData->zArray[0] ){
+ Tcl_SetVar2(cbData->interp, cbData->zArray, "*",
+ Tcl_DStringValue(&dCol), TCL_LIST_ELEMENT|TCL_APPEND_VALUE);
+ if( azN[nCol]!=0 ){
+ Tcl_DString dType;
+ Tcl_DStringInit(&dType);
+ Tcl_DStringAppend(&dType, "typeof:", -1);
+ Tcl_DStringAppend(&dType, Tcl_DStringValue(&dCol), -1);
+ Tcl_DStringFree(&dCol);
+ Tcl_ExternalToUtfDString(NULL, azN[i+nCol], -1, &dCol);
+ Tcl_SetVar2(cbData->interp, cbData->zArray,
+ Tcl_DStringValue(&dType), Tcl_DStringValue(&dCol),
+ TCL_LIST_ELEMENT|TCL_APPEND_VALUE);
+ Tcl_DStringFree(&dType);
+ }
+ }
+
+ Tcl_DStringFree(&dCol);
+ }
+ }
+ if( azCol!=0 ){
+ if( cbData->zArray[0] ){
+ for(i=0; i<nCol; i++){
+ char *z = azCol[i];
+ if( z==0 ) z = "";
+ Tcl_DStringInit(&dCol);
+ Tcl_ExternalToUtfDString(NULL, z, -1, &dCol);
+ Tcl_SetVar2(cbData->interp, cbData->zArray, cbData->azColName[i],
+ Tcl_DStringValue(&dCol), 0);
+ Tcl_DStringFree(&dCol);
+ }
+ }else{
+ for(i=0; i<nCol; i++){
+ char *z = azCol[i];
+ if( z==0 ) z = "";
+ Tcl_DStringInit(&dCol);
+ Tcl_ExternalToUtfDString(NULL, z, -1, &dCol);
+ Tcl_SetVar(cbData->interp, cbData->azColName[i],
+ Tcl_DStringValue(&dCol), 0);
+ Tcl_DStringFree(&dCol);
+ }
+ }
+ }
+ rc = Tcl_EvalObj(cbData->interp, cbData->pCode);
+ if( rc==TCL_CONTINUE ) rc = TCL_OK;
+ cbData->tcl_rc = rc;
+ return rc!=TCL_OK;
+}
+#endif /* UTF_TRANSLATION_NEEDED */
+
+#ifndef UTF_TRANSLATION_NEEDED
+/*
+** Called for each row of the result.
+**
+** This version is used when either of the following is true:
+**
+** (1) This version of TCL uses UTF-8 and the data in the
+** SQLite database is already in the UTF-8 format.
+**
+** (2) This version of TCL uses ISO8859 and the data in the
+** SQLite database is already in the ISO8859 format.
+*/
+static int DbEvalCallback(
+ void *clientData, /* An instance of CallbackData */
+ int nCol, /* Number of columns in the result */
+ char ** azCol, /* Data for each column */
+ char ** azN /* Name for each column */
+){
+ CallbackData *cbData = (CallbackData*)clientData;
+ int i, rc;
+ if( azCol==0 || (cbData->once && cbData->zArray[0]) ){
+ Tcl_SetVar2(cbData->interp, cbData->zArray, "*", "", 0);
+ for(i=0; i<nCol; i++){
+ Tcl_SetVar2(cbData->interp, cbData->zArray, "*", azN[i],
+ TCL_LIST_ELEMENT|TCL_APPEND_VALUE);
+ if( azN[nCol] ){
+ char *z = sqlite_mprintf("typeof:%s", azN[i]);
+ Tcl_SetVar2(cbData->interp, cbData->zArray, z, azN[i+nCol],
+ TCL_LIST_ELEMENT|TCL_APPEND_VALUE);
+ sqlite_freemem(z);
+ }
+ }
+ cbData->once = 0;
+ }
+ if( azCol!=0 ){
+ if( cbData->zArray[0] ){
+ for(i=0; i<nCol; i++){
+ char *z = azCol[i];
+ if( z==0 ) z = "";
+ Tcl_SetVar2(cbData->interp, cbData->zArray, azN[i], z, 0);
+ }
+ }else{
+ for(i=0; i<nCol; i++){
+ char *z = azCol[i];
+ if( z==0 ) z = "";
+ Tcl_SetVar(cbData->interp, azN[i], z, 0);
+ }
+ }
+ }
+ rc = Tcl_EvalObj(cbData->interp, cbData->pCode);
+ if( rc==TCL_CONTINUE ) rc = TCL_OK;
+ cbData->tcl_rc = rc;
+ return rc!=TCL_OK;
+}
+#endif
+
+/*
+** This is an alternative callback for database queries. Instead
+** of invoking a TCL script to handle the result, this callback just
+** appends each column of the result to a list. After the query
+** is complete, the list is returned.
+*/
+static int DbEvalCallback2(
+ void *clientData, /* An instance of CallbackData */
+ int nCol, /* Number of columns in the result */
+ char ** azCol, /* Data for each column */
+ char ** azN /* Name for each column */
+){
+ Tcl_Obj *pList = (Tcl_Obj*)clientData;
+ int i;
+ if( azCol==0 ) return 0;
+ for(i=0; i<nCol; i++){
+ Tcl_Obj *pElem;
+ if( azCol[i] && *azCol[i] ){
+#ifdef UTF_TRANSLATION_NEEDED
+ Tcl_DString dCol;
+ Tcl_DStringInit(&dCol);
+ Tcl_ExternalToUtfDString(NULL, azCol[i], -1, &dCol);
+ pElem = Tcl_NewStringObj(Tcl_DStringValue(&dCol), -1);
+ Tcl_DStringFree(&dCol);
+#else
+ pElem = Tcl_NewStringObj(azCol[i], -1);
+#endif
+ }else{
+ pElem = Tcl_NewObj();
+ }
+ Tcl_ListObjAppendElement(0, pList, pElem);
+ }
+ return 0;
+}
+
+/*
+** This is a second alternative callback for database queries. The
+** first column of the first row of the result is made the TCL result.
+*/
+static int DbEvalCallback3(
+ void *clientData, /* An instance of CallbackData */
+ int nCol, /* Number of columns in the result */
+ char ** azCol, /* Data for each column */
+ char ** azN /* Name for each column */
+){
+ Tcl_Interp *interp = (Tcl_Interp*)clientData;
+ Tcl_Obj *pElem;
+ if( azCol==0 ) return 1;
+ if( nCol==0 ) return 1;
+#ifdef UTF_TRANSLATION_NEEDED
+ {
+ Tcl_DString dCol;
+ Tcl_DStringInit(&dCol);
+ Tcl_ExternalToUtfDString(NULL, azCol[0], -1, &dCol);
+ pElem = Tcl_NewStringObj(Tcl_DStringValue(&dCol), -1);
+ Tcl_DStringFree(&dCol);
+ }
+#else
+ pElem = Tcl_NewStringObj(azCol[0], -1);
+#endif
+ Tcl_SetObjResult(interp, pElem);
+ return 1;
+}
+
+/*
+** Called when the command is deleted.
+*/
+static void DbDeleteCmd(void *db){
+ SqliteDb *pDb = (SqliteDb*)db;
+ sqlite_close(pDb->db);
+ while( pDb->pFunc ){
+ SqlFunc *pFunc = pDb->pFunc;
+ pDb->pFunc = pFunc->pNext;
+ Tcl_Free((char*)pFunc);
+ }
+ if( pDb->zBusy ){
+ Tcl_Free(pDb->zBusy);
+ }
+ if( pDb->zTrace ){
+ Tcl_Free(pDb->zTrace);
+ }
+ if( pDb->zAuth ){
+ Tcl_Free(pDb->zAuth);
+ }
+ Tcl_Free((char*)pDb);
+}
+
+/*
+** This routine is called when a database file is locked while trying
+** to execute SQL.
+*/
+static int DbBusyHandler(void *cd, const char *zTable, int nTries){
+ SqliteDb *pDb = (SqliteDb*)cd;
+ int rc;
+ char zVal[30];
+ char *zCmd;
+ Tcl_DString cmd;
+
+ Tcl_DStringInit(&cmd);
+ Tcl_DStringAppend(&cmd, pDb->zBusy, -1);
+ Tcl_DStringAppendElement(&cmd, zTable);
+ sprintf(zVal, " %d", nTries);
+ Tcl_DStringAppend(&cmd, zVal, -1);
+ zCmd = Tcl_DStringValue(&cmd);
+ rc = Tcl_Eval(pDb->interp, zCmd);
+ Tcl_DStringFree(&cmd);
+ if( rc!=TCL_OK || atoi(Tcl_GetStringResult(pDb->interp)) ){
+ return 0;
+ }
+ return 1;
+}
+
+/*
+** This routine is invoked as the 'progress callback' for the database.
+*/
+static int DbProgressHandler(void *cd){
+ SqliteDb *pDb = (SqliteDb*)cd;
+ int rc;
+
+ assert( pDb->zProgress );
+ rc = Tcl_Eval(pDb->interp, pDb->zProgress);
+ if( rc!=TCL_OK || atoi(Tcl_GetStringResult(pDb->interp)) ){
+ return 1;
+ }
+ return 0;
+}
+
+/*
+** This routine is called by the SQLite trace handler whenever a new
+** block of SQL is executed. The TCL script in pDb->zTrace is executed.
+*/
+static void DbTraceHandler(void *cd, const char *zSql){
+ SqliteDb *pDb = (SqliteDb*)cd;
+ Tcl_DString str;
+
+ Tcl_DStringInit(&str);
+ Tcl_DStringAppend(&str, pDb->zTrace, -1);
+ Tcl_DStringAppendElement(&str, zSql);
+ Tcl_Eval(pDb->interp, Tcl_DStringValue(&str));
+ Tcl_DStringFree(&str);
+ Tcl_ResetResult(pDb->interp);
+}
+
+/*
+** This routine is called when a transaction is committed. The
+** TCL script in pDb->zCommit is executed. If it returns non-zero or
+** if it throws an exception, the transaction is rolled back instead
+** of being committed.
+*/
+static int DbCommitHandler(void *cd){
+ SqliteDb *pDb = (SqliteDb*)cd;
+ int rc;
+
+ rc = Tcl_Eval(pDb->interp, pDb->zCommit);
+ if( rc!=TCL_OK || atoi(Tcl_GetStringResult(pDb->interp)) ){
+ return 1;
+ }
+ return 0;
+}
+
+/*
+** This routine is called to evaluate an SQL function implemented
+** using TCL script.
+*/
+static void tclSqlFunc(sqlite_func *context, int argc, const char **argv){
+ SqlFunc *p = sqlite_user_data(context);
+ Tcl_DString cmd;
+ int i;
+ int rc;
+
+ Tcl_DStringInit(&cmd);
+ Tcl_DStringAppend(&cmd, p->zScript, -1);
+ for(i=0; i<argc; i++){
+ Tcl_DStringAppendElement(&cmd, argv[i] ? argv[i] : "");
+ }
+ rc = Tcl_Eval(p->interp, Tcl_DStringValue(&cmd));
+ if( rc ){
+ sqlite_set_result_error(context, Tcl_GetStringResult(p->interp), -1);
+ }else{
+ sqlite_set_result_string(context, Tcl_GetStringResult(p->interp), -1);
+ }
+}
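+
+/*
+** Illustrative Tcl usage sketch (not part of the original source). A new
+** SQL function can be defined from a Tcl script roughly as follows; the
+** procedure name "doubleit" is made up for the example:
+**
+**     proc doubleit {x} {expr {$x*2}}
+**     db function doubleit doubleit
+**     db eval {SELECT doubleit(a) FROM t1}
+**
+** Each invocation of doubleit() in SQL appends its arguments to the
+** registered script and evaluates the resulting command in the Tcl
+** interpreter, as implemented by tclSqlFunc() above.
+*/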
+#ifndef SQLITE_OMIT_AUTHORIZATION
+/*
+** This is the authorization callback function. It appends the authorization
+** type code and the arguments to the user's callback script, then evaluates
+** the resulting command in the interpreter. The reply is examined to
+** determine whether the authorization succeeds or fails.
+*/
+static int auth_callback(
+ void *pArg,
+ int code,
+ const char *zArg1,
+ const char *zArg2,
+ const char *zArg3,
+ const char *zArg4
+){
+ char *zCode;
+ Tcl_DString str;
+ int rc;
+ const char *zReply;
+ SqliteDb *pDb = (SqliteDb*)pArg;
+
+ switch( code ){
+ case SQLITE_COPY : zCode="SQLITE_COPY"; break;
+ case SQLITE_CREATE_INDEX : zCode="SQLITE_CREATE_INDEX"; break;
+ case SQLITE_CREATE_TABLE : zCode="SQLITE_CREATE_TABLE"; break;
+ case SQLITE_CREATE_TEMP_INDEX : zCode="SQLITE_CREATE_TEMP_INDEX"; break;
+ case SQLITE_CREATE_TEMP_TABLE : zCode="SQLITE_CREATE_TEMP_TABLE"; break;
+ case SQLITE_CREATE_TEMP_TRIGGER: zCode="SQLITE_CREATE_TEMP_TRIGGER"; break;
+ case SQLITE_CREATE_TEMP_VIEW : zCode="SQLITE_CREATE_TEMP_VIEW"; break;
+ case SQLITE_CREATE_TRIGGER : zCode="SQLITE_CREATE_TRIGGER"; break;
+ case SQLITE_CREATE_VIEW : zCode="SQLITE_CREATE_VIEW"; break;
+ case SQLITE_DELETE : zCode="SQLITE_DELETE"; break;
+ case SQLITE_DROP_INDEX : zCode="SQLITE_DROP_INDEX"; break;
+ case SQLITE_DROP_TABLE : zCode="SQLITE_DROP_TABLE"; break;
+ case SQLITE_DROP_TEMP_INDEX : zCode="SQLITE_DROP_TEMP_INDEX"; break;
+ case SQLITE_DROP_TEMP_TABLE : zCode="SQLITE_DROP_TEMP_TABLE"; break;
+ case SQLITE_DROP_TEMP_TRIGGER : zCode="SQLITE_DROP_TEMP_TRIGGER"; break;
+ case SQLITE_DROP_TEMP_VIEW : zCode="SQLITE_DROP_TEMP_VIEW"; break;
+ case SQLITE_DROP_TRIGGER : zCode="SQLITE_DROP_TRIGGER"; break;
+ case SQLITE_DROP_VIEW : zCode="SQLITE_DROP_VIEW"; break;
+ case SQLITE_INSERT : zCode="SQLITE_INSERT"; break;
+ case SQLITE_PRAGMA : zCode="SQLITE_PRAGMA"; break;
+ case SQLITE_READ : zCode="SQLITE_READ"; break;
+ case SQLITE_SELECT : zCode="SQLITE_SELECT"; break;
+ case SQLITE_TRANSACTION : zCode="SQLITE_TRANSACTION"; break;
+ case SQLITE_UPDATE : zCode="SQLITE_UPDATE"; break;
+ case SQLITE_ATTACH : zCode="SQLITE_ATTACH"; break;
+ case SQLITE_DETACH : zCode="SQLITE_DETACH"; break;
+ default : zCode="????"; break;
+ }
+ Tcl_DStringInit(&str);
+ Tcl_DStringAppend(&str, pDb->zAuth, -1);
+ Tcl_DStringAppendElement(&str, zCode);
+ Tcl_DStringAppendElement(&str, zArg1 ? zArg1 : "");
+ Tcl_DStringAppendElement(&str, zArg2 ? zArg2 : "");
+ Tcl_DStringAppendElement(&str, zArg3 ? zArg3 : "");
+ Tcl_DStringAppendElement(&str, zArg4 ? zArg4 : "");
+ rc = Tcl_GlobalEval(pDb->interp, Tcl_DStringValue(&str));
+ Tcl_DStringFree(&str);
+ zReply = Tcl_GetStringResult(pDb->interp);
+ if( strcmp(zReply,"SQLITE_OK")==0 ){
+ rc = SQLITE_OK;
+ }else if( strcmp(zReply,"SQLITE_DENY")==0 ){
+ rc = SQLITE_DENY;
+ }else if( strcmp(zReply,"SQLITE_IGNORE")==0 ){
+ rc = SQLITE_IGNORE;
+ }else{
+ rc = 999;
+ }
+ return rc;
+}
+#endif /* SQLITE_OMIT_AUTHORIZATION */
+
+/*
+** The "sqlite" command below creates a new Tcl command for each
+** connection it opens to an SQLite database. This routine is invoked
+** whenever one of those connection-specific commands is executed
+** in Tcl. For example, if you run Tcl code like this:
+**
+** sqlite db1 "my_database"
+** db1 close
+**
+** The first command opens a connection to the "my_database" database
+** and calls that connection "db1". The second command causes this
+** subroutine to be invoked.
+*/
+static int DbObjCmd(void *cd, Tcl_Interp *interp, int objc,Tcl_Obj *const*objv){
+ SqliteDb *pDb = (SqliteDb*)cd;
+ int choice;
+ int rc = TCL_OK;
+ static const char *DB_strs[] = {
+ "authorizer", "busy", "changes",
+ "close", "commit_hook", "complete",
+ "errorcode", "eval", "function",
+ "last_insert_rowid", "last_statement_changes", "onecolumn",
+ "progress", "rekey", "timeout",
+ "trace",
+ 0
+ };
+ enum DB_enum {
+ DB_AUTHORIZER, DB_BUSY, DB_CHANGES,
+ DB_CLOSE, DB_COMMIT_HOOK, DB_COMPLETE,
+ DB_ERRORCODE, DB_EVAL, DB_FUNCTION,
+ DB_LAST_INSERT_ROWID, DB_LAST_STATEMENT_CHANGES, DB_ONECOLUMN,
+ DB_PROGRESS, DB_REKEY, DB_TIMEOUT,
+ DB_TRACE
+ };
+
+ if( objc<2 ){
+ Tcl_WrongNumArgs(interp, 1, objv, "SUBCOMMAND ...");
+ return TCL_ERROR;
+ }
+ if( Tcl_GetIndexFromObj(interp, objv[1], DB_strs, "option", 0, &choice) ){
+ return TCL_ERROR;
+ }
+
+ switch( (enum DB_enum)choice ){
+
+ /* $db authorizer ?CALLBACK?
+ **
+ ** Invoke the given callback to authorize each SQL operation as it is
+ ** compiled. 5 arguments are appended to the callback before it is
+ ** invoked:
+ **
+ ** (1) The authorization type (ex: SQLITE_CREATE_TABLE, SQLITE_INSERT, ...)
+ ** (2) First descriptive name (depends on authorization type)
+ ** (3) Second descriptive name
+ ** (4) Name of the database (ex: "main", "temp")
+ ** (5) Name of trigger that is doing the access
+ **
+  ** The callback should return one of the following strings: SQLITE_OK,
+ ** SQLITE_IGNORE, or SQLITE_DENY. Any other return value is an error.
+ **
+ ** If this method is invoked with no arguments, the current authorization
+ ** callback string is returned.
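+  **
+  ** An illustrative Tcl sketch (the procedure name "authcb" is hypothetical):
+  **
+  **     proc authcb {code arg1 arg2 dbname trigger} {
+  **       if {$code == "SQLITE_DELETE"} { return SQLITE_DENY }
+  **       return SQLITE_OK
+  **     }
+  **     db1 authorizer authcb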
+ */
+ case DB_AUTHORIZER: {
+ if( objc>3 ){
+ Tcl_WrongNumArgs(interp, 2, objv, "?CALLBACK?");
+ }else if( objc==2 ){
+ if( pDb->zAuth ){
+ Tcl_AppendResult(interp, pDb->zAuth, 0);
+ }
+ }else{
+ char *zAuth;
+ int len;
+ if( pDb->zAuth ){
+ Tcl_Free(pDb->zAuth);
+ }
+ zAuth = Tcl_GetStringFromObj(objv[2], &len);
+ if( zAuth && len>0 ){
+ pDb->zAuth = Tcl_Alloc( len + 1 );
+ strcpy(pDb->zAuth, zAuth);
+ }else{
+ pDb->zAuth = 0;
+ }
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ if( pDb->zAuth ){
+ pDb->interp = interp;
+ sqlite_set_authorizer(pDb->db, auth_callback, pDb);
+ }else{
+ sqlite_set_authorizer(pDb->db, 0, 0);
+ }
+#endif
+ }
+ break;
+ }
+
+ /* $db busy ?CALLBACK?
+ **
+ ** Invoke the given callback if an SQL statement attempts to open
+ ** a locked database file.
+ */
+ case DB_BUSY: {
+ if( objc>3 ){
+ Tcl_WrongNumArgs(interp, 2, objv, "CALLBACK");
+ return TCL_ERROR;
+ }else if( objc==2 ){
+ if( pDb->zBusy ){
+ Tcl_AppendResult(interp, pDb->zBusy, 0);
+ }
+ }else{
+ char *zBusy;
+ int len;
+ if( pDb->zBusy ){
+ Tcl_Free(pDb->zBusy);
+ }
+ zBusy = Tcl_GetStringFromObj(objv[2], &len);
+ if( zBusy && len>0 ){
+ pDb->zBusy = Tcl_Alloc( len + 1 );
+ strcpy(pDb->zBusy, zBusy);
+ }else{
+ pDb->zBusy = 0;
+ }
+ if( pDb->zBusy ){
+ pDb->interp = interp;
+ sqlite_busy_handler(pDb->db, DbBusyHandler, pDb);
+ }else{
+ sqlite_busy_handler(pDb->db, 0, 0);
+ }
+ }
+ break;
+ }
+
+ /* $db progress ?N CALLBACK?
+ **
+ ** Invoke the given callback every N virtual machine opcodes while executing
+ ** queries.
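+  **
+  ** An illustrative Tcl sketch (the progress script is arbitrary):
+  **
+  **     db1 progress 1000 {puts -nonewline .; flush stdout}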
+ */
+ case DB_PROGRESS: {
+ if( objc==2 ){
+ if( pDb->zProgress ){
+ Tcl_AppendResult(interp, pDb->zProgress, 0);
+ }
+ }else if( objc==4 ){
+ char *zProgress;
+ int len;
+ int N;
+ if( TCL_OK!=Tcl_GetIntFromObj(interp, objv[2], &N) ){
+ return TCL_ERROR;
+ };
+ if( pDb->zProgress ){
+ Tcl_Free(pDb->zProgress);
+ }
+ zProgress = Tcl_GetStringFromObj(objv[3], &len);
+ if( zProgress && len>0 ){
+ pDb->zProgress = Tcl_Alloc( len + 1 );
+ strcpy(pDb->zProgress, zProgress);
+ }else{
+ pDb->zProgress = 0;
+ }
+#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
+ if( pDb->zProgress ){
+ pDb->interp = interp;
+ sqlite_progress_handler(pDb->db, N, DbProgressHandler, pDb);
+ }else{
+ sqlite_progress_handler(pDb->db, 0, 0, 0);
+ }
+#endif
+ }else{
+ Tcl_WrongNumArgs(interp, 2, objv, "N CALLBACK");
+ return TCL_ERROR;
+ }
+ break;
+ }
+
+ /*
+ ** $db changes
+ **
+ ** Return the number of rows that were modified, inserted, or deleted by
+ ** the most recent "eval".
+ */
+ case DB_CHANGES: {
+ Tcl_Obj *pResult;
+ int nChange;
+ if( objc!=2 ){
+ Tcl_WrongNumArgs(interp, 2, objv, "");
+ return TCL_ERROR;
+ }
+ nChange = sqlite_changes(pDb->db);
+ pResult = Tcl_GetObjResult(interp);
+ Tcl_SetIntObj(pResult, nChange);
+ break;
+ }
+
+ /*
+ ** $db last_statement_changes
+ **
+ ** Return the number of rows that were modified, inserted, or deleted by
+  ** the last statement to complete execution (excluding changes due to
+  ** triggers).
+ */
+ case DB_LAST_STATEMENT_CHANGES: {
+ Tcl_Obj *pResult;
+ int lsChange;
+ if( objc!=2 ){
+ Tcl_WrongNumArgs(interp, 2, objv, "");
+ return TCL_ERROR;
+ }
+ lsChange = sqlite_last_statement_changes(pDb->db);
+ pResult = Tcl_GetObjResult(interp);
+ Tcl_SetIntObj(pResult, lsChange);
+ break;
+ }
+
+ /* $db close
+ **
+  ** Shut down the database connection.
+ */
+ case DB_CLOSE: {
+ Tcl_DeleteCommand(interp, Tcl_GetStringFromObj(objv[0], 0));
+ break;
+ }
+
+ /* $db commit_hook ?CALLBACK?
+ **
+ ** Invoke the given callback just before committing every SQL transaction.
+ ** If the callback throws an exception or returns non-zero, then the
+ ** transaction is aborted. If CALLBACK is an empty string, the callback
+ ** is disabled.
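+  **
+  ** An illustrative Tcl sketch (the procedure name is hypothetical):
+  **
+  **     proc on_commit {} { puts "committing"; return 0 }
+  **     db1 commit_hook on_commit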
+ */
+ case DB_COMMIT_HOOK: {
+ if( objc>3 ){
+ Tcl_WrongNumArgs(interp, 2, objv, "?CALLBACK?");
+ }else if( objc==2 ){
+ if( pDb->zCommit ){
+ Tcl_AppendResult(interp, pDb->zCommit, 0);
+ }
+ }else{
+ char *zCommit;
+ int len;
+ if( pDb->zCommit ){
+ Tcl_Free(pDb->zCommit);
+ }
+ zCommit = Tcl_GetStringFromObj(objv[2], &len);
+ if( zCommit && len>0 ){
+ pDb->zCommit = Tcl_Alloc( len + 1 );
+ strcpy(pDb->zCommit, zCommit);
+ }else{
+ pDb->zCommit = 0;
+ }
+ if( pDb->zCommit ){
+ pDb->interp = interp;
+ sqlite_commit_hook(pDb->db, DbCommitHandler, pDb);
+ }else{
+ sqlite_commit_hook(pDb->db, 0, 0);
+ }
+ }
+ break;
+ }
+
+ /* $db complete SQL
+ **
+ ** Return TRUE if SQL is a complete SQL statement. Return FALSE if
+ ** additional lines of input are needed. This is similar to the
+ ** built-in "info complete" command of Tcl.
+ */
+ case DB_COMPLETE: {
+ Tcl_Obj *pResult;
+ int isComplete;
+ if( objc!=3 ){
+ Tcl_WrongNumArgs(interp, 2, objv, "SQL");
+ return TCL_ERROR;
+ }
+ isComplete = sqlite_complete( Tcl_GetStringFromObj(objv[2], 0) );
+ pResult = Tcl_GetObjResult(interp);
+ Tcl_SetBooleanObj(pResult, isComplete);
+ break;
+ }
+
+ /*
+ ** $db errorcode
+ **
+ ** Return the numeric error code that was returned by the most recent
+ ** call to sqlite_exec().
+ */
+ case DB_ERRORCODE: {
+ Tcl_SetObjResult(interp, Tcl_NewIntObj(pDb->rc));
+ break;
+ }
+
+ /*
+ ** $db eval $sql ?array { ...code... }?
+ **
+ ** The SQL statement in $sql is evaluated. For each row, the values are
+ ** placed in elements of the array named "array" and ...code... is executed.
+ ** If "array" and "code" are omitted, then no callback is every invoked.
+ ** If "array" is an empty string, then the values are placed in variables
+ ** that have the same name as the fields extracted by the query.
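+  **
+  ** An illustrative Tcl sketch (table and column names are hypothetical):
+  **
+  **     db1 eval {SELECT name, age FROM people} row {
+  **       puts "$row(name) is $row(age)"
+  **     }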
+ */
+ case DB_EVAL: {
+ CallbackData cbData;
+ char *zErrMsg;
+ char *zSql;
+#ifdef UTF_TRANSLATION_NEEDED
+ Tcl_DString dSql;
+ int i;
+#endif
+
+ if( objc!=5 && objc!=3 ){
+ Tcl_WrongNumArgs(interp, 2, objv, "SQL ?ARRAY-NAME CODE?");
+ return TCL_ERROR;
+ }
+ pDb->interp = interp;
+ zSql = Tcl_GetStringFromObj(objv[2], 0);
+#ifdef UTF_TRANSLATION_NEEDED
+ Tcl_DStringInit(&dSql);
+ Tcl_UtfToExternalDString(NULL, zSql, -1, &dSql);
+ zSql = Tcl_DStringValue(&dSql);
+#endif
+ Tcl_IncrRefCount(objv[2]);
+ if( objc==5 ){
+ cbData.interp = interp;
+ cbData.once = 1;
+ cbData.zArray = Tcl_GetStringFromObj(objv[3], 0);
+ cbData.pCode = objv[4];
+ cbData.tcl_rc = TCL_OK;
+ cbData.nColName = 0;
+ cbData.azColName = 0;
+ zErrMsg = 0;
+ Tcl_IncrRefCount(objv[3]);
+ Tcl_IncrRefCount(objv[4]);
+ rc = sqlite_exec(pDb->db, zSql, DbEvalCallback, &cbData, &zErrMsg);
+ Tcl_DecrRefCount(objv[4]);
+ Tcl_DecrRefCount(objv[3]);
+ if( cbData.tcl_rc==TCL_BREAK ){ cbData.tcl_rc = TCL_OK; }
+ }else{
+ Tcl_Obj *pList = Tcl_NewObj();
+ cbData.tcl_rc = TCL_OK;
+ rc = sqlite_exec(pDb->db, zSql, DbEvalCallback2, pList, &zErrMsg);
+ Tcl_SetObjResult(interp, pList);
+ }
+ pDb->rc = rc;
+ if( rc==SQLITE_ABORT ){
+ if( zErrMsg ) free(zErrMsg);
+ rc = cbData.tcl_rc;
+ }else if( zErrMsg ){
+ Tcl_SetResult(interp, zErrMsg, TCL_VOLATILE);
+ free(zErrMsg);
+ rc = TCL_ERROR;
+ }else if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, sqlite_error_string(rc), 0);
+ rc = TCL_ERROR;
+ }else{
+ }
+ Tcl_DecrRefCount(objv[2]);
+#ifdef UTF_TRANSLATION_NEEDED
+ Tcl_DStringFree(&dSql);
+ if( objc==5 && cbData.azColName ){
+ for(i=0; i<cbData.nColName; i++){
+ if( cbData.azColName[i] ) free(cbData.azColName[i]);
+ }
+ free(cbData.azColName);
+ cbData.azColName = 0;
+ }
+#endif
+ return rc;
+ }
+
+ /*
+ ** $db function NAME SCRIPT
+ **
+ ** Create a new SQL function called NAME. Whenever that function is
+ ** called, invoke SCRIPT to evaluate the function.
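+  **
+  ** An illustrative Tcl sketch (all names are hypothetical).  The SQL
+  ** arguments are appended to SCRIPT before it is evaluated:
+  **
+  **     proc tcl_concat {a b} { return "$a$b" }
+  **     db1 function concat2 tcl_concat
+  **     db1 eval {SELECT concat2(first, last) FROM people}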
+ */
+ case DB_FUNCTION: {
+ SqlFunc *pFunc;
+ char *zName;
+ char *zScript;
+ int nScript;
+ if( objc!=4 ){
+ Tcl_WrongNumArgs(interp, 2, objv, "NAME SCRIPT");
+ return TCL_ERROR;
+ }
+ zName = Tcl_GetStringFromObj(objv[2], 0);
+ zScript = Tcl_GetStringFromObj(objv[3], &nScript);
+ pFunc = (SqlFunc*)Tcl_Alloc( sizeof(*pFunc) + nScript + 1 );
+ if( pFunc==0 ) return TCL_ERROR;
+ pFunc->interp = interp;
+ pFunc->pNext = pDb->pFunc;
+ pFunc->zScript = (char*)&pFunc[1];
+ strcpy(pFunc->zScript, zScript);
+ sqlite_create_function(pDb->db, zName, -1, tclSqlFunc, pFunc);
+ sqlite_function_type(pDb->db, zName, SQLITE_NUMERIC);
+ break;
+ }
+
+ /*
+ ** $db last_insert_rowid
+ **
+ ** Return an integer which is the ROWID for the most recent insert.
+ */
+ case DB_LAST_INSERT_ROWID: {
+ Tcl_Obj *pResult;
+ int rowid;
+ if( objc!=2 ){
+ Tcl_WrongNumArgs(interp, 2, objv, "");
+ return TCL_ERROR;
+ }
+ rowid = sqlite_last_insert_rowid(pDb->db);
+ pResult = Tcl_GetObjResult(interp);
+ Tcl_SetIntObj(pResult, rowid);
+ break;
+ }
+
+ /*
+ ** $db onecolumn SQL
+ **
+ ** Return a single column from a single row of the given SQL query.
+ */
+ case DB_ONECOLUMN: {
+ char *zSql;
+ char *zErrMsg = 0;
+ if( objc!=3 ){
+ Tcl_WrongNumArgs(interp, 2, objv, "SQL");
+ return TCL_ERROR;
+ }
+ zSql = Tcl_GetStringFromObj(objv[2], 0);
+ rc = sqlite_exec(pDb->db, zSql, DbEvalCallback3, interp, &zErrMsg);
+ if( rc==SQLITE_ABORT ){
+ rc = SQLITE_OK;
+ }else if( zErrMsg ){
+ Tcl_SetResult(interp, zErrMsg, TCL_VOLATILE);
+ free(zErrMsg);
+ rc = TCL_ERROR;
+ }else if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, sqlite_error_string(rc), 0);
+ rc = TCL_ERROR;
+ }
+ break;
+ }
+
+ /*
+ ** $db rekey KEY
+ **
+ ** Change the encryption key on the currently open database.
+ */
+ case DB_REKEY: {
+ int nKey;
+ void *pKey;
+ if( objc!=3 ){
+ Tcl_WrongNumArgs(interp, 2, objv, "KEY");
+ return TCL_ERROR;
+ }
+ pKey = Tcl_GetByteArrayFromObj(objv[2], &nKey);
+#ifdef SQLITE_HAS_CODEC
+ rc = sqlite_rekey(pDb->db, pKey, nKey);
+ if( rc ){
+ Tcl_AppendResult(interp, sqlite_error_string(rc), 0);
+ rc = TCL_ERROR;
+ }
+#endif
+ break;
+ }
+
+ /*
+  ** $db timeout MILLISECONDS
+ **
+ ** Delay for the number of milliseconds specified when a file is locked.
+ */
+ case DB_TIMEOUT: {
+ int ms;
+ if( objc!=3 ){
+ Tcl_WrongNumArgs(interp, 2, objv, "MILLISECONDS");
+ return TCL_ERROR;
+ }
+ if( Tcl_GetIntFromObj(interp, objv[2], &ms) ) return TCL_ERROR;
+ sqlite_busy_timeout(pDb->db, ms);
+ break;
+ }
+
+ /* $db trace ?CALLBACK?
+ **
+ ** Make arrangements to invoke the CALLBACK routine for each SQL statement
+ ** that is executed. The text of the SQL is appended to CALLBACK before
+ ** it is executed.
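+  **
+  ** An illustrative Tcl sketch (the procedure name is hypothetical):
+  **
+  **     proc log_sql {sql} { puts stderr "SQL: $sql" }
+  **     db1 trace log_sql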
+ */
+ case DB_TRACE: {
+ if( objc>3 ){
+ Tcl_WrongNumArgs(interp, 2, objv, "?CALLBACK?");
+ }else if( objc==2 ){
+ if( pDb->zTrace ){
+ Tcl_AppendResult(interp, pDb->zTrace, 0);
+ }
+ }else{
+ char *zTrace;
+ int len;
+ if( pDb->zTrace ){
+ Tcl_Free(pDb->zTrace);
+ }
+ zTrace = Tcl_GetStringFromObj(objv[2], &len);
+ if( zTrace && len>0 ){
+ pDb->zTrace = Tcl_Alloc( len + 1 );
+ strcpy(pDb->zTrace, zTrace);
+ }else{
+ pDb->zTrace = 0;
+ }
+ if( pDb->zTrace ){
+ pDb->interp = interp;
+ sqlite_trace(pDb->db, DbTraceHandler, pDb);
+ }else{
+ sqlite_trace(pDb->db, 0, 0);
+ }
+ }
+ break;
+ }
+
+ } /* End of the SWITCH statement */
+ return rc;
+}
+
+/*
+** sqlite DBNAME FILENAME ?MODE? ?-key KEY?
+**
+** This is the main Tcl command. When the "sqlite" Tcl command is
+** invoked, this routine runs to process that command.
+**
+** The first argument, DBNAME, is an arbitrary name for a new
+** database connection. This command creates a new command named
+** DBNAME that is used to control that connection. The database
+** connection is deleted when the DBNAME command is deleted.
+**
+** The second argument is the name of the database file to be opened.
+**
+** For testing purposes, we also support the following:
+**
+** sqlite -encoding
+**
+** Return the encoding used by LIKE and GLOB operators. Choices
+** are UTF-8 and iso8859.
+**
+** sqlite -version
+**
+** Return the version number of the SQLite library.
+**
+**       sqlite -has-codec
+**
+**          Return "1" if compiled with the encryption codec.  Return "0"
+**          if not.
+**
+** sqlite -tcl-uses-utf
+**
+**        Return "1" if compiled with a Tcl that uses UTF-8.  Return "0" if
+** not. Used by tests to make sure the library was compiled
+** correctly.
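+**
+** An illustrative Tcl sketch (the file and table names are hypothetical):
+**
+**     sqlite db1 ./test.db
+**     db1 eval {CREATE TABLE t1(a,b)}
+**     db1 close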
+*/
+static int DbMain(void *cd, Tcl_Interp *interp, int objc,Tcl_Obj *const*objv){
+ int mode;
+ SqliteDb *p;
+ void *pKey = 0;
+ int nKey = 0;
+ const char *zArg;
+ char *zErrMsg;
+ const char *zFile;
+ char zBuf[80];
+ if( objc==2 ){
+ zArg = Tcl_GetStringFromObj(objv[1], 0);
+ if( strcmp(zArg,"-encoding")==0 ){
+ Tcl_AppendResult(interp,sqlite_encoding,0);
+ return TCL_OK;
+ }
+ if( strcmp(zArg,"-version")==0 ){
+ Tcl_AppendResult(interp,sqlite_version,0);
+ return TCL_OK;
+ }
+ if( strcmp(zArg,"-has-codec")==0 ){
+#ifdef SQLITE_HAS_CODEC
+ Tcl_AppendResult(interp,"1",0);
+#else
+ Tcl_AppendResult(interp,"0",0);
+#endif
+ return TCL_OK;
+ }
+ if( strcmp(zArg,"-tcl-uses-utf")==0 ){
+#ifdef TCL_UTF_MAX
+ Tcl_AppendResult(interp,"1",0);
+#else
+ Tcl_AppendResult(interp,"0",0);
+#endif
+ return TCL_OK;
+ }
+ }
+ if( objc==5 || objc==6 ){
+ zArg = Tcl_GetStringFromObj(objv[objc-2], 0);
+ if( strcmp(zArg,"-key")==0 ){
+ pKey = Tcl_GetByteArrayFromObj(objv[objc-1], &nKey);
+ objc -= 2;
+ }
+ }
+ if( objc!=3 && objc!=4 ){
+ Tcl_WrongNumArgs(interp, 1, objv,
+#ifdef SQLITE_HAS_CODEC
+ "HANDLE FILENAME ?-key CODEC-KEY?"
+#else
+ "HANDLE FILENAME ?MODE?"
+#endif
+ );
+ return TCL_ERROR;
+ }
+ if( objc==3 ){
+ mode = 0666;
+ }else if( Tcl_GetIntFromObj(interp, objv[3], &mode)!=TCL_OK ){
+ return TCL_ERROR;
+ }
+ zErrMsg = 0;
+ p = (SqliteDb*)Tcl_Alloc( sizeof(*p) );
+ if( p==0 ){
+ Tcl_SetResult(interp, "malloc failed", TCL_STATIC);
+ return TCL_ERROR;
+ }
+ memset(p, 0, sizeof(*p));
+ zFile = Tcl_GetStringFromObj(objv[2], 0);
+#ifdef SQLITE_HAS_CODEC
+ p->db = sqlite_open_encrypted(zFile, pKey, nKey, 0, &zErrMsg);
+#else
+ p->db = sqlite_open(zFile, mode, &zErrMsg);
+#endif
+ if( p->db==0 ){
+ Tcl_SetResult(interp, zErrMsg, TCL_VOLATILE);
+ Tcl_Free((char*)p);
+ free(zErrMsg);
+ return TCL_ERROR;
+ }
+ zArg = Tcl_GetStringFromObj(objv[1], 0);
+ Tcl_CreateObjCommand(interp, zArg, DbObjCmd, (char*)p, DbDeleteCmd);
+
+ /* The return value is the value of the sqlite* pointer
+ */
+ sprintf(zBuf, "%p", p->db);
+ if( strncmp(zBuf,"0x",2) ){
+ sprintf(zBuf, "0x%p", p->db);
+ }
+ Tcl_AppendResult(interp, zBuf, 0);
+
+ /* If compiled with SQLITE_TEST turned on, then register the "md5sum"
+ ** SQL function.
+ */
+#ifdef SQLITE_TEST
+ {
+ extern void Md5_Register(sqlite*);
+ Md5_Register(p->db);
+ }
+#endif
+ return TCL_OK;
+}
+
+/*
+** Provide a dummy Tcl_InitStubs if we are using this as a static
+** library.
+*/
+#ifndef USE_TCL_STUBS
+# undef Tcl_InitStubs
+# define Tcl_InitStubs(a,b,c)
+#endif
+
+/*
+** Initialize this module.
+**
+** This Tcl module contains only a single new Tcl command named "sqlite".
+** (Hence there is no namespace. There is no point in using a namespace
+** if the extension only supplies one new name!) The "sqlite" command is
+** used to open a new SQLite database. See the DbMain() routine above
+** for additional information.
+*/
+int Sqlite_Init(Tcl_Interp *interp){
+ Tcl_InitStubs(interp, "8.0", 0);
+ Tcl_CreateObjCommand(interp, "sqlite", (Tcl_ObjCmdProc*)DbMain, 0, 0);
+ Tcl_PkgProvide(interp, "sqlite", "2.0");
+ return TCL_OK;
+}
+int Tclsqlite_Init(Tcl_Interp *interp){
+ Tcl_InitStubs(interp, "8.0", 0);
+ Tcl_CreateObjCommand(interp, "sqlite", (Tcl_ObjCmdProc*)DbMain, 0, 0);
+ Tcl_PkgProvide(interp, "sqlite", "2.0");
+ return TCL_OK;
+}
+int Sqlite_SafeInit(Tcl_Interp *interp){
+ return TCL_OK;
+}
+int Tclsqlite_SafeInit(Tcl_Interp *interp){
+ return TCL_OK;
+}
+
+#if 0
+/*
+** If compiled using mktclapp, this routine runs to initialize
+** everything.
+*/
+int Et_AppInit(Tcl_Interp *interp){
+ return Sqlite_Init(interp);
+}
+#endif
+/***************************************************************************
+** The remaining code is only included if the TCLSH macro is defined to
+** be an integer greater than 0
+*/
+#if defined(TCLSH) && TCLSH>0
+
+/*
+** If the macro TCLSH is defined and is one, then put in code for the
+** "main" routine that implement a interactive shell into which the user
+** can type TCL commands.
+*/
+#if TCLSH==1
+static char zMainloop[] =
+ "set line {}\n"
+ "while {![eof stdin]} {\n"
+ "if {$line!=\"\"} {\n"
+ "puts -nonewline \"> \"\n"
+ "} else {\n"
+ "puts -nonewline \"% \"\n"
+ "}\n"
+ "flush stdout\n"
+ "append line [gets stdin]\n"
+ "if {[info complete $line]} {\n"
+ "if {[catch {uplevel #0 $line} result]} {\n"
+ "puts stderr \"Error: $result\"\n"
+ "} elseif {$result!=\"\"} {\n"
+ "puts $result\n"
+ "}\n"
+ "set line {}\n"
+ "} else {\n"
+ "append line \\n\n"
+ "}\n"
+ "}\n"
+;
+#endif /* TCLSH==1 */
+
+int Libsqlite_Init( Tcl_Interp *interp) {
+#ifdef TCL_THREADS
+ if (Thread_Init(interp) == TCL_ERROR) {
+ return TCL_ERROR;
+ }
+#endif
+ Sqlite_Init(interp);
+#ifdef SQLITE_TEST
+ {
+ extern int Sqlitetest1_Init(Tcl_Interp*);
+ extern int Sqlitetest2_Init(Tcl_Interp*);
+ extern int Sqlitetest3_Init(Tcl_Interp*);
+ extern int Md5_Init(Tcl_Interp*);
+ Sqlitetest1_Init(interp);
+ Sqlitetest2_Init(interp);
+ Sqlitetest3_Init(interp);
+ Md5_Init(interp);
+ Tcl_StaticPackage(interp, "sqlite", Libsqlite_Init, Libsqlite_Init);
+ }
+#endif
+ return TCL_OK;
+}
+
+#define TCLSH_MAIN main /* Needed to fake out mktclapp */
+#if TCLSH==1
+int TCLSH_MAIN(int argc, char **argv){
+#ifndef TCL_THREADS
+ Tcl_Interp *interp;
+ Tcl_FindExecutable(argv[0]);
+ interp = Tcl_CreateInterp();
+ Libsqlite_Init(interp);
+ if( argc>=2 ){
+ int i;
+ Tcl_SetVar(interp,"argv0",argv[1],TCL_GLOBAL_ONLY);
+ Tcl_SetVar(interp,"argv", "", TCL_GLOBAL_ONLY);
+ for(i=2; i<argc; i++){
+ Tcl_SetVar(interp, "argv", argv[i],
+ TCL_GLOBAL_ONLY | TCL_LIST_ELEMENT | TCL_APPEND_VALUE);
+ }
+ if( Tcl_EvalFile(interp, argv[1])!=TCL_OK ){
+ const char *zInfo = Tcl_GetVar(interp, "errorInfo", TCL_GLOBAL_ONLY);
+ if( zInfo==0 ) zInfo = interp->result;
+ fprintf(stderr,"%s: %s\n", *argv, zInfo);
+ return TCL_ERROR;
+ }
+ }else{
+ Tcl_GlobalEval(interp, zMainloop);
+ }
+ return 0;
+#else
+ Tcl_Main(argc, argv, Libsqlite_Init);
+#endif /* TCL_THREADS */
+ return 0;
+}
+#endif /* TCLSH==1 */
+
+
+/*
+** If the macro TCLSH is set to 2, then implement a space analysis tool.
+*/
+#if TCLSH==2
+static char zAnalysis[] =
+#include "spaceanal_tcl.h"
+;
+
+int main(int argc, char **argv){
+ Tcl_Interp *interp;
+ int i;
+ Tcl_FindExecutable(argv[0]);
+ interp = Tcl_CreateInterp();
+ Libsqlite_Init(interp);
+ Tcl_SetVar(interp,"argv0",argv[0],TCL_GLOBAL_ONLY);
+ Tcl_SetVar(interp,"argv", "", TCL_GLOBAL_ONLY);
+ for(i=1; i<argc; i++){
+ Tcl_SetVar(interp, "argv", argv[i],
+ TCL_GLOBAL_ONLY | TCL_LIST_ELEMENT | TCL_APPEND_VALUE);
+ }
+ if( Tcl_GlobalEval(interp, zAnalysis)!=TCL_OK ){
+ const char *zInfo = Tcl_GetVar(interp, "errorInfo", TCL_GLOBAL_ONLY);
+ if( zInfo==0 ) zInfo = interp->result;
+ fprintf(stderr,"%s: %s\n", *argv, zInfo);
+ return TCL_ERROR;
+ }
+ return 0;
+}
+#endif /* TCLSH==2 */
+
+#endif /* TCLSH */
+
+#endif /* NO_TCL */
diff --git a/usr/src/cmd/svc/configd/sqlite/src/test1.c b/usr/src/cmd/svc/configd/sqlite/src/test1.c
new file mode 100644
index 0000000000..6434d90500
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/test1.c
@@ -0,0 +1,1030 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** Code for testing various interfaces of the SQLite library.  This code
+** is not included in the SQLite library. It is used for automated
+** testing of the SQLite library.
+**
+** $Id: test1.c,v 1.36.2.1 2004/05/07 00:57:06 drh Exp $
+*/
+#include "sqliteInt.h"
+#include "tcl.h"
+#include "os.h"
+#include <stdlib.h>
+#include <string.h>
+
+#if OS_WIN
+# define PTR_FMT "%x"
+#else
+# define PTR_FMT "%p"
+#endif
+
+/*
+** Decode a pointer to an sqlite object.
+*/
+static int getDbPointer(Tcl_Interp *interp, const char *zA, sqlite **ppDb){
+ if( sscanf(zA, PTR_FMT, (void**)ppDb)!=1 &&
+ (zA[0]!='0' || zA[1]!='x' || sscanf(&zA[2], PTR_FMT, (void**)ppDb)!=1)
+ ){
+ Tcl_AppendResult(interp, "\"", zA, "\" is not a valid pointer value", 0);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
+
+/*
+** Decode a pointer to an sqlite_vm object.
+*/
+static int getVmPointer(Tcl_Interp *interp, const char *zArg, sqlite_vm **ppVm){
+ if( sscanf(zArg, PTR_FMT, (void**)ppVm)!=1 ){
+ Tcl_AppendResult(interp, "\"", zArg, "\" is not a valid pointer value", 0);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
+
+/*
+** Generate a text representation of a pointer that can be understood
+** by the getDbPointer and getVmPointer routines above.
+**
+** The problem is, on some machines (Solaris) if you do a printf with
+** "%p" you cannot turn around and do a scanf with the same "%p" and
+** get your pointer back. You have to prepend a "0x" before it will
+** work. Or at least that is what is reported to me (drh). But this
+** behavior varies from machine to machine.  The solution used here is
+** to test the string right after it is generated to see if it can be
+** understood by scanf, and if not, try prepending a "0x" to see if
+** that helps. If nothing works, a fatal error is generated.
+*/
+static int makePointerStr(Tcl_Interp *interp, char *zPtr, void *p){
+ void *p2;
+ sprintf(zPtr, PTR_FMT, p);
+ if( sscanf(zPtr, PTR_FMT, &p2)!=1 || p2!=p ){
+ sprintf(zPtr, "0x" PTR_FMT, p);
+ if( sscanf(zPtr, PTR_FMT, &p2)!=1 || p2!=p ){
+ Tcl_AppendResult(interp, "unable to convert a pointer to a string "
+ "in the file " __FILE__ " in function makePointerStr(). Please "
+       "report this problem to the SQLite mailing list or as a new bug "
+ "report. Please provide detailed information about how you compiled "
+ "SQLite and what computer you are running on.", 0);
+ return TCL_ERROR;
+ }
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: sqlite_open filename
+**
+** Returns: The name of an open database.
+*/
+static int sqlite_test_open(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ sqlite *db;
+ char *zErr = 0;
+ char zBuf[100];
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " FILENAME\"", 0);
+ return TCL_ERROR;
+ }
+ db = sqlite_open(argv[1], 0666, &zErr);
+ if( db==0 ){
+ Tcl_AppendResult(interp, zErr, 0);
+ free(zErr);
+ return TCL_ERROR;
+ }
+ if( makePointerStr(interp, zBuf, db) ) return TCL_ERROR;
+ Tcl_AppendResult(interp, zBuf, 0);
+ return TCL_OK;
+}
+
+/*
+** The callback routine for sqlite_exec_printf().
+*/
+static int exec_printf_cb(void *pArg, int argc, char **argv, char **name){
+ Tcl_DString *str = (Tcl_DString*)pArg;
+ int i;
+
+ if( Tcl_DStringLength(str)==0 ){
+ for(i=0; i<argc; i++){
+ Tcl_DStringAppendElement(str, name[i] ? name[i] : "NULL");
+ }
+ }
+ for(i=0; i<argc; i++){
+ Tcl_DStringAppendElement(str, argv[i] ? argv[i] : "NULL");
+ }
+ return 0;
+}
+
+/*
+** Usage: sqlite_exec_printf DB FORMAT STRING
+**
+** Invoke the sqlite_exec_printf() interface using the open database
+** DB. The SQL is the string FORMAT. The format string should contain
+** one %s or %q. STRING is the value inserted into %s or %q.
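+**
+** An illustrative Tcl sketch (the handle $DB and table name are
+** hypothetical):
+**
+**     sqlite_exec_printf $DB {INSERT INTO t1 VALUES('%q')} {it's}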
+*/
+static int test_exec_printf(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ sqlite *db;
+ Tcl_DString str;
+ int rc;
+ char *zErr = 0;
+ char zBuf[30];
+ if( argc!=4 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " DB FORMAT STRING", 0);
+ return TCL_ERROR;
+ }
+ if( getDbPointer(interp, argv[1], &db) ) return TCL_ERROR;
+ Tcl_DStringInit(&str);
+ rc = sqlite_exec_printf(db, argv[2], exec_printf_cb, &str, &zErr, argv[3]);
+ sprintf(zBuf, "%d", rc);
+ Tcl_AppendElement(interp, zBuf);
+ Tcl_AppendElement(interp, rc==SQLITE_OK ? Tcl_DStringValue(&str) : zErr);
+ Tcl_DStringFree(&str);
+ if( zErr ) free(zErr);
+ return TCL_OK;
+}
+
+/*
+** Usage: sqlite_mprintf_z_test SEPARATOR ARG0 ARG1 ...
+**
+** Test the %z format of mprintf(). Use multiple mprintf() calls to
+** concatenate arg0 through argn using separator as the separator.
+** Return the result.
+*/
+static int test_mprintf_z(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ char *zResult = 0;
+ int i;
+
+ for(i=2; i<argc; i++){
+ zResult = sqliteMPrintf("%z%s%s", zResult, argv[1], argv[i]);
+ }
+ Tcl_AppendResult(interp, zResult, 0);
+ sqliteFree(zResult);
+ return TCL_OK;
+}
+
+/*
+** Usage: sqlite_get_table_printf DB FORMAT STRING
+**
+** Invoke the sqlite_get_table_printf() interface using the open database
+** DB. The SQL is the string FORMAT. The format string should contain
+** one %s or %q. STRING is the value inserted into %s or %q.
+*/
+static int test_get_table_printf(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ sqlite *db;
+ Tcl_DString str;
+ int rc;
+ char *zErr = 0;
+ int nRow, nCol;
+ char **aResult;
+ int i;
+ char zBuf[30];
+ if( argc!=4 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " DB FORMAT STRING", 0);
+ return TCL_ERROR;
+ }
+ if( getDbPointer(interp, argv[1], &db) ) return TCL_ERROR;
+ Tcl_DStringInit(&str);
+ rc = sqlite_get_table_printf(db, argv[2], &aResult, &nRow, &nCol,
+ &zErr, argv[3]);
+ sprintf(zBuf, "%d", rc);
+ Tcl_AppendElement(interp, zBuf);
+ if( rc==SQLITE_OK ){
+ sprintf(zBuf, "%d", nRow);
+ Tcl_AppendElement(interp, zBuf);
+ sprintf(zBuf, "%d", nCol);
+ Tcl_AppendElement(interp, zBuf);
+ for(i=0; i<(nRow+1)*nCol; i++){
+ Tcl_AppendElement(interp, aResult[i] ? aResult[i] : "NULL");
+ }
+ }else{
+ Tcl_AppendElement(interp, zErr);
+ }
+ sqlite_free_table(aResult);
+ if( zErr ) free(zErr);
+ return TCL_OK;
+}
+
+
+/*
+** Usage: sqlite_last_insert_rowid DB
+**
+** Returns the integer ROWID of the most recent insert.
+*/
+static int test_last_rowid(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ sqlite *db;
+ char zBuf[30];
+
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], " DB\"", 0);
+ return TCL_ERROR;
+ }
+ if( getDbPointer(interp, argv[1], &db) ) return TCL_ERROR;
+ sprintf(zBuf, "%d", sqlite_last_insert_rowid(db));
+ Tcl_AppendResult(interp, zBuf, 0);
+ return SQLITE_OK;
+}
+
+/*
+** Usage: sqlite_close DB
+**
+** Closes the database opened by sqlite_open.
+*/
+static int sqlite_test_close(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ sqlite *db;
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " FILENAME\"", 0);
+ return TCL_ERROR;
+ }
+ if( getDbPointer(interp, argv[1], &db) ) return TCL_ERROR;
+ sqlite_close(db);
+ return TCL_OK;
+}
+
+/*
+** Implementation of the x_coalesce() function.
+** Return the first non-NULL argument.
+*/
+static void ifnullFunc(sqlite_func *context, int argc, const char **argv){
+ int i;
+ for(i=0; i<argc; i++){
+ if( argv[i] ){
+ sqlite_set_result_string(context, argv[i], -1);
+ break;
+ }
+ }
+}
+
+/*
+** A structure into which to accumulate text.
+*/
+struct dstr {
+ int nAlloc; /* Space allocated */
+ int nUsed; /* Space used */
+ char *z; /* The space */
+};
+
+/*
+** Append text to a dstr
+*/
+static void dstrAppend(struct dstr *p, const char *z, int divider){
+ int n = strlen(z);
+ if( p->nUsed + n + 2 > p->nAlloc ){
+ char *zNew;
+ p->nAlloc = p->nAlloc*2 + n + 200;
+ zNew = sqliteRealloc(p->z, p->nAlloc);
+ if( zNew==0 ){
+ sqliteFree(p->z);
+ memset(p, 0, sizeof(*p));
+ return;
+ }
+ p->z = zNew;
+ }
+ if( divider && p->nUsed>0 ){
+ p->z[p->nUsed++] = divider;
+ }
+ memcpy(&p->z[p->nUsed], z, n+1);
+ p->nUsed += n;
+}
+
+/*
+** Invoked for each callback from sqliteExecFunc
+*/
+static int execFuncCallback(void *pData, int argc, char **argv, char **NotUsed){
+ struct dstr *p = (struct dstr*)pData;
+ int i;
+ for(i=0; i<argc; i++){
+ if( argv[i]==0 ){
+ dstrAppend(p, "NULL", ' ');
+ }else{
+ dstrAppend(p, argv[i], ' ');
+ }
+ }
+ return 0;
+}
+
+/*
+** Implementation of the x_sqlite_exec() function. This function takes
+** a single argument and attempts to execute that argument as SQL code.
+** This is illegal and should set the SQLITE_MISUSE flag on the database.
+**
+** 2004-Jan-07: We have changed this to make it legal to call sqlite_exec()
+** from within a function call.
+**
+** This routine simulates the effect of having two threads attempt to
+** use the same database at the same time.
+*/
+static void sqliteExecFunc(sqlite_func *context, int argc, const char **argv){
+ struct dstr x;
+ memset(&x, 0, sizeof(x));
+ sqlite_exec((sqlite*)sqlite_user_data(context), argv[0],
+ execFuncCallback, &x, 0);
+ sqlite_set_result_string(context, x.z, x.nUsed);
+ sqliteFree(x.z);
+}
+
+/*
+** Usage: sqlite_test_create_function DB
+**
+** Call the sqlite_create_function API on the given database in order
+** to create a function named "x_coalesce". This function does the same thing
+** as the "coalesce" function. This function also registers an SQL function
+** named "x_sqlite_exec" that invokes sqlite_exec(). Invoking sqlite_exec()
+** in this way is illegal recursion and should raise an SQLITE_MISUSE error.
+** The effect is similar to trying to use the same database connection from
+** two threads at the same time.
+**
+** The original motivation for this routine was to be able to call the
+** sqlite_create_function function while a query is in progress in order
+** to test the SQLITE_MISUSE detection logic.
+*/
+static int test_create_function(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ sqlite *db;
+ extern void Md5_Register(sqlite*);
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " FILENAME\"", 0);
+ return TCL_ERROR;
+ }
+ if( getDbPointer(interp, argv[1], &db) ) return TCL_ERROR;
+ sqlite_create_function(db, "x_coalesce", -1, ifnullFunc, 0);
+ sqlite_create_function(db, "x_sqlite_exec", 1, sqliteExecFunc, db);
+ return TCL_OK;
+}
+
+/*
+** Routines to implement the x_count() aggregate function.
+*/
+typedef struct CountCtx CountCtx;
+struct CountCtx {
+ int n;
+};
+static void countStep(sqlite_func *context, int argc, const char **argv){
+ CountCtx *p;
+ p = sqlite_aggregate_context(context, sizeof(*p));
+ if( (argc==0 || argv[0]) && p ){
+ p->n++;
+ }
+}
+static void countFinalize(sqlite_func *context){
+ CountCtx *p;
+ p = sqlite_aggregate_context(context, sizeof(*p));
+ sqlite_set_result_int(context, p ? p->n : 0);
+}
+
+/*
+** Usage: sqlite_test_create_aggregate DB
+**
+** Call the sqlite_create_aggregate API on the given database in order
+** to create an aggregate function named "x_count".  This function does
+** the same thing as the built-in "count()" aggregate.
+**
+** The original motivation for this routine was to be able to call the
+** sqlite_create_aggregate function while a query is in progress in order
+** to test the SQLITE_MISUSE detection logic.
+*/
+static int test_create_aggregate(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ sqlite *db;
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " FILENAME\"", 0);
+ return TCL_ERROR;
+ }
+ if( getDbPointer(interp, argv[1], &db) ) return TCL_ERROR;
+ sqlite_create_aggregate(db, "x_count", 0, countStep, countFinalize, 0);
+ sqlite_create_aggregate(db, "x_count", 1, countStep, countFinalize, 0);
+ return TCL_OK;
+}
+
+
+
+/*
+** Usage: sqlite_mprintf_int FORMAT INTEGER INTEGER INTEGER
+**
+** Call mprintf with three integer arguments
+*/
+static int sqlite_mprintf_int(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ int a[3], i;
+ char *z;
+ if( argc!=5 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " FORMAT INT INT INT\"", 0);
+ return TCL_ERROR;
+ }
+ for(i=2; i<5; i++){
+ if( Tcl_GetInt(interp, argv[i], &a[i-2]) ) return TCL_ERROR;
+ }
+ z = sqlite_mprintf(argv[1], a[0], a[1], a[2]);
+ Tcl_AppendResult(interp, z, 0);
+ sqlite_freemem(z);
+ return TCL_OK;
+}
+
+/*
+** Usage: sqlite_mprintf_str FORMAT INTEGER INTEGER STRING
+**
+** Call mprintf with two integer arguments and one string argument
+*/
+static int sqlite_mprintf_str(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ int a[3], i;
+ char *z;
+ if( argc<4 || argc>5 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " FORMAT INT INT ?STRING?\"", 0);
+ return TCL_ERROR;
+ }
+ for(i=2; i<4; i++){
+ if( Tcl_GetInt(interp, argv[i], &a[i-2]) ) return TCL_ERROR;
+ }
+ z = sqlite_mprintf(argv[1], a[0], a[1], argc>4 ? argv[4] : NULL);
+ Tcl_AppendResult(interp, z, 0);
+ sqlite_freemem(z);
+ return TCL_OK;
+}
+
+/*
+** Usage:  sqlite_mprintf_double FORMAT INTEGER INTEGER DOUBLE
+**
+** Call mprintf with two integer arguments and one double argument
+*/
+static int sqlite_mprintf_double(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ int a[3], i;
+ double r;
+ char *z;
+ if( argc!=5 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+       " FORMAT INT INT DOUBLE\"", 0);
+ return TCL_ERROR;
+ }
+ for(i=2; i<4; i++){
+ if( Tcl_GetInt(interp, argv[i], &a[i-2]) ) return TCL_ERROR;
+ }
+ if( Tcl_GetDouble(interp, argv[4], &r) ) return TCL_ERROR;
+ z = sqlite_mprintf(argv[1], a[0], a[1], r);
+ Tcl_AppendResult(interp, z, 0);
+ sqlite_freemem(z);
+ return TCL_OK;
+}
+
+/*
+** Usage:  sqlite_mprintf_scaled FORMAT DOUBLE DOUBLE
+**
+** Call mprintf with a single double argument which is the product of the
+** two arguments given above. This is used to generate overflow and underflow
+** doubles to test that they are converted properly.
+*/
+static int sqlite_mprintf_scaled(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ int i;
+ double r[2];
+ char *z;
+ if( argc!=4 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " FORMAT DOUBLE DOUBLE\"", 0);
+ return TCL_ERROR;
+ }
+ for(i=2; i<4; i++){
+ if( Tcl_GetDouble(interp, argv[i], &r[i-2]) ) return TCL_ERROR;
+ }
+ z = sqlite_mprintf(argv[1], r[0]*r[1]);
+ Tcl_AppendResult(interp, z, 0);
+ sqlite_freemem(z);
+ return TCL_OK;
+}
+
+/*
+** Usage: sqlite_malloc_fail N
+**
+** Rig sqliteMalloc() to fail on the N-th call. Turn off this mechanism
+** and reset the sqlite_malloc_failed variable if N==0.
+*/
+#ifdef MEMORY_DEBUG
+static int sqlite_malloc_fail(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ int n;
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], " N\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], &n) ) return TCL_ERROR;
+ sqlite_iMallocFail = n;
+ sqlite_malloc_failed = 0;
+ return TCL_OK;
+}
+#endif
+
+/*
+** Usage: sqlite_malloc_stat
+**
+** Return the number of prior calls to sqliteMalloc() and sqliteFree().
+*/
+#ifdef MEMORY_DEBUG
+static int sqlite_malloc_stat(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ char zBuf[200];
+ sprintf(zBuf, "%d %d %d", sqlite_nMalloc, sqlite_nFree, sqlite_iMallocFail);
+ Tcl_AppendResult(interp, zBuf, 0);
+ return TCL_OK;
+}
+#endif
+
+/*
+** Usage: sqlite_abort
+**
+** Shut down the process immediately.  This is not a clean shutdown.
+** This command is used to test the recoverability of a database in
+** the event of a program crash.
+*/
+static int sqlite_abort(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ assert( interp==0 ); /* This will always fail */
+ return TCL_OK;
+}
+
+/*
+** The following routine is a user-defined SQL function whose purpose
+** is to test the sqlite_set_result() API.
+*/
+static void testFunc(sqlite_func *context, int argc, const char **argv){
+ while( argc>=2 ){
+ if( argv[0]==0 ){
+ sqlite_set_result_error(context, "first argument to test function "
+ "may not be NULL", -1);
+ }else if( sqliteStrICmp(argv[0],"string")==0 ){
+ sqlite_set_result_string(context, argv[1], -1);
+ }else if( argv[1]==0 ){
+ sqlite_set_result_error(context, "2nd argument may not be NULL if the "
+ "first argument is not \"string\"", -1);
+ }else if( sqliteStrICmp(argv[0],"int")==0 ){
+ sqlite_set_result_int(context, atoi(argv[1]));
+ }else if( sqliteStrICmp(argv[0],"double")==0 ){
+ sqlite_set_result_double(context, sqliteAtoF(argv[1], 0));
+ }else{
+ sqlite_set_result_error(context,"first argument should be one of: "
+ "string int double", -1);
+ }
+ argc -= 2;
+ argv += 2;
+ }
+}
+
+/*
+** Usage: sqlite_register_test_function DB NAME
+**
+** Register the test SQL function on the database DB under the name NAME.
+*/
+static int test_register_func(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ sqlite *db;
+ int rc;
+ if( argc!=3 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " DB FUNCTION-NAME", 0);
+ return TCL_ERROR;
+ }
+ if( getDbPointer(interp, argv[1], &db) ) return TCL_ERROR;
+ rc = sqlite_create_function(db, argv[2], -1, testFunc, 0);
+ if( rc!=0 ){
+ Tcl_AppendResult(interp, sqlite_error_string(rc), 0);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
+
+/*
+** This SQLite callback records the datatype of all columns.
+**
+** The pArg argument is really a pointer to a TCL interpreter.  The
+** column datatypes are stored as the result of this interpreter.
+**
+** This routine returns non-zero which causes the query to abort.
+*/
+static int rememberDataTypes(void *pArg, int nCol, char **argv, char **colv){
+ int i;
+ Tcl_Interp *interp = (Tcl_Interp*)pArg;
+ Tcl_Obj *pList, *pElem;
+ if( colv[nCol+1]==0 ){
+ return 1;
+ }
+ pList = Tcl_NewObj();
+ for(i=0; i<nCol; i++){
+ pElem = Tcl_NewStringObj(colv[i+nCol] ? colv[i+nCol] : "NULL", -1);
+ Tcl_ListObjAppendElement(interp, pList, pElem);
+ }
+ Tcl_SetObjResult(interp, pList);
+ return 1;
+}
+
+/*
+** Invoke an SQL statement but ignore all the data in the result. Instead,
+** return a list that consists of the datatypes of the various columns.
+**
+** This only works if "PRAGMA show_datatypes=on" has been executed against
+** the database connection.
+*/
+static int sqlite_datatypes(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ sqlite *db;
+ int rc;
+ if( argc!=3 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " DB SQL", 0);
+ return TCL_ERROR;
+ }
+ if( getDbPointer(interp, argv[1], &db) ) return TCL_ERROR;
+ rc = sqlite_exec(db, argv[2], rememberDataTypes, interp, 0);
+ if( rc!=0 && rc!=SQLITE_ABORT ){
+ Tcl_AppendResult(interp, sqlite_error_string(rc), 0);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: sqlite_compile DB SQL ?TAILVAR?
+**
+** Attempt to compile an SQL statement. Return a pointer to the virtual
+** machine used to execute that statement. Unprocessed SQL is written
+** into TAILVAR.
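+**
+** An illustrative Tcl sketch of the compile/step/finalize cycle (the
+** handle $DB and table name are hypothetical):
+**
+**     set vm [sqlite_compile $DB {SELECT * FROM t1} TAIL]
+**     while {[sqlite_step $vm N VALUES COLNAMES]=="SQLITE_ROW"} {
+**       puts $VALUES
+**     }
+**     sqlite_finalize $vm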
+*/
+static int test_compile(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ sqlite *db;
+ sqlite_vm *vm;
+ int rc;
+ char *zErr = 0;
+ const char *zTail;
+ char zBuf[50];
+ if( argc!=3 && argc!=4 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " DB SQL TAILVAR", 0);
+ return TCL_ERROR;
+ }
+ if( getDbPointer(interp, argv[1], &db) ) return TCL_ERROR;
+ rc = sqlite_compile(db, argv[2], argc==4 ? &zTail : 0, &vm, &zErr);
+ if( argc==4 ) Tcl_SetVar(interp, argv[3], zTail, 0);
+ if( rc ){
+ assert( vm==0 );
+ sprintf(zBuf, "(%d) ", rc);
+ Tcl_AppendResult(interp, zBuf, zErr, 0);
+ sqlite_freemem(zErr);
+ return TCL_ERROR;
+ }
+ if( vm ){
+ if( makePointerStr(interp, zBuf, vm) ) return TCL_ERROR;
+ Tcl_AppendResult(interp, zBuf, 0);
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: sqlite_step VM ?NVAR? ?VALUEVAR? ?COLNAMEVAR?
+**
+** Step a virtual machine.  Return the result code as a string.
+** Column results are written into three variables.
+*/
+static int test_step(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ sqlite_vm *vm;
+ int rc, i;
+ const char **azValue = 0;
+ const char **azColName = 0;
+ int N = 0;
+ char *zRc;
+ char zBuf[50];
+ if( argc<2 || argc>5 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " VM NVAR VALUEVAR COLNAMEVAR", 0);
+ return TCL_ERROR;
+ }
+ if( getVmPointer(interp, argv[1], &vm) ) return TCL_ERROR;
+ rc = sqlite_step(vm, argc>=3?&N:0, argc>=4?&azValue:0, argc==5?&azColName:0);
+ if( argc>=3 ){
+ sprintf(zBuf, "%d", N);
+ Tcl_SetVar(interp, argv[2], zBuf, 0);
+ }
+ if( argc>=4 ){
+ Tcl_SetVar(interp, argv[3], "", 0);
+ if( azValue ){
+ for(i=0; i<N; i++){
+ Tcl_SetVar(interp, argv[3], azValue[i] ? azValue[i] : "",
+ TCL_APPEND_VALUE | TCL_LIST_ELEMENT);
+ }
+ }
+ }
+ if( argc==5 ){
+ Tcl_SetVar(interp, argv[4], "", 0);
+ if( azColName ){
+ for(i=0; i<N*2; i++){
+ Tcl_SetVar(interp, argv[4], azColName[i] ? azColName[i] : "",
+ TCL_APPEND_VALUE | TCL_LIST_ELEMENT);
+ }
+ }
+ }
+ switch( rc ){
+ case SQLITE_DONE: zRc = "SQLITE_DONE"; break;
+ case SQLITE_BUSY: zRc = "SQLITE_BUSY"; break;
+ case SQLITE_ROW: zRc = "SQLITE_ROW"; break;
+ case SQLITE_ERROR: zRc = "SQLITE_ERROR"; break;
+ case SQLITE_MISUSE: zRc = "SQLITE_MISUSE"; break;
+ default: zRc = "unknown"; break;
+ }
+ Tcl_AppendResult(interp, zRc, 0);
+ return TCL_OK;
+}
+
+/*
+** Usage: sqlite_finalize VM
+**
+** Shut down a virtual machine.
+*/
+static int test_finalize(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ sqlite_vm *vm;
+ int rc;
+ char *zErrMsg = 0;
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " VM\"", 0);
+ return TCL_ERROR;
+ }
+ if( getVmPointer(interp, argv[1], &vm) ) return TCL_ERROR;
+ rc = sqlite_finalize(vm, &zErrMsg);
+ if( rc ){
+ char zBuf[50];
+ sprintf(zBuf, "(%d) ", rc);
+ Tcl_AppendResult(interp, zBuf, zErrMsg, 0);
+ sqlite_freemem(zErrMsg);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: sqlite_reset VM
+**
+** Reset a virtual machine and prepare it to be run again.
+*/
+static int test_reset(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ sqlite_vm *vm;
+ int rc;
+ char *zErrMsg = 0;
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " VM\"", 0);
+ return TCL_ERROR;
+ }
+ if( getVmPointer(interp, argv[1], &vm) ) return TCL_ERROR;
+ rc = sqlite_reset(vm, &zErrMsg);
+ if( rc ){
+ char zBuf[50];
+ sprintf(zBuf, "(%d) ", rc);
+ Tcl_AppendResult(interp, zBuf, zErrMsg, 0);
+ sqlite_freemem(zErrMsg);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
+
+/*
+** This is the "static_bind_value" that variables are bound to when
+** the FLAG option of sqlite_bind is "static"
+*/
+static char *sqlite_static_bind_value = 0;
+
+/*
+** Usage: sqlite_bind VM IDX VALUE FLAGS
+**
+** Sets the value of the IDX-th occurrence of "?" in the original SQL
+** string. VALUE is the new value. If FLAGS=="null" then VALUE is
+** ignored and the value is set to NULL. If FLAGS=="static" then
+** the value is set to the value of a static variable named
+** "sqlite_static_bind_value". If FLAGS=="normal" then a copy
+** of the VALUE is made.
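+**
+** An illustrative Tcl sketch (the handle $DB and table name are
+** hypothetical):
+**
+**     set vm [sqlite_compile $DB {INSERT INTO t1 VALUES(?)} TAIL]
+**     sqlite_bind $vm 1 {hello} normal
+**     sqlite_step $vm
+**     sqlite_finalize $vm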
+*/
+static int test_bind(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ sqlite_vm *vm;
+ int rc;
+ int idx;
+ if( argc!=5 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " VM IDX VALUE (null|static|normal)\"", 0);
+ return TCL_ERROR;
+ }
+ if( getVmPointer(interp, argv[1], &vm) ) return TCL_ERROR;
+ if( Tcl_GetInt(interp, argv[2], &idx) ) return TCL_ERROR;
+ if( strcmp(argv[4],"null")==0 ){
+ rc = sqlite_bind(vm, idx, 0, 0, 0);
+ }else if( strcmp(argv[4],"static")==0 ){
+ rc = sqlite_bind(vm, idx, sqlite_static_bind_value, -1, 0);
+ }else if( strcmp(argv[4],"normal")==0 ){
+ rc = sqlite_bind(vm, idx, argv[3], -1, 1);
+ }else{
+ Tcl_AppendResult(interp, "4th argument should be "
+ "\"null\" or \"static\" or \"normal\"", 0);
+ return TCL_ERROR;
+ }
+ if( rc ){
+ char zBuf[50];
+ sprintf(zBuf, "(%d) ", rc);
+ Tcl_AppendResult(interp, zBuf, sqlite_error_string(rc), 0);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: breakpoint
+**
+** This routine exists for one purpose - to provide a place to put a
+** breakpoint with GDB that can be triggered using TCL code. The use
+** for this is when a particular test fails on (say) the 1485th iteration.
+** In the TCL test script, we can add code like this:
+**
+** if {$i==1485} breakpoint
+**
+** Then run testfixture in the debugger and wait for the breakpoint to
+** fire. Then additional breakpoints can be set to trace down the bug.
+*/
+static int test_breakpoint(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ return TCL_OK; /* Do nothing */
+}
+
+/*
+** Register commands with the TCL interpreter.
+*/
+int Sqlitetest1_Init(Tcl_Interp *interp){
+ extern int sqlite_search_count;
+ extern int sqlite_interrupt_count;
+ extern int sqlite_open_file_count;
+ extern int sqlite_current_time;
+ extern int sqlite_temp_directory;
+ static struct {
+ char *zName;
+ Tcl_CmdProc *xProc;
+ } aCmd[] = {
+ { "sqlite_mprintf_int", (Tcl_CmdProc*)sqlite_mprintf_int },
+ { "sqlite_mprintf_str", (Tcl_CmdProc*)sqlite_mprintf_str },
+ { "sqlite_mprintf_double", (Tcl_CmdProc*)sqlite_mprintf_double },
+ { "sqlite_mprintf_scaled", (Tcl_CmdProc*)sqlite_mprintf_scaled },
+ { "sqlite_mprintf_z_test", (Tcl_CmdProc*)test_mprintf_z },
+ { "sqlite_open", (Tcl_CmdProc*)sqlite_test_open },
+ { "sqlite_last_insert_rowid", (Tcl_CmdProc*)test_last_rowid },
+ { "sqlite_exec_printf", (Tcl_CmdProc*)test_exec_printf },
+ { "sqlite_get_table_printf", (Tcl_CmdProc*)test_get_table_printf },
+ { "sqlite_close", (Tcl_CmdProc*)sqlite_test_close },
+ { "sqlite_create_function", (Tcl_CmdProc*)test_create_function },
+ { "sqlite_create_aggregate", (Tcl_CmdProc*)test_create_aggregate },
+ { "sqlite_register_test_function", (Tcl_CmdProc*)test_register_func },
+ { "sqlite_abort", (Tcl_CmdProc*)sqlite_abort },
+ { "sqlite_datatypes", (Tcl_CmdProc*)sqlite_datatypes },
+#ifdef MEMORY_DEBUG
+ { "sqlite_malloc_fail", (Tcl_CmdProc*)sqlite_malloc_fail },
+ { "sqlite_malloc_stat", (Tcl_CmdProc*)sqlite_malloc_stat },
+#endif
+ { "sqlite_compile", (Tcl_CmdProc*)test_compile },
+ { "sqlite_step", (Tcl_CmdProc*)test_step },
+ { "sqlite_finalize", (Tcl_CmdProc*)test_finalize },
+ { "sqlite_bind", (Tcl_CmdProc*)test_bind },
+ { "sqlite_reset", (Tcl_CmdProc*)test_reset },
+ { "breakpoint", (Tcl_CmdProc*)test_breakpoint },
+ };
+ int i;
+
+ for(i=0; i<sizeof(aCmd)/sizeof(aCmd[0]); i++){
+ Tcl_CreateCommand(interp, aCmd[i].zName, aCmd[i].xProc, 0, 0);
+ }
+ Tcl_LinkVar(interp, "sqlite_search_count",
+ (char*)&sqlite_search_count, TCL_LINK_INT);
+ Tcl_LinkVar(interp, "sqlite_interrupt_count",
+ (char*)&sqlite_interrupt_count, TCL_LINK_INT);
+ Tcl_LinkVar(interp, "sqlite_open_file_count",
+ (char*)&sqlite_open_file_count, TCL_LINK_INT);
+ Tcl_LinkVar(interp, "sqlite_current_time",
+ (char*)&sqlite_current_time, TCL_LINK_INT);
+ Tcl_LinkVar(interp, "sqlite_static_bind_value",
+ (char*)&sqlite_static_bind_value, TCL_LINK_STRING);
+ Tcl_LinkVar(interp, "sqlite_temp_directory",
+ (char*)&sqlite_temp_directory, TCL_LINK_STRING);
+ return TCL_OK;
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/src/test2.c b/usr/src/cmd/svc/configd/sqlite/src/test2.c
new file mode 100644
index 0000000000..2d85769d05
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/test2.c
@@ -0,0 +1,570 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** Code for testing the pager.c module in SQLite. This code
+** is not included in the SQLite library. It is used for automated
+** testing of the SQLite library.
+**
+** $Id: test2.c,v 1.16 2004/02/10 01:54:28 drh Exp $
+*/
+#include "os.h"
+#include "sqliteInt.h"
+#include "pager.h"
+#include "tcl.h"
+#include <stdlib.h>
+#include <string.h>
+
+/*
+** Interpret an SQLite error number
+*/
+static char *errorName(int rc){
+ char *zName;
+ switch( rc ){
+ case SQLITE_OK: zName = "SQLITE_OK"; break;
+ case SQLITE_ERROR: zName = "SQLITE_ERROR"; break;
+ case SQLITE_INTERNAL: zName = "SQLITE_INTERNAL"; break;
+ case SQLITE_PERM: zName = "SQLITE_PERM"; break;
+ case SQLITE_ABORT: zName = "SQLITE_ABORT"; break;
+ case SQLITE_BUSY: zName = "SQLITE_BUSY"; break;
+ case SQLITE_NOMEM: zName = "SQLITE_NOMEM"; break;
+ case SQLITE_READONLY: zName = "SQLITE_READONLY"; break;
+ case SQLITE_INTERRUPT: zName = "SQLITE_INTERRUPT"; break;
+ case SQLITE_IOERR: zName = "SQLITE_IOERR"; break;
+ case SQLITE_CORRUPT: zName = "SQLITE_CORRUPT"; break;
+ case SQLITE_NOTFOUND: zName = "SQLITE_NOTFOUND"; break;
+ case SQLITE_FULL: zName = "SQLITE_FULL"; break;
+ case SQLITE_CANTOPEN: zName = "SQLITE_CANTOPEN"; break;
+ case SQLITE_PROTOCOL: zName = "SQLITE_PROTOCOL"; break;
+ case SQLITE_EMPTY: zName = "SQLITE_EMPTY"; break;
+ case SQLITE_SCHEMA: zName = "SQLITE_SCHEMA"; break;
+ case SQLITE_TOOBIG: zName = "SQLITE_TOOBIG"; break;
+ case SQLITE_CONSTRAINT: zName = "SQLITE_CONSTRAINT"; break;
+ case SQLITE_MISMATCH: zName = "SQLITE_MISMATCH"; break;
+ case SQLITE_MISUSE: zName = "SQLITE_MISUSE"; break;
+ case SQLITE_NOLFS: zName = "SQLITE_NOLFS"; break;
+ default: zName = "SQLITE_Unknown"; break;
+ }
+ return zName;
+}
+
+/*
+** Usage: pager_open FILENAME N-PAGE
+**
+** Open a new pager
+*/
+static int pager_open(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Pager *pPager;
+ int nPage;
+ int rc;
+ char zBuf[100];
+ if( argc!=3 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " FILENAME N-PAGE\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[2], &nPage) ) return TCL_ERROR;
+ rc = sqlitepager_open(&pPager, argv[1], nPage, 0, 1);
+ if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ sprintf(zBuf,"0x%x",(int)pPager);
+ Tcl_AppendResult(interp, zBuf, 0);
+ return TCL_OK;
+}
+
+/*
+** Usage: pager_close ID
+**
+** Close the given pager.
+*/
+static int pager_close(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Pager *pPager;
+ int rc;
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pPager) ) return TCL_ERROR;
+ rc = sqlitepager_close(pPager);
+ if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: pager_rollback ID
+**
+** Rollback changes
+*/
+static int pager_rollback(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Pager *pPager;
+ int rc;
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pPager) ) return TCL_ERROR;
+ rc = sqlitepager_rollback(pPager);
+ if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: pager_commit ID
+**
+** Commit all changes
+*/
+static int pager_commit(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Pager *pPager;
+ int rc;
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pPager) ) return TCL_ERROR;
+ rc = sqlitepager_commit(pPager);
+ if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: pager_ckpt_begin ID
+**
+** Start a new checkpoint.
+*/
+static int pager_ckpt_begin(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Pager *pPager;
+ int rc;
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pPager) ) return TCL_ERROR;
+ rc = sqlitepager_ckpt_begin(pPager);
+ if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: pager_ckpt_rollback ID
+**
+** Rollback changes to a checkpoint
+*/
+static int pager_ckpt_rollback(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Pager *pPager;
+ int rc;
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pPager) ) return TCL_ERROR;
+ rc = sqlitepager_ckpt_rollback(pPager);
+ if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: pager_ckpt_commit ID
+**
+** Commit changes to a checkpoint
+*/
+static int pager_ckpt_commit(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Pager *pPager;
+ int rc;
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pPager) ) return TCL_ERROR;
+ rc = sqlitepager_ckpt_commit(pPager);
+ if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: pager_stats ID
+**
+** Return pager statistics.
+*/
+static int pager_stats(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Pager *pPager;
+ int i, *a;
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pPager) ) return TCL_ERROR;
+ a = sqlitepager_stats(pPager);
+ for(i=0; i<9; i++){
+ static char *zName[] = {
+ "ref", "page", "max", "size", "state", "err",
+ "hit", "miss", "ovfl",
+ };
+ char zBuf[100];
+ Tcl_AppendElement(interp, zName[i]);
+ sprintf(zBuf,"%d",a[i]);
+ Tcl_AppendElement(interp, zBuf);
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: pager_pagecount ID
+**
+** Return the size of the database file.
+*/
+static int pager_pagecount(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Pager *pPager;
+ char zBuf[100];
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pPager) ) return TCL_ERROR;
+ sprintf(zBuf,"%d",sqlitepager_pagecount(pPager));
+ Tcl_AppendResult(interp, zBuf, 0);
+ return TCL_OK;
+}
+
+/*
+** Usage: page_get ID PGNO
+**
+** Return a pointer to a page from the database.
+*/
+static int page_get(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Pager *pPager;
+ char zBuf[100];
+ void *pPage;
+ int pgno;
+ int rc;
+ if( argc!=3 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID PGNO\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pPager) ) return TCL_ERROR;
+ if( Tcl_GetInt(interp, argv[2], &pgno) ) return TCL_ERROR;
+ rc = sqlitepager_get(pPager, pgno, &pPage);
+ if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ sprintf(zBuf,"0x%x",(int)pPage);
+ Tcl_AppendResult(interp, zBuf, 0);
+ return TCL_OK;
+}
+
+/*
+** Usage: page_lookup ID PGNO
+**
+** Return a pointer to a page if the page is already in cache.
+** If not in cache, return an empty string.
+*/
+static int page_lookup(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Pager *pPager;
+ char zBuf[100];
+ void *pPage;
+ int pgno;
+ if( argc!=3 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID PGNO\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pPager) ) return TCL_ERROR;
+ if( Tcl_GetInt(interp, argv[2], &pgno) ) return TCL_ERROR;
+ pPage = sqlitepager_lookup(pPager, pgno);
+ if( pPage ){
+ sprintf(zBuf,"0x%x",(int)pPage);
+ Tcl_AppendResult(interp, zBuf, 0);
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: page_unref PAGE
+**
+** Drop a pointer to a page.
+*/
+static int page_unref(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ void *pPage;
+ int rc;
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " PAGE\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pPage) ) return TCL_ERROR;
+ rc = sqlitepager_unref(pPage);
+ if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: page_read PAGE
+**
+** Return the content of a page
+*/
+static int page_read(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ char zBuf[100];
+ void *pPage;
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " PAGE\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pPage) ) return TCL_ERROR;
+ memcpy(zBuf, pPage, sizeof(zBuf));
+ Tcl_AppendResult(interp, zBuf, 0);
+ return TCL_OK;
+}
+
+/*
+** Usage: page_number PAGE
+**
+** Return the page number for a page.
+*/
+static int page_number(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ char zBuf[100];
+ void *pPage;
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " PAGE\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pPage) ) return TCL_ERROR;
+ sprintf(zBuf, "%d", sqlitepager_pagenumber(pPage));
+ Tcl_AppendResult(interp, zBuf, 0);
+ return TCL_OK;
+}
+
+/*
+** Usage: page_write PAGE DATA
+**
+** Write something into a page.
+*/
+static int page_write(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ void *pPage;
+ int rc;
+ if( argc!=3 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " PAGE DATA\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pPage) ) return TCL_ERROR;
+ rc = sqlitepager_write(pPage);
+ if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ strncpy((char*)pPage, argv[2], SQLITE_USABLE_SIZE-1);
+ ((char*)pPage)[SQLITE_USABLE_SIZE-1] = 0;
+ return TCL_OK;
+}
+
+/*
+** Usage: fake_big_file N FILENAME
+**
+** Write a few bytes at the N megabyte point of FILENAME. This will
+** create a large file. If the file was a valid SQLite database, then
+** the next time the database is opened, SQLite will begin allocating
+** new pages after N. If N is 2096 or bigger, this will test the
+** ability of SQLite to write to large files.
+*/
+static int fake_big_file(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ int rc;
+ int n;
+ off_t offset;
+ OsFile fd;
+ int readOnly = 0;
+ if( argc!=3 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " N-MEGABYTES FILE\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], &n) ) return TCL_ERROR;
+ rc = sqliteOsOpenReadWrite(argv[2], &fd, &readOnly);
+ if( rc ){
+ Tcl_AppendResult(interp, "open failed: ", errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ offset = n;
+ offset *= 1024*1024;
+ rc = sqliteOsSeek(&fd, offset);
+ if( rc ){
+ Tcl_AppendResult(interp, "seek failed: ", errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ rc = sqliteOsWrite(&fd, "Hello, World!", 14);
+ sqliteOsClose(&fd);
+ if( rc ){
+ Tcl_AppendResult(interp, "write failed: ", errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
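+
+/*
+** For instance, a test script might use this command to grow a database
+** file past the 2 GiB mark before reopening it (the file name below is
+** illustrative, not taken from the test suite):
+**
+**   fake_big_file 2048 big.db
+*/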
+
+/*
+** Register commands with the TCL interpreter.
+*/
+int Sqlitetest2_Init(Tcl_Interp *interp){
+ extern int sqlite_io_error_pending;
+ char zBuf[100];
+ static struct {
+ char *zName;
+ Tcl_CmdProc *xProc;
+ } aCmd[] = {
+ { "pager_open", (Tcl_CmdProc*)pager_open },
+ { "pager_close", (Tcl_CmdProc*)pager_close },
+ { "pager_commit", (Tcl_CmdProc*)pager_commit },
+ { "pager_rollback", (Tcl_CmdProc*)pager_rollback },
+ { "pager_ckpt_begin", (Tcl_CmdProc*)pager_ckpt_begin },
+ { "pager_ckpt_commit", (Tcl_CmdProc*)pager_ckpt_commit },
+ { "pager_ckpt_rollback", (Tcl_CmdProc*)pager_ckpt_rollback },
+ { "pager_stats", (Tcl_CmdProc*)pager_stats },
+ { "pager_pagecount", (Tcl_CmdProc*)pager_pagecount },
+ { "page_get", (Tcl_CmdProc*)page_get },
+ { "page_lookup", (Tcl_CmdProc*)page_lookup },
+ { "page_unref", (Tcl_CmdProc*)page_unref },
+ { "page_read", (Tcl_CmdProc*)page_read },
+ { "page_write", (Tcl_CmdProc*)page_write },
+ { "page_number", (Tcl_CmdProc*)page_number },
+ { "fake_big_file", (Tcl_CmdProc*)fake_big_file },
+ };
+ int i;
+ for(i=0; i<sizeof(aCmd)/sizeof(aCmd[0]); i++){
+ Tcl_CreateCommand(interp, aCmd[i].zName, aCmd[i].xProc, 0, 0);
+ }
+ Tcl_LinkVar(interp, "sqlite_io_error_pending",
+ (char*)&sqlite_io_error_pending, TCL_LINK_INT);
+#ifdef SQLITE_TEST
+ Tcl_LinkVar(interp, "journal_format",
+ (char*)&journal_format, TCL_LINK_INT);
+#endif
+ sprintf(zBuf, "%d", SQLITE_PAGE_SIZE);
+ Tcl_SetVar(interp, "SQLITE_PAGE_SIZE", zBuf, TCL_GLOBAL_ONLY);
+ sprintf(zBuf, "%d", SQLITE_PAGE_RESERVE);
+ Tcl_SetVar(interp, "SQLITE_PAGE_RESERVE", zBuf, TCL_GLOBAL_ONLY);
+ sprintf(zBuf, "%d", SQLITE_USABLE_SIZE);
+ Tcl_SetVar(interp, "SQLITE_USABLE_SIZE", zBuf, TCL_GLOBAL_ONLY);
+ return TCL_OK;
+}
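+
+/*
+** A minimal sketch of how the commands above might be exercised from a Tcl
+** test script once Sqlitetest2_Init() has run (the file name "ptf1.db" is
+** illustrative):
+**
+**   set p [pager_open ptf1.db 10]   ;# pager with a 10-page cache
+**   set g [page_get $p 1]           ;# fetch page 1, returns a page handle
+**   page_write $g "hello"           ;# start a write and store some text
+**   pager_commit $p                 ;# flush the change to the file
+**   page_unref $g
+**   pager_close $p
+*/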
diff --git a/usr/src/cmd/svc/configd/sqlite/src/test3.c b/usr/src/cmd/svc/configd/sqlite/src/test3.c
new file mode 100644
index 0000000000..8b3fb8e507
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/test3.c
@@ -0,0 +1,995 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** Code for testing the btree.c module in SQLite. This code
+** is not included in the SQLite library. It is used for automated
+** testing of the SQLite library.
+**
+** $Id: test3.c,v 1.23 2003/04/13 18:26:52 paul Exp $
+*/
+#include "sqliteInt.h"
+#include "pager.h"
+#include "btree.h"
+#include "tcl.h"
+#include <stdlib.h>
+#include <string.h>
+
+/*
+** Interpret an SQLite error number
+*/
+static char *errorName(int rc){
+ char *zName;
+ switch( rc ){
+ case SQLITE_OK: zName = "SQLITE_OK"; break;
+ case SQLITE_ERROR: zName = "SQLITE_ERROR"; break;
+ case SQLITE_INTERNAL: zName = "SQLITE_INTERNAL"; break;
+ case SQLITE_PERM: zName = "SQLITE_PERM"; break;
+ case SQLITE_ABORT: zName = "SQLITE_ABORT"; break;
+ case SQLITE_BUSY: zName = "SQLITE_BUSY"; break;
+ case SQLITE_NOMEM: zName = "SQLITE_NOMEM"; break;
+ case SQLITE_READONLY: zName = "SQLITE_READONLY"; break;
+ case SQLITE_INTERRUPT: zName = "SQLITE_INTERRUPT"; break;
+ case SQLITE_IOERR: zName = "SQLITE_IOERR"; break;
+ case SQLITE_CORRUPT: zName = "SQLITE_CORRUPT"; break;
+ case SQLITE_NOTFOUND: zName = "SQLITE_NOTFOUND"; break;
+ case SQLITE_FULL: zName = "SQLITE_FULL"; break;
+ case SQLITE_CANTOPEN: zName = "SQLITE_CANTOPEN"; break;
+ case SQLITE_PROTOCOL: zName = "SQLITE_PROTOCOL"; break;
+ default: zName = "SQLITE_Unknown"; break;
+ }
+ return zName;
+}
+
+/*
+** Usage: btree_open FILENAME
+**
+** Open a new database
+*/
+static int btree_open(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Btree *pBt;
+ int rc;
+ char zBuf[100];
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " FILENAME\"", 0);
+ return TCL_ERROR;
+ }
+ rc = sqliteBtreeFactory(0, argv[1], 0, 1000, &pBt);
+ if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ sprintf(zBuf,"%p", pBt);
+ if( strncmp(zBuf,"0x",2) ){
+ sprintf(zBuf, "0x%p", pBt);
+ }
+ Tcl_AppendResult(interp, zBuf, 0);
+ return TCL_OK;
+}
+
+/*
+** Usage: btree_close ID
+**
+** Close the given database.
+*/
+static int btree_close(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Btree *pBt;
+ int rc;
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pBt) ) return TCL_ERROR;
+ rc = sqliteBtreeClose(pBt);
+ if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: btree_begin_transaction ID
+**
+** Start a new transaction
+*/
+static int btree_begin_transaction(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Btree *pBt;
+ int rc;
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pBt) ) return TCL_ERROR;
+ rc = sqliteBtreeBeginTrans(pBt);
+ if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: btree_rollback ID
+**
+** Rollback changes
+*/
+static int btree_rollback(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Btree *pBt;
+ int rc;
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pBt) ) return TCL_ERROR;
+ rc = sqliteBtreeRollback(pBt);
+ if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: btree_commit ID
+**
+** Commit all changes
+*/
+static int btree_commit(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Btree *pBt;
+ int rc;
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pBt) ) return TCL_ERROR;
+ rc = sqliteBtreeCommit(pBt);
+ if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: btree_create_table ID
+**
+** Create a new table in the database
+*/
+static int btree_create_table(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Btree *pBt;
+ int rc, iTable;
+ char zBuf[30];
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pBt) ) return TCL_ERROR;
+ rc = sqliteBtreeCreateTable(pBt, &iTable);
+ if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ sprintf(zBuf, "%d", iTable);
+ Tcl_AppendResult(interp, zBuf, 0);
+ return TCL_OK;
+}
+
+/*
+** Usage: btree_drop_table ID TABLENUM
+**
+** Delete an entire table from the database
+*/
+static int btree_drop_table(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Btree *pBt;
+ int iTable;
+ int rc;
+ if( argc!=3 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID TABLENUM\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pBt) ) return TCL_ERROR;
+ if( Tcl_GetInt(interp, argv[2], &iTable) ) return TCL_ERROR;
+ rc = sqliteBtreeDropTable(pBt, iTable);
+ if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: btree_clear_table ID TABLENUM
+**
+** Remove all entries from the given table but keep the table around.
+*/
+static int btree_clear_table(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Btree *pBt;
+ int iTable;
+ int rc;
+ if( argc!=3 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID TABLENUM\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pBt) ) return TCL_ERROR;
+ if( Tcl_GetInt(interp, argv[2], &iTable) ) return TCL_ERROR;
+ rc = sqliteBtreeClearTable(pBt, iTable);
+ if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: btree_get_meta ID
+**
+** Return meta data
+*/
+static int btree_get_meta(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Btree *pBt;
+ int rc;
+ int i;
+ int aMeta[SQLITE_N_BTREE_META];
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pBt) ) return TCL_ERROR;
+ rc = sqliteBtreeGetMeta(pBt, aMeta);
+ if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ for(i=0; i<SQLITE_N_BTREE_META; i++){
+ char zBuf[30];
+ sprintf(zBuf,"%d",aMeta[i]);
+ Tcl_AppendElement(interp, zBuf);
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: btree_update_meta ID METADATA...
+**
+** Update the meta data
+*/
+static int btree_update_meta(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Btree *pBt;
+ int rc;
+ int i;
+ int aMeta[SQLITE_N_BTREE_META];
+
+ if( argc!=2+SQLITE_N_BTREE_META ){
+ char zBuf[30];
+ sprintf(zBuf,"%d",SQLITE_N_BTREE_META);
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID METADATA...\" (METADATA is ", zBuf, " integers)", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pBt) ) return TCL_ERROR;
+ for(i=0; i<SQLITE_N_BTREE_META; i++){
+ if( Tcl_GetInt(interp, argv[i+2], &aMeta[i]) ) return TCL_ERROR;
+ }
+ rc = sqliteBtreeUpdateMeta(pBt, aMeta);
+ if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: btree_page_dump ID PAGENUM
+**
+** Print a disassembly of a page on standard output
+*/
+static int btree_page_dump(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Btree *pBt;
+ int iPage;
+ int rc;
+
+ if( argc!=3 ){
+    Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+       " ID PAGENUM\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pBt) ) return TCL_ERROR;
+ if( Tcl_GetInt(interp, argv[2], &iPage) ) return TCL_ERROR;
+ rc = sqliteBtreePageDump(pBt, iPage, 0);
+ if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: btree_tree_dump ID PAGENUM
+**
+** Print a disassembly of a page and all its child pages on standard output
+*/
+static int btree_tree_dump(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Btree *pBt;
+ int iPage;
+ int rc;
+
+ if( argc!=3 ){
+    Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+       " ID PAGENUM\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pBt) ) return TCL_ERROR;
+ if( Tcl_GetInt(interp, argv[2], &iPage) ) return TCL_ERROR;
+ rc = sqliteBtreePageDump(pBt, iPage, 1);
+ if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: btree_pager_stats ID
+**
+** Returns pager statistics
+*/
+static int btree_pager_stats(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Btree *pBt;
+ int i;
+ int *a;
+
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pBt) ) return TCL_ERROR;
+ a = sqlitepager_stats(sqliteBtreePager(pBt));
+ for(i=0; i<9; i++){
+ static char *zName[] = {
+ "ref", "page", "max", "size", "state", "err",
+ "hit", "miss", "ovfl",
+ };
+ char zBuf[100];
+ Tcl_AppendElement(interp, zName[i]);
+ sprintf(zBuf,"%d",a[i]);
+ Tcl_AppendElement(interp, zBuf);
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: btree_pager_ref_dump ID
+**
+** Print out all outstanding pages.
+*/
+static int btree_pager_ref_dump(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Btree *pBt;
+
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pBt) ) return TCL_ERROR;
+ sqlitepager_refdump(sqliteBtreePager(pBt));
+ return TCL_OK;
+}
+
+/*
+** Usage: btree_integrity_check ID ROOT ...
+**
+** Look through every page of the given BTree file to verify correct
+** formatting and linkage. Return a line of text for each problem found.
+** Return an empty string if everything worked.
+*/
+static int btree_integrity_check(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Btree *pBt;
+ char *zResult;
+ int nRoot;
+ int *aRoot;
+ int i;
+
+ if( argc<3 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID ROOT ...\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pBt) ) return TCL_ERROR;
+ nRoot = argc-2;
+ aRoot = malloc( sizeof(int)*(argc-2) );
+ for(i=0; i<argc-2; i++){
+ if( Tcl_GetInt(interp, argv[i+2], &aRoot[i]) ) return TCL_ERROR;
+ }
+ zResult = sqliteBtreeIntegrityCheck(pBt, aRoot, nRoot);
+ if( zResult ){
+ Tcl_AppendResult(interp, zResult, 0);
+ sqliteFree(zResult);
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: btree_cursor ID TABLENUM WRITEABLE
+**
+** Create a new cursor. Return the ID for the cursor.
+*/
+static int btree_cursor(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ Btree *pBt;
+ int iTable;
+ BtCursor *pCur;
+ int rc;
+ int wrFlag;
+ char zBuf[30];
+
+ if( argc!=4 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID TABLENUM WRITEABLE\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pBt) ) return TCL_ERROR;
+ if( Tcl_GetInt(interp, argv[2], &iTable) ) return TCL_ERROR;
+ if( Tcl_GetBoolean(interp, argv[3], &wrFlag) ) return TCL_ERROR;
+ rc = sqliteBtreeCursor(pBt, iTable, wrFlag, &pCur);
+ if( rc ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ sprintf(zBuf,"0x%x", (int)pCur);
+ Tcl_AppendResult(interp, zBuf, 0);
+ return SQLITE_OK;
+}
+
+/*
+** Usage: btree_close_cursor ID
+**
+** Close a cursor opened using btree_cursor.
+*/
+static int btree_close_cursor(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ BtCursor *pCur;
+ int rc;
+
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pCur) ) return TCL_ERROR;
+ rc = sqliteBtreeCloseCursor(pCur);
+ if( rc ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Usage: btree_move_to ID KEY
+**
+** Move the cursor to the entry with the given key.
+*/
+static int btree_move_to(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ BtCursor *pCur;
+ int rc;
+ int res;
+ char zBuf[20];
+
+ if( argc!=3 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID KEY\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pCur) ) return TCL_ERROR;
+ rc = sqliteBtreeMoveto(pCur, argv[2], strlen(argv[2]), &res);
+ if( rc ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ if( res<0 ) res = -1;
+ if( res>0 ) res = 1;
+ sprintf(zBuf,"%d",res);
+ Tcl_AppendResult(interp, zBuf, 0);
+ return SQLITE_OK;
+}
+
+/*
+** Usage: btree_delete ID
+**
+** Delete the entry that the cursor is pointing to
+*/
+static int btree_delete(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ BtCursor *pCur;
+ int rc;
+
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pCur) ) return TCL_ERROR;
+ rc = sqliteBtreeDelete(pCur);
+ if( rc ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Usage: btree_insert ID KEY DATA
+**
+** Create a new entry with the given key and data. If an entry already
+** exists with the same key, the old entry is overwritten.
+*/
+static int btree_insert(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ BtCursor *pCur;
+ int rc;
+
+ if( argc!=4 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID KEY DATA\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pCur) ) return TCL_ERROR;
+ rc = sqliteBtreeInsert(pCur, argv[2], strlen(argv[2]),
+ argv[3], strlen(argv[3]));
+ if( rc ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Usage: btree_next ID
+**
+** Move the cursor to the next entry in the table. Return 0 on success
+** or 1 if the cursor was already on the last entry in the table or if
+** the table is empty.
+*/
+static int btree_next(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ BtCursor *pCur;
+ int rc;
+ int res = 0;
+ char zBuf[100];
+
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pCur) ) return TCL_ERROR;
+ rc = sqliteBtreeNext(pCur, &res);
+ if( rc ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ sprintf(zBuf,"%d",res);
+ Tcl_AppendResult(interp, zBuf, 0);
+ return SQLITE_OK;
+}
+
+/*
+** Usage: btree_prev ID
+**
+** Move the cursor to the previous entry in the table. Return 0 on
+** success and 1 if the cursor was already on the first entry in
+** the table or if the table was empty.
+*/
+static int btree_prev(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ BtCursor *pCur;
+ int rc;
+ int res = 0;
+ char zBuf[100];
+
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pCur) ) return TCL_ERROR;
+ rc = sqliteBtreePrevious(pCur, &res);
+ if( rc ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ sprintf(zBuf,"%d",res);
+ Tcl_AppendResult(interp, zBuf, 0);
+ return SQLITE_OK;
+}
+
+/*
+** Usage: btree_first ID
+**
+** Move the cursor to the first entry in the table. Return 0 if the
+** cursor was left pointing to something and 1 if the table is empty.
+*/
+static int btree_first(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ BtCursor *pCur;
+ int rc;
+ int res = 0;
+ char zBuf[100];
+
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pCur) ) return TCL_ERROR;
+ rc = sqliteBtreeFirst(pCur, &res);
+ if( rc ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ sprintf(zBuf,"%d",res);
+ Tcl_AppendResult(interp, zBuf, 0);
+ return SQLITE_OK;
+}
+
+/*
+** Usage: btree_last ID
+**
+** Move the cursor to the last entry in the table. Return 0 if the
+** cursor was left pointing to something and 1 if the table is empty.
+*/
+static int btree_last(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ BtCursor *pCur;
+ int rc;
+ int res = 0;
+ char zBuf[100];
+
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pCur) ) return TCL_ERROR;
+ rc = sqliteBtreeLast(pCur, &res);
+ if( rc ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ sprintf(zBuf,"%d",res);
+ Tcl_AppendResult(interp, zBuf, 0);
+ return SQLITE_OK;
+}
+
+/*
+** Usage: btree_key ID
+**
+** Return the key for the entry at which the cursor is pointing.
+*/
+static int btree_key(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ BtCursor *pCur;
+ int rc;
+ int n;
+ char *zBuf;
+
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pCur) ) return TCL_ERROR;
+ sqliteBtreeKeySize(pCur, &n);
+ zBuf = malloc( n+1 );
+ rc = sqliteBtreeKey(pCur, 0, n, zBuf);
+ if( rc!=n ){
+ char zMsg[100];
+ free(zBuf);
+ sprintf(zMsg, "truncated key: got %d of %d bytes", rc, n);
+ Tcl_AppendResult(interp, zMsg, 0);
+ return TCL_ERROR;
+ }
+ zBuf[n] = 0;
+ Tcl_AppendResult(interp, zBuf, 0);
+ free(zBuf);
+ return SQLITE_OK;
+}
+
+/*
+** Usage: btree_data ID
+**
+** Return the data for the entry at which the cursor is pointing.
+*/
+static int btree_data(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ BtCursor *pCur;
+ int rc;
+ int n;
+ char *zBuf;
+
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pCur) ) return TCL_ERROR;
+ sqliteBtreeDataSize(pCur, &n);
+ zBuf = malloc( n+1 );
+ rc = sqliteBtreeData(pCur, 0, n, zBuf);
+ if( rc!=n ){
+ char zMsg[100];
+ free(zBuf);
+ sprintf(zMsg, "truncated data: got %d of %d bytes", rc, n);
+ Tcl_AppendResult(interp, zMsg, 0);
+ return TCL_ERROR;
+ }
+ zBuf[n] = 0;
+ Tcl_AppendResult(interp, zBuf, 0);
+ free(zBuf);
+ return SQLITE_OK;
+}
+
+/*
+** Usage: btree_payload_size ID
+**
+** Return the number of bytes of payload
+*/
+static int btree_payload_size(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ BtCursor *pCur;
+ int n1, n2;
+ char zBuf[50];
+
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pCur) ) return TCL_ERROR;
+ sqliteBtreeKeySize(pCur, &n1);
+ sqliteBtreeDataSize(pCur, &n2);
+ sprintf(zBuf, "%d", n1+n2);
+ Tcl_AppendResult(interp, zBuf, 0);
+ return SQLITE_OK;
+}
+
+/*
+** Usage: btree_cursor_dump ID
+**
+** Return eight integers containing information about the entry the
+** cursor is pointing to:
+**
+** aResult[0] = The page number
+** aResult[1] = The entry number
+** aResult[2] = Total number of entries on this page
+** aResult[3] = Size of this entry
+** aResult[4] = Number of free bytes on this page
+** aResult[5] = Number of free blocks on the page
+** aResult[6] = Page number of the left child of this entry
+** aResult[7] = Page number of the right child for the whole page
+*/
+static int btree_cursor_dump(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ BtCursor *pCur;
+ int rc;
+ int i, j;
+ int aResult[8];
+ char zBuf[400];
+
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID\"", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[1], (int*)&pCur) ) return TCL_ERROR;
+ rc = sqliteBtreeCursorDump(pCur, aResult);
+ if( rc ){
+ Tcl_AppendResult(interp, errorName(rc), 0);
+ return TCL_ERROR;
+ }
+ j = 0;
+ for(i=0; i<sizeof(aResult)/sizeof(aResult[0]); i++){
+ sprintf(&zBuf[j]," %d", aResult[i]);
+ j += strlen(&zBuf[j]);
+ }
+ Tcl_AppendResult(interp, &zBuf[1], 0);
+ return SQLITE_OK;
+}
+
+/*
+** Register commands with the TCL interpreter.
+*/
+int Sqlitetest3_Init(Tcl_Interp *interp){
+ static struct {
+ char *zName;
+ Tcl_CmdProc *xProc;
+ } aCmd[] = {
+ { "btree_open", (Tcl_CmdProc*)btree_open },
+ { "btree_close", (Tcl_CmdProc*)btree_close },
+ { "btree_begin_transaction", (Tcl_CmdProc*)btree_begin_transaction },
+ { "btree_commit", (Tcl_CmdProc*)btree_commit },
+ { "btree_rollback", (Tcl_CmdProc*)btree_rollback },
+ { "btree_create_table", (Tcl_CmdProc*)btree_create_table },
+ { "btree_drop_table", (Tcl_CmdProc*)btree_drop_table },
+ { "btree_clear_table", (Tcl_CmdProc*)btree_clear_table },
+ { "btree_get_meta", (Tcl_CmdProc*)btree_get_meta },
+ { "btree_update_meta", (Tcl_CmdProc*)btree_update_meta },
+ { "btree_page_dump", (Tcl_CmdProc*)btree_page_dump },
+ { "btree_tree_dump", (Tcl_CmdProc*)btree_tree_dump },
+ { "btree_pager_stats", (Tcl_CmdProc*)btree_pager_stats },
+ { "btree_pager_ref_dump", (Tcl_CmdProc*)btree_pager_ref_dump },
+ { "btree_cursor", (Tcl_CmdProc*)btree_cursor },
+ { "btree_close_cursor", (Tcl_CmdProc*)btree_close_cursor },
+ { "btree_move_to", (Tcl_CmdProc*)btree_move_to },
+ { "btree_delete", (Tcl_CmdProc*)btree_delete },
+ { "btree_insert", (Tcl_CmdProc*)btree_insert },
+ { "btree_next", (Tcl_CmdProc*)btree_next },
+ { "btree_prev", (Tcl_CmdProc*)btree_prev },
+ { "btree_key", (Tcl_CmdProc*)btree_key },
+ { "btree_data", (Tcl_CmdProc*)btree_data },
+ { "btree_payload_size", (Tcl_CmdProc*)btree_payload_size },
+ { "btree_first", (Tcl_CmdProc*)btree_first },
+ { "btree_last", (Tcl_CmdProc*)btree_last },
+ { "btree_cursor_dump", (Tcl_CmdProc*)btree_cursor_dump },
+ { "btree_integrity_check", (Tcl_CmdProc*)btree_integrity_check },
+ };
+ int i;
+
+ for(i=0; i<sizeof(aCmd)/sizeof(aCmd[0]); i++){
+ Tcl_CreateCommand(interp, aCmd[i].zName, aCmd[i].xProc, 0, 0);
+ }
+ Tcl_LinkVar(interp, "pager_refinfo_enable", (char*)&pager_refinfo_enable,
+ TCL_LINK_INT);
+ Tcl_LinkVar(interp, "btree_native_byte_order",(char*)&btree_native_byte_order,
+ TCL_LINK_INT);
+ return TCL_OK;
+}
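+
+/*
+** A minimal sketch of a Tcl session using these commands once
+** Sqlitetest3_Init() has run (the file name "test.bt" is illustrative):
+**
+**   set b [btree_open test.bt]
+**   btree_begin_transaction $b
+**   set t [btree_create_table $b]      ;# returns the new table's number
+**   set c [btree_cursor $b $t 1]       ;# writable cursor on the new table
+**   btree_insert $c one first-entry
+**   btree_move_to $c one               ;# returns 0 when the key is found
+**   btree_key $c                       ;# -> one
+**   btree_data $c                      ;# -> first-entry
+**   btree_close_cursor $c
+**   btree_commit $b
+**   btree_close $b
+*/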
diff --git a/usr/src/cmd/svc/configd/sqlite/src/test4.c b/usr/src/cmd/svc/configd/sqlite/src/test4.c
new file mode 100644
index 0000000000..966ae6c9b1
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/test4.c
@@ -0,0 +1,637 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2003 December 18
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** Code for testing the SQLite library in a multithreaded environment.
+**
+** $Id: test4.c,v 1.3 2004/04/23 17:04:45 drh Exp $
+*/
+#include "sqliteInt.h"
+#include "tcl.h"
+#if defined(OS_UNIX) && OS_UNIX==1 && defined(THREADSAFE) && THREADSAFE==1
+#include <stdlib.h>
+#include <string.h>
+#include <pthread.h>
+#include <sched.h>
+#include <ctype.h>
+
+/*
+** Each thread is controlled by an instance of the following
+** structure.
+*/
+typedef struct Thread Thread;
+struct Thread {
+ /* The first group of fields are writable by the master and read-only
+ ** to the thread. */
+ char *zFilename; /* Name of database file */
+ void (*xOp)(Thread*); /* next operation to do */
+ char *zArg; /* argument usable by xOp */
+ int opnum; /* Operation number */
+ int busy; /* True if this thread is in use */
+
+ /* The next group of fields are writable by the thread but read-only to the
+ ** master. */
+ int completed; /* Number of operations completed */
+ sqlite *db; /* Open database */
+ sqlite_vm *vm; /* Pending operation */
+ char *zErr; /* operation error */
+ char *zStaticErr; /* Static error message */
+ int rc; /* operation return code */
+ int argc; /* number of columns in result */
+ const char **argv; /* result columns */
+ const char **colv; /* result column names */
+};
+
+/*
+** There can be as many as 26 threads running at once. Each is named
+** by a capital letter: A, B, C, ..., Y, Z.
+*/
+#define N_THREAD 26
+static Thread threadset[N_THREAD];
+
+
+/*
+** The main loop for a thread. Threads use busy waiting.
+*/
+static void *thread_main(void *pArg){
+ Thread *p = (Thread*)pArg;
+ if( p->db ){
+ sqlite_close(p->db);
+ }
+ p->db = sqlite_open(p->zFilename, 0, &p->zErr);
+ p->vm = 0;
+ p->completed = 1;
+ while( p->opnum<=p->completed ) sched_yield();
+ while( p->xOp ){
+ if( p->zErr && p->zErr!=p->zStaticErr ){
+ sqlite_freemem(p->zErr);
+ p->zErr = 0;
+ }
+ (*p->xOp)(p);
+ p->completed++;
+ while( p->opnum<=p->completed ) sched_yield();
+ }
+ if( p->vm ){
+ sqlite_finalize(p->vm, 0);
+ p->vm = 0;
+ }
+ if( p->db ){
+ sqlite_close(p->db);
+ p->db = 0;
+ }
+ if( p->zErr && p->zErr!=p->zStaticErr ){
+ sqlite_freemem(p->zErr);
+ p->zErr = 0;
+ }
+ p->completed++;
+ return 0;
+}
+
+/*
+** Parse a thread ID, which must be an upper case letter, and return its index.
+** If the argument is not a valid thread ID put an error message in
+** the interpreter and return -1.
+*/
+static int parse_thread_id(Tcl_Interp *interp, const char *zArg){
+ if( zArg==0 || zArg[0]==0 || zArg[1]!=0 || !isupper(zArg[0]) ){
+ Tcl_AppendResult(interp, "thread ID must be an upper case letter", 0);
+ return -1;
+ }
+ return zArg[0] - 'A';
+}
+
+/*
+** Usage: thread_create NAME FILENAME
+**
+** NAME should be an upper case letter. Start the thread running with
+** an open connection to the given database.
+*/
+static int tcl_thread_create(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ int i;
+ pthread_t x;
+ int rc;
+
+ if( argc!=3 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID FILENAME", 0);
+ return TCL_ERROR;
+ }
+ i = parse_thread_id(interp, argv[1]);
+ if( i<0 ) return TCL_ERROR;
+ if( threadset[i].busy ){
+ Tcl_AppendResult(interp, "thread ", argv[1], " is already running", 0);
+ return TCL_ERROR;
+ }
+ threadset[i].busy = 1;
+ sqliteFree(threadset[i].zFilename);
+ threadset[i].zFilename = sqliteStrDup(argv[2]);
+ threadset[i].opnum = 1;
+ threadset[i].completed = 0;
+ rc = pthread_create(&x, 0, thread_main, &threadset[i]);
+ if( rc ){
+ Tcl_AppendResult(interp, "failed to create the thread", 0);
+ sqliteFree(threadset[i].zFilename);
+ threadset[i].busy = 0;
+ return TCL_ERROR;
+ }
+ pthread_detach(x);
+ return TCL_OK;
+}
+
+/*
+** Wait for a thread to reach its idle state.
+*/
+static void thread_wait(Thread *p){
+ while( p->opnum>p->completed ) sched_yield();
+}
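+
+/*
+** Each of the tcl_thread_* commands below dispatches work to a thread with
+** the same handshake, sketched here (do_work stands in for one of the do_*
+** routines defined later in this file):
+**
+**   thread_wait(p);     wait until p->completed catches up with p->opnum
+**   p->xOp = do_work;   publish the next operation (and p->zArg, if any)
+**   p->opnum++;         wake the busy-waiting worker, which runs xOp and
+**                       then increments p->completed
+*/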
+
+/*
+** Usage: thread_wait ID
+**
+** Wait on thread ID to reach its idle state.
+*/
+static int tcl_thread_wait(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ int i;
+
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID", 0);
+ return TCL_ERROR;
+ }
+ i = parse_thread_id(interp, argv[1]);
+ if( i<0 ) return TCL_ERROR;
+ if( !threadset[i].busy ){
+ Tcl_AppendResult(interp, "no such thread", 0);
+ return TCL_ERROR;
+ }
+ thread_wait(&threadset[i]);
+ return TCL_OK;
+}
+
+/*
+** Stop a thread.
+*/
+static void stop_thread(Thread *p){
+ thread_wait(p);
+ p->xOp = 0;
+ p->opnum++;
+ thread_wait(p);
+ sqliteFree(p->zArg);
+ p->zArg = 0;
+ sqliteFree(p->zFilename);
+ p->zFilename = 0;
+ p->busy = 0;
+}
+
+/*
+** Usage: thread_halt ID
+**
+** Cause a thread to shut itself down. Wait for the shutdown to be
+** completed. If ID is "*" then stop all threads.
+*/
+static int tcl_thread_halt(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ int i;
+
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID", 0);
+ return TCL_ERROR;
+ }
+ if( argv[1][0]=='*' && argv[1][1]==0 ){
+ for(i=0; i<N_THREAD; i++){
+ if( threadset[i].busy ) stop_thread(&threadset[i]);
+ }
+ }else{
+ i = parse_thread_id(interp, argv[1]);
+ if( i<0 ) return TCL_ERROR;
+ if( !threadset[i].busy ){
+ Tcl_AppendResult(interp, "no such thread", 0);
+ return TCL_ERROR;
+ }
+ stop_thread(&threadset[i]);
+ }
+ return TCL_OK;
+}
+
+/*
+** Usage: thread_argc ID
+**
+** Wait on the most recent thread_step to complete, then return the
+** number of columns in the result set.
+*/
+static int tcl_thread_argc(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ int i;
+ char zBuf[100];
+
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID", 0);
+ return TCL_ERROR;
+ }
+ i = parse_thread_id(interp, argv[1]);
+ if( i<0 ) return TCL_ERROR;
+ if( !threadset[i].busy ){
+ Tcl_AppendResult(interp, "no such thread", 0);
+ return TCL_ERROR;
+ }
+ thread_wait(&threadset[i]);
+ sprintf(zBuf, "%d", threadset[i].argc);
+ Tcl_AppendResult(interp, zBuf, 0);
+ return TCL_OK;
+}
+
+/*
+** Usage: thread_argv ID N
+**
+** Wait on the most recent thread_step to complete, then return the
+** value of the N-th column in the result set.
+*/
+static int tcl_thread_argv(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ int i;
+ int n;
+
+ if( argc!=3 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID N", 0);
+ return TCL_ERROR;
+ }
+ i = parse_thread_id(interp, argv[1]);
+ if( i<0 ) return TCL_ERROR;
+ if( !threadset[i].busy ){
+ Tcl_AppendResult(interp, "no such thread", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[2], &n) ) return TCL_ERROR;
+ thread_wait(&threadset[i]);
+ if( n<0 || n>=threadset[i].argc ){
+ Tcl_AppendResult(interp, "column number out of range", 0);
+ return TCL_ERROR;
+ }
+ Tcl_AppendResult(interp, threadset[i].argv[n], 0);
+ return TCL_OK;
+}
+
+/*
+** Usage: thread_colname ID N
+**
+** Wait on the most recent thread_step to complete, then return the
+** name of the N-th column in the result set.
+*/
+static int tcl_thread_colname(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ int i;
+ int n;
+
+ if( argc!=3 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID N", 0);
+ return TCL_ERROR;
+ }
+ i = parse_thread_id(interp, argv[1]);
+ if( i<0 ) return TCL_ERROR;
+ if( !threadset[i].busy ){
+ Tcl_AppendResult(interp, "no such thread", 0);
+ return TCL_ERROR;
+ }
+ if( Tcl_GetInt(interp, argv[2], &n) ) return TCL_ERROR;
+ thread_wait(&threadset[i]);
+ if( n<0 || n>=threadset[i].argc ){
+ Tcl_AppendResult(interp, "column number out of range", 0);
+ return TCL_ERROR;
+ }
+ Tcl_AppendResult(interp, threadset[i].colv[n], 0);
+ return TCL_OK;
+}
+
+/*
+** Usage: thread_result ID
+**
+** Wait on the most recent operation to complete, then return the
+** result code from that operation.
+*/
+static int tcl_thread_result(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ int i;
+ const char *zName;
+
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID", 0);
+ return TCL_ERROR;
+ }
+ i = parse_thread_id(interp, argv[1]);
+ if( i<0 ) return TCL_ERROR;
+ if( !threadset[i].busy ){
+ Tcl_AppendResult(interp, "no such thread", 0);
+ return TCL_ERROR;
+ }
+ thread_wait(&threadset[i]);
+ switch( threadset[i].rc ){
+ case SQLITE_OK: zName = "SQLITE_OK"; break;
+ case SQLITE_ERROR: zName = "SQLITE_ERROR"; break;
+ case SQLITE_INTERNAL: zName = "SQLITE_INTERNAL"; break;
+ case SQLITE_PERM: zName = "SQLITE_PERM"; break;
+ case SQLITE_ABORT: zName = "SQLITE_ABORT"; break;
+ case SQLITE_BUSY: zName = "SQLITE_BUSY"; break;
+ case SQLITE_LOCKED: zName = "SQLITE_LOCKED"; break;
+ case SQLITE_NOMEM: zName = "SQLITE_NOMEM"; break;
+ case SQLITE_READONLY: zName = "SQLITE_READONLY"; break;
+ case SQLITE_INTERRUPT: zName = "SQLITE_INTERRUPT"; break;
+ case SQLITE_IOERR: zName = "SQLITE_IOERR"; break;
+ case SQLITE_CORRUPT: zName = "SQLITE_CORRUPT"; break;
+ case SQLITE_NOTFOUND: zName = "SQLITE_NOTFOUND"; break;
+ case SQLITE_FULL: zName = "SQLITE_FULL"; break;
+ case SQLITE_CANTOPEN: zName = "SQLITE_CANTOPEN"; break;
+ case SQLITE_PROTOCOL: zName = "SQLITE_PROTOCOL"; break;
+ case SQLITE_EMPTY: zName = "SQLITE_EMPTY"; break;
+ case SQLITE_SCHEMA: zName = "SQLITE_SCHEMA"; break;
+ case SQLITE_TOOBIG: zName = "SQLITE_TOOBIG"; break;
+ case SQLITE_CONSTRAINT: zName = "SQLITE_CONSTRAINT"; break;
+ case SQLITE_MISMATCH: zName = "SQLITE_MISMATCH"; break;
+ case SQLITE_MISUSE: zName = "SQLITE_MISUSE"; break;
+ case SQLITE_NOLFS: zName = "SQLITE_NOLFS"; break;
+ case SQLITE_AUTH: zName = "SQLITE_AUTH"; break;
+ case SQLITE_FORMAT: zName = "SQLITE_FORMAT"; break;
+ case SQLITE_RANGE: zName = "SQLITE_RANGE"; break;
+ case SQLITE_ROW: zName = "SQLITE_ROW"; break;
+ case SQLITE_DONE: zName = "SQLITE_DONE"; break;
+ default: zName = "SQLITE_Unknown"; break;
+ }
+ Tcl_AppendResult(interp, zName, 0);
+ return TCL_OK;
+}
+
+/*
+** Usage: thread_error ID
+**
+** Wait on the most recent operation to complete, then return the
+** error string.
+*/
+static int tcl_thread_error(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ int i;
+
+ if( argc!=2 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID", 0);
+ return TCL_ERROR;
+ }
+ i = parse_thread_id(interp, argv[1]);
+ if( i<0 ) return TCL_ERROR;
+ if( !threadset[i].busy ){
+ Tcl_AppendResult(interp, "no such thread", 0);
+ return TCL_ERROR;
+ }
+ thread_wait(&threadset[i]);
+ Tcl_AppendResult(interp, threadset[i].zErr, 0);
+ return TCL_OK;
+}
+
+/*
+** This procedure runs in the thread to compile an SQL statement.
+*/
+static void do_compile(Thread *p){
+ if( p->db==0 ){
+ p->zErr = p->zStaticErr = "no database is open";
+ p->rc = SQLITE_ERROR;
+ return;
+ }
+ if( p->vm ){
+ sqlite_finalize(p->vm, 0);
+ p->vm = 0;
+ }
+ p->rc = sqlite_compile(p->db, p->zArg, 0, &p->vm, &p->zErr);
+}
+
+/*
+** Usage: thread_compile ID SQL
+**
+** Compile a new virtual machine.
+*/
+static int tcl_thread_compile(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ int i;
+ if( argc!=3 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID SQL", 0);
+ return TCL_ERROR;
+ }
+ i = parse_thread_id(interp, argv[1]);
+ if( i<0 ) return TCL_ERROR;
+ if( !threadset[i].busy ){
+ Tcl_AppendResult(interp, "no such thread", 0);
+ return TCL_ERROR;
+ }
+ thread_wait(&threadset[i]);
+ threadset[i].xOp = do_compile;
+ sqliteFree(threadset[i].zArg);
+ threadset[i].zArg = sqliteStrDup(argv[2]);
+ threadset[i].opnum++;
+ return TCL_OK;
+}
+
+/*
+** This procedure runs in the thread to step the virtual machine.
+*/
+static void do_step(Thread *p){
+ if( p->vm==0 ){
+ p->zErr = p->zStaticErr = "no virtual machine available";
+ p->rc = SQLITE_ERROR;
+ return;
+ }
+ p->rc = sqlite_step(p->vm, &p->argc, &p->argv, &p->colv);
+}
+
+/*
+** Usage: thread_step ID
+**
+** Advance the virtual machine by one step
+*/
+static int tcl_thread_step(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ int i;
+ if( argc!=2 ){
+    Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+       " ID", 0);
+ return TCL_ERROR;
+ }
+ i = parse_thread_id(interp, argv[1]);
+ if( i<0 ) return TCL_ERROR;
+ if( !threadset[i].busy ){
+ Tcl_AppendResult(interp, "no such thread", 0);
+ return TCL_ERROR;
+ }
+ thread_wait(&threadset[i]);
+ threadset[i].xOp = do_step;
+ threadset[i].opnum++;
+ return TCL_OK;
+}
+
+/*
+** This procedure runs in the thread to finalize a virtual machine.
+*/
+static void do_finalize(Thread *p){
+ if( p->vm==0 ){
+ p->zErr = p->zStaticErr = "no virtual machine available";
+ p->rc = SQLITE_ERROR;
+ return;
+ }
+ p->rc = sqlite_finalize(p->vm, &p->zErr);
+ p->vm = 0;
+}
+
+/*
+** Usage: thread_finalize ID
+**
+** Finalize the virtual machine.
+*/
+static int tcl_thread_finalize(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ int i;
+ if( argc!=2 ){
+    Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+       " ID", 0);
+ return TCL_ERROR;
+ }
+ i = parse_thread_id(interp, argv[1]);
+ if( i<0 ) return TCL_ERROR;
+ if( !threadset[i].busy ){
+ Tcl_AppendResult(interp, "no such thread", 0);
+ return TCL_ERROR;
+ }
+ thread_wait(&threadset[i]);
+ threadset[i].xOp = do_finalize;
+ sqliteFree(threadset[i].zArg);
+ threadset[i].zArg = 0;
+ threadset[i].opnum++;
+ return TCL_OK;
+}
+
+/*
+** Usage: thread_swap ID ID
+**
+** Interchange the sqlite* pointer between two threads.
+*/
+static int tcl_thread_swap(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ const char **argv /* Text of each argument */
+){
+ int i, j;
+ sqlite *temp;
+ if( argc!=3 ){
+ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
+ " ID1 ID2", 0);
+ return TCL_ERROR;
+ }
+ i = parse_thread_id(interp, argv[1]);
+ if( i<0 ) return TCL_ERROR;
+ if( !threadset[i].busy ){
+ Tcl_AppendResult(interp, "no such thread", 0);
+ return TCL_ERROR;
+ }
+ thread_wait(&threadset[i]);
+ j = parse_thread_id(interp, argv[2]);
+ if( j<0 ) return TCL_ERROR;
+ if( !threadset[j].busy ){
+ Tcl_AppendResult(interp, "no such thread", 0);
+ return TCL_ERROR;
+ }
+ thread_wait(&threadset[j]);
+ temp = threadset[i].db;
+ threadset[i].db = threadset[j].db;
+ threadset[j].db = temp;
+ return TCL_OK;
+}
+
+/*
+** Register commands with the TCL interpreter.
+*/
+int Sqlitetest4_Init(Tcl_Interp *interp){
+ static struct {
+ char *zName;
+ Tcl_CmdProc *xProc;
+ } aCmd[] = {
+ { "thread_create", (Tcl_CmdProc*)tcl_thread_create },
+ { "thread_wait", (Tcl_CmdProc*)tcl_thread_wait },
+ { "thread_halt", (Tcl_CmdProc*)tcl_thread_halt },
+ { "thread_argc", (Tcl_CmdProc*)tcl_thread_argc },
+ { "thread_argv", (Tcl_CmdProc*)tcl_thread_argv },
+ { "thread_colname", (Tcl_CmdProc*)tcl_thread_colname },
+ { "thread_result", (Tcl_CmdProc*)tcl_thread_result },
+ { "thread_error", (Tcl_CmdProc*)tcl_thread_error },
+ { "thread_compile", (Tcl_CmdProc*)tcl_thread_compile },
+ { "thread_step", (Tcl_CmdProc*)tcl_thread_step },
+ { "thread_finalize", (Tcl_CmdProc*)tcl_thread_finalize },
+ { "thread_swap", (Tcl_CmdProc*)tcl_thread_swap },
+ };
+ int i;
+
+ for(i=0; i<sizeof(aCmd)/sizeof(aCmd[0]); i++){
+ Tcl_CreateCommand(interp, aCmd[i].zName, aCmd[i].xProc, 0, 0);
+ }
+ return TCL_OK;
+}
+#else
+int Sqlitetest4_Init(Tcl_Interp *interp){ return TCL_OK; }
+#endif /* OS_UNIX */
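+
+/*
+** A minimal sketch of driving a worker thread from a Tcl test script (the
+** database name "test.db" and the query are illustrative):
+**
+**   thread_create A test.db
+**   thread_compile A {SELECT name FROM sqlite_master}
+**   thread_step A              ;# returns once the step has been dispatched
+**   thread_result A            ;# -> SQLITE_ROW or SQLITE_DONE
+**   thread_finalize A
+**   thread_halt A
+*/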
diff --git a/usr/src/cmd/svc/configd/sqlite/src/tokenize.c b/usr/src/cmd/svc/configd/sqlite/src/tokenize.c
new file mode 100644
index 0000000000..c7a6da42cb
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/tokenize.c
@@ -0,0 +1,682 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** A tokenizer for SQL
+**
+** This file contains C code that splits an SQL input string up into
+** individual tokens and sends those tokens one-by-one over to the
+** parser for analysis.
+**
+** $Id: tokenize.c,v 1.68 2004/02/14 23:59:58 drh Exp $
+*/
+#include "sqliteInt.h"
+#include "os.h"
+#include <ctype.h>
+#include <stdlib.h>
+
+/*
+** All the keywords of the SQL language are stored in a hash
+** table composed of instances of the following structure.
+*/
+typedef struct Keyword Keyword;
+struct Keyword {
+ char *zName; /* The keyword name */
+ u8 tokenType; /* Token value for this keyword */
+ u8 len; /* Length of this keyword */
+ u8 iNext; /* Index in aKeywordTable[] of next with same hash */
+};
+
+/*
+** These are the keywords
+*/
+static Keyword aKeywordTable[] = {
+ { "ABORT", TK_ABORT, },
+ { "AFTER", TK_AFTER, },
+ { "ALL", TK_ALL, },
+ { "AND", TK_AND, },
+ { "AS", TK_AS, },
+ { "ASC", TK_ASC, },
+ { "ATTACH", TK_ATTACH, },
+ { "BEFORE", TK_BEFORE, },
+ { "BEGIN", TK_BEGIN, },
+ { "BETWEEN", TK_BETWEEN, },
+ { "BY", TK_BY, },
+ { "CASCADE", TK_CASCADE, },
+ { "CASE", TK_CASE, },
+ { "CHECK", TK_CHECK, },
+ { "CLUSTER", TK_CLUSTER, },
+ { "COLLATE", TK_COLLATE, },
+ { "COMMIT", TK_COMMIT, },
+ { "CONFLICT", TK_CONFLICT, },
+ { "CONSTRAINT", TK_CONSTRAINT, },
+ { "COPY", TK_COPY, },
+ { "CREATE", TK_CREATE, },
+ { "CROSS", TK_JOIN_KW, },
+ { "DATABASE", TK_DATABASE, },
+ { "DEFAULT", TK_DEFAULT, },
+ { "DEFERRED", TK_DEFERRED, },
+ { "DEFERRABLE", TK_DEFERRABLE, },
+ { "DELETE", TK_DELETE, },
+ { "DELIMITERS", TK_DELIMITERS, },
+ { "DESC", TK_DESC, },
+ { "DETACH", TK_DETACH, },
+ { "DISTINCT", TK_DISTINCT, },
+ { "DROP", TK_DROP, },
+ { "END", TK_END, },
+ { "EACH", TK_EACH, },
+ { "ELSE", TK_ELSE, },
+ { "EXCEPT", TK_EXCEPT, },
+ { "EXPLAIN", TK_EXPLAIN, },
+ { "FAIL", TK_FAIL, },
+ { "FOR", TK_FOR, },
+ { "FOREIGN", TK_FOREIGN, },
+ { "FROM", TK_FROM, },
+ { "FULL", TK_JOIN_KW, },
+ { "GLOB", TK_GLOB, },
+ { "GROUP", TK_GROUP, },
+ { "HAVING", TK_HAVING, },
+ { "IGNORE", TK_IGNORE, },
+ { "IMMEDIATE", TK_IMMEDIATE, },
+ { "IN", TK_IN, },
+ { "INDEX", TK_INDEX, },
+ { "INITIALLY", TK_INITIALLY, },
+ { "INNER", TK_JOIN_KW, },
+ { "INSERT", TK_INSERT, },
+ { "INSTEAD", TK_INSTEAD, },
+ { "INTERSECT", TK_INTERSECT, },
+ { "INTO", TK_INTO, },
+ { "IS", TK_IS, },
+ { "ISNULL", TK_ISNULL, },
+ { "JOIN", TK_JOIN, },
+ { "KEY", TK_KEY, },
+ { "LEFT", TK_JOIN_KW, },
+ { "LIKE", TK_LIKE, },
+ { "LIMIT", TK_LIMIT, },
+ { "MATCH", TK_MATCH, },
+ { "NATURAL", TK_JOIN_KW, },
+ { "NOT", TK_NOT, },
+ { "NOTNULL", TK_NOTNULL, },
+ { "NULL", TK_NULL, },
+ { "OF", TK_OF, },
+ { "OFFSET", TK_OFFSET, },
+ { "ON", TK_ON, },
+ { "OR", TK_OR, },
+ { "ORDER", TK_ORDER, },
+ { "OUTER", TK_JOIN_KW, },
+ { "PRAGMA", TK_PRAGMA, },
+ { "PRIMARY", TK_PRIMARY, },
+ { "RAISE", TK_RAISE, },
+ { "REFERENCES", TK_REFERENCES, },
+ { "REPLACE", TK_REPLACE, },
+ { "RESTRICT", TK_RESTRICT, },
+ { "RIGHT", TK_JOIN_KW, },
+ { "ROLLBACK", TK_ROLLBACK, },
+ { "ROW", TK_ROW, },
+ { "SELECT", TK_SELECT, },
+ { "SET", TK_SET, },
+ { "STATEMENT", TK_STATEMENT, },
+ { "TABLE", TK_TABLE, },
+ { "TEMP", TK_TEMP, },
+ { "TEMPORARY", TK_TEMP, },
+ { "THEN", TK_THEN, },
+ { "TRANSACTION", TK_TRANSACTION, },
+ { "TRIGGER", TK_TRIGGER, },
+ { "UNION", TK_UNION, },
+ { "UNIQUE", TK_UNIQUE, },
+ { "UPDATE", TK_UPDATE, },
+ { "USING", TK_USING, },
+ { "VACUUM", TK_VACUUM, },
+ { "VALUES", TK_VALUES, },
+ { "VIEW", TK_VIEW, },
+ { "WHEN", TK_WHEN, },
+ { "WHERE", TK_WHERE, },
+};
+
+/*
+** This is the hash table
+*/
+#define KEY_HASH_SIZE 101
+static u8 aiHashTable[KEY_HASH_SIZE];
+
+
+/*
+** This function looks up an identifier to determine if it is a
+** keyword. If it is a keyword, the token code of that keyword is
+** returned. If the input is not a keyword, TK_ID is returned.
+*/
+int sqliteKeywordCode(const char *z, int n){
+ int h, i;
+ Keyword *p;
+ static char needInit = 1;
+ if( needInit ){
+ /* Initialize the keyword hash table */
+ sqliteOsEnterMutex();
+ if( needInit ){
+ int nk;
+ nk = sizeof(aKeywordTable)/sizeof(aKeywordTable[0]);
+ for(i=0; i<nk; i++){
+ aKeywordTable[i].len = strlen(aKeywordTable[i].zName);
+ h = sqliteHashNoCase(aKeywordTable[i].zName, aKeywordTable[i].len);
+ h %= KEY_HASH_SIZE;
+ aKeywordTable[i].iNext = aiHashTable[h];
+ aiHashTable[h] = i+1;
+ }
+ needInit = 0;
+ }
+ sqliteOsLeaveMutex();
+ }
+ h = sqliteHashNoCase(z, n) % KEY_HASH_SIZE;
+ for(i=aiHashTable[h]; i; i=p->iNext){
+ p = &aKeywordTable[i-1];
+ if( p->len==n && sqliteStrNICmp(p->zName, z, n)==0 ){
+ return p->tokenType;
+ }
+ }
+ return TK_ID;
+}
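+
+/*
+** Illustrative behaviour (derived from the keyword table and hash above):
+** the lookup is case-insensitive and only exact-length matches count, so
+**
+**     sqliteKeywordCode("select", 6)     returns TK_SELECT
+**     sqliteKeywordCode("SeLeCt", 6)     returns TK_SELECT
+**     sqliteKeywordCode("selects", 7)    returns TK_ID
+*/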
+
+
+/*
+** If X is a character that can be used in an identifier and
+** X&0x80==0 then isIdChar[X] will be 1. If X&0x80==0x80 then
+** X is always an identifier character. (Hence all UTF-8
+** characters can be part of an identifier). isIdChar[X] will
+** be 0 for every character in the lower 128 ASCII characters
+** that cannot be used as part of an identifier.
+**
+** In this implementation, an identifier can be a string of
+** alphabetic characters, digits, and "_" plus any character
+** with the high-order bit set. The latter rule means that
+** any sequence of UTF-8 characters or characters taken from
+** an extended ISO8859 character set can form an identifier.
+*/
+static const char isIdChar[] = {
+/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1x */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */
+};
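+
+/*
+** For example: letters, digits, '_' and any byte with the high bit set are
+** identifier characters, so "tbl_1" and UTF-8 names are each scanned as a
+** single TK_ID token, while characters such as '$' and '#' are not in the
+** table above and therefore end an identifier.
+*/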
+
+
+/*
+** Return the length of the token that begins at z[0].
+** Store the token type in *tokenType before returning.
+*/
+static int sqliteGetToken(const unsigned char *z, int *tokenType){
+ int i;
+ switch( *z ){
+ case ' ': case '\t': case '\n': case '\f': case '\r': {
+ for(i=1; isspace(z[i]); i++){}
+ *tokenType = TK_SPACE;
+ return i;
+ }
+ case '-': {
+ if( z[1]=='-' ){
+ for(i=2; z[i] && z[i]!='\n'; i++){}
+ *tokenType = TK_COMMENT;
+ return i;
+ }
+ *tokenType = TK_MINUS;
+ return 1;
+ }
+ case '(': {
+ *tokenType = TK_LP;
+ return 1;
+ }
+ case ')': {
+ *tokenType = TK_RP;
+ return 1;
+ }
+ case ';': {
+ *tokenType = TK_SEMI;
+ return 1;
+ }
+ case '+': {
+ *tokenType = TK_PLUS;
+ return 1;
+ }
+ case '*': {
+ *tokenType = TK_STAR;
+ return 1;
+ }
+ case '/': {
+ if( z[1]!='*' || z[2]==0 ){
+ *tokenType = TK_SLASH;
+ return 1;
+ }
+ for(i=3; z[i] && (z[i]!='/' || z[i-1]!='*'); i++){}
+ if( z[i] ) i++;
+ *tokenType = TK_COMMENT;
+ return i;
+ }
+ case '%': {
+ *tokenType = TK_REM;
+ return 1;
+ }
+ case '=': {
+ *tokenType = TK_EQ;
+ return 1 + (z[1]=='=');
+ }
+ case '<': {
+ if( z[1]=='=' ){
+ *tokenType = TK_LE;
+ return 2;
+ }else if( z[1]=='>' ){
+ *tokenType = TK_NE;
+ return 2;
+ }else if( z[1]=='<' ){
+ *tokenType = TK_LSHIFT;
+ return 2;
+ }else{
+ *tokenType = TK_LT;
+ return 1;
+ }
+ }
+ case '>': {
+ if( z[1]=='=' ){
+ *tokenType = TK_GE;
+ return 2;
+ }else if( z[1]=='>' ){
+ *tokenType = TK_RSHIFT;
+ return 2;
+ }else{
+ *tokenType = TK_GT;
+ return 1;
+ }
+ }
+ case '!': {
+ if( z[1]!='=' ){
+ *tokenType = TK_ILLEGAL;
+ return 2;
+ }else{
+ *tokenType = TK_NE;
+ return 2;
+ }
+ }
+ case '|': {
+ if( z[1]!='|' ){
+ *tokenType = TK_BITOR;
+ return 1;
+ }else{
+ *tokenType = TK_CONCAT;
+ return 2;
+ }
+ }
+ case ',': {
+ *tokenType = TK_COMMA;
+ return 1;
+ }
+ case '&': {
+ *tokenType = TK_BITAND;
+ return 1;
+ }
+ case '~': {
+ *tokenType = TK_BITNOT;
+ return 1;
+ }
+ case '\'': case '"': {
+ int delim = z[0];
+ for(i=1; z[i]; i++){
+ if( z[i]==delim ){
+ if( z[i+1]==delim ){
+ i++;
+ }else{
+ break;
+ }
+ }
+ }
+ if( z[i] ) i++;
+ *tokenType = TK_STRING;
+ return i;
+ }
+ case '.': {
+ *tokenType = TK_DOT;
+ return 1;
+ }
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9': {
+ *tokenType = TK_INTEGER;
+ for(i=1; isdigit(z[i]); i++){}
+ if( z[i]=='.' && isdigit(z[i+1]) ){
+ i += 2;
+ while( isdigit(z[i]) ){ i++; }
+ *tokenType = TK_FLOAT;
+ }
+ if( (z[i]=='e' || z[i]=='E') &&
+ ( isdigit(z[i+1])
+ || ((z[i+1]=='+' || z[i+1]=='-') && isdigit(z[i+2]))
+ )
+ ){
+ i += 2;
+ while( isdigit(z[i]) ){ i++; }
+ *tokenType = TK_FLOAT;
+ }
+ return i;
+ }
+ case '[': {
+ for(i=1; z[i] && z[i-1]!=']'; i++){}
+ *tokenType = TK_ID;
+ return i;
+ }
+ case '?': {
+ *tokenType = TK_VARIABLE;
+ return 1;
+ }
+ default: {
+ if( (*z&0x80)==0 && !isIdChar[*z] ){
+ break;
+ }
+ for(i=1; (z[i]&0x80)!=0 || isIdChar[z[i]]; i++){}
+ *tokenType = sqliteKeywordCode((char*)z, i);
+ return i;
+ }
+ }
+ *tokenType = TK_ILLEGAL;
+ return 1;
+}
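+
+/*
+** A worked example of the tokenizer: for the input "SELECT * FROM t1;"
+** successive calls to sqliteGetToken() yield
+**
+**     "SELECT" (TK_SELECT)   " " (TK_SPACE)    "*" (TK_STAR)
+**     " " (TK_SPACE)         "FROM" (TK_FROM)  " " (TK_SPACE)
+**     "t1" (TK_ID)           ";" (TK_SEMI)
+**
+** sqliteRunParser() below discards the TK_SPACE and TK_COMMENT tokens and
+** feeds everything else to the generated parser.
+*/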
+
+/*
+** Run the parser on the given SQL string. The parser structure is
+** passed in. An SQLITE_ status code is returned. If an error occurs
+** and pzErrMsg!=NULL then an error message might be written into
+** memory obtained from malloc() and *pzErrMsg made to point to that
+** error message. Or maybe not.
+*/
+int sqliteRunParser(Parse *pParse, const char *zSql, char **pzErrMsg){
+ int nErr = 0;
+ int i;
+ void *pEngine;
+ int tokenType;
+ int lastTokenParsed = -1;
+ sqlite *db = pParse->db;
+ extern void *sqliteParserAlloc(void*(*)(int));
+ extern void sqliteParserFree(void*, void(*)(void*));
+ extern int sqliteParser(void*, int, Token, Parse*);
+
+ db->flags &= ~SQLITE_Interrupt;
+ pParse->rc = SQLITE_OK;
+ i = 0;
+ pEngine = sqliteParserAlloc((void*(*)(int))malloc);
+ if( pEngine==0 ){
+ sqliteSetString(pzErrMsg, "out of memory", (char*)0);
+ return 1;
+ }
+ pParse->sLastToken.dyn = 0;
+ pParse->zTail = zSql;
+ while( sqlite_malloc_failed==0 && zSql[i]!=0 ){
+ assert( i>=0 );
+ pParse->sLastToken.z = &zSql[i];
+ assert( pParse->sLastToken.dyn==0 );
+ pParse->sLastToken.n = sqliteGetToken((unsigned char*)&zSql[i], &tokenType);
+ i += pParse->sLastToken.n;
+ switch( tokenType ){
+ case TK_SPACE:
+ case TK_COMMENT: {
+ if( (db->flags & SQLITE_Interrupt)!=0 ){
+ pParse->rc = SQLITE_INTERRUPT;
+ sqliteSetString(pzErrMsg, "interrupt", (char*)0);
+ goto abort_parse;
+ }
+ break;
+ }
+ case TK_ILLEGAL: {
+ sqliteSetNString(pzErrMsg, "unrecognized token: \"", -1,
+ pParse->sLastToken.z, pParse->sLastToken.n, "\"", 1, 0);
+ nErr++;
+ goto abort_parse;
+ }
+ case TK_SEMI: {
+ pParse->zTail = &zSql[i];
+ /* Fall thru into the default case */
+ }
+ default: {
+ sqliteParser(pEngine, tokenType, pParse->sLastToken, pParse);
+ lastTokenParsed = tokenType;
+ if( pParse->rc!=SQLITE_OK ){
+ goto abort_parse;
+ }
+ break;
+ }
+ }
+ }
+abort_parse:
+ if( zSql[i]==0 && nErr==0 && pParse->rc==SQLITE_OK ){
+ if( lastTokenParsed!=TK_SEMI ){
+ sqliteParser(pEngine, TK_SEMI, pParse->sLastToken, pParse);
+ pParse->zTail = &zSql[i];
+ }
+ sqliteParser(pEngine, 0, pParse->sLastToken, pParse);
+ }
+ sqliteParserFree(pEngine, free);
+ if( pParse->rc!=SQLITE_OK && pParse->rc!=SQLITE_DONE && pParse->zErrMsg==0 ){
+ sqliteSetString(&pParse->zErrMsg, sqlite_error_string(pParse->rc),
+ (char*)0);
+ }
+ if( pParse->zErrMsg ){
+ if( pzErrMsg && *pzErrMsg==0 ){
+ *pzErrMsg = pParse->zErrMsg;
+ }else{
+ sqliteFree(pParse->zErrMsg);
+ }
+ pParse->zErrMsg = 0;
+ if( !nErr ) nErr++;
+ }
+ if( pParse->pVdbe && pParse->nErr>0 ){
+ sqliteVdbeDelete(pParse->pVdbe);
+ pParse->pVdbe = 0;
+ }
+ if( pParse->pNewTable ){
+ sqliteDeleteTable(pParse->db, pParse->pNewTable);
+ pParse->pNewTable = 0;
+ }
+ if( pParse->pNewTrigger ){
+ sqliteDeleteTrigger(pParse->pNewTrigger);
+ pParse->pNewTrigger = 0;
+ }
+ if( nErr>0 && (pParse->rc==SQLITE_OK || pParse->rc==SQLITE_DONE) ){
+ pParse->rc = SQLITE_ERROR;
+ }
+ return nErr;
+}
+
+/*
+** Token types used by the sqlite_complete() routine. See the header
+** comments on that procedure for additional information.
+*/
+#define tkEXPLAIN 0
+#define tkCREATE 1
+#define tkTEMP 2
+#define tkTRIGGER 3
+#define tkEND 4
+#define tkSEMI 5
+#define tkWS 6
+#define tkOTHER 7
+
+/*
+** Return TRUE if the given SQL string ends in a semicolon.
+**
+** Special handling is required for CREATE TRIGGER statements.
+** Whenever the CREATE TRIGGER keywords are seen, the statement
+** must end with ";END;".
+**
+** This implementation uses a state machine with 7 states:
+**
+** (0) START At the beginning or end of an SQL statement. This routine
+** returns 1 if it ends in the START state and 0 if it ends
+** in any other state.
+**
+** (1) EXPLAIN The keyword EXPLAIN has been seen at the beginning of
+** a statement.
+**
+** (2) CREATE The keyword CREATE has been seen at the beginning of a
+** statement, possibly preceded by EXPLAIN and/or followed by
+** TEMP or TEMPORARY
+**
+** (3) NORMAL We are in the middle of a statement which ends with a single
+** semicolon.
+**
+** (4) TRIGGER We are in the middle of a trigger definition that must be
+** ended by a semicolon, the keyword END, and another semicolon.
+**
+** (5) SEMI We've seen the first semicolon in the ";END;" that occurs at
+** the end of a trigger definition.
+**
+** (6) END We've seen the ";END" of the ";END;" that occurs at the end
+** of a trigger definition.
+**
+** Transitions between states above are determined by tokens extracted
+** from the input. The following tokens are significant:
+**
+** (0) tkEXPLAIN The "explain" keyword.
+** (1) tkCREATE The "create" keyword.
+** (2) tkTEMP The "temp" or "temporary" keyword.
+** (3) tkTRIGGER The "trigger" keyword.
+** (4) tkEND The "end" keyword.
+** (5) tkSEMI A semicolon.
+** (6) tkWS Whitespace
+** (7) tkOTHER Any other SQL token.
+**
+** Whitespace never causes a state transition and is always ignored.
+*/
+int sqlite_complete(const char *zSql){
+ u8 state = 0; /* Current state, using numbers defined in header comment */
+ u8 token; /* Value of the next token */
+
+ /* The following matrix defines the transition from one state to another
+ ** according to what token is seen. trans[state][token] returns the
+ ** next state.
+ */
+ static const u8 trans[7][8] = {
+ /* Token: */
+ /* State: ** EXPLAIN CREATE TEMP TRIGGER END SEMI WS OTHER */
+ /* 0 START: */ { 1, 2, 3, 3, 3, 0, 0, 3, },
+ /* 1 EXPLAIN: */ { 3, 2, 3, 3, 3, 0, 1, 3, },
+ /* 2 CREATE: */ { 3, 3, 2, 4, 3, 0, 2, 3, },
+ /* 3 NORMAL: */ { 3, 3, 3, 3, 3, 0, 3, 3, },
+ /* 4 TRIGGER: */ { 4, 4, 4, 4, 4, 5, 4, 4, },
+ /* 5 SEMI: */ { 4, 4, 4, 4, 6, 5, 5, 4, },
+ /* 6 END: */ { 4, 4, 4, 4, 4, 0, 6, 4, },
+ };
+
+ while( *zSql ){
+ switch( *zSql ){
+ case ';': { /* A semicolon */
+ token = tkSEMI;
+ break;
+ }
+ case ' ':
+ case '\r':
+ case '\t':
+ case '\n':
+ case '\f': { /* White space is ignored */
+ token = tkWS;
+ break;
+ }
+ case '/': { /* C-style comments */
+ if( zSql[1]!='*' ){
+ token = tkOTHER;
+ break;
+ }
+ zSql += 2;
+ while( zSql[0] && (zSql[0]!='*' || zSql[1]!='/') ){ zSql++; }
+ if( zSql[0]==0 ) return 0;
+ zSql++;
+ token = tkWS;
+ break;
+ }
+ case '-': { /* SQL-style comments from "--" to end of line */
+ if( zSql[1]!='-' ){
+ token = tkOTHER;
+ break;
+ }
+ while( *zSql && *zSql!='\n' ){ zSql++; }
+ if( *zSql==0 ) return state==0;
+ token = tkWS;
+ break;
+ }
+ case '[': { /* Microsoft-style identifiers in [...] */
+ zSql++;
+ while( *zSql && *zSql!=']' ){ zSql++; }
+ if( *zSql==0 ) return 0;
+ token = tkOTHER;
+ break;
+ }
+ case '"': /* single- and double-quoted strings */
+ case '\'': {
+ int c = *zSql;
+ zSql++;
+ while( *zSql && *zSql!=c ){ zSql++; }
+ if( *zSql==0 ) return 0;
+ token = tkOTHER;
+ break;
+ }
+ default: {
+ if( isIdChar[(u8)*zSql] ){
+ /* Keywords and unquoted identifiers */
+ int nId;
+ for(nId=1; isIdChar[(u8)zSql[nId]]; nId++){}
+ switch( *zSql ){
+ case 'c': case 'C': {
+ if( nId==6 && sqliteStrNICmp(zSql, "create", 6)==0 ){
+ token = tkCREATE;
+ }else{
+ token = tkOTHER;
+ }
+ break;
+ }
+ case 't': case 'T': {
+ if( nId==7 && sqliteStrNICmp(zSql, "trigger", 7)==0 ){
+ token = tkTRIGGER;
+ }else if( nId==4 && sqliteStrNICmp(zSql, "temp", 4)==0 ){
+ token = tkTEMP;
+ }else if( nId==9 && sqliteStrNICmp(zSql, "temporary", 9)==0 ){
+ token = tkTEMP;
+ }else{
+ token = tkOTHER;
+ }
+ break;
+ }
+ case 'e': case 'E': {
+ if( nId==3 && sqliteStrNICmp(zSql, "end", 3)==0 ){
+ token = tkEND;
+ }else if( nId==7 && sqliteStrNICmp(zSql, "explain", 7)==0 ){
+ token = tkEXPLAIN;
+ }else{
+ token = tkOTHER;
+ }
+ break;
+ }
+ default: {
+ token = tkOTHER;
+ break;
+ }
+ }
+ zSql += nId-1;
+ }else{
+ /* Operators and special symbols */
+ token = tkOTHER;
+ }
+ break;
+ }
+ }
+ state = trans[state][token];
+ zSql++;
+ }
+ return state==0;
+}
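+
+/*
+** Illustrative results for sqlite_complete() (derived from the state
+** machine above):
+**
+**     "SELECT * FROM t1"                                            -> 0
+**     "SELECT * FROM t1;"                                           -> 1
+**     "CREATE TRIGGER tr AFTER DELETE ON t1 BEGIN SELECT 1;"        -> 0
+**     "CREATE TRIGGER tr AFTER DELETE ON t1 BEGIN SELECT 1; END;"   -> 1
+*/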
diff --git a/usr/src/cmd/svc/configd/sqlite/src/trigger.c b/usr/src/cmd/svc/configd/sqlite/src/trigger.c
new file mode 100644
index 0000000000..1370f91d2e
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/trigger.c
@@ -0,0 +1,767 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+*
+*/
+#include "sqliteInt.h"
+
+/*
+** Delete a linked list of TriggerStep structures.
+*/
+void sqliteDeleteTriggerStep(TriggerStep *pTriggerStep){
+ while( pTriggerStep ){
+ TriggerStep * pTmp = pTriggerStep;
+ pTriggerStep = pTriggerStep->pNext;
+
+ if( pTmp->target.dyn ) sqliteFree((char*)pTmp->target.z);
+ sqliteExprDelete(pTmp->pWhere);
+ sqliteExprListDelete(pTmp->pExprList);
+ sqliteSelectDelete(pTmp->pSelect);
+ sqliteIdListDelete(pTmp->pIdList);
+
+ sqliteFree(pTmp);
+ }
+}
+
+/*
+** This is called by the parser when it sees a CREATE TRIGGER statement
+** up to the point of the BEGIN before the trigger actions. A Trigger
+** structure is generated based on the information available and stored
+** in pParse->pNewTrigger. After the trigger actions have been parsed, the
+** sqliteFinishTrigger() function is called to complete the trigger
+** construction process.
+*/
+void sqliteBeginTrigger(
+ Parse *pParse, /* The parse context of the CREATE TRIGGER statement */
+ Token *pName, /* The name of the trigger */
+ int tr_tm, /* One of TK_BEFORE, TK_AFTER, TK_INSTEAD */
+ int op, /* One of TK_INSERT, TK_UPDATE, TK_DELETE */
+ IdList *pColumns, /* column list if this is an UPDATE OF trigger */
+ SrcList *pTableName,/* The name of the table/view the trigger applies to */
+ int foreach, /* One of TK_ROW or TK_STATEMENT */
+ Expr *pWhen, /* WHEN clause */
+ int isTemp /* True if the TEMPORARY keyword is present */
+){
+ Trigger *nt;
+ Table *tab;
+ char *zName = 0; /* Name of the trigger */
+ sqlite *db = pParse->db;
+  int iDb;            /* Which database to store the trigger in */
+ DbFixer sFix;
+
+ /* Check that:
+ ** 1. the trigger name does not already exist.
+ ** 2. the table (or view) does exist in the same database as the trigger.
+ ** 3. that we are not trying to create a trigger on the sqlite_master table
+ ** 4. That we are not trying to create an INSTEAD OF trigger on a table.
+ ** 5. That we are not trying to create a BEFORE or AFTER trigger on a view.
+ */
+ if( sqlite_malloc_failed ) goto trigger_cleanup;
+ assert( pTableName->nSrc==1 );
+ if( db->init.busy
+ && sqliteFixInit(&sFix, pParse, db->init.iDb, "trigger", pName)
+ && sqliteFixSrcList(&sFix, pTableName)
+ ){
+ goto trigger_cleanup;
+ }
+ tab = sqliteSrcListLookup(pParse, pTableName);
+ if( !tab ){
+ goto trigger_cleanup;
+ }
+ iDb = isTemp ? 1 : tab->iDb;
+ if( iDb>=2 && !db->init.busy ){
+ sqliteErrorMsg(pParse, "triggers may not be added to auxiliary "
+ "database %s", db->aDb[tab->iDb].zName);
+ goto trigger_cleanup;
+ }
+
+ zName = sqliteStrNDup(pName->z, pName->n);
+ sqliteDequote(zName);
+ if( sqliteHashFind(&(db->aDb[iDb].trigHash), zName,pName->n+1) ){
+ sqliteErrorMsg(pParse, "trigger %T already exists", pName);
+ goto trigger_cleanup;
+ }
+ if( sqliteStrNICmp(tab->zName, "sqlite_", 7)==0 ){
+ sqliteErrorMsg(pParse, "cannot create trigger on system table");
+ pParse->nErr++;
+ goto trigger_cleanup;
+ }
+ if( tab->pSelect && tr_tm != TK_INSTEAD ){
+ sqliteErrorMsg(pParse, "cannot create %s trigger on view: %S",
+ (tr_tm == TK_BEFORE)?"BEFORE":"AFTER", pTableName, 0);
+ goto trigger_cleanup;
+ }
+ if( !tab->pSelect && tr_tm == TK_INSTEAD ){
+ sqliteErrorMsg(pParse, "cannot create INSTEAD OF"
+ " trigger on table: %S", pTableName, 0);
+ goto trigger_cleanup;
+ }
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ {
+ int code = SQLITE_CREATE_TRIGGER;
+ const char *zDb = db->aDb[tab->iDb].zName;
+ const char *zDbTrig = isTemp ? db->aDb[1].zName : zDb;
+ if( tab->iDb==1 || isTemp ) code = SQLITE_CREATE_TEMP_TRIGGER;
+ if( sqliteAuthCheck(pParse, code, zName, tab->zName, zDbTrig) ){
+ goto trigger_cleanup;
+ }
+ if( sqliteAuthCheck(pParse, SQLITE_INSERT, SCHEMA_TABLE(tab->iDb), 0, zDb)){
+ goto trigger_cleanup;
+ }
+ }
+#endif
+
+  /* INSTEAD OF triggers can only appear on views and BEFORE triggers
+ ** cannot appear on views. So we might as well translate every
+ ** INSTEAD OF trigger into a BEFORE trigger. It simplifies code
+ ** elsewhere.
+ */
+ if (tr_tm == TK_INSTEAD){
+ tr_tm = TK_BEFORE;
+ }
+
+ /* Build the Trigger object */
+ nt = (Trigger*)sqliteMalloc(sizeof(Trigger));
+ if( nt==0 ) goto trigger_cleanup;
+ nt->name = zName;
+ zName = 0;
+ nt->table = sqliteStrDup(pTableName->a[0].zName);
+ if( sqlite_malloc_failed ) goto trigger_cleanup;
+ nt->iDb = iDb;
+ nt->iTabDb = tab->iDb;
+ nt->op = op;
+ nt->tr_tm = tr_tm;
+ nt->pWhen = sqliteExprDup(pWhen);
+ nt->pColumns = sqliteIdListDup(pColumns);
+ nt->foreach = foreach;
+ sqliteTokenCopy(&nt->nameToken,pName);
+ assert( pParse->pNewTrigger==0 );
+ pParse->pNewTrigger = nt;
+
+trigger_cleanup:
+ sqliteFree(zName);
+ sqliteSrcListDelete(pTableName);
+ sqliteIdListDelete(pColumns);
+ sqliteExprDelete(pWhen);
+}
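+
+/*
+** For example (illustrative): given the statement
+**
+**     CREATE TRIGGER tr1 BEFORE UPDATE OF a,b ON t1
+**     FOR EACH ROW WHEN new.a>0 BEGIN SELECT 1; END;
+**
+** the parser calls sqliteBeginTrigger() with pName="tr1", tr_tm=TK_BEFORE,
+** op=TK_UPDATE, pColumns=(a,b), pTableName=t1, foreach=TK_ROW and the WHEN
+** expression, and then calls sqliteFinishTrigger() below once the
+** statements between BEGIN and END have been parsed.
+*/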
+
+/*
+** This routine is called after all of the trigger actions have been parsed
+** in order to complete the process of building the trigger.
+*/
+void sqliteFinishTrigger(
+ Parse *pParse, /* Parser context */
+ TriggerStep *pStepList, /* The triggered program */
+ Token *pAll /* Token that describes the complete CREATE TRIGGER */
+){
+ Trigger *nt = 0; /* The trigger whose construction is finishing up */
+ sqlite *db = pParse->db; /* The database */
+ DbFixer sFix;
+
+ if( pParse->nErr || pParse->pNewTrigger==0 ) goto triggerfinish_cleanup;
+ nt = pParse->pNewTrigger;
+ pParse->pNewTrigger = 0;
+ nt->step_list = pStepList;
+ while( pStepList ){
+ pStepList->pTrig = nt;
+ pStepList = pStepList->pNext;
+ }
+ if( sqliteFixInit(&sFix, pParse, nt->iDb, "trigger", &nt->nameToken)
+ && sqliteFixTriggerStep(&sFix, nt->step_list) ){
+ goto triggerfinish_cleanup;
+ }
+
+ /* if we are not initializing, and this trigger is not on a TEMP table,
+ ** build the sqlite_master entry
+ */
+ if( !db->init.busy ){
+ static VdbeOpList insertTrig[] = {
+ { OP_NewRecno, 0, 0, 0 },
+ { OP_String, 0, 0, "trigger" },
+ { OP_String, 0, 0, 0 }, /* 2: trigger name */
+ { OP_String, 0, 0, 0 }, /* 3: table name */
+ { OP_Integer, 0, 0, 0 },
+ { OP_String, 0, 0, 0 }, /* 5: SQL */
+ { OP_MakeRecord, 5, 0, 0 },
+ { OP_PutIntKey, 0, 0, 0 },
+ };
+ int addr;
+ Vdbe *v;
+
+ /* Make an entry in the sqlite_master table */
+ v = sqliteGetVdbe(pParse);
+ if( v==0 ) goto triggerfinish_cleanup;
+ sqliteBeginWriteOperation(pParse, 0, 0);
+ sqliteOpenMasterTable(v, nt->iDb);
+ addr = sqliteVdbeAddOpList(v, ArraySize(insertTrig), insertTrig);
+ sqliteVdbeChangeP3(v, addr+2, nt->name, 0);
+ sqliteVdbeChangeP3(v, addr+3, nt->table, 0);
+ sqliteVdbeChangeP3(v, addr+5, pAll->z, pAll->n);
+ if( nt->iDb==0 ){
+ sqliteChangeCookie(db, v);
+ }
+ sqliteVdbeAddOp(v, OP_Close, 0, 0);
+ sqliteEndWriteOperation(pParse);
+ }
+
+ if( !pParse->explain ){
+ Table *pTab;
+ sqliteHashInsert(&db->aDb[nt->iDb].trigHash,
+ nt->name, strlen(nt->name)+1, nt);
+ pTab = sqliteLocateTable(pParse, nt->table, db->aDb[nt->iTabDb].zName);
+ assert( pTab!=0 );
+ nt->pNext = pTab->pTrigger;
+ pTab->pTrigger = nt;
+ nt = 0;
+ }
+
+triggerfinish_cleanup:
+ sqliteDeleteTrigger(nt);
+ sqliteDeleteTrigger(pParse->pNewTrigger);
+ pParse->pNewTrigger = 0;
+ sqliteDeleteTriggerStep(pStepList);
+}
+
+/*
+** Make a copy of all components of the given trigger step. This has
+** the effect of copying all Expr.token.z values into memory obtained
+** from sqliteMalloc(). As initially created, the Expr.token.z values
+** all point to the input string that was fed to the parser. But that
+** string is ephemeral - it will go away as soon as the sqlite_exec()
+** call that started the parser exits. This routine makes a persistent
+** copy of all the Expr.token.z strings so that the TriggerStep structure
+** will be valid even after the sqlite_exec() call returns.
+*/
+static void sqlitePersistTriggerStep(TriggerStep *p){
+ if( p->target.z ){
+ p->target.z = sqliteStrNDup(p->target.z, p->target.n);
+ p->target.dyn = 1;
+ }
+ if( p->pSelect ){
+ Select *pNew = sqliteSelectDup(p->pSelect);
+ sqliteSelectDelete(p->pSelect);
+ p->pSelect = pNew;
+ }
+ if( p->pWhere ){
+ Expr *pNew = sqliteExprDup(p->pWhere);
+ sqliteExprDelete(p->pWhere);
+ p->pWhere = pNew;
+ }
+ if( p->pExprList ){
+ ExprList *pNew = sqliteExprListDup(p->pExprList);
+ sqliteExprListDelete(p->pExprList);
+ p->pExprList = pNew;
+ }
+ if( p->pIdList ){
+ IdList *pNew = sqliteIdListDup(p->pIdList);
+ sqliteIdListDelete(p->pIdList);
+ p->pIdList = pNew;
+ }
+}
+
+/*
+** Turn a SELECT statement (that the pSelect parameter points to) into
+** a trigger step. Return a pointer to a TriggerStep structure.
+**
+** The parser calls this routine when it finds a SELECT statement in
+** the body of a TRIGGER.
+*/
+TriggerStep *sqliteTriggerSelectStep(Select *pSelect){
+ TriggerStep *pTriggerStep = sqliteMalloc(sizeof(TriggerStep));
+ if( pTriggerStep==0 ) return 0;
+
+ pTriggerStep->op = TK_SELECT;
+ pTriggerStep->pSelect = pSelect;
+ pTriggerStep->orconf = OE_Default;
+ sqlitePersistTriggerStep(pTriggerStep);
+
+ return pTriggerStep;
+}
+
+/*
+** Build a trigger step out of an INSERT statement. Return a pointer
+** to the new trigger step.
+**
+** The parser calls this routine when it sees an INSERT inside the
+** body of a trigger.
+*/
+TriggerStep *sqliteTriggerInsertStep(
+ Token *pTableName, /* Name of the table into which we insert */
+ IdList *pColumn, /* List of columns in pTableName to insert into */
+ ExprList *pEList, /* The VALUE clause: a list of values to be inserted */
+ Select *pSelect, /* A SELECT statement that supplies values */
+ int orconf /* The conflict algorithm (OE_Abort, OE_Replace, etc.) */
+){
+ TriggerStep *pTriggerStep = sqliteMalloc(sizeof(TriggerStep));
+ if( pTriggerStep==0 ) return 0;
+
+ assert(pEList == 0 || pSelect == 0);
+ assert(pEList != 0 || pSelect != 0);
+
+ pTriggerStep->op = TK_INSERT;
+ pTriggerStep->pSelect = pSelect;
+ pTriggerStep->target = *pTableName;
+ pTriggerStep->pIdList = pColumn;
+ pTriggerStep->pExprList = pEList;
+ pTriggerStep->orconf = orconf;
+ sqlitePersistTriggerStep(pTriggerStep);
+
+ return pTriggerStep;
+}
+
+/*
+** Construct a trigger step that implements an UPDATE statement and return
+** a pointer to that trigger step. The parser calls this routine when it
+** sees an UPDATE statement inside the body of a CREATE TRIGGER.
+*/
+TriggerStep *sqliteTriggerUpdateStep(
+ Token *pTableName, /* Name of the table to be updated */
+ ExprList *pEList, /* The SET clause: list of column and new values */
+ Expr *pWhere, /* The WHERE clause */
+ int orconf /* The conflict algorithm. (OE_Abort, OE_Ignore, etc) */
+){
+ TriggerStep *pTriggerStep = sqliteMalloc(sizeof(TriggerStep));
+ if( pTriggerStep==0 ) return 0;
+
+ pTriggerStep->op = TK_UPDATE;
+ pTriggerStep->target = *pTableName;
+ pTriggerStep->pExprList = pEList;
+ pTriggerStep->pWhere = pWhere;
+ pTriggerStep->orconf = orconf;
+ sqlitePersistTriggerStep(pTriggerStep);
+
+ return pTriggerStep;
+}
+
+/*
+** Construct a trigger step that implements a DELETE statement and return
+** a pointer to that trigger step. The parser calls this routine when it
+** sees a DELETE statement inside the body of a CREATE TRIGGER.
+*/
+TriggerStep *sqliteTriggerDeleteStep(Token *pTableName, Expr *pWhere){
+ TriggerStep *pTriggerStep = sqliteMalloc(sizeof(TriggerStep));
+ if( pTriggerStep==0 ) return 0;
+
+ pTriggerStep->op = TK_DELETE;
+ pTriggerStep->target = *pTableName;
+ pTriggerStep->pWhere = pWhere;
+ pTriggerStep->orconf = OE_Default;
+ sqlitePersistTriggerStep(pTriggerStep);
+
+ return pTriggerStep;
+}
+
+/*
+** Recursively delete a Trigger structure
+*/
+void sqliteDeleteTrigger(Trigger *pTrigger){
+ if( pTrigger==0 ) return;
+ sqliteDeleteTriggerStep(pTrigger->step_list);
+ sqliteFree(pTrigger->name);
+ sqliteFree(pTrigger->table);
+ sqliteExprDelete(pTrigger->pWhen);
+ sqliteIdListDelete(pTrigger->pColumns);
+ if( pTrigger->nameToken.dyn ) sqliteFree((char*)pTrigger->nameToken.z);
+ sqliteFree(pTrigger);
+}
+
+/*
+ * This function is called to drop a trigger from the database schema.
+ *
+ * This may be called directly from the parser and therefore identifies
+ * the trigger by name. The sqliteDropTriggerPtr() routine does the
+ * same job as this routine except it takes a pointer to the trigger
+ * instead of the trigger name.
+ *
+ * Note that this function does not delete the trigger entirely. Instead it
+ * removes it from the internal schema and places it in the trigDrop hash
+ * table. This is so that the trigger can be restored into the database schema
+ * if the transaction is rolled back.
+ */
+void sqliteDropTrigger(Parse *pParse, SrcList *pName){
+ Trigger *pTrigger;
+ int i;
+ const char *zDb;
+ const char *zName;
+ int nName;
+ sqlite *db = pParse->db;
+
+ if( sqlite_malloc_failed ) goto drop_trigger_cleanup;
+ assert( pName->nSrc==1 );
+ zDb = pName->a[0].zDatabase;
+ zName = pName->a[0].zName;
+ nName = strlen(zName);
+ for(i=0; i<db->nDb; i++){
+ int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */
+ if( zDb && sqliteStrICmp(db->aDb[j].zName, zDb) ) continue;
+ pTrigger = sqliteHashFind(&(db->aDb[j].trigHash), zName, nName+1);
+ if( pTrigger ) break;
+ }
+ if( !pTrigger ){
+ sqliteErrorMsg(pParse, "no such trigger: %S", pName, 0);
+ goto drop_trigger_cleanup;
+ }
+ sqliteDropTriggerPtr(pParse, pTrigger, 0);
+
+drop_trigger_cleanup:
+ sqliteSrcListDelete(pName);
+}
+
+/*
+** Drop a trigger given a pointer to that trigger. If nested is false,
+** then also generate code to remove the trigger from the SQLITE_MASTER
+** table.
+*/
+void sqliteDropTriggerPtr(Parse *pParse, Trigger *pTrigger, int nested){
+ Table *pTable;
+ Vdbe *v;
+ sqlite *db = pParse->db;
+
+ assert( pTrigger->iDb<db->nDb );
+ if( pTrigger->iDb>=2 ){
+ sqliteErrorMsg(pParse, "triggers may not be removed from "
+ "auxiliary database %s", db->aDb[pTrigger->iDb].zName);
+ return;
+ }
+ pTable = sqliteFindTable(db, pTrigger->table,db->aDb[pTrigger->iTabDb].zName);
+ assert(pTable);
+ assert( pTable->iDb==pTrigger->iDb || pTrigger->iDb==1 );
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ {
+ int code = SQLITE_DROP_TRIGGER;
+ const char *zDb = db->aDb[pTrigger->iDb].zName;
+ const char *zTab = SCHEMA_TABLE(pTrigger->iDb);
+ if( pTrigger->iDb ) code = SQLITE_DROP_TEMP_TRIGGER;
+ if( sqliteAuthCheck(pParse, code, pTrigger->name, pTable->zName, zDb) ||
+ sqliteAuthCheck(pParse, SQLITE_DELETE, zTab, 0, zDb) ){
+ return;
+ }
+ }
+#endif
+
+ /* Generate code to destroy the database record of the trigger.
+ */
+ if( pTable!=0 && !nested && (v = sqliteGetVdbe(pParse))!=0 ){
+ int base;
+ static VdbeOpList dropTrigger[] = {
+ { OP_Rewind, 0, ADDR(9), 0},
+ { OP_String, 0, 0, 0}, /* 1 */
+ { OP_Column, 0, 1, 0},
+ { OP_Ne, 0, ADDR(8), 0},
+ { OP_String, 0, 0, "trigger"},
+ { OP_Column, 0, 0, 0},
+ { OP_Ne, 0, ADDR(8), 0},
+ { OP_Delete, 0, 0, 0},
+ { OP_Next, 0, ADDR(1), 0}, /* 8 */
+ };
+
+ sqliteBeginWriteOperation(pParse, 0, 0);
+ sqliteOpenMasterTable(v, pTrigger->iDb);
+ base = sqliteVdbeAddOpList(v, ArraySize(dropTrigger), dropTrigger);
+ sqliteVdbeChangeP3(v, base+1, pTrigger->name, 0);
+ if( pTrigger->iDb==0 ){
+ sqliteChangeCookie(db, v);
+ }
+ sqliteVdbeAddOp(v, OP_Close, 0, 0);
+ sqliteEndWriteOperation(pParse);
+ }
+
+ /*
+ * If this is not an "explain", then delete the trigger structure.
+ */
+ if( !pParse->explain ){
+ const char *zName = pTrigger->name;
+ int nName = strlen(zName);
+ if( pTable->pTrigger == pTrigger ){
+ pTable->pTrigger = pTrigger->pNext;
+ }else{
+ Trigger *cc = pTable->pTrigger;
+ while( cc ){
+ if( cc->pNext == pTrigger ){
+ cc->pNext = cc->pNext->pNext;
+ break;
+ }
+ cc = cc->pNext;
+ }
+ assert(cc);
+ }
+ sqliteHashInsert(&(db->aDb[pTrigger->iDb].trigHash), zName, nName+1, 0);
+ sqliteDeleteTrigger(pTrigger);
+ }
+}
+
+/*
+** pEList is the SET clause of an UPDATE statement. Each entry
+** in pEList is of the format <id>=<expr>. If any of the entries
+** in pEList have an <id> which matches an identifier in pIdList,
+** then return TRUE. If pIdList==NULL, then it is considered a
+** wildcard that matches anything. Likewise if pEList==NULL then
+** it matches anything so always return true. Return false only
+** if there is no match.
+*/
+static int checkColumnOverLap(IdList *pIdList, ExprList *pEList){
+ int e;
+ if( !pIdList || !pEList ) return 1;
+ for(e=0; e<pEList->nExpr; e++){
+ if( sqliteIdListIndex(pIdList, pEList->a[e].zName)>=0 ) return 1;
+ }
+ return 0;
+}
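+
+/*
+** For instance: a trigger declared with "UPDATE OF a,b" overlaps the SET
+** clause of "UPDATE t1 SET a=1" (column "a" matches) but not that of
+** "UPDATE t1 SET c=3".  A trigger with no OF clause (pIdList==NULL)
+** overlaps every UPDATE.
+*/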
+
+/* A global variable that is TRUE if we should always set up temp tables for
+ * triggers, even if there are no triggers to code. This is used to test
+ * how much overhead the triggers algorithm is causing.
+ *
+ * This flag can be set or cleared using the "trigger_overhead_test" pragma.
+ * The pragma is not documented since it is not really part of the interface
+ * to SQLite, just the test procedure.
+*/
+int always_code_trigger_setup = 0;
+
+/*
+ * Returns true if a trigger matching op, tr_tm and foreach that is NOT already
+ * on the Parse object's trigger-stack (to prevent recursive trigger firing) is
+ * found in the list specified as pTrigger.
+ */
+int sqliteTriggersExist(
+ Parse *pParse, /* Used to check for recursive triggers */
+ Trigger *pTrigger, /* A list of triggers associated with a table */
+ int op, /* one of TK_DELETE, TK_INSERT, TK_UPDATE */
+ int tr_tm, /* one of TK_BEFORE, TK_AFTER */
+ int foreach, /* one of TK_ROW or TK_STATEMENT */
+ ExprList *pChanges /* Columns that change in an UPDATE statement */
+){
+ Trigger * pTriggerCursor;
+
+ if( always_code_trigger_setup ){
+ return 1;
+ }
+
+ pTriggerCursor = pTrigger;
+ while( pTriggerCursor ){
+ if( pTriggerCursor->op == op &&
+ pTriggerCursor->tr_tm == tr_tm &&
+ pTriggerCursor->foreach == foreach &&
+ checkColumnOverLap(pTriggerCursor->pColumns, pChanges) ){
+ TriggerStack * ss;
+ ss = pParse->trigStack;
+ while( ss && ss->pTrigger != pTrigger ){
+ ss = ss->pNext;
+ }
+ if( !ss )return 1;
+ }
+ pTriggerCursor = pTriggerCursor->pNext;
+ }
+
+ return 0;
+}
+
+/*
+** Convert the pStep->target token into a SrcList and return a pointer
+** to that SrcList.
+**
+** This routine adds a specific database name, if needed, to the target when
+** forming the SrcList. This prevents a trigger in one database from
+** referring to a target in another database. An exception is when the
+** trigger is in TEMP in which case it can refer to any other database it
+** wants.
+*/
+static SrcList *targetSrcList(
+ Parse *pParse, /* The parsing context */
+ TriggerStep *pStep /* The trigger containing the target token */
+){
+ Token sDb; /* Dummy database name token */
+ int iDb; /* Index of the database to use */
+ SrcList *pSrc; /* SrcList to be returned */
+
+ iDb = pStep->pTrig->iDb;
+ if( iDb==0 || iDb>=2 ){
+ assert( iDb<pParse->db->nDb );
+ sDb.z = pParse->db->aDb[iDb].zName;
+ sDb.n = strlen(sDb.z);
+ pSrc = sqliteSrcListAppend(0, &sDb, &pStep->target);
+ } else {
+ pSrc = sqliteSrcListAppend(0, &pStep->target, 0);
+ }
+ return pSrc;
+}
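+
+/*
+** Example (illustrative): a trigger stored in an attached database named
+** "aux2" (iDb>=2) with target "log" produces the SrcList "aux2.log", so the
+** trigger cannot silently write to a table of the same name in another
+** database.  A TEMP trigger (iDb==1) produces just "log", which may resolve
+** to any attached database.
+*/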
+
+/*
+** Generate VDBE code for zero or more statements inside the body of a
+** trigger.
+*/
+static int codeTriggerProgram(
+ Parse *pParse, /* The parser context */
+ TriggerStep *pStepList, /* List of statements inside the trigger body */
+ int orconfin /* Conflict algorithm. (OE_Abort, etc) */
+){
+ TriggerStep * pTriggerStep = pStepList;
+ int orconf;
+
+ while( pTriggerStep ){
+ int saveNTab = pParse->nTab;
+
+ orconf = (orconfin == OE_Default)?pTriggerStep->orconf:orconfin;
+ pParse->trigStack->orconf = orconf;
+ switch( pTriggerStep->op ){
+ case TK_SELECT: {
+ Select * ss = sqliteSelectDup(pTriggerStep->pSelect);
+ assert(ss);
+ assert(ss->pSrc);
+ sqliteSelect(pParse, ss, SRT_Discard, 0, 0, 0, 0);
+ sqliteSelectDelete(ss);
+ break;
+ }
+ case TK_UPDATE: {
+ SrcList *pSrc;
+ pSrc = targetSrcList(pParse, pTriggerStep);
+ sqliteVdbeAddOp(pParse->pVdbe, OP_ListPush, 0, 0);
+ sqliteUpdate(pParse, pSrc,
+ sqliteExprListDup(pTriggerStep->pExprList),
+ sqliteExprDup(pTriggerStep->pWhere), orconf);
+ sqliteVdbeAddOp(pParse->pVdbe, OP_ListPop, 0, 0);
+ break;
+ }
+ case TK_INSERT: {
+ SrcList *pSrc;
+ pSrc = targetSrcList(pParse, pTriggerStep);
+ sqliteInsert(pParse, pSrc,
+ sqliteExprListDup(pTriggerStep->pExprList),
+ sqliteSelectDup(pTriggerStep->pSelect),
+ sqliteIdListDup(pTriggerStep->pIdList), orconf);
+ break;
+ }
+ case TK_DELETE: {
+ SrcList *pSrc;
+ sqliteVdbeAddOp(pParse->pVdbe, OP_ListPush, 0, 0);
+ pSrc = targetSrcList(pParse, pTriggerStep);
+ sqliteDeleteFrom(pParse, pSrc, sqliteExprDup(pTriggerStep->pWhere));
+ sqliteVdbeAddOp(pParse->pVdbe, OP_ListPop, 0, 0);
+ break;
+ }
+ default:
+ assert(0);
+ }
+ pParse->nTab = saveNTab;
+ pTriggerStep = pTriggerStep->pNext;
+ }
+
+ return 0;
+}
+
+/*
+** This is called to code FOR EACH ROW triggers.
+**
+** When the code that this function generates is executed, the following
+** must be true:
+**
+** 1. No cursors may be open in the main database. (But newIdx and oldIdx
+** can be indices of cursors in temporary tables. See below.)
+**
+** 2. If the triggers being coded are ON INSERT or ON UPDATE triggers, then
+** a temporary vdbe cursor (index newIdx) must be open and pointing at
+** a row containing values to be substituted for new.* expressions in the
+** trigger program(s).
+**
+** 3. If the triggers being coded are ON DELETE or ON UPDATE triggers, then
+** a temporary vdbe cursor (index oldIdx) must be open and pointing at
+** a row containing values to be substituted for old.* expressions in the
+** trigger program(s).
+**
+*/
+int sqliteCodeRowTrigger(
+ Parse *pParse, /* Parse context */
+ int op, /* One of TK_UPDATE, TK_INSERT, TK_DELETE */
+ ExprList *pChanges, /* Changes list for any UPDATE OF triggers */
+ int tr_tm, /* One of TK_BEFORE, TK_AFTER */
+ Table *pTab, /* The table to code triggers from */
+  int newIdx,         /* The index of the "new" row to access */
+  int oldIdx,         /* The index of the "old" row to access */
+ int orconf, /* ON CONFLICT policy */
+ int ignoreJump /* Instruction to jump to for RAISE(IGNORE) */
+){
+ Trigger * pTrigger;
+ TriggerStack * pTriggerStack;
+
+ assert(op == TK_UPDATE || op == TK_INSERT || op == TK_DELETE);
+ assert(tr_tm == TK_BEFORE || tr_tm == TK_AFTER );
+
+ assert(newIdx != -1 || oldIdx != -1);
+
+ pTrigger = pTab->pTrigger;
+ while( pTrigger ){
+ int fire_this = 0;
+
+ /* determine whether we should code this trigger */
+ if( pTrigger->op == op && pTrigger->tr_tm == tr_tm &&
+ pTrigger->foreach == TK_ROW ){
+ fire_this = 1;
+ pTriggerStack = pParse->trigStack;
+ while( pTriggerStack ){
+ if( pTriggerStack->pTrigger == pTrigger ){
+ fire_this = 0;
+ }
+ pTriggerStack = pTriggerStack->pNext;
+ }
+ if( op == TK_UPDATE && pTrigger->pColumns &&
+ !checkColumnOverLap(pTrigger->pColumns, pChanges) ){
+ fire_this = 0;
+ }
+ }
+
+ if( fire_this && (pTriggerStack = sqliteMalloc(sizeof(TriggerStack)))!=0 ){
+ int endTrigger;
+ SrcList dummyTablist;
+ Expr * whenExpr;
+ AuthContext sContext;
+
+ dummyTablist.nSrc = 0;
+
+ /* Push an entry on to the trigger stack */
+ pTriggerStack->pTrigger = pTrigger;
+ pTriggerStack->newIdx = newIdx;
+ pTriggerStack->oldIdx = oldIdx;
+ pTriggerStack->pTab = pTab;
+ pTriggerStack->pNext = pParse->trigStack;
+ pTriggerStack->ignoreJump = ignoreJump;
+ pParse->trigStack = pTriggerStack;
+ sqliteAuthContextPush(pParse, &sContext, pTrigger->name);
+
+ /* code the WHEN clause */
+ endTrigger = sqliteVdbeMakeLabel(pParse->pVdbe);
+ whenExpr = sqliteExprDup(pTrigger->pWhen);
+ if( sqliteExprResolveIds(pParse, &dummyTablist, 0, whenExpr) ){
+ pParse->trigStack = pParse->trigStack->pNext;
+ sqliteFree(pTriggerStack);
+ sqliteExprDelete(whenExpr);
+ return 1;
+ }
+ sqliteExprIfFalse(pParse, whenExpr, endTrigger, 1);
+ sqliteExprDelete(whenExpr);
+
+ sqliteVdbeAddOp(pParse->pVdbe, OP_ContextPush, 0, 0);
+ codeTriggerProgram(pParse, pTrigger->step_list, orconf);
+ sqliteVdbeAddOp(pParse->pVdbe, OP_ContextPop, 0, 0);
+
+ /* Pop the entry off the trigger stack */
+ pParse->trigStack = pParse->trigStack->pNext;
+ sqliteAuthContextPop(&sContext);
+ sqliteFree(pTriggerStack);
+
+ sqliteVdbeResolveLabel(pParse->pVdbe, endTrigger);
+ }
+ pTrigger = pTrigger->pNext;
+ }
+
+ return 0;
+}
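+
+/*
+** For example (see update.c in this change): sqliteUpdate() opens the two
+** pseudo-cursors with OP_OpenPseudo, writes the original row into the "old"
+** cursor and the modified row into the "new" cursor, and then calls this
+** routine once with tr_tm==TK_BEFORE and once with tr_tm==TK_AFTER.
+*/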
diff --git a/usr/src/cmd/svc/configd/sqlite/src/update.c b/usr/src/cmd/svc/configd/sqlite/src/update.c
new file mode 100644
index 0000000000..d90b144f2a
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/update.c
@@ -0,0 +1,462 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains C code routines that are called by the parser
+** to handle UPDATE statements.
+**
+** $Id: update.c,v 1.70.2.1 2004/04/29 16:16:29 drh Exp $
+*/
+#include "sqliteInt.h"
+
+/*
+** Process an UPDATE statement.
+**
+** UPDATE OR IGNORE table_wxyz SET a=b, c=d WHERE e<5 AND f NOT NULL;
+** \_______/ \________/ \______/ \________________/
+* onError pTabList pChanges pWhere
+*/
+void sqliteUpdate(
+ Parse *pParse, /* The parser context */
+ SrcList *pTabList, /* The table in which we should change things */
+ ExprList *pChanges, /* Things to be changed */
+ Expr *pWhere, /* The WHERE clause. May be null */
+ int onError /* How to handle constraint errors */
+){
+ int i, j; /* Loop counters */
+ Table *pTab; /* The table to be updated */
+ int loopStart; /* VDBE instruction address of the start of the loop */
+ int jumpInst; /* Addr of VDBE instruction to jump out of loop */
+ WhereInfo *pWInfo; /* Information about the WHERE clause */
+ Vdbe *v; /* The virtual database engine */
+ Index *pIdx; /* For looping over indices */
+ int nIdx; /* Number of indices that need updating */
+ int nIdxTotal; /* Total number of indices */
+ int iCur; /* VDBE Cursor number of pTab */
+ sqlite *db; /* The database structure */
+ Index **apIdx = 0; /* An array of indices that need updating too */
+ char *aIdxUsed = 0; /* aIdxUsed[i]==1 if the i-th index is used */
+ int *aXRef = 0; /* aXRef[i] is the index in pChanges->a[] of the
+                   ** expression for the i-th column of the table.
+ ** aXRef[i]==-1 if the i-th column is not changed. */
+ int chngRecno; /* True if the record number is being changed */
+ Expr *pRecnoExpr; /* Expression defining the new record number */
+ int openAll; /* True if all indices need to be opened */
+ int isView; /* Trying to update a view */
+ int iStackDepth; /* Index of memory cell holding stack depth */
+ AuthContext sContext; /* The authorization context */
+
+ int before_triggers; /* True if there are any BEFORE triggers */
+ int after_triggers; /* True if there are any AFTER triggers */
+ int row_triggers_exist = 0; /* True if any row triggers exist */
+
+ int newIdx = -1; /* index of trigger "new" temp table */
+ int oldIdx = -1; /* index of trigger "old" temp table */
+
+ sContext.pParse = 0;
+ if( pParse->nErr || sqlite_malloc_failed ) goto update_cleanup;
+ db = pParse->db;
+ assert( pTabList->nSrc==1 );
+ iStackDepth = pParse->nMem++;
+
+ /* Locate the table which we want to update.
+ */
+ pTab = sqliteSrcListLookup(pParse, pTabList);
+ if( pTab==0 ) goto update_cleanup;
+ before_triggers = sqliteTriggersExist(pParse, pTab->pTrigger,
+ TK_UPDATE, TK_BEFORE, TK_ROW, pChanges);
+ after_triggers = sqliteTriggersExist(pParse, pTab->pTrigger,
+ TK_UPDATE, TK_AFTER, TK_ROW, pChanges);
+ row_triggers_exist = before_triggers || after_triggers;
+ isView = pTab->pSelect!=0;
+ if( sqliteIsReadOnly(pParse, pTab, before_triggers) ){
+ goto update_cleanup;
+ }
+ if( isView ){
+ if( sqliteViewGetColumnNames(pParse, pTab) ){
+ goto update_cleanup;
+ }
+ }
+ aXRef = sqliteMalloc( sizeof(int) * pTab->nCol );
+ if( aXRef==0 ) goto update_cleanup;
+ for(i=0; i<pTab->nCol; i++) aXRef[i] = -1;
+
+ /* If there are FOR EACH ROW triggers, allocate cursors for the
+ ** special OLD and NEW tables
+ */
+ if( row_triggers_exist ){
+ newIdx = pParse->nTab++;
+ oldIdx = pParse->nTab++;
+ }
+
+  /* Allocate cursors for the main database table and for all indices.
+ ** The index cursors might not be used, but if they are used they
+ ** need to occur right after the database cursor. So go ahead and
+ ** allocate enough space, just in case.
+ */
+ pTabList->a[0].iCursor = iCur = pParse->nTab++;
+ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+ pParse->nTab++;
+ }
+
+ /* Resolve the column names in all the expressions of the
+  ** UPDATE statement.  Also find the column index
+ ** for each column to be updated in the pChanges array. For each
+ ** column to be updated, make sure we have authorization to change
+ ** that column.
+ */
+ chngRecno = 0;
+ for(i=0; i<pChanges->nExpr; i++){
+ if( sqliteExprResolveIds(pParse, pTabList, 0, pChanges->a[i].pExpr) ){
+ goto update_cleanup;
+ }
+ if( sqliteExprCheck(pParse, pChanges->a[i].pExpr, 0, 0) ){
+ goto update_cleanup;
+ }
+ for(j=0; j<pTab->nCol; j++){
+ if( sqliteStrICmp(pTab->aCol[j].zName, pChanges->a[i].zName)==0 ){
+ if( j==pTab->iPKey ){
+ chngRecno = 1;
+ pRecnoExpr = pChanges->a[i].pExpr;
+ }
+ aXRef[j] = i;
+ break;
+ }
+ }
+ if( j>=pTab->nCol ){
+ if( sqliteIsRowid(pChanges->a[i].zName) ){
+ chngRecno = 1;
+ pRecnoExpr = pChanges->a[i].pExpr;
+ }else{
+ sqliteErrorMsg(pParse, "no such column: %s", pChanges->a[i].zName);
+ goto update_cleanup;
+ }
+ }
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ {
+ int rc;
+ rc = sqliteAuthCheck(pParse, SQLITE_UPDATE, pTab->zName,
+ pTab->aCol[j].zName, db->aDb[pTab->iDb].zName);
+ if( rc==SQLITE_DENY ){
+ goto update_cleanup;
+ }else if( rc==SQLITE_IGNORE ){
+ aXRef[j] = -1;
+ }
+ }
+#endif
+ }
+
+ /* Allocate memory for the array apIdx[] and fill it with pointers to every
+ ** index that needs to be updated. Indices only need updating if their
+ ** key includes one of the columns named in pChanges or if the record
+ ** number of the original table entry is changing.
+ */
+ for(nIdx=nIdxTotal=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdxTotal++){
+ if( chngRecno ){
+ i = 0;
+ }else {
+ for(i=0; i<pIdx->nColumn; i++){
+ if( aXRef[pIdx->aiColumn[i]]>=0 ) break;
+ }
+ }
+ if( i<pIdx->nColumn ) nIdx++;
+ }
+ if( nIdxTotal>0 ){
+ apIdx = sqliteMalloc( sizeof(Index*) * nIdx + nIdxTotal );
+ if( apIdx==0 ) goto update_cleanup;
+ aIdxUsed = (char*)&apIdx[nIdx];
+ }
+ for(nIdx=j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){
+ if( chngRecno ){
+ i = 0;
+ }else{
+ for(i=0; i<pIdx->nColumn; i++){
+ if( aXRef[pIdx->aiColumn[i]]>=0 ) break;
+ }
+ }
+ if( i<pIdx->nColumn ){
+ apIdx[nIdx++] = pIdx;
+ aIdxUsed[j] = 1;
+ }else{
+ aIdxUsed[j] = 0;
+ }
+ }
+
+ /* Resolve the column names in all the expressions in the
+ ** WHERE clause.
+ */
+ if( pWhere ){
+ if( sqliteExprResolveIds(pParse, pTabList, 0, pWhere) ){
+ goto update_cleanup;
+ }
+ if( sqliteExprCheck(pParse, pWhere, 0, 0) ){
+ goto update_cleanup;
+ }
+ }
+
+ /* Start the view context
+ */
+ if( isView ){
+ sqliteAuthContextPush(pParse, &sContext, pTab->zName);
+ }
+
+ /* Begin generating code.
+ */
+ v = sqliteGetVdbe(pParse);
+ if( v==0 ) goto update_cleanup;
+ sqliteBeginWriteOperation(pParse, 1, pTab->iDb);
+
+ /* If we are trying to update a view, construct that view into
+ ** a temporary table.
+ */
+ if( isView ){
+ Select *pView;
+ pView = sqliteSelectDup(pTab->pSelect);
+ sqliteSelect(pParse, pView, SRT_TempTable, iCur, 0, 0, 0);
+ sqliteSelectDelete(pView);
+ }
+
+ /* Begin the database scan
+ */
+ pWInfo = sqliteWhereBegin(pParse, pTabList, pWhere, 1, 0);
+ if( pWInfo==0 ) goto update_cleanup;
+
+ /* Remember the index of every item to be updated.
+ */
+ sqliteVdbeAddOp(v, OP_ListWrite, 0, 0);
+
+ /* End the database scan loop.
+ */
+ sqliteWhereEnd(pWInfo);
+
+ /* Initialize the count of updated rows
+ */
+ if( db->flags & SQLITE_CountRows && !pParse->trigStack ){
+ sqliteVdbeAddOp(v, OP_Integer, 0, 0);
+ }
+
+ if( row_triggers_exist ){
+ /* Create pseudo-tables for NEW and OLD
+ */
+ sqliteVdbeAddOp(v, OP_OpenPseudo, oldIdx, 0);
+ sqliteVdbeAddOp(v, OP_OpenPseudo, newIdx, 0);
+
+ /* The top of the update loop for when there are triggers.
+ */
+ sqliteVdbeAddOp(v, OP_ListRewind, 0, 0);
+ sqliteVdbeAddOp(v, OP_StackDepth, 0, 0);
+ sqliteVdbeAddOp(v, OP_MemStore, iStackDepth, 1);
+ loopStart = sqliteVdbeAddOp(v, OP_MemLoad, iStackDepth, 0);
+ sqliteVdbeAddOp(v, OP_StackReset, 0, 0);
+ jumpInst = sqliteVdbeAddOp(v, OP_ListRead, 0, 0);
+ sqliteVdbeAddOp(v, OP_Dup, 0, 0);
+
+ /* Open a cursor and make it point to the record that is
+ ** being updated.
+ */
+ sqliteVdbeAddOp(v, OP_Dup, 0, 0);
+ if( !isView ){
+ sqliteVdbeAddOp(v, OP_Integer, pTab->iDb, 0);
+ sqliteVdbeAddOp(v, OP_OpenRead, iCur, pTab->tnum);
+ }
+ sqliteVdbeAddOp(v, OP_MoveTo, iCur, 0);
+
+ /* Generate the OLD table
+ */
+ sqliteVdbeAddOp(v, OP_Recno, iCur, 0);
+ sqliteVdbeAddOp(v, OP_RowData, iCur, 0);
+ sqliteVdbeAddOp(v, OP_PutIntKey, oldIdx, 0);
+
+ /* Generate the NEW table
+ */
+ if( chngRecno ){
+ sqliteExprCode(pParse, pRecnoExpr);
+ }else{
+ sqliteVdbeAddOp(v, OP_Recno, iCur, 0);
+ }
+ for(i=0; i<pTab->nCol; i++){
+ if( i==pTab->iPKey ){
+ sqliteVdbeAddOp(v, OP_String, 0, 0);
+ continue;
+ }
+ j = aXRef[i];
+ if( j<0 ){
+ sqliteVdbeAddOp(v, OP_Column, iCur, i);
+ }else{
+ sqliteExprCode(pParse, pChanges->a[j].pExpr);
+ }
+ }
+ sqliteVdbeAddOp(v, OP_MakeRecord, pTab->nCol, 0);
+ sqliteVdbeAddOp(v, OP_PutIntKey, newIdx, 0);
+ if( !isView ){
+ sqliteVdbeAddOp(v, OP_Close, iCur, 0);
+ }
+
+ /* Fire the BEFORE and INSTEAD OF triggers
+ */
+ if( sqliteCodeRowTrigger(pParse, TK_UPDATE, pChanges, TK_BEFORE, pTab,
+ newIdx, oldIdx, onError, loopStart) ){
+ goto update_cleanup;
+ }
+ }
+
+ if( !isView ){
+ /*
+ ** Open every index that needs updating. Note that if any
+ ** index could potentially invoke a REPLACE conflict resolution
+ ** action, then we need to open all indices because we might need
+ ** to be deleting some records.
+ */
+ sqliteVdbeAddOp(v, OP_Integer, pTab->iDb, 0);
+ sqliteVdbeAddOp(v, OP_OpenWrite, iCur, pTab->tnum);
+ if( onError==OE_Replace ){
+ openAll = 1;
+ }else{
+ openAll = 0;
+ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+ if( pIdx->onError==OE_Replace ){
+ openAll = 1;
+ break;
+ }
+ }
+ }
+ for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){
+ if( openAll || aIdxUsed[i] ){
+ sqliteVdbeAddOp(v, OP_Integer, pIdx->iDb, 0);
+ sqliteVdbeAddOp(v, OP_OpenWrite, iCur+i+1, pIdx->tnum);
+ assert( pParse->nTab>iCur+i+1 );
+ }
+ }
+
+ /* Loop over every record that needs updating. We have to load
+ ** the old data for each record to be updated because some columns
+ ** might not change and we will need to copy the old value.
+  ** Also, the old data is needed to delete the old index entries.
+ ** So make the cursor point at the old record.
+ */
+ if( !row_triggers_exist ){
+ sqliteVdbeAddOp(v, OP_ListRewind, 0, 0);
+ jumpInst = loopStart = sqliteVdbeAddOp(v, OP_ListRead, 0, 0);
+ sqliteVdbeAddOp(v, OP_Dup, 0, 0);
+ }
+ sqliteVdbeAddOp(v, OP_NotExists, iCur, loopStart);
+
+ /* If the record number will change, push the record number as it
+ ** will be after the update. (The old record number is currently
+ ** on top of the stack.)
+ */
+ if( chngRecno ){
+ sqliteExprCode(pParse, pRecnoExpr);
+ sqliteVdbeAddOp(v, OP_MustBeInt, 0, 0);
+ }
+
+ /* Compute new data for this record.
+ */
+ for(i=0; i<pTab->nCol; i++){
+ if( i==pTab->iPKey ){
+ sqliteVdbeAddOp(v, OP_String, 0, 0);
+ continue;
+ }
+ j = aXRef[i];
+ if( j<0 ){
+ sqliteVdbeAddOp(v, OP_Column, iCur, i);
+ }else{
+ sqliteExprCode(pParse, pChanges->a[j].pExpr);
+ }
+ }
+
+ /* Do constraint checks
+ */
+ sqliteGenerateConstraintChecks(pParse, pTab, iCur, aIdxUsed, chngRecno, 1,
+ onError, loopStart);
+
+ /* Delete the old indices for the current record.
+ */
+ sqliteGenerateRowIndexDelete(db, v, pTab, iCur, aIdxUsed);
+
+ /* If changing the record number, delete the old record.
+ */
+ if( chngRecno ){
+ sqliteVdbeAddOp(v, OP_Delete, iCur, 0);
+ }
+
+ /* Create the new index entries and the new record.
+ */
+ sqliteCompleteInsertion(pParse, pTab, iCur, aIdxUsed, chngRecno, 1, -1);
+ }
+
+ /* Increment the row counter
+ */
+ if( db->flags & SQLITE_CountRows && !pParse->trigStack){
+ sqliteVdbeAddOp(v, OP_AddImm, 1, 0);
+ }
+
+ /* If there are triggers, close all the cursors after each iteration
+  ** through the loop.  Then fire the AFTER triggers.
+ */
+ if( row_triggers_exist ){
+ if( !isView ){
+ for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){
+ if( openAll || aIdxUsed[i] )
+ sqliteVdbeAddOp(v, OP_Close, iCur+i+1, 0);
+ }
+ sqliteVdbeAddOp(v, OP_Close, iCur, 0);
+ pParse->nTab = iCur;
+ }
+ if( sqliteCodeRowTrigger(pParse, TK_UPDATE, pChanges, TK_AFTER, pTab,
+ newIdx, oldIdx, onError, loopStart) ){
+ goto update_cleanup;
+ }
+ }
+
+ /* Repeat the above with the next record to be updated, until
+  ** all records selected by the WHERE clause have been updated.
+ */
+ sqliteVdbeAddOp(v, OP_Goto, 0, loopStart);
+ sqliteVdbeChangeP2(v, jumpInst, sqliteVdbeCurrentAddr(v));
+ sqliteVdbeAddOp(v, OP_ListReset, 0, 0);
+
+ /* Close all tables if there were no FOR EACH ROW triggers */
+ if( !row_triggers_exist ){
+ for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){
+ if( openAll || aIdxUsed[i] ){
+ sqliteVdbeAddOp(v, OP_Close, iCur+i+1, 0);
+ }
+ }
+ sqliteVdbeAddOp(v, OP_Close, iCur, 0);
+ pParse->nTab = iCur;
+ }else{
+ sqliteVdbeAddOp(v, OP_Close, newIdx, 0);
+ sqliteVdbeAddOp(v, OP_Close, oldIdx, 0);
+ }
+
+ sqliteVdbeAddOp(v, OP_SetCounts, 0, 0);
+ sqliteEndWriteOperation(pParse);
+
+ /*
+ ** Return the number of rows that were changed.
+ */
+ if( db->flags & SQLITE_CountRows && !pParse->trigStack ){
+ sqliteVdbeOp3(v, OP_ColumnName, 0, 1, "rows updated", P3_STATIC);
+ sqliteVdbeAddOp(v, OP_Callback, 1, 0);
+ }
+
+update_cleanup:
+ sqliteAuthContextPop(&sContext);
+ sqliteFree(apIdx);
+ sqliteFree(aXRef);
+ sqliteSrcListDelete(pTabList);
+ sqliteExprListDelete(pChanges);
+ sqliteExprDelete(pWhere);
+ return;
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/src/util.c b/usr/src/cmd/svc/configd/sqlite/src/util.c
new file mode 100644
index 0000000000..8dda556f18
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/util.c
@@ -0,0 +1,1138 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** Utility functions used throughout sqlite.
+**
+** This file contains functions for allocating memory, comparing
+** strings, and stuff like that.
+**
+** $Id: util.c,v 1.74.2.1 2004/07/15 13:08:41 drh Exp $
+*/
+#include "sqliteInt.h"
+#include <stdarg.h>
+#include <ctype.h>
+
+/*
+** If malloc() ever fails, this global variable gets set to 1.
+** This causes the library to abort and never again function.
+*/
+int sqlite_malloc_failed = 0;
+
+/*
+** If MEMORY_DEBUG is defined, then use versions of malloc() and
+** free() that track memory usage and check for buffer overruns.
+*/
+#ifdef MEMORY_DEBUG
+
+/*
+** For keeping track of the number of mallocs and frees. This
+** is used to check for memory leaks.
+*/
+int sqlite_nMalloc; /* Number of sqliteMalloc() calls */
+int sqlite_nFree; /* Number of sqliteFree() calls */
+int sqlite_iMallocFail; /* Fail sqliteMalloc() after this many calls */
+#if MEMORY_DEBUG>1
+static int memcnt = 0;
+#endif
+
+/*
+** Number of 32-bit guard words
+*/
+#define N_GUARD 1
+
+/*
+** Allocate new memory and set it to zero. Return NULL if
+** no memory is available.
+*/
+void *sqliteMalloc_(int n, int bZero, char *zFile, int line){
+ void *p;
+ int *pi;
+ int i, k;
+ if( sqlite_iMallocFail>=0 ){
+ sqlite_iMallocFail--;
+ if( sqlite_iMallocFail==0 ){
+ sqlite_malloc_failed++;
+#if MEMORY_DEBUG>1
+ fprintf(stderr,"**** failed to allocate %d bytes at %s:%d\n",
+ n, zFile,line);
+#endif
+ sqlite_iMallocFail--;
+ return 0;
+ }
+ }
+ if( n==0 ) return 0;
+ k = (n+sizeof(int)-1)/sizeof(int);
+ pi = malloc( (N_GUARD*2+1+k)*sizeof(int));
+ if( pi==0 ){
+ sqlite_malloc_failed++;
+ return 0;
+ }
+ sqlite_nMalloc++;
+ for(i=0; i<N_GUARD; i++) pi[i] = 0xdead1122;
+ pi[N_GUARD] = n;
+ for(i=0; i<N_GUARD; i++) pi[k+1+N_GUARD+i] = 0xdead3344;
+ p = &pi[N_GUARD+1];
+ memset(p, bZero==0, n);
+#if MEMORY_DEBUG>1
+ fprintf(stderr,"%06d malloc %d bytes at 0x%x from %s:%d\n",
+ ++memcnt, n, (int)p, zFile,line);
+#endif
+ return p;
+}
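+
+/*
+** Layout of a debug allocation, as built above (illustrative):
+**
+**     [N_GUARD ints of 0xdead1122][n][k ints of user data]
+**     [N_GUARD ints of 0xdead3344],  where k = (n+sizeof(int)-1)/sizeof(int)
+**
+** The returned pointer addresses the first user-data int, so sqliteFree_()
+** and sqliteRealloc_() can step back and verify both guard regions.
+*/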
+
+/*
+** Check to see if the given pointer was obtained from sqliteMalloc()
+** and is able to hold at least N bytes. Raise an exception if this
+** is not the case.
+**
+** This routine is used for testing purposes only.
+*/
+void sqliteCheckMemory(void *p, int N){
+ int *pi = p;
+ int n, i, k;
+ pi -= N_GUARD+1;
+ for(i=0; i<N_GUARD; i++){
+ assert( pi[i]==0xdead1122 );
+ }
+ n = pi[N_GUARD];
+ assert( N>=0 && N<n );
+ k = (n+sizeof(int)-1)/sizeof(int);
+ for(i=0; i<N_GUARD; i++){
+ assert( pi[k+N_GUARD+1+i]==0xdead3344 );
+ }
+}
+
+/*
+** Free memory previously obtained from sqliteMalloc()
+*/
+void sqliteFree_(void *p, char *zFile, int line){
+ if( p ){
+ int *pi, i, k, n;
+ pi = p;
+ pi -= N_GUARD+1;
+ sqlite_nFree++;
+ for(i=0; i<N_GUARD; i++){
+ if( pi[i]!=0xdead1122 ){
+ fprintf(stderr,"Low-end memory corruption at 0x%x\n", (int)p);
+ return;
+ }
+ }
+ n = pi[N_GUARD];
+ k = (n+sizeof(int)-1)/sizeof(int);
+ for(i=0; i<N_GUARD; i++){
+ if( pi[k+N_GUARD+1+i]!=0xdead3344 ){
+ fprintf(stderr,"High-end memory corruption at 0x%x\n", (int)p);
+ return;
+ }
+ }
+ memset(pi, 0xff, (k+N_GUARD*2+1)*sizeof(int));
+#if MEMORY_DEBUG>1
+ fprintf(stderr,"%06d free %d bytes at 0x%x from %s:%d\n",
+ ++memcnt, n, (int)p, zFile,line);
+#endif
+ free(pi);
+ }
+}
+
+/*
+** Resize a prior allocation. If p==0, then this routine
+** works just like sqliteMalloc(). If n==0, then this routine
+** works just like sqliteFree().
+*/
+void *sqliteRealloc_(void *oldP, int n, char *zFile, int line){
+ int *oldPi, *pi, i, k, oldN, oldK;
+ void *p;
+ if( oldP==0 ){
+ return sqliteMalloc_(n,1,zFile,line);
+ }
+ if( n==0 ){
+ sqliteFree_(oldP,zFile,line);
+ return 0;
+ }
+ oldPi = oldP;
+ oldPi -= N_GUARD+1;
+ if( oldPi[0]!=0xdead1122 ){
+ fprintf(stderr,"Low-end memory corruption in realloc at 0x%x\n", (int)oldP);
+ return 0;
+ }
+ oldN = oldPi[N_GUARD];
+ oldK = (oldN+sizeof(int)-1)/sizeof(int);
+ for(i=0; i<N_GUARD; i++){
+ if( oldPi[oldK+N_GUARD+1+i]!=0xdead3344 ){
+ fprintf(stderr,"High-end memory corruption in realloc at 0x%x\n",
+ (int)oldP);
+ return 0;
+ }
+ }
+ k = (n + sizeof(int) - 1)/sizeof(int);
+ pi = malloc( (k+N_GUARD*2+1)*sizeof(int) );
+ if( pi==0 ){
+ sqlite_malloc_failed++;
+ return 0;
+ }
+ for(i=0; i<N_GUARD; i++) pi[i] = 0xdead1122;
+ pi[N_GUARD] = n;
+ for(i=0; i<N_GUARD; i++) pi[k+N_GUARD+1+i] = 0xdead3344;
+ p = &pi[N_GUARD+1];
+ memcpy(p, oldP, n>oldN ? oldN : n);
+ if( n>oldN ){
+ memset(&((char*)p)[oldN], 0, n-oldN);
+ }
+ memset(oldPi, 0xab, (oldK+N_GUARD+2)*sizeof(int));
+ free(oldPi);
+#if MEMORY_DEBUG>1
+ fprintf(stderr,"%06d realloc %d to %d bytes at 0x%x to 0x%x at %s:%d\n",
+ ++memcnt, oldN, n, (int)oldP, (int)p, zFile, line);
+#endif
+ return p;
+}
+
+/*
+** Make a duplicate of a string into memory obtained from malloc()
+** Free the original string using sqliteFree().
+**
+** This routine is called on all strings that are passed outside of
+** the SQLite library. That way clients can free the string using free()
+** rather than having to call sqliteFree().
+*/
+void sqliteStrRealloc(char **pz){
+ char *zNew;
+ if( pz==0 || *pz==0 ) return;
+ zNew = malloc( strlen(*pz) + 1 );
+ if( zNew==0 ){
+ sqlite_malloc_failed++;
+ sqliteFree(*pz);
+ *pz = 0;
+ }
+ strcpy(zNew, *pz);
+ sqliteFree(*pz);
+ *pz = zNew;
+}
+
+/*
+** Make a copy of a string in memory obtained from sqliteMalloc()
+*/
+char *sqliteStrDup_(const char *z, char *zFile, int line){
+ char *zNew;
+ if( z==0 ) return 0;
+ zNew = sqliteMalloc_(strlen(z)+1, 0, zFile, line);
+ if( zNew ) strcpy(zNew, z);
+ return zNew;
+}
+char *sqliteStrNDup_(const char *z, int n, char *zFile, int line){
+ char *zNew;
+ if( z==0 ) return 0;
+ zNew = sqliteMalloc_(n+1, 0, zFile, line);
+ if( zNew ){
+ memcpy(zNew, z, n);
+ zNew[n] = 0;
+ }
+ return zNew;
+}
+#endif /* MEMORY_DEBUG */
+
+/*
+** The following versions of malloc() and free() are for use in a
+** normal build.
+*/
+#if !defined(MEMORY_DEBUG)
+
+/*
+** Allocate new memory and set it to zero. Return NULL if
+** no memory is available. See also sqliteMallocRaw().
+*/
+void *sqliteMalloc(int n){
+ void *p;
+ if( (p = malloc(n))==0 ){
+ if( n>0 ) sqlite_malloc_failed++;
+ }else{
+ memset(p, 0, n);
+ }
+ return p;
+}
+
+/*
+** Allocate new memory but do not set it to zero. Return NULL if
+** no memory is available. See also sqliteMalloc().
+*/
+void *sqliteMallocRaw(int n){
+ void *p;
+ if( (p = malloc(n))==0 ){
+ if( n>0 ) sqlite_malloc_failed++;
+ }
+ return p;
+}
+
+/*
+** Free memory previously obtained from sqliteMalloc()
+*/
+void sqliteFree(void *p){
+ if( p ){
+ free(p);
+ }
+}
+
+/*
+** Resize a prior allocation. If p==0, then this routine
+** works just like sqliteMalloc(). If n==0, then this routine
+** works just like sqliteFree().
+*/
+void *sqliteRealloc(void *p, int n){
+ void *p2;
+ if( p==0 ){
+ return sqliteMalloc(n);
+ }
+ if( n==0 ){
+ sqliteFree(p);
+ return 0;
+ }
+ p2 = realloc(p, n);
+ if( p2==0 ){
+ sqlite_malloc_failed++;
+ }
+ return p2;
+}
+
+/*
+** Make a copy of a string in memory obtained from sqliteMalloc()
+*/
+char *sqliteStrDup(const char *z){
+ char *zNew;
+ if( z==0 ) return 0;
+ zNew = sqliteMallocRaw(strlen(z)+1);
+ if( zNew ) strcpy(zNew, z);
+ return zNew;
+}
+char *sqliteStrNDup(const char *z, int n){
+ char *zNew;
+ if( z==0 ) return 0;
+ zNew = sqliteMallocRaw(n+1);
+ if( zNew ){
+ memcpy(zNew, z, n);
+ zNew[n] = 0;
+ }
+ return zNew;
+}
+#endif /* !defined(MEMORY_DEBUG) */
+
+/*
+** Create a string from the 2nd and subsequent arguments (up to the
+** first NULL argument), store the string in memory obtained from
+** sqliteMalloc() and make the pointer indicated by the 1st argument
+** point to that string. The 1st argument must either be NULL or
+** point to memory obtained from sqliteMalloc().
+*/
+void sqliteSetString(char **pz, const char *zFirst, ...){
+ va_list ap;
+ int nByte;
+ const char *z;
+ char *zResult;
+
+ if( pz==0 ) return;
+ nByte = strlen(zFirst) + 1;
+ va_start(ap, zFirst);
+ while( (z = va_arg(ap, const char*))!=0 ){
+ nByte += strlen(z);
+ }
+ va_end(ap);
+ sqliteFree(*pz);
+ *pz = zResult = sqliteMallocRaw( nByte );
+ if( zResult==0 ){
+ return;
+ }
+ strcpy(zResult, zFirst);
+ zResult += strlen(zResult);
+ va_start(ap, zFirst);
+ while( (z = va_arg(ap, const char*))!=0 ){
+ strcpy(zResult, z);
+ zResult += strlen(zResult);
+ }
+ va_end(ap);
+#ifdef MEMORY_DEBUG
+#if MEMORY_DEBUG>1
+ fprintf(stderr,"string at 0x%x is %s\n", (int)*pz, *pz);
+#endif
+#endif
+}
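+
+/*
+** (Editorial illustration, not part of the original source.)  Typical use
+** of sqliteSetString() is building an error message from pieces.  The
+** argument list must end with a (char*)0 sentinel, and the target pointer
+** must start out NULL or point at memory from sqliteMalloc().  The name
+** zTab below is only a stand-in for any NUL-terminated string:
+**
+**     char *zErr = 0;
+**     sqliteSetString(&zErr, "no such table: ", zTab, (char*)0);
+**     ...
+**     sqliteFree(zErr);
+*/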
+
+/*
+** Works like sqliteSetString, but each string is now followed by
+** a length integer which specifies how much of the source string
+** to copy (in bytes). -1 means use the whole string. The 1st
+** argument must either be NULL or point to memory obtained from
+** sqliteMalloc().
+*/
+void sqliteSetNString(char **pz, ...){
+ va_list ap;
+ int nByte;
+ const char *z;
+ char *zResult;
+ int n;
+
+ if( pz==0 ) return;
+ nByte = 0;
+ va_start(ap, pz);
+ while( (z = va_arg(ap, const char*))!=0 ){
+ n = va_arg(ap, int);
+ if( n<=0 ) n = strlen(z);
+ nByte += n;
+ }
+ va_end(ap);
+ sqliteFree(*pz);
+ *pz = zResult = sqliteMallocRaw( nByte + 1 );
+ if( zResult==0 ) return;
+ va_start(ap, pz);
+ while( (z = va_arg(ap, const char*))!=0 ){
+ n = va_arg(ap, int);
+ if( n<=0 ) n = strlen(z);
+ strncpy(zResult, z, n);
+ zResult += n;
+ }
+ *zResult = 0;
+#ifdef MEMORY_DEBUG
+#if MEMORY_DEBUG>1
+ fprintf(stderr,"string at 0x%x is %s\n", (int)*pz, *pz);
+#endif
+#endif
+ va_end(ap);
+}
+
+/*
+** Add an error message to pParse->zErrMsg and increment pParse->nErr.
+** The following formatting characters are allowed:
+**
+** %s Insert a string
+** %z A string that should be freed after use
+** %d Insert an integer
+** %T Insert a token
+** %S Insert the first element of a SrcList
+*/
+void sqliteErrorMsg(Parse *pParse, const char *zFormat, ...){
+ va_list ap;
+ pParse->nErr++;
+ sqliteFree(pParse->zErrMsg);
+ va_start(ap, zFormat);
+ pParse->zErrMsg = sqliteVMPrintf(zFormat, ap);
+ va_end(ap);
+}
+
+/*
+** Convert an SQL-style quoted string into a normal string by removing
+** the quote characters. The conversion is done in-place. If the
+** input does not begin with a quote character, then this routine
+** is a no-op.
+**
+** 2002-Feb-14: This routine is extended to remove MS-Access style
+** brackets from around identifiers.  For example: "[a-b-c]" becomes
+** "a-b-c".
+*/
+void sqliteDequote(char *z){
+ int quote;
+ int i, j;
+ if( z==0 ) return;
+ quote = z[0];
+ switch( quote ){
+ case '\'': break;
+ case '"': break;
+ case '[': quote = ']'; break;
+ default: return;
+ }
+ for(i=1, j=0; z[i]; i++){
+ if( z[i]==quote ){
+ if( z[i+1]==quote ){
+ z[j++] = quote;
+ i++;
+ }else{
+ z[j++] = 0;
+ break;
+ }
+ }else{
+ z[j++] = z[i];
+ }
+ }
+}
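+
+/*
+** (Editorial illustration, not part of the original source.)  Sample
+** inputs and the in-place results of sqliteDequote(), including the
+** doubled-quote escape and the MS-Access bracket form:
+**
+**     char z1[] = "'it''s'";    sqliteDequote(z1);    z1 is now "it's"
+**     char z2[] = "[a-b-c]";    sqliteDequote(z2);    z2 is now "a-b-c"
+**     char z3[] = "plain";      sqliteDequote(z3);    z3 is unchanged
+*/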
+
+/* An array to map all upper-case characters into their corresponding
+** lower-case character.
+*/
+static unsigned char UpperToLower[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 97, 98, 99,100,101,102,103,
+ 104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,
+ 122, 91, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103,104,105,106,107,
+ 108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,
+ 126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,
+ 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,
+ 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,
+ 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,
+ 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,
+ 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,
+ 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,
+ 252,253,254,255
+};
+
+/*
+** This function computes a hash on the name of a keyword.
+** Case is not significant.
+*/
+int sqliteHashNoCase(const char *z, int n){
+ int h = 0;
+ if( n<=0 ) n = strlen(z);
+ while( n > 0 ){
+ h = (h<<3) ^ h ^ UpperToLower[(unsigned char)*z++];
+ n--;
+ }
+ return h & 0x7fffffff;
+}
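+
+/*
+** (Editorial illustration, not part of the original source.)  Because every
+** byte is folded through UpperToLower[], differently-cased spellings of a
+** keyword hash to the same value:
+**
+**     assert( sqliteHashNoCase("SELECT", 6)==sqliteHashNoCase("select", 6) );
+*/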
+
+/*
+** Some systems have stricmp(). Others have strcasecmp(). Because
+** there is no consistency, we will define our own.
+*/
+int sqliteStrICmp(const char *zLeft, const char *zRight){
+ register unsigned char *a, *b;
+ a = (unsigned char *)zLeft;
+ b = (unsigned char *)zRight;
+ while( *a!=0 && UpperToLower[*a]==UpperToLower[*b]){ a++; b++; }
+ return UpperToLower[*a] - UpperToLower[*b];
+}
+int sqliteStrNICmp(const char *zLeft, const char *zRight, int N){
+ register unsigned char *a, *b;
+ a = (unsigned char *)zLeft;
+ b = (unsigned char *)zRight;
+ while( N-- > 0 && *a!=0 && UpperToLower[*a]==UpperToLower[*b]){ a++; b++; }
+ return N<0 ? 0 : UpperToLower[*a] - UpperToLower[*b];
+}
+
+/*
+** Return TRUE if z is a pure numeric string. Return FALSE if the
+** string contains any character which is not part of a number.
+**
+** An empty string is considered non-numeric.
+*/
+int sqliteIsNumber(const char *z){
+ if( *z=='-' || *z=='+' ) z++;
+ if( !isdigit(*z) ){
+ return 0;
+ }
+ z++;
+ while( isdigit(*z) ){ z++; }
+ if( *z=='.' ){
+ z++;
+ if( !isdigit(*z) ) return 0;
+ while( isdigit(*z) ){ z++; }
+ }
+ if( *z=='e' || *z=='E' ){
+ z++;
+ if( *z=='+' || *z=='-' ) z++;
+ if( !isdigit(*z) ) return 0;
+ while( isdigit(*z) ){ z++; }
+ }
+ return *z==0;
+}
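+
+/*
+** (Editorial illustration, not part of the original source.)  Strings
+** accepted and rejected by sqliteIsNumber().  A trailing decimal point or
+** a bare exponent is rejected because a digit must follow each:
+**
+**     sqliteIsNumber("-42")       returns 1
+**     sqliteIsNumber("3.14e+2")   returns 1
+**     sqliteIsNumber("3.")        returns 0
+**     sqliteIsNumber("1e")        returns 0
+**     sqliteIsNumber("12ab")      returns 0
+**     sqliteIsNumber("")          returns 0
+*/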
+
+/*
+** The string z[] is an ascii representation of a real number.
+** Convert this string to a double.
+**
+** This routine assumes that z[] really is a valid number. If it
+** is not, the result is undefined.
+**
+** This routine is used instead of the library atof() function because
+** the library atof() might want to use "," as the decimal point instead
+** of "." depending on how locale is set. But that would cause problems
+** for SQL. So this routine always uses "." regardless of locale.
+*/
+double sqliteAtoF(const char *z, const char **pzEnd){
+ int sign = 1;
+ LONGDOUBLE_TYPE v1 = 0.0;
+ if( *z=='-' ){
+ sign = -1;
+ z++;
+ }else if( *z=='+' ){
+ z++;
+ }
+ while( isdigit(*z) ){
+ v1 = v1*10.0 + (*z - '0');
+ z++;
+ }
+ if( *z=='.' ){
+ LONGDOUBLE_TYPE divisor = 1.0;
+ z++;
+ while( isdigit(*z) ){
+ v1 = v1*10.0 + (*z - '0');
+ divisor *= 10.0;
+ z++;
+ }
+ v1 /= divisor;
+ }
+ if( *z=='e' || *z=='E' ){
+ int esign = 1;
+ int eval = 0;
+ LONGDOUBLE_TYPE scale = 1.0;
+ z++;
+ if( *z=='-' ){
+ esign = -1;
+ z++;
+ }else if( *z=='+' ){
+ z++;
+ }
+ while( isdigit(*z) ){
+ eval = eval*10 + *z - '0';
+ z++;
+ }
+ while( eval>=64 ){ scale *= 1.0e+64; eval -= 64; }
+ while( eval>=16 ){ scale *= 1.0e+16; eval -= 16; }
+ while( eval>=4 ){ scale *= 1.0e+4; eval -= 4; }
+ while( eval>=1 ){ scale *= 1.0e+1; eval -= 1; }
+ if( esign<0 ){
+ v1 /= scale;
+ }else{
+ v1 *= scale;
+ }
+ }
+ if( pzEnd ) *pzEnd = z;
+ return sign<0 ? -v1 : v1;
+}
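+
+/*
+** (Editorial illustration, not part of the original source.)  sqliteAtoF()
+** always uses '.' as the decimal point regardless of locale, and optionally
+** reports where the numeric prefix ended:
+**
+**     const char *zEnd;
+**     double r = sqliteAtoF("-1.5e2xyz", &zEnd);
+**     assert( r==-150.0 );        zEnd now points at the "xyz" suffix
+*/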
+
+/*
+** The string zNum represents an integer. There might be some other
+** information following the integer too, but that part is ignored.
+** If the integer that the prefix of zNum represents will fit in a
+** 32-bit signed integer, return TRUE. Otherwise return FALSE.
+**
+** This routine returns FALSE for the string -2147483648 even though
+** that number will, in theory, fit in a 32-bit integer.  But positive
+** 2147483648 will not fit in 32 bits. So it seems safer to return
+** false.
+*/
+int sqliteFitsIn32Bits(const char *zNum){
+ int i, c;
+ if( *zNum=='-' || *zNum=='+' ) zNum++;
+ for(i=0; (c=zNum[i])>='0' && c<='9'; i++){}
+ return i<10 || (i==10 && memcmp(zNum,"2147483647",10)<=0);
+}
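+
+/*
+** (Editorial illustration, not part of the original source.)  Boundary
+** behaviour of sqliteFitsIn32Bits(); the sign is skipped, so -2147483648
+** is rejected along with +2147483648, as described above:
+**
+**     sqliteFitsIn32Bits("2147483647")    returns 1
+**     sqliteFitsIn32Bits("2147483648")    returns 0
+**     sqliteFitsIn32Bits("-2147483648")   returns 0
+**     sqliteFitsIn32Bits("-12")           returns 1
+*/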
+
+/* This comparison routine is what we use for comparison operations
+** between numeric values in an SQL expression. "Numeric" is a little
+** bit misleading here. What we mean is that the strings have a
+** type of "numeric" from the point of view of SQL. The strings
+** do not necessarily contain numbers. They could contain text.
+**
+** If the input strings both look like actual numbers then they
+** compare in numerical order. Numerical strings are always less
+** than non-numeric strings so if one input string looks like a
+** number and the other does not, then the one that looks like
+** a number is the smaller. Non-numeric strings compare in
+** lexicographical order (the same order as strcmp()).
+*/
+int sqliteCompare(const char *atext, const char *btext){
+ int result;
+ int isNumA, isNumB;
+ if( atext==0 ){
+ return -1;
+ }else if( btext==0 ){
+ return 1;
+ }
+ isNumA = sqliteIsNumber(atext);
+ isNumB = sqliteIsNumber(btext);
+ if( isNumA ){
+ if( !isNumB ){
+ result = -1;
+ }else{
+ double rA, rB;
+ rA = sqliteAtoF(atext, 0);
+ rB = sqliteAtoF(btext, 0);
+ if( rA<rB ){
+ result = -1;
+ }else if( rA>rB ){
+ result = +1;
+ }else{
+ result = 0;
+ }
+ }
+ }else if( isNumB ){
+ result = +1;
+ }else {
+ result = strcmp(atext, btext);
+ }
+ return result;
+}
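+
+/*
+** (Editorial illustration, not part of the original source.)  Sample
+** orderings produced by sqliteCompare():
+**
+**     sqliteCompare("9", "10")      negative   (numeric: 9 < 10)
+**     sqliteCompare("10", "abc")    negative   (numbers sort before text)
+**     sqliteCompare("abc", "abd")   negative   (plain strcmp() order)
+**     sqliteCompare(0, "abc")       -1         (a NULL argument sorts first)
+*/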
+
+/*
+** This routine is used for sorting. Each key is a list of one or more
+** null-terminated elements. The list is terminated by two nulls in
+** a row. For example, the following text is a key with three elements
+**
+** Aone\000Dtwo\000Athree\000\000
+**
+** All elements begin with one of the characters "+-AD" and end with "\000"
+** with zero or more text elements in between. Except, NULL elements
+** consist of the special two-character sequence "N\000".
+**
+** Both arguments will have the same number of elements. This routine
+** returns negative, zero, or positive if the first argument is less
+** than, equal to, or greater than the second.  (Result is a-b).
+**
+** Each element begins with one of the characters "+", "-", "A", "D".
+** This character determines the sort order and collating sequence:
+**
+** + Sort numerically in ascending order
+** - Sort numerically in descending order
+** A Sort as strings in ascending order
+** D Sort as strings in descending order.
+**
+** For the "+" and "-" sorting, pure numeric strings (strings for which the
+** isNum() function above returns TRUE) always compare less than strings
+** that are not pure numerics. Non-numeric strings compare in memcmp()
+** order. This is the same sort order as the sqliteCompare() function
+** above generates.
+**
+** The last point is a change from version 2.6.3 to version 2.7.0. In
+** version 2.6.3 and earlier, substrings of digits compared in numerical
+** order and case was used only to break a tie.
+**
+** Elements that begin with 'A' or 'D' compare in memcmp() order regardless
+** of whether or not they look like a number.
+**
+** Note that the sort order imposed by the rules above is the same
+** as the ordering defined by the "<", "<=", ">", and ">=" operators
+** of expressions and for indices. This was not the case for version
+** 2.6.3 and earlier.
+*/
+int sqliteSortCompare(const char *a, const char *b){
+ int res = 0;
+ int isNumA, isNumB;
+ int dir = 0;
+
+ while( res==0 && *a && *b ){
+ if( a[0]=='N' || b[0]=='N' ){
+ if( a[0]==b[0] ){
+ a += 2;
+ b += 2;
+ continue;
+ }
+ if( a[0]=='N' ){
+ dir = b[0];
+ res = -1;
+ }else{
+ dir = a[0];
+ res = +1;
+ }
+ break;
+ }
+ assert( a[0]==b[0] );
+ if( (dir=a[0])=='A' || a[0]=='D' ){
+ res = strcmp(&a[1],&b[1]);
+ if( res ) break;
+ }else{
+ isNumA = sqliteIsNumber(&a[1]);
+ isNumB = sqliteIsNumber(&b[1]);
+ if( isNumA ){
+ double rA, rB;
+ if( !isNumB ){
+ res = -1;
+ break;
+ }
+ rA = sqliteAtoF(&a[1], 0);
+ rB = sqliteAtoF(&b[1], 0);
+ if( rA<rB ){
+ res = -1;
+ break;
+ }
+ if( rA>rB ){
+ res = +1;
+ break;
+ }
+ }else if( isNumB ){
+ res = +1;
+ break;
+ }else{
+ res = strcmp(&a[1],&b[1]);
+ if( res ) break;
+ }
+ }
+ a += strlen(&a[1]) + 2;
+ b += strlen(&b[1]) + 2;
+ }
+ if( dir=='-' || dir=='D' ) res = -res;
+ return res;
+}
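+
+/*
+** (Editorial illustration, not part of the original source.)  A pair of
+** two-element sort keys in the format described above: a '+' (numeric
+** ascending) element followed by a 'D' (text descending) element:
+**
+**     static const char aKey[] = "+10\0Dabc\0\0";
+**     static const char bKey[] = "+9\0Dabc\0\0";
+**     assert( sqliteSortCompare(aKey, bKey)>0 );    10 sorts after 9
+*/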
+
+/*
+** Some powers of 64. These constants are needed in the
+** sqliteRealToSortable() routine below.
+*/
+#define _64e3 (64.0 * 64.0 * 64.0)
+#define _64e4 (64.0 * 64.0 * 64.0 * 64.0)
+#define _64e15 (_64e3 * _64e4 * _64e4 * _64e4)
+#define _64e16 (_64e4 * _64e4 * _64e4 * _64e4)
+#define _64e63 (_64e15 * _64e16 * _64e16 * _64e16)
+#define _64e64 (_64e16 * _64e16 * _64e16 * _64e16)
+
+/*
+** The following procedure converts a double-precision floating point
+** number into a string. The resulting string has the property that
+** two such strings compared using strcmp() or memcmp() will give the
+** same results as a numeric comparison of the original floating point
+** numbers.
+**
+** This routine is used to generate database keys from floating point
+** numbers such that the keys sort in the same order as the original
+** floating point numbers even though the keys are compared using
+** memcmp().
+**
+** The calling function should have allocated at least 14 characters
+** of space for the buffer z[].
+*/
+void sqliteRealToSortable(double r, char *z){
+ int neg;
+ int exp;
+ int cnt = 0;
+
+ /* This array maps integers between 0 and 63 into base-64 digits.
+  ** The digits must be chosen such that their ASCII codes are increasing.
+ ** This means we can not use the traditional base-64 digit set. */
+ static const char zDigit[] =
+ "0123456789"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz"
+ "|~";
+ if( r<0.0 ){
+ neg = 1;
+ r = -r;
+ *z++ = '-';
+ } else {
+ neg = 0;
+ *z++ = '0';
+ }
+ exp = 0;
+
+ if( r==0.0 ){
+ exp = -1024;
+ }else if( r<(0.5/64.0) ){
+ while( r < 0.5/_64e64 && exp > -961 ){ r *= _64e64; exp -= 64; }
+ while( r < 0.5/_64e16 && exp > -1009 ){ r *= _64e16; exp -= 16; }
+ while( r < 0.5/_64e4 && exp > -1021 ){ r *= _64e4; exp -= 4; }
+ while( r < 0.5/64.0 && exp > -1024 ){ r *= 64.0; exp -= 1; }
+ }else if( r>=0.5 ){
+ while( r >= 0.5*_64e63 && exp < 960 ){ r *= 1.0/_64e64; exp += 64; }
+ while( r >= 0.5*_64e15 && exp < 1008 ){ r *= 1.0/_64e16; exp += 16; }
+ while( r >= 0.5*_64e3 && exp < 1020 ){ r *= 1.0/_64e4; exp += 4; }
+ while( r >= 0.5 && exp < 1023 ){ r *= 1.0/64.0; exp += 1; }
+ }
+ if( neg ){
+ exp = -exp;
+ r = -r;
+ }
+ exp += 1024;
+ r += 0.5;
+ if( exp<0 ) return;
+ if( exp>=2048 || r>=1.0 ){
+ strcpy(z, "~~~~~~~~~~~~");
+ return;
+ }
+ *z++ = zDigit[(exp>>6)&0x3f];
+ *z++ = zDigit[exp & 0x3f];
+ while( r>0.0 && cnt<10 ){
+ int digit;
+ r *= 64.0;
+ digit = (int)r;
+ assert( digit>=0 && digit<64 );
+ *z++ = zDigit[digit & 0x3f];
+ r -= digit;
+ cnt++;
+ }
+ *z = 0;
+}
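+
+/*
+** (Editorial illustration, not part of the original source.)  Keys produced
+** by sqliteRealToSortable() order the same way as the numbers they encode;
+** the exact digits are an implementation detail, only the ordering matters:
+**
+**     char zA[14], zB[14];
+**     sqliteRealToSortable(2.5, zA);
+**     sqliteRealToSortable(10.0, zB);
+**     assert( strcmp(zA, zB)<0 );      2.5 sorts before 10.0
+*/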
+
+#ifdef SQLITE_UTF8
+/*
+** X is a pointer to the first byte of a UTF-8 character. Increment
+** X so that it points to the next character. This only works right
+** if X points to a well-formed UTF-8 string.
+*/
+#define sqliteNextChar(X) while( (0xc0&*++(X))==0x80 ){}
+#define sqliteCharVal(X) sqlite_utf8_to_int(X)
+
+#else /* !defined(SQLITE_UTF8) */
+/*
+** For iso8859 encoding, the next character is just the next byte.
+*/
+#define sqliteNextChar(X) (++(X));
+#define sqliteCharVal(X) ((int)*(X))
+
+#endif /* defined(SQLITE_UTF8) */
+
+
+#ifdef SQLITE_UTF8
+/*
+** Convert the UTF-8 character to which z points into a 31-bit
+** UCS character. This only works right if z points to a well-formed
+** UTF-8 string.
+*/
+static int sqlite_utf8_to_int(const unsigned char *z){
+ int c;
+ static const int initVal[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+ 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+ 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179,
+ 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 0, 1, 2,
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 0,
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 0, 1, 254,
+ 255,
+ };
+ c = initVal[*(z++)];
+ while( (0xc0&*z)==0x80 ){
+ c = (c<<6) | (0x3f&*(z++));
+ }
+ return c;
+}
+#endif
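+
+/*
+** (Editorial illustration, not part of the original source; SQLITE_UTF8
+** builds only.)  Decoding a two-byte UTF-8 sequence: 0xC3 0xA9 encodes
+** U+00E9, "e" with an acute accent:
+**
+**     static const unsigned char z[] = { 0xC3, 0xA9, 0x00 };
+**     assert( sqlite_utf8_to_int(z)==0xE9 );
+*/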
+
+/*
+** Compare two UTF-8 strings for equality where the first string can
+** potentially be a "glob" expression. Return true (1) if they
+** are the same and false (0) if they are different.
+**
+** Globbing rules:
+**
+** '*' Matches any sequence of zero or more characters.
+**
+** '?' Matches exactly one character.
+**
+** [...] Matches one character from the enclosed list of
+** characters.
+**
+** [^...] Matches one character not in the enclosed list.
+**
+** With the [...] and [^...] matching, a ']' character can be included
+** in the list by making it the first character after '[' or '^'. A
+** range of characters can be specified using '-'. Example:
+** "[a-z]" matches any single lower-case letter. To match a '-', make
+** it the last character in the list.
+**
+** This routine is usually quick, but can be N**2 in the worst case.
+**
+** Hints: to match '*' or '?', put them in "[]". Like this:
+**
+** abc[*]xyz Matches "abc*xyz" only
+*/
+int
+sqliteGlobCompare(const unsigned char *zPattern, const unsigned char *zString){
+ register int c;
+ int invert;
+ int seen;
+ int c2;
+
+ while( (c = *zPattern)!=0 ){
+ switch( c ){
+ case '*':
+ while( (c=zPattern[1]) == '*' || c == '?' ){
+ if( c=='?' ){
+ if( *zString==0 ) return 0;
+ sqliteNextChar(zString);
+ }
+ zPattern++;
+ }
+ if( c==0 ) return 1;
+ if( c=='[' ){
+ while( *zString && sqliteGlobCompare(&zPattern[1],zString)==0 ){
+ sqliteNextChar(zString);
+ }
+ return *zString!=0;
+ }else{
+ while( (c2 = *zString)!=0 ){
+ while( c2 != 0 && c2 != c ){ c2 = *++zString; }
+ if( c2==0 ) return 0;
+ if( sqliteGlobCompare(&zPattern[1],zString) ) return 1;
+ sqliteNextChar(zString);
+ }
+ return 0;
+ }
+ case '?': {
+ if( *zString==0 ) return 0;
+ sqliteNextChar(zString);
+ zPattern++;
+ break;
+ }
+ case '[': {
+ int prior_c = 0;
+ seen = 0;
+ invert = 0;
+ c = sqliteCharVal(zString);
+ if( c==0 ) return 0;
+ c2 = *++zPattern;
+ if( c2=='^' ){ invert = 1; c2 = *++zPattern; }
+ if( c2==']' ){
+ if( c==']' ) seen = 1;
+ c2 = *++zPattern;
+ }
+ while( (c2 = sqliteCharVal(zPattern))!=0 && c2!=']' ){
+ if( c2=='-' && zPattern[1]!=']' && zPattern[1]!=0 && prior_c>0 ){
+ zPattern++;
+ c2 = sqliteCharVal(zPattern);
+ if( c>=prior_c && c<=c2 ) seen = 1;
+ prior_c = 0;
+ }else if( c==c2 ){
+ seen = 1;
+ prior_c = c2;
+ }else{
+ prior_c = c2;
+ }
+ sqliteNextChar(zPattern);
+ }
+ if( c2==0 || (seen ^ invert)==0 ) return 0;
+ sqliteNextChar(zString);
+ zPattern++;
+ break;
+ }
+ default: {
+ if( c != *zString ) return 0;
+ zPattern++;
+ zString++;
+ break;
+ }
+ }
+ }
+ return *zString==0;
+}
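+
+/*
+** (Editorial illustration, not part of the original source.)  GLOB matching
+** examples; unlike LIKE below, case is significant.  The arguments are
+** unsigned char pointers; casts are omitted here for brevity:
+**
+**     sqliteGlobCompare("abc*xyz",   "abcdefxyz")   returns 1
+**     sqliteGlobCompare("a?c",       "abc")         returns 1
+**     sqliteGlobCompare("[a-c]1",    "b1")          returns 1
+**     sqliteGlobCompare("abc[*]xyz", "abc*xyz")     returns 1
+**     sqliteGlobCompare("ABC",       "abc")         returns 0
+*/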
+
+/*
+** Compare two UTF-8 strings for equality using the "LIKE" operator of
+** SQL. The '%' character matches any sequence of 0 or more
+** characters and '_' matches any single character. Case is
+** not significant.
+**
+** This routine is just an adaptation of the sqliteGlobCompare()
+** routine above.
+*/
+int
+sqliteLikeCompare(const unsigned char *zPattern, const unsigned char *zString){
+ register int c;
+ int c2;
+
+ while( (c = UpperToLower[*zPattern])!=0 ){
+ switch( c ){
+ case '%': {
+ while( (c=zPattern[1]) == '%' || c == '_' ){
+ if( c=='_' ){
+ if( *zString==0 ) return 0;
+ sqliteNextChar(zString);
+ }
+ zPattern++;
+ }
+ if( c==0 ) return 1;
+ c = UpperToLower[c];
+ while( (c2=UpperToLower[*zString])!=0 ){
+ while( c2 != 0 && c2 != c ){ c2 = UpperToLower[*++zString]; }
+ if( c2==0 ) return 0;
+ if( sqliteLikeCompare(&zPattern[1],zString) ) return 1;
+ sqliteNextChar(zString);
+ }
+ return 0;
+ }
+ case '_': {
+ if( *zString==0 ) return 0;
+ sqliteNextChar(zString);
+ zPattern++;
+ break;
+ }
+ default: {
+ if( c != UpperToLower[*zString] ) return 0;
+ zPattern++;
+ zString++;
+ break;
+ }
+ }
+ }
+ return *zString==0;
+}
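+
+/*
+** (Editorial illustration, not part of the original source.)  LIKE matching
+** examples; '%' and '_' are the wildcards and case is ignored:
+**
+**     sqliteLikeCompare("abc%", "ABCDEF")    returns 1
+**     sqliteLikeCompare("a_c",  "aXc")       returns 1
+**     sqliteLikeCompare("a_c",  "ac")        returns 0
+**     sqliteLikeCompare("%def", "abcdef")    returns 1
+*/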
+
+/*
+** Change the sqlite.magic from SQLITE_MAGIC_OPEN to SQLITE_MAGIC_BUSY.
+** Return an error (non-zero) if the magic was not SQLITE_MAGIC_OPEN
+** when this routine is called.
+**
+** This routine is an attempt to detect if two threads use the
+** same sqlite* pointer at the same time. There is a race
+** condition so it is possible that the error is not detected.
+** But usually the problem will be seen. The result will be an
+** error which can be used to debug the application that is
+** using SQLite incorrectly.
+**
+** Ticket #202: If db->magic is not a valid open value, take care not
+** to modify the db structure at all. It could be that db is a stale
+** pointer. In other words, it could be that there has been a prior
+** call to sqlite_close(db) and db has been deallocated. And we do
+** not want to write into deallocated memory.
+*/
+int sqliteSafetyOn(sqlite *db){
+ if( db->magic==SQLITE_MAGIC_OPEN ){
+ db->magic = SQLITE_MAGIC_BUSY;
+ return 0;
+ }else if( db->magic==SQLITE_MAGIC_BUSY || db->magic==SQLITE_MAGIC_ERROR
+ || db->want_to_close ){
+ db->magic = SQLITE_MAGIC_ERROR;
+ db->flags |= SQLITE_Interrupt;
+ }
+ return 1;
+}
+
+/*
+** Change the magic from SQLITE_MAGIC_BUSY to SQLITE_MAGIC_OPEN.
+** Return an error (non-zero) if the magic was not SQLITE_MAGIC_BUSY
+** when this routine is called.
+*/
+int sqliteSafetyOff(sqlite *db){
+ if( db->magic==SQLITE_MAGIC_BUSY ){
+ db->magic = SQLITE_MAGIC_OPEN;
+ return 0;
+ }else if( db->magic==SQLITE_MAGIC_OPEN || db->magic==SQLITE_MAGIC_ERROR
+ || db->want_to_close ){
+ db->magic = SQLITE_MAGIC_ERROR;
+ db->flags |= SQLITE_Interrupt;
+ }
+ return 1;
+}
+
+/*
+** Check to make sure we are not currently executing an sqlite_exec().
+** If we are currently in an sqlite_exec(), return true and set
+** sqlite.magic to SQLITE_MAGIC_ERROR. This will cause a complete
+** shutdown of the database.
+**
+** This routine is used to try to detect when API routines are called
+** at the wrong time or in the wrong sequence.
+*/
+int sqliteSafetyCheck(sqlite *db){
+ if( db->pVdbe!=0 ){
+ db->magic = SQLITE_MAGIC_ERROR;
+ return 1;
+ }
+ return 0;
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/src/vacuum.c b/usr/src/cmd/svc/configd/sqlite/src/vacuum.c
new file mode 100644
index 0000000000..ba425a7916
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/vacuum.c
@@ -0,0 +1,330 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2003 April 6
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code used to implement the VACUUM command.
+**
+** Most of the code in this file may be omitted by defining the
+** SQLITE_OMIT_VACUUM macro.
+**
+** $Id: vacuum.c,v 1.13.2.2 2004/06/04 19:07:54 drh Exp $
+*/
+#include "sqliteInt.h"
+#include "os.h"
+
+/*
+** A structure for holding a dynamic string - a string that can grow
+** without bound.
+*/
+typedef struct dynStr dynStr;
+struct dynStr {
+ char *z; /* Text of the string in space obtained from sqliteMalloc() */
+ int nAlloc; /* Amount of space allocated to z[] */
+ int nUsed; /* Next unused slot in z[] */
+};
+
+/*
+** A structure that holds the vacuum context
+*/
+typedef struct vacuumStruct vacuumStruct;
+struct vacuumStruct {
+ sqlite *dbOld; /* Original database */
+ sqlite *dbNew; /* New database */
+ char **pzErrMsg; /* Write errors here */
+ int rc; /* Set to non-zero on an error */
+ const char *zTable; /* Name of a table being copied */
+ const char *zPragma; /* Pragma to execute with results */
+ dynStr s1, s2; /* Two dynamic strings */
+};
+
+#if !defined(SQLITE_OMIT_VACUUM) || SQLITE_OMIT_VACUUM
+/*
+** Append text to a dynamic string
+*/
+static void appendText(dynStr *p, const char *zText, int nText){
+ if( nText<0 ) nText = strlen(zText);
+ if( p->z==0 || p->nUsed + nText + 1 >= p->nAlloc ){
+ char *zNew;
+ p->nAlloc = p->nUsed + nText + 1000;
+ zNew = sqliteRealloc(p->z, p->nAlloc);
+ if( zNew==0 ){
+ sqliteFree(p->z);
+ memset(p, 0, sizeof(*p));
+ return;
+ }
+ p->z = zNew;
+ }
+ memcpy(&p->z[p->nUsed], zText, nText+1);
+ p->nUsed += nText;
+}
+
+/*
+** Append text to a dynamic string, having first put the text in quotes.
+*/
+static void appendQuoted(dynStr *p, const char *zText){
+ int i, j;
+ appendText(p, "'", 1);
+ for(i=j=0; zText[i]; i++){
+ if( zText[i]=='\'' ){
+ appendText(p, &zText[j], i-j+1);
+ j = i + 1;
+ appendText(p, "'", 1);
+ }
+ }
+ if( j<i ){
+ appendText(p, &zText[j], i-j);
+ }
+ appendText(p, "'", 1);
+}
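+
+/*
+** (Editorial illustration, not part of the original source.)  appendQuoted()
+** produces an SQL string literal, doubling embedded quotes so that values
+** copied by the callbacks below survive re-parsing:
+**
+**     dynStr s;
+**     memset(&s, 0, sizeof(s));
+**     appendQuoted(&s, "O'Brien");
+**     s.z now holds the literal 'O''Brien'
+**     sqliteFree(s.z);
+*/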
+
+/*
+** Execute statements of SQL. If an error occurs, write the error
+** message into *pzErrMsg and return non-zero.
+*/
+static int execsql(char **pzErrMsg, sqlite *db, const char *zSql){
+ char *zErrMsg = 0;
+ int rc;
+
+ /* printf("***** executing *****\n%s\n", zSql); */
+ rc = sqlite_exec(db, zSql, 0, 0, &zErrMsg);
+ if( zErrMsg ){
+ sqliteSetString(pzErrMsg, zErrMsg, (char*)0);
+ sqlite_freemem(zErrMsg);
+ }
+ return rc;
+}
+
+/*
+** This is the second stage callback. Each invocation contains all the
+** data for a single row of a single table in the original database. This
+** routine must write that information into the new database.
+*/
+static int vacuumCallback2(void *pArg, int argc, char **argv, char **NotUsed){
+ vacuumStruct *p = (vacuumStruct*)pArg;
+ const char *zSep = "(";
+ int i;
+
+ if( argv==0 ) return 0;
+ p->s2.nUsed = 0;
+ appendText(&p->s2, "INSERT INTO ", -1);
+ appendQuoted(&p->s2, p->zTable);
+ appendText(&p->s2, " VALUES", -1);
+ for(i=0; i<argc; i++){
+ appendText(&p->s2, zSep, 1);
+ zSep = ",";
+ if( argv[i]==0 ){
+ appendText(&p->s2, "NULL", 4);
+ }else{
+ appendQuoted(&p->s2, argv[i]);
+ }
+ }
+ appendText(&p->s2,")", 1);
+ p->rc = execsql(p->pzErrMsg, p->dbNew, p->s2.z);
+ return p->rc;
+}
+
+/*
+** This is the first stage callback. Each invocation contains three
+** arguments where are taken from the SQLITE_MASTER table of the original
+** database: (1) the entry type, (2) the entry name, and (3) the SQL for
+** the entry. In all cases, execute the SQL of the third argument.
+** For tables, run a query to select all entries in that table and
+** transfer them to the second-stage callback.
+*/
+static int vacuumCallback1(void *pArg, int argc, char **argv, char **NotUsed){
+ vacuumStruct *p = (vacuumStruct*)pArg;
+ int rc = 0;
+ assert( argc==3 );
+ if( argv==0 ) return 0;
+ assert( argv[0]!=0 );
+ assert( argv[1]!=0 );
+ assert( argv[2]!=0 );
+ rc = execsql(p->pzErrMsg, p->dbNew, argv[2]);
+ if( rc==SQLITE_OK && strcmp(argv[0],"table")==0 ){
+ char *zErrMsg = 0;
+ p->s1.nUsed = 0;
+ appendText(&p->s1, "SELECT * FROM ", -1);
+ appendQuoted(&p->s1, argv[1]);
+ p->zTable = argv[1];
+ rc = sqlite_exec(p->dbOld, p->s1.z, vacuumCallback2, p, &zErrMsg);
+ if( zErrMsg ){
+ sqliteSetString(p->pzErrMsg, zErrMsg, (char*)0);
+ sqlite_freemem(zErrMsg);
+ }
+ }
+ if( rc!=SQLITE_ABORT ) p->rc = rc;
+ return rc;
+}
+
+/*
+** This callback is used to transfer PRAGMA settings from one database
+** to the other. The value in argv[0] should be passed to a pragma
+** identified by ((vacuumStruct*)pArg)->zPragma.
+*/
+static int vacuumCallback3(void *pArg, int argc, char **argv, char **NotUsed){
+ vacuumStruct *p = (vacuumStruct*)pArg;
+ char zBuf[200];
+ assert( argc==1 );
+ if( argv==0 ) return 0;
+ assert( argv[0]!=0 );
+ assert( strlen(p->zPragma)<100 );
+ assert( strlen(argv[0])<30 );
+ sprintf(zBuf,"PRAGMA %s=%s;", p->zPragma, argv[0]);
+ p->rc = execsql(p->pzErrMsg, p->dbNew, zBuf);
+ return p->rc;
+}
+
+/*
+** Generate a random name of 20 characters in length.
+*/
+static void randomName(unsigned char *zBuf){
+ static const unsigned char zChars[] =
+ "abcdefghijklmnopqrstuvwxyz"
+ "0123456789";
+ int i;
+ sqliteRandomness(20, zBuf);
+ for(i=0; i<20; i++){
+ zBuf[i] = zChars[ zBuf[i]%(sizeof(zChars)-1) ];
+ }
+}
+#endif
+
+/*
+** The non-standard VACUUM command is used to clean up the database,
+** collapse free space, etc. It is modelled after the VACUUM command
+** in PostgreSQL.
+**
+** In version 1.0.x of SQLite, the VACUUM command would call
+** gdbm_reorganize() on all the database tables. But beginning
+** with 2.0.0, SQLite no longer uses GDBM so this command has
+** become a no-op.
+*/
+void sqliteVacuum(Parse *pParse, Token *pTableName){
+ Vdbe *v = sqliteGetVdbe(pParse);
+ sqliteVdbeAddOp(v, OP_Vacuum, 0, 0);
+ return;
+}
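+
+/*
+** (Editorial illustration, not part of the original source.)  From the
+** caller's point of view VACUUM is an ordinary statement; the parser routes
+** it through sqliteVacuum() above and the VDBE later calls sqliteRunVacuum():
+**
+**     char *zErr = 0;
+**     if( sqlite_exec(db, "VACUUM;", 0, 0, &zErr)!=SQLITE_OK ){
+**       fprintf(stderr, "vacuum failed: %s\n", zErr);
+**       sqlite_freemem(zErr);
+**     }
+*/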
+
+/*
+** This routine implements the OP_Vacuum opcode of the VDBE.
+*/
+int sqliteRunVacuum(char **pzErrMsg, sqlite *db){
+#if !defined(SQLITE_OMIT_VACUUM) || SQLITE_OMIT_VACUUM
+ const char *zFilename; /* full pathname of the database file */
+ int nFilename; /* number of characters in zFilename[] */
+ char *zTemp = 0; /* a temporary file in same directory as zFilename */
+ sqlite *dbNew = 0; /* The new vacuumed database */
+ int rc = SQLITE_OK; /* Return code from service routines */
+ int i; /* Loop counter */
+ char *zErrMsg; /* Error message */
+ vacuumStruct sVac; /* Information passed to callbacks */
+
+ /* These are all of the pragmas that need to be transferred over
+ ** to the new database */
+ static const char *zPragma[] = {
+ "default_synchronous",
+ "default_cache_size",
+ /* "default_temp_store", */
+ };
+
+ if( db->flags & SQLITE_InTrans ){
+ sqliteSetString(pzErrMsg, "cannot VACUUM from within a transaction",
+ (char*)0);
+ return SQLITE_ERROR;
+ }
+ if( db->flags & SQLITE_Interrupt ){
+ return SQLITE_INTERRUPT;
+ }
+ memset(&sVac, 0, sizeof(sVac));
+
+ /* Get the full pathname of the database file and create two
+ ** temporary filenames in the same directory as the original file.
+ */
+ zFilename = sqliteBtreeGetFilename(db->aDb[0].pBt);
+ if( zFilename==0 ){
+ /* This only happens with the in-memory database. VACUUM is a no-op
+ ** there, so just return */
+ return SQLITE_OK;
+ }
+ nFilename = strlen(zFilename);
+ zTemp = sqliteMalloc( nFilename+100 );
+ if( zTemp==0 ) return SQLITE_NOMEM;
+ strcpy(zTemp, zFilename);
+ for(i=0; i<10; i++){
+ zTemp[nFilename] = '-';
+ randomName((unsigned char*)&zTemp[nFilename+1]);
+ if( !sqliteOsFileExists(zTemp) ) break;
+ }
+ if( i>=10 ){
+ sqliteSetString(pzErrMsg, "unable to create a temporary database file "
+ "in the same directory as the original database", (char*)0);
+ goto end_of_vacuum;
+ }
+
+
+ dbNew = sqlite_open(zTemp, 0, &zErrMsg);
+ if( dbNew==0 ){
+ sqliteSetString(pzErrMsg, "unable to open a temporary database at ",
+ zTemp, " - ", zErrMsg, (char*)0);
+ goto end_of_vacuum;
+ }
+ if( (rc = execsql(pzErrMsg, db, "BEGIN"))!=0 ) goto end_of_vacuum;
+ if( (rc = execsql(pzErrMsg, dbNew, "PRAGMA synchronous=off; BEGIN"))!=0 ){
+ goto end_of_vacuum;
+ }
+
+ sVac.dbOld = db;
+ sVac.dbNew = dbNew;
+ sVac.pzErrMsg = pzErrMsg;
+ for(i=0; rc==SQLITE_OK && i<sizeof(zPragma)/sizeof(zPragma[0]); i++){
+ char zBuf[200];
+ assert( strlen(zPragma[i])<100 );
+ sprintf(zBuf, "PRAGMA %s;", zPragma[i]);
+ sVac.zPragma = zPragma[i];
+ rc = sqlite_exec(db, zBuf, vacuumCallback3, &sVac, &zErrMsg);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite_exec(db,
+ "SELECT type, name, sql FROM sqlite_master "
+ "WHERE sql NOT NULL AND type!='view' "
+ "UNION ALL "
+ "SELECT type, name, sql FROM sqlite_master "
+ "WHERE sql NOT NULL AND type=='view'",
+ vacuumCallback1, &sVac, &zErrMsg);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqliteBtreeCopyFile(db->aDb[0].pBt, dbNew->aDb[0].pBt);
+ sqlite_exec(db, "COMMIT", 0, 0, 0);
+ sqliteResetInternalSchema(db, 0);
+ }
+
+end_of_vacuum:
+ if( rc && zErrMsg!=0 ){
+ sqliteSetString(pzErrMsg, "unable to vacuum database - ",
+ zErrMsg, (char*)0);
+ }
+ sqlite_exec(db, "ROLLBACK", 0, 0, 0);
+ if( (dbNew && (dbNew->flags & SQLITE_Interrupt))
+ || (db->flags & SQLITE_Interrupt) ){
+ rc = SQLITE_INTERRUPT;
+ }
+ if( dbNew ) sqlite_close(dbNew);
+ sqliteOsDelete(zTemp);
+ sqliteFree(zTemp);
+ sqliteFree(sVac.s1.z);
+ sqliteFree(sVac.s2.z);
+ if( zErrMsg ) sqlite_freemem(zErrMsg);
+ if( rc==SQLITE_ABORT && sVac.rc!=SQLITE_INTERRUPT ) sVac.rc = SQLITE_ERROR;
+ return sVac.rc;
+#endif
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/src/vdbe.c b/usr/src/cmd/svc/configd/sqlite/src/vdbe.c
new file mode 100644
index 0000000000..b4a501463e
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/vdbe.c
@@ -0,0 +1,4928 @@
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** The code in this file implements the execution method of the
+** Virtual Database Engine (VDBE). A separate file ("vdbeaux.c")
+** handles housekeeping details such as creating and deleting
+** VDBE instances. This file is solely interested in executing
+** the VDBE program.
+**
+** In the external interface, an "sqlite_vm*" is an opaque pointer
+** to a VDBE.
+**
+** The SQL parser generates a program which is then executed by
+** the VDBE to do the work of the SQL statement. VDBE programs are
+** similar in form to assembly language. The program consists of
+** a linear sequence of operations. Each operation has an opcode
+** and 3 operands. Operands P1 and P2 are integers. Operand P3
+** is a null-terminated string. The P2 operand must be non-negative.
+** Opcodes will typically ignore one or more operands. Many opcodes
+** ignore all three operands.
+**
+** Computation results are stored on a stack. Each entry on the
+** stack is either an integer, a null-terminated string, a floating point
+** number, or the SQL "NULL" value.  An implicit conversion from one
+** type to the other occurs as necessary.
+**
+** Most of the code in this file is taken up by the sqliteVdbeExec()
+** function which does the work of interpreting a VDBE program.
+** But other routines are also provided to help in building up
+** a program instruction by instruction.
+**
+** Various scripts scan this source file in order to generate HTML
+** documentation, headers files, or other derived files. The formatting
+** of the code in this file is, therefore, important. See other comments
+** in this file for details. If in doubt, do not deviate from existing
+** commenting and indentation practices when changing or adding code.
+**
+** $Id: vdbe.c,v 1.268.2.3 2004/07/19 19:30:50 drh Exp $
+*/
+#include "sqliteInt.h"
+#include "os.h"
+#include <ctype.h>
+#include "vdbeInt.h"
+
+/*
+** The following global variable is incremented every time a cursor
+** moves, either by the OP_MoveTo or the OP_Next opcode. The test
+** procedures use this information to make sure that indices are
+** working correctly. This variable has no function other than to
+** help verify the correct operation of the library.
+*/
+int sqlite_search_count = 0;
+
+/*
+** When this global variable is positive, it gets decremented once before
+** each instruction in the VDBE.  When it reaches zero, the SQLITE_Interrupt
+** bit of the db.flags field is set in order to simulate an interrupt.
+**
+** This facility is used for testing purposes only. It does not function
+** in an ordinary build.
+*/
+int sqlite_interrupt_count = 0;
+
+/*
+** Advance the virtual machine to the next output row.
+**
+** The return value will be either SQLITE_BUSY, SQLITE_DONE,
+** SQLITE_ROW, SQLITE_ERROR, or SQLITE_MISUSE.
+**
+** SQLITE_BUSY means that the virtual machine attempted to open
+** a locked database and there is no busy callback registered.
+** Call sqlite_step() again to retry the open. *pN is set to 0
+** and *pazColName and *pazValue are both set to NULL.
+**
+** SQLITE_DONE means that the virtual machine has finished
+** executing. sqlite_step() should not be called again on this
+** virtual machine. *pN and *pazColName are set appropriately
+** but *pazValue is set to NULL.
+**
+** SQLITE_ROW means that the virtual machine has generated another
+** row of the result set. *pN is set to the number of columns in
+** the row. *pazColName is set to the names of the columns followed
+** by the column datatypes. *pazValue is set to the values of each
+** column in the row. The value of the i-th column is (*pazValue)[i].
+** The name of the i-th column is (*pazColName)[i] and the datatype
+** of the i-th column is (*pazColName)[i+*pN].
+**
+** SQLITE_ERROR means that a run-time error (such as a constraint
+** violation) has occurred. The details of the error will be returned
+** by the next call to sqlite_finalize(). sqlite_step() should not
+** be called again on the VM.
+**
+** SQLITE_MISUSE means that this routine was called inappropriately.
+** Perhaps it was called on a virtual machine that had already been
+** finalized or on one that had previously returned SQLITE_ERROR or
+** SQLITE_DONE.  Or it could be the case that the same database connection
+** is being used simultaneously by two or more threads.
+*/
+int sqlite_step(
+ sqlite_vm *pVm, /* The virtual machine to execute */
+ int *pN, /* OUT: Number of columns in result */
+ const char ***pazValue, /* OUT: Column data */
+ const char ***pazColName /* OUT: Column names and datatypes */
+){
+ Vdbe *p = (Vdbe*)pVm;
+ sqlite *db;
+ int rc;
+
+ if( p->magic!=VDBE_MAGIC_RUN ){
+ return SQLITE_MISUSE;
+ }
+ db = p->db;
+ if( sqliteSafetyOn(db) ){
+ p->rc = SQLITE_MISUSE;
+ return SQLITE_MISUSE;
+ }
+ if( p->explain ){
+ rc = sqliteVdbeList(p);
+ }else{
+ rc = sqliteVdbeExec(p);
+ }
+ if( rc==SQLITE_DONE || rc==SQLITE_ROW ){
+ if( pazColName ) *pazColName = (const char**)p->azColName;
+ if( pN ) *pN = p->nResColumn;
+ }else{
+ if( pazColName) *pazColName = 0;
+ if( pN ) *pN = 0;
+ }
+ if( pazValue ){
+ if( rc==SQLITE_ROW ){
+ *pazValue = (const char**)p->azResColumn;
+ }else{
+ *pazValue = 0;
+ }
+ }
+ if( sqliteSafetyOff(db) ){
+ return SQLITE_MISUSE;
+ }
+ return rc;
+}
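+
+/*
+** (Editorial illustration, not part of the original source.)  The usual
+** compile/step/finalize loop around sqlite_step().  Error details are
+** reported by sqlite_finalize(), not by sqlite_step() itself; the table
+** name is only an example:
+**
+**     sqlite_vm *pVm;
+**     const char **azValue, **azColName;
+**     int nCol, rc;
+**     char *zErr = 0;
+**
+**     if( sqlite_compile(db, "SELECT * FROM t1", 0, &pVm, &zErr)==SQLITE_OK ){
+**       while( (rc = sqlite_step(pVm, &nCol, &azValue, &azColName))==SQLITE_ROW ){
+**         ... each azValue[0..nCol-1] holds one column of the row ...
+**       }
+**       sqlite_finalize(pVm, &zErr);
+**     }
+*/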
+
+/*
+** Insert a new aggregate element and make it the element that
+** has focus.
+**
+** Return 0 on success and 1 if memory is exhausted.
+*/
+static int AggInsert(Agg *p, char *zKey, int nKey){
+ AggElem *pElem, *pOld;
+ int i;
+ Mem *pMem;
+ pElem = sqliteMalloc( sizeof(AggElem) + nKey +
+ (p->nMem-1)*sizeof(pElem->aMem[0]) );
+ if( pElem==0 ) return 1;
+ pElem->zKey = (char*)&pElem->aMem[p->nMem];
+ memcpy(pElem->zKey, zKey, nKey);
+ pElem->nKey = nKey;
+ pOld = sqliteHashInsert(&p->hash, pElem->zKey, pElem->nKey, pElem);
+ if( pOld!=0 ){
+ assert( pOld==pElem ); /* Malloc failed on insert */
+ sqliteFree(pOld);
+ return 0;
+ }
+ for(i=0, pMem=pElem->aMem; i<p->nMem; i++, pMem++){
+ pMem->flags = MEM_Null;
+ }
+ p->pCurrent = pElem;
+ return 0;
+}
+
+/*
+** Get the AggElem currently in focus
+*/
+#define AggInFocus(P) ((P).pCurrent ? (P).pCurrent : _AggInFocus(&(P)))
+static AggElem *_AggInFocus(Agg *p){
+ HashElem *pElem = sqliteHashFirst(&p->hash);
+ if( pElem==0 ){
+ AggInsert(p,"",1);
+ pElem = sqliteHashFirst(&p->hash);
+ }
+ return pElem ? sqliteHashData(pElem) : 0;
+}
+
+/*
+** Convert the given stack entity into a string if it isn't one
+** already.
+*/
+#define Stringify(P) if(((P)->flags & MEM_Str)==0){hardStringify(P);}
+static int hardStringify(Mem *pStack){
+ int fg = pStack->flags;
+ if( fg & MEM_Real ){
+ sqlite_snprintf(sizeof(pStack->zShort),pStack->zShort,"%.15g",pStack->r);
+ }else if( fg & MEM_Int ){
+ sqlite_snprintf(sizeof(pStack->zShort),pStack->zShort,"%d",pStack->i);
+ }else{
+ pStack->zShort[0] = 0;
+ }
+ pStack->z = pStack->zShort;
+ pStack->n = strlen(pStack->zShort)+1;
+ pStack->flags = MEM_Str | MEM_Short;
+ return 0;
+}
+
+/*
+** Convert the given stack entity into a string that has been obtained
+** from sqliteMalloc(). This is different from Stringify() above in that
+** Stringify() will use the NBFS bytes of static string space if the string
+** will fit but this routine always mallocs for space.
+** Return non-zero if we run out of memory.
+*/
+#define Dynamicify(P) (((P)->flags & MEM_Dyn)==0 ? hardDynamicify(P):0)
+static int hardDynamicify(Mem *pStack){
+ int fg = pStack->flags;
+ char *z;
+ if( (fg & MEM_Str)==0 ){
+ hardStringify(pStack);
+ }
+ assert( (fg & MEM_Dyn)==0 );
+ z = sqliteMallocRaw( pStack->n );
+ if( z==0 ) return 1;
+ memcpy(z, pStack->z, pStack->n);
+ pStack->z = z;
+ pStack->flags |= MEM_Dyn;
+ return 0;
+}
+
+/*
+** An ephemeral string value (signified by the MEM_Ephem flag) contains
+** a pointer to a dynamically allocated string where some other entity
+** is responsible for deallocating that string. Because the stack entry
+** does not control the string, it might be deleted without the stack
+** entry knowing it.
+**
+** This routine converts an ephemeral string into a dynamically allocated
+** string that the stack entry itself controls. In other words, it
+** converts an MEM_Ephem string into an MEM_Dyn string.
+*/
+#define Deephemeralize(P) \
+ if( ((P)->flags&MEM_Ephem)!=0 && hardDeephem(P) ){ goto no_mem;}
+static int hardDeephem(Mem *pStack){
+ char *z;
+ assert( (pStack->flags & MEM_Ephem)!=0 );
+ z = sqliteMallocRaw( pStack->n );
+ if( z==0 ) return 1;
+ memcpy(z, pStack->z, pStack->n);
+ pStack->z = z;
+ pStack->flags &= ~MEM_Ephem;
+ pStack->flags |= MEM_Dyn;
+ return 0;
+}
+
+/*
+** Release the memory associated with the given stack level. This
+** leaves the Mem.flags field in an inconsistent state.
+*/
+#define Release(P) \
+ if ((P)->flags & MEM_Dyn) { \
+ sqliteFree((P)->z); \
+ (P)->z = NULL; \
+ }
+
+/*
+** Pop the stack N times.
+*/
+static void popStack(Mem **ppTos, int N){
+ Mem *pTos = *ppTos;
+ while( N>0 ){
+ N--;
+ Release(pTos);
+ pTos--;
+ }
+ *ppTos = pTos;
+}
+
+/*
+** Return TRUE if zNum is a 32-bit signed integer and write
+** the value of the integer into *pNum. If zNum is not an integer
+** or is an integer that is too large to be expressed with just 32
+** bits, then return false.
+**
+** Under Linux (RedHat 7.2) this routine is much faster than atoi()
+** for converting strings into integers.
+*/
+static int toInt(const char *zNum, int *pNum){
+ int v = 0;
+ int neg;
+ int i, c;
+ if( *zNum=='-' ){
+ neg = 1;
+ zNum++;
+ }else if( *zNum=='+' ){
+ neg = 0;
+ zNum++;
+ }else{
+ neg = 0;
+ }
+ for(i=0; (c=zNum[i])>='0' && c<='9'; i++){
+ v = v*10 + c - '0';
+ }
+ *pNum = neg ? -v : v;
+ return c==0 && i>0 && (i<10 || (i==10 && memcmp(zNum,"2147483647",10)<=0));
+}
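+
+/*
+** (Editorial illustration, not part of the original source.)  toInt() only
+** accepts complete, in-range decimal strings:
+**
+**     int v;
+**     toInt("-123", &v)         returns 1, v is -123
+**     toInt("123abc", &v)       returns 0  (trailing text)
+**     toInt("2147483648", &v)   returns 0  (does not fit in 32 bits)
+*/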
+
+/*
+** Convert the given stack entity into an integer if it isn't one
+** already.
+**
+** Any prior string or real representation is invalidated.
+** NULLs are converted into 0.
+*/
+#define Integerify(P) if(((P)->flags&MEM_Int)==0){ hardIntegerify(P); }
+static void hardIntegerify(Mem *pStack){
+ if( pStack->flags & MEM_Real ){
+ pStack->i = (int)pStack->r;
+ Release(pStack);
+ }else if( pStack->flags & MEM_Str ){
+ toInt(pStack->z, &pStack->i);
+ Release(pStack);
+ }else{
+ pStack->i = 0;
+ }
+ pStack->flags = MEM_Int;
+}
+
+/*
+** Get a valid Real representation for the given stack element.
+**
+** Any prior string or integer representation is retained.
+** NULLs are converted into 0.0.
+*/
+#define Realify(P) if(((P)->flags&MEM_Real)==0){ hardRealify(P); }
+static void hardRealify(Mem *pStack){
+ if( pStack->flags & MEM_Str ){
+ pStack->r = sqliteAtoF(pStack->z, 0);
+ }else if( pStack->flags & MEM_Int ){
+ pStack->r = pStack->i;
+ }else{
+ pStack->r = 0.0;
+ }
+ pStack->flags |= MEM_Real;
+}
+
+/*
+** The parameters are pointers to the head of two sorted lists
+** of Sorter structures. Merge these two lists together and return
+** a single sorted list. This routine forms the core of the merge-sort
+** algorithm.
+**
+** In the case of a tie, left sorts in front of right.
+*/
+static Sorter *Merge(Sorter *pLeft, Sorter *pRight){
+ Sorter sHead;
+ Sorter *pTail;
+ pTail = &sHead;
+ pTail->pNext = 0;
+ while( pLeft && pRight ){
+ int c = sqliteSortCompare(pLeft->zKey, pRight->zKey);
+ if( c<=0 ){
+ pTail->pNext = pLeft;
+ pLeft = pLeft->pNext;
+ }else{
+ pTail->pNext = pRight;
+ pRight = pRight->pNext;
+ }
+ pTail = pTail->pNext;
+ }
+ if( pLeft ){
+ pTail->pNext = pLeft;
+ }else if( pRight ){
+ pTail->pNext = pRight;
+ }
+ return sHead.pNext;
+}
+
+/*
+** The following routine works like a replacement for the standard
+** library routine fgets(). The difference is in how end-of-line (EOL)
+** is handled. Standard fgets() uses LF for EOL under unix, CRLF
+** under windows, and CR under mac. This routine accepts any of these
+** character sequences as an EOL mark. The EOL mark is replaced by
+** a single LF character in zBuf.
+*/
+static char *vdbe_fgets(char *zBuf, int nBuf, FILE *in){
+ int i, c;
+ for(i=0; i<nBuf-1 && (c=getc(in))!=EOF; i++){
+ zBuf[i] = c;
+ if( c=='\r' || c=='\n' ){
+ if( c=='\r' ){
+ zBuf[i] = '\n';
+ c = getc(in);
+ if( c!=EOF && c!='\n' ) ungetc(c, in);
+ }
+ i++;
+ break;
+ }
+ }
+ zBuf[i] = 0;
+ return i>0 ? zBuf : 0;
+}
+
+/*
+** Make sure there is space in the Vdbe structure to hold at least
+** mxCursor cursors. If there is not currently enough space, then
+** allocate more.
+**
+** If a memory allocation error occurs, return 1. Return 0 if
+** everything works.
+*/
+static int expandCursorArraySize(Vdbe *p, int mxCursor){
+ if( mxCursor>=p->nCursor ){
+ Cursor *aCsr = sqliteRealloc( p->aCsr, (mxCursor+1)*sizeof(Cursor) );
+ if( aCsr==0 ) return 1;
+ p->aCsr = aCsr;
+ memset(&p->aCsr[p->nCursor], 0, sizeof(Cursor)*(mxCursor+1-p->nCursor));
+ p->nCursor = mxCursor+1;
+ }
+ return 0;
+}
+
+#ifdef VDBE_PROFILE
+/*
+** The following routine only works on pentium-class processors.
+** It uses the RDTSC opcode to read cycle count value out of the
+** processor and returns that value. This can be used for high-res
+** profiling.
+*/
+__inline__ unsigned long long int hwtime(void){
+ unsigned long long int x;
+ __asm__("rdtsc\n\t"
+ "mov %%edx, %%ecx\n\t"
+ :"=A" (x));
+ return x;
+}
+#endif
+
+/*
+** The CHECK_FOR_INTERRUPT macro defined here looks to see if the
+** sqlite_interrupt() routine has been called. If it has been, then
+** processing of the VDBE program is interrupted.
+**
+** This macro is added to every instruction that does a jump in order to
+** implement a loop.  This test used to be on every single instruction,
+** but that meant more testing than we needed.  By only testing the
+** flag on jump instructions, we get a (small) speed improvement.
+*/
+#define CHECK_FOR_INTERRUPT \
+ if( db->flags & SQLITE_Interrupt ) goto abort_due_to_interrupt;
+
+
+/*
+** Execute as much of a VDBE program as we can then return.
+**
+** sqliteVdbeMakeReady() must be called before this routine in order to
+** close the program with a final OP_Halt and to set up the callbacks
+** and the error message pointer.
+**
+** Whenever a row or result data is available, this routine will either
+** invoke the result callback (if there is one) or return with
+** SQLITE_ROW.
+**
+** If an attempt is made to open a locked database, then this routine
+** will either invoke the busy callback (if there is one) or it will
+** return SQLITE_BUSY.
+**
+** If an error occurs, an error message is written to memory obtained
+** from sqliteMalloc() and p->zErrMsg is made to point to that memory.
+** The error code is stored in p->rc and this routine returns SQLITE_ERROR.
+**
+** If the callback ever returns non-zero, then the program exits
+** immediately. There will be no error message but the p->rc field is
+** set to SQLITE_ABORT and this routine will return SQLITE_ERROR.
+**
+** A memory allocation error causes p->rc to be set to SQLITE_NOMEM and this
+** routine to return SQLITE_ERROR.
+**
+** Other fatal errors return SQLITE_ERROR.
+**
+** After this routine has finished, sqliteVdbeFinalize() should be
+** used to clean up the mess that was left behind.
+*/
+int sqliteVdbeExec(
+ Vdbe *p /* The VDBE */
+){
+ int pc; /* The program counter */
+ Op *pOp; /* Current operation */
+ int rc = SQLITE_OK; /* Value to return */
+ sqlite *db = p->db; /* The database */
+ Mem *pTos; /* Top entry in the operand stack */
+ char zBuf[100]; /* Space to sprintf() an integer */
+#ifdef VDBE_PROFILE
+ unsigned long long start; /* CPU clock count at start of opcode */
+ int origPc; /* Program counter at start of opcode */
+#endif
+#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
+ int nProgressOps = 0; /* Opcodes executed since progress callback. */
+#endif
+
+ if( p->magic!=VDBE_MAGIC_RUN ) return SQLITE_MISUSE;
+ assert( db->magic==SQLITE_MAGIC_BUSY );
+ assert( p->rc==SQLITE_OK || p->rc==SQLITE_BUSY );
+ p->rc = SQLITE_OK;
+ assert( p->explain==0 );
+ if( sqlite_malloc_failed ) goto no_mem;
+ pTos = p->pTos;
+ if( p->popStack ){
+ popStack(&pTos, p->popStack);
+ p->popStack = 0;
+ }
+ CHECK_FOR_INTERRUPT;
+ for(pc=p->pc; rc==SQLITE_OK; pc++){
+ assert( pc>=0 && pc<p->nOp );
+ assert( pTos<=&p->aStack[pc] );
+#ifdef VDBE_PROFILE
+ origPc = pc;
+ start = hwtime();
+#endif
+ pOp = &p->aOp[pc];
+
+ /* Only allow tracing if NDEBUG is not defined.
+ */
+#ifndef NDEBUG
+ if( p->trace ){
+ sqliteVdbePrintOp(p->trace, pc, pOp);
+ }
+#endif
+
+ /* Check to see if we need to simulate an interrupt. This only happens
+ ** if we have a special test build.
+ */
+#ifdef SQLITE_TEST
+ if( sqlite_interrupt_count>0 ){
+ sqlite_interrupt_count--;
+ if( sqlite_interrupt_count==0 ){
+ sqlite_interrupt(db);
+ }
+ }
+#endif
+
+#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
+ /* Call the progress callback if it is configured and the required number
+ ** of VDBE ops have been executed (either since this invocation of
+ ** sqliteVdbeExec() or since last time the progress callback was called).
+ ** If the progress callback returns non-zero, exit the virtual machine with
+ ** a return code SQLITE_ABORT.
+ */
+ if( db->xProgress ){
+ if( db->nProgressOps==nProgressOps ){
+ if( db->xProgress(db->pProgressArg)!=0 ){
+ rc = SQLITE_ABORT;
+ continue; /* skip to the next iteration of the for loop */
+ }
+ nProgressOps = 0;
+ }
+ nProgressOps++;
+ }
+#endif
+
+ switch( pOp->opcode ){
+
+/*****************************************************************************
+** What follows is a massive switch statement where each case implements a
+** separate instruction in the virtual machine. If we follow the usual
+** indentation conventions, each case should be indented by 6 spaces. But
+** that is a lot of wasted space on the left margin. So the code within
+** the switch statement will break with convention and be flush-left. Another
+** big comment (similar to this one) will mark the point in the code where
+** we transition back to normal indentation.
+**
+** The formatting of each case is important. The makefile for SQLite
+** generates two C files "opcodes.h" and "opcodes.c" by scanning this
+** file looking for lines that begin with "case OP_". The opcodes.h files
+** will be filled with #defines that give unique integer values to each
+** opcode and the opcodes.c file is filled with an array of strings where
+** each string is the symbolic name for the corresponding opcode.
+**
+** Documentation about VDBE opcodes is generated by scanning this file
+** for lines that contain "Opcode:".  That line and all subsequent
+** comment lines are used in the generation of the opcode.html documentation
+** file.
+**
+** SUMMARY:
+**
+** Formatting is important to scripts that scan this file.
+** Do not deviate from the formatting style currently in use.
+**
+*****************************************************************************/
+
+/* Opcode: Goto * P2 *
+**
+** An unconditional jump to address P2.
+** The next instruction executed will be
+** the one at index P2 from the beginning of
+** the program.
+*/
+case OP_Goto: {
+ CHECK_FOR_INTERRUPT;
+ pc = pOp->p2 - 1;
+ break;
+}
+
+/* Opcode: Gosub * P2 *
+**
+** Push the current address plus 1 onto the return address stack
+** and then jump to address P2.
+**
+** The return address stack is of limited depth. If too many
+** OP_Gosub operations occur without intervening OP_Returns, then
+** the return address stack will fill up and processing will abort
+** with a fatal error.
+*/
+case OP_Gosub: {
+ if( p->returnDepth>=sizeof(p->returnStack)/sizeof(p->returnStack[0]) ){
+ sqliteSetString(&p->zErrMsg, "return address stack overflow", (char*)0);
+ p->rc = SQLITE_INTERNAL;
+ return SQLITE_ERROR;
+ }
+ p->returnStack[p->returnDepth++] = pc+1;
+ pc = pOp->p2 - 1;
+ break;
+}
+
+/* Opcode: Return * * *
+**
+** Jump immediately to the next instruction after the last unreturned
+** OP_Gosub. If an OP_Return has occurred for all OP_Gosubs, then
+** processing aborts with a fatal error.
+*/
+case OP_Return: {
+ if( p->returnDepth<=0 ){
+ sqliteSetString(&p->zErrMsg, "return address stack underflow", (char*)0);
+ p->rc = SQLITE_INTERNAL;
+ return SQLITE_ERROR;
+ }
+ p->returnDepth--;
+ pc = p->returnStack[p->returnDepth] - 1;
+ break;
+}
+
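+/* A hand-written, illustrative fragment (not generated code) showing how
+** Gosub and Return cooperate:
+**
+**     addr  opcode   p1  p2
+**     ----  -------  --  --
+**      0    Gosub     0   2    <- push return address 1, jump to 2
+**      1    Halt      0   0    <- resumed here by the Return below
+**      2    ...subroutine body...
+**      3    Return    0   0    <- pop 1, continue at address 1
+**
+** Because the return address stack is a small fixed-size array, deeply
+** nested Gosubs without matching Returns fail with the overflow error above.
+*/
+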
+/* Opcode: Halt P1 P2 *
+**
+** Exit immediately. All open cursors, Lists, Sorts, etc are closed
+** automatically.
+**
+** P1 is the result code returned by sqlite_exec(). For a normal
+** halt, this should be SQLITE_OK (0). For errors, it can be some
+** other value. If P1!=0 then P2 will determine whether or not to
+** roll back the current transaction. Do not roll back if P2==OE_Fail.
+** Do the rollback if P2==OE_Rollback. If P2==OE_Abort, then back
+** out all changes that have occurred during this execution of the
+** VDBE, but do not roll back the transaction.
+**
+** There is an implied "Halt 0 0 0" instruction inserted at the very end of
+** every program. So a jump past the last instruction of the program
+** is the same as executing Halt.
+*/
+case OP_Halt: {
+ p->magic = VDBE_MAGIC_HALT;
+ p->pTos = pTos;
+ if( pOp->p1!=SQLITE_OK ){
+ p->rc = pOp->p1;
+ p->errorAction = pOp->p2;
+ if( pOp->p3 ){
+ sqliteSetString(&p->zErrMsg, pOp->p3, (char*)0);
+ }
+ return SQLITE_ERROR;
+ }else{
+ p->rc = SQLITE_OK;
+ return SQLITE_DONE;
+ }
+}
+
+/* Opcode: Integer P1 * P3
+**
+** The integer value P1 is pushed onto the stack. If P3 is not zero
+** then it is assumed to be a string representation of the same integer.
+*/
+case OP_Integer: {
+ pTos++;
+ pTos->i = pOp->p1;
+ pTos->flags = MEM_Int;
+ if( pOp->p3 ){
+ pTos->z = pOp->p3;
+ pTos->flags |= MEM_Str | MEM_Static;
+ pTos->n = strlen(pOp->p3)+1;
+ }
+ break;
+}
+
+/* Opcode: String * * P3
+**
+** The string value P3 is pushed onto the stack. If P3==0 then a
+** NULL is pushed onto the stack.
+*/
+case OP_String: {
+ char *z = pOp->p3;
+ pTos++;
+ if( z==0 ){
+ pTos->flags = MEM_Null;
+ }else{
+ pTos->z = z;
+ pTos->n = strlen(z) + 1;
+ pTos->flags = MEM_Str | MEM_Static;
+ }
+ break;
+}
+
+/* Opcode: Variable P1 * *
+**
+** Push the value of variable P1 onto the stack. A variable is
+** an unknown in the original SQL string as handed to sqlite_compile().
+** Any occurrence of the '?' character in the original SQL is considered
+** a variable. Variables in the SQL string are numbered from left to
+** right beginning with 1. The values of variables are set using the
+** sqlite_bind() API.
+*/
+case OP_Variable: {
+ int j = pOp->p1 - 1;
+ pTos++;
+ if( j>=0 && j<p->nVar && p->azVar[j]!=0 ){
+ pTos->z = p->azVar[j];
+ pTos->n = p->anVar[j];
+ pTos->flags = MEM_Str | MEM_Static;
+ }else{
+ pTos->flags = MEM_Null;
+ }
+ break;
+}
+
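+/* For illustration (the statement is made up): in
+** "SELECT * FROM t1 WHERE a=? AND b=?" the first '?' is variable 1 and the
+** second is variable 2, so "Variable 1" pushes whatever was bound to the
+** first '?', or NULL if nothing was ever bound to it.
+*/
+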
+/* Opcode: Pop P1 * *
+**
+** P1 elements are popped off of the top of stack and discarded.
+*/
+case OP_Pop: {
+ assert( pOp->p1>=0 );
+ popStack(&pTos, pOp->p1);
+ assert( pTos>=&p->aStack[-1] );
+ break;
+}
+
+/* Opcode: Dup P1 P2 *
+**
+** A copy of the P1-th element of the stack
+** is made and pushed onto the top of the stack.
+** The top of the stack is element 0. So the
+** instruction "Dup 0 0 0" will make a copy of the
+** top of the stack.
+**
+** If the content of the P1-th element is a dynamically
+** allocated string, then a new copy of that string
+** is made if P2==0. If P2!=0, then just a pointer
+** to the string is copied.
+**
+** Also see the Pull instruction.
+*/
+case OP_Dup: {
+ Mem *pFrom = &pTos[-pOp->p1];
+ assert( pFrom<=pTos && pFrom>=p->aStack );
+ pTos++;
+ memcpy(pTos, pFrom, sizeof(*pFrom)-NBFS);
+ if( pTos->flags & MEM_Str ){
+ if( pOp->p2 && (pTos->flags & (MEM_Dyn|MEM_Ephem)) ){
+ pTos->flags &= ~MEM_Dyn;
+ pTos->flags |= MEM_Ephem;
+ }else if( pTos->flags & MEM_Short ){
+ memcpy(pTos->zShort, pFrom->zShort, pTos->n);
+ pTos->z = pTos->zShort;
+ }else if( (pTos->flags & MEM_Static)==0 ){
+ pTos->z = sqliteMallocRaw(pFrom->n);
+ if( sqlite_malloc_failed ) goto no_mem;
+ memcpy(pTos->z, pFrom->z, pFrom->n);
+ pTos->flags &= ~(MEM_Static|MEM_Ephem|MEM_Short);
+ pTos->flags |= MEM_Dyn;
+ }
+ }
+ break;
+}
+
+/* Opcode: Pull P1 * *
+**
+** The P1-th element is removed from its current location on
+** the stack and pushed back on top of the stack. The
+** top of the stack is element 0, so "Pull 0 0 0" is
+** a no-op. "Pull 1 0 0" swaps the top two elements of
+** the stack.
+**
+** See also the Dup instruction.
+*/
+case OP_Pull: {
+ Mem *pFrom = &pTos[-pOp->p1];
+ int i;
+ Mem ts;
+
+ ts = *pFrom;
+ Deephemeralize(pTos);
+ for(i=0; i<pOp->p1; i++, pFrom++){
+ Deephemeralize(&pFrom[1]);
+ *pFrom = pFrom[1];
+ assert( (pFrom->flags & MEM_Ephem)==0 );
+ if( pFrom->flags & MEM_Short ){
+ assert( pFrom->flags & MEM_Str );
+ assert( pFrom->z==pFrom[1].zShort );
+ pFrom->z = pFrom->zShort;
+ }
+ }
+ *pTos = ts;
+ if( pTos->flags & MEM_Short ){
+ assert( pTos->flags & MEM_Str );
+ assert( pTos->z==pTos[-pOp->p1].zShort );
+ pTos->z = pTos->zShort;
+ }
+ break;
+}
+
+/* Opcode: Push P1 * *
+**
+** Overwrite the value of the P1-th element down on the
+** stack (P1==0 is the top of the stack) with the value
+** of the top of the stack. Then pop the top of the stack.
+*/
+case OP_Push: {
+ Mem *pTo = &pTos[-pOp->p1];
+
+ assert( pTo>=p->aStack );
+ Deephemeralize(pTos);
+ Release(pTo);
+ *pTo = *pTos;
+ if( pTo->flags & MEM_Short ){
+ assert( pTo->z==pTos->zShort );
+ pTo->z = pTo->zShort;
+ }
+ pTos--;
+ break;
+}
+
+
+/* Opcode: ColumnName P1 P2 P3
+**
+** P3 becomes the P1-th column name (first is 0). An array of pointers
+** to all column names is passed as the 4th parameter to the callback.
+** If P2==1 then this is the last column in the result set and thus the
+** number of columns in the result set will be P1+1. There must be at least
+** one OP_ColumnName with a P2==1 before invoking OP_Callback and the
+** number of columns specified in OP_Callback must be one more than the P1
+** value of the OP_ColumnName that has P2==1.
+*/
+case OP_ColumnName: {
+ assert( pOp->p1>=0 && pOp->p1<p->nOp );
+ p->azColName[pOp->p1] = pOp->p3;
+ p->nCallback = 0;
+ if( pOp->p2 ) p->nResColumn = pOp->p1+1;
+ break;
+}
+
+/* Opcode: Callback P1 * *
+**
+** Pop P1 values off the stack and form them into an array. Then
+** invoke the callback function using the newly formed array as the
+** 3rd parameter.
+*/
+case OP_Callback: {
+ int i;
+ char **azArgv = p->zArgv;
+ Mem *pCol;
+
+ pCol = &pTos[1-pOp->p1];
+ assert( pCol>=p->aStack );
+ for(i=0; i<pOp->p1; i++, pCol++){
+ if( pCol->flags & MEM_Null ){
+ azArgv[i] = 0;
+ }else{
+ Stringify(pCol);
+ azArgv[i] = pCol->z;
+ }
+ }
+ azArgv[i] = 0;
+ p->nCallback++;
+ p->azResColumn = azArgv;
+ assert( p->nResColumn==pOp->p1 );
+ p->popStack = pOp->p1;
+ p->pc = pc + 1;
+ p->pTos = pTos;
+ return SQLITE_ROW;
+}
+
+/* Opcode: Concat P1 P2 P3
+**
+** Look at the first P1 elements of the stack. Append them all
+** together with the lowest element first. Use P3 as a separator.
+** Put the result on the top of the stack. The original P1 elements
+** are popped from the stack if P2==0 and retained if P2==1. If
+** any element of the stack is NULL, then the result is NULL.
+**
+** If P3 is NULL, then use no separator. When P1==1, this routine
+** makes a copy of the top stack element into memory obtained
+** from sqliteMalloc().
+*/
+case OP_Concat: {
+ char *zNew;
+ int nByte;
+ int nField;
+ int i, j;
+ char *zSep;
+ int nSep;
+ Mem *pTerm;
+
+ nField = pOp->p1;
+ zSep = pOp->p3;
+ if( zSep==0 ) zSep = "";
+ nSep = strlen(zSep);
+ assert( &pTos[1-nField] >= p->aStack );
+ nByte = 1 - nSep;
+ pTerm = &pTos[1-nField];
+ for(i=0; i<nField; i++, pTerm++){
+ if( pTerm->flags & MEM_Null ){
+ nByte = -1;
+ break;
+ }else{
+ Stringify(pTerm);
+ nByte += pTerm->n - 1 + nSep;
+ }
+ }
+ if( nByte<0 ){
+ if( pOp->p2==0 ){
+ popStack(&pTos, nField);
+ }
+ pTos++;
+ pTos->flags = MEM_Null;
+ break;
+ }
+ zNew = sqliteMallocRaw( nByte );
+ if( zNew==0 ) goto no_mem;
+ j = 0;
+ pTerm = &pTos[1-nField];
+ for(i=j=0; i<nField; i++, pTerm++){
+ assert( pTerm->flags & MEM_Str );
+ memcpy(&zNew[j], pTerm->z, pTerm->n-1);
+ j += pTerm->n-1;
+ if( nSep>0 && i<nField-1 ){
+ memcpy(&zNew[j], zSep, nSep);
+ j += nSep;
+ }
+ }
+ zNew[j] = 0;
+ if( pOp->p2==0 ){
+ popStack(&pTos, nField);
+ }
+ pTos++;
+ pTos->n = nByte;
+ pTos->flags = MEM_Str|MEM_Dyn;
+ pTos->z = zNew;
+ break;
+}
+
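+/* A worked example of the size computation above: concatenating "ab" and
+** "cd" with separator "," gives nSep==1 and, since each string's n counts
+** its terminator, nByte = (1-1) + (3-1+1) + (3-1+1) = 6.  That is exactly
+** the space needed for "ab,cd" plus its null terminator; the separator is
+** written between fields but not after the last one.
+*/
+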
+/* Opcode: Add * * *
+**
+** Pop the top two elements from the stack, add them together,
+** and push the result back onto the stack. If either element
+** is a string then it is converted to a double using the atof()
+** function before the addition.
+** If either operand is NULL, the result is NULL.
+*/
+/* Opcode: Multiply * * *
+**
+** Pop the top two elements from the stack, multiply them together,
+** and push the result back onto the stack. If either element
+** is a string then it is converted to a double using the atof()
+** function before the multiplication.
+** If either operand is NULL, the result is NULL.
+*/
+/* Opcode: Subtract * * *
+**
+** Pop the top two elements from the stack, subtract the
+** first (what was on top of the stack) from the second (the
+** next on stack)
+** and push the result back onto the stack. If either element
+** is a string then it is converted to a double using the atof()
+** function before the subtraction.
+** If either operand is NULL, the result is NULL.
+*/
+/* Opcode: Divide * * *
+**
+** Pop the top two elements from the stack, divide the
+** first (what was on top of the stack) from the second (the
+** next on stack)
+** and push the result back onto the stack. If either element
+** is a string then it is converted to a double using the atof()
+** function before the division. Division by zero returns NULL.
+** If either operand is NULL, the result is NULL.
+*/
+/* Opcode: Remainder * * *
+**
+** Pop the top two elements from the stack, divide the
+** first (what was on top of the stack) from the second (the
+** next on stack)
+** and push the remainder after division onto the stack. If either element
+** is a string then it is converted to a double using the atof()
+** function before the division. Division by zero returns NULL.
+** If either operand is NULL, the result is NULL.
+*/
+case OP_Add:
+case OP_Subtract:
+case OP_Multiply:
+case OP_Divide:
+case OP_Remainder: {
+ Mem *pNos = &pTos[-1];
+ assert( pNos>=p->aStack );
+ if( ((pTos->flags | pNos->flags) & MEM_Null)!=0 ){
+ Release(pTos);
+ pTos--;
+ Release(pTos);
+ pTos->flags = MEM_Null;
+ }else if( (pTos->flags & pNos->flags & MEM_Int)==MEM_Int ){
+ int a, b;
+ a = pTos->i;
+ b = pNos->i;
+ switch( pOp->opcode ){
+ case OP_Add: b += a; break;
+ case OP_Subtract: b -= a; break;
+ case OP_Multiply: b *= a; break;
+ case OP_Divide: {
+ if( a==0 ) goto divide_by_zero;
+ b /= a;
+ break;
+ }
+ default: {
+ if( a==0 ) goto divide_by_zero;
+ b %= a;
+ break;
+ }
+ }
+ Release(pTos);
+ pTos--;
+ Release(pTos);
+ pTos->i = b;
+ pTos->flags = MEM_Int;
+ }else{
+ double a, b;
+ Realify(pTos);
+ Realify(pNos);
+ a = pTos->r;
+ b = pNos->r;
+ switch( pOp->opcode ){
+ case OP_Add: b += a; break;
+ case OP_Subtract: b -= a; break;
+ case OP_Multiply: b *= a; break;
+ case OP_Divide: {
+ if( a==0.0 ) goto divide_by_zero;
+ b /= a;
+ break;
+ }
+ default: {
+ int ia = (int)a;
+ int ib = (int)b;
+ if( ia==0.0 ) goto divide_by_zero;
+ b = ib % ia;
+ break;
+ }
+ }
+ Release(pTos);
+ pTos--;
+ Release(pTos);
+ pTos->r = b;
+ pTos->flags = MEM_Real;
+ }
+ break;
+
+divide_by_zero:
+ Release(pTos);
+ pTos--;
+ Release(pTos);
+ pTos->flags = MEM_Null;
+ break;
+}
+
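+/* A small illustrative trace of the operand order used above: after
+** "Integer 7" and "Integer 3" the stack holds 7 (next on stack) and 3 (top
+** of stack).  Subtract computes NOS-TOS and leaves 4, Divide computes
+** NOS/TOS and leaves 2 (integer division), and Remainder leaves 1.  A zero
+** divisor or a NULL operand produces NULL instead.
+*/
+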
+/* Opcode: Function P1 * P3
+**
+** Invoke a user function (P3 is a pointer to a Function structure that
+** defines the function) with P1 string arguments taken from the stack.
+** Pop all arguments from the stack and push back the result.
+**
+** See also: AggFunc
+*/
+case OP_Function: {
+ int n, i;
+ Mem *pArg;
+ char **azArgv;
+ sqlite_func ctx;
+
+ n = pOp->p1;
+ pArg = &pTos[1-n];
+ azArgv = p->zArgv;
+ for(i=0; i<n; i++, pArg++){
+ if( pArg->flags & MEM_Null ){
+ azArgv[i] = 0;
+ }else{
+ Stringify(pArg);
+ azArgv[i] = pArg->z;
+ }
+ }
+ ctx.pFunc = (FuncDef*)pOp->p3;
+ ctx.s.flags = MEM_Null;
+ ctx.s.z = 0;
+ ctx.isError = 0;
+ ctx.isStep = 0;
+ if( sqliteSafetyOff(db) ) goto abort_due_to_misuse;
+ (*ctx.pFunc->xFunc)(&ctx, n, (const char**)azArgv);
+ if( sqliteSafetyOn(db) ) goto abort_due_to_misuse;
+ popStack(&pTos, n);
+ pTos++;
+ *pTos = ctx.s;
+ if( pTos->flags & MEM_Short ){
+ pTos->z = pTos->zShort;
+ }
+ if( ctx.isError ){
+ sqliteSetString(&p->zErrMsg,
+ (pTos->flags & MEM_Str)!=0 ? pTos->z : "user function error", (char*)0);
+ rc = SQLITE_ERROR;
+ }
+ break;
+}
+
+/* Opcode: BitAnd * * *
+**
+** Pop the top two elements from the stack. Convert both elements
+** to integers. Push back onto the stack the bit-wise AND of the
+** two elements.
+** If either operand is NULL, the result is NULL.
+*/
+/* Opcode: BitOr * * *
+**
+** Pop the top two elements from the stack. Convert both elements
+** to integers. Push back onto the stack the bit-wise OR of the
+** two elements.
+** If either operand is NULL, the result is NULL.
+*/
+/* Opcode: ShiftLeft * * *
+**
+** Pop the top two elements from the stack. Convert both elements
+** to integers. Push back onto the stack the top element shifted
+** left by N bits where N is the second element on the stack.
+** If either operand is NULL, the result is NULL.
+*/
+/* Opcode: ShiftRight * * *
+**
+** Pop the top two elements from the stack. Convert both elements
+** to integers. Push back onto the stack the top element shifted
+** right by N bits where N is the second element on the stack.
+** If either operand is NULL, the result is NULL.
+*/
+case OP_BitAnd:
+case OP_BitOr:
+case OP_ShiftLeft:
+case OP_ShiftRight: {
+ Mem *pNos = &pTos[-1];
+ int a, b;
+
+ assert( pNos>=p->aStack );
+ if( (pTos->flags | pNos->flags) & MEM_Null ){
+ popStack(&pTos, 2);
+ pTos++;
+ pTos->flags = MEM_Null;
+ break;
+ }
+ Integerify(pTos);
+ Integerify(pNos);
+ a = pTos->i;
+ b = pNos->i;
+ switch( pOp->opcode ){
+ case OP_BitAnd: a &= b; break;
+ case OP_BitOr: a |= b; break;
+ case OP_ShiftLeft: a <<= b; break;
+ case OP_ShiftRight: a >>= b; break;
+ default: /* CANT HAPPEN */ break;
+ }
+ assert( (pTos->flags & MEM_Dyn)==0 );
+ assert( (pNos->flags & MEM_Dyn)==0 );
+ pTos--;
+ Release(pTos);
+ pTos->i = a;
+ pTos->flags = MEM_Int;
+ break;
+}
+
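+/* Illustrative trace of the shift operand order: after "Integer 3" and
+** "Integer 16" the stack holds 3 (next on stack) and 16 (top of stack).
+** ShiftLeft computes TOS<<NOS and leaves 128; ShiftRight computes TOS>>NOS
+** and leaves 2.  BitAnd and BitOr are symmetric, so operand order does not
+** matter for them.
+*/
+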
+/* Opcode: AddImm P1 * *
+**
+** Add the value P1 to whatever is on top of the stack. The result
+** is always an integer.
+**
+** To force the top of the stack to be an integer, just add 0.
+*/
+case OP_AddImm: {
+ assert( pTos>=p->aStack );
+ Integerify(pTos);
+ pTos->i += pOp->p1;
+ break;
+}
+
+/* Opcode: ForceInt P1 P2 *
+**
+** Convert the top of the stack into an integer. If the current top of
+** the stack is not numeric (meaning that it is a NULL or a string that
+** does not look like an integer or floating point number) then pop the
+** stack and jump to P2. If the top of the stack is numeric then
+** convert it into the least integer that is greater than or equal to its
+** current value if P1==0, or to the least integer that is strictly
+** greater than its current value if P1==1.
+*/
+case OP_ForceInt: {
+ int v;
+ assert( pTos>=p->aStack );
+ if( (pTos->flags & (MEM_Int|MEM_Real))==0
+ && ((pTos->flags & MEM_Str)==0 || sqliteIsNumber(pTos->z)==0) ){
+ Release(pTos);
+ pTos--;
+ pc = pOp->p2 - 1;
+ break;
+ }
+ if( pTos->flags & MEM_Int ){
+ v = pTos->i + (pOp->p1!=0);
+ }else{
+ Realify(pTos);
+ v = (int)pTos->r;
+ if( pTos->r>(double)v ) v++;
+ if( pOp->p1 && pTos->r==(double)v ) v++;
+ }
+ Release(pTos);
+ pTos->i = v;
+ pTos->flags = MEM_Int;
+ break;
+}
+
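+/* Worked examples of the conversion above: with P1==0 the value 2.3 becomes
+** 3, 2.0 stays 2, and -2.3 becomes -2 (the ceiling in each case).  With
+** P1==1 the value 2.0 becomes 3, because the result must be strictly
+** greater than the original.  A NULL or a non-numeric string is popped and
+** the jump to P2 is taken instead.
+*/
+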
+/* Opcode: MustBeInt P1 P2 *
+**
+** Force the top of the stack to be an integer. If the top of the
+** stack is not an integer and cannot be converted into an integer
+** without data loss, then jump immediately to P2, or if P2==0
+** raise an SQLITE_MISMATCH exception.
+**
+** If the top of the stack is not an integer and P2 is not zero and
+** P1 is 1, then the stack is popped. In all other cases, the depth
+** of the stack is unchanged.
+*/
+case OP_MustBeInt: {
+ assert( pTos>=p->aStack );
+ if( pTos->flags & MEM_Int ){
+ /* Do nothing */
+ }else if( pTos->flags & MEM_Real ){
+ int i = (int)pTos->r;
+ double r = (double)i;
+ if( r!=pTos->r ){
+ goto mismatch;
+ }
+ pTos->i = i;
+ }else if( pTos->flags & MEM_Str ){
+ int v;
+ if( !toInt(pTos->z, &v) ){
+ double r;
+ if( !sqliteIsNumber(pTos->z) ){
+ goto mismatch;
+ }
+ Realify(pTos);
+ v = (int)pTos->r;
+ r = (double)v;
+ if( r!=pTos->r ){
+ goto mismatch;
+ }
+ }
+ pTos->i = v;
+ }else{
+ goto mismatch;
+ }
+ Release(pTos);
+ pTos->flags = MEM_Int;
+ break;
+
+mismatch:
+ if( pOp->p2==0 ){
+ rc = SQLITE_MISMATCH;
+ goto abort_due_to_error;
+ }else{
+ if( pOp->p1 ) popStack(&pTos, 1);
+ pc = pOp->p2 - 1;
+ }
+ break;
+}
+
+/* Opcode: Eq P1 P2 *
+**
+** Pop the top two elements from the stack. If they are equal, then
+** jump to instruction P2. Otherwise, continue to the next instruction.
+**
+** If either operand is NULL (and thus if the result is unknown) then
+** take the jump if P1 is true.
+**
+** If both values are numeric, they are converted to doubles using atof()
+** and compared for equality that way. Otherwise the strcmp() library
+** routine is used for the comparison. For a pure text comparison
+** use OP_StrEq.
+**
+** If P2 is zero, do not jump. Instead, push an integer 1 onto the
+** stack if the jump would have been taken, or a 0 if not. Push a
+** NULL if either operand was NULL.
+*/
+/* Opcode: Ne P1 P2 *
+**
+** Pop the top two elements from the stack. If they are not equal, then
+** jump to instruction P2. Otherwise, continue to the next instruction.
+**
+** If either operand is NULL (and thus if the result is unknown) then
+** take the jump if P1 is true.
+**
+** If both values are numeric, they are converted to doubles using atof()
+** and compared in that format. Otherwise the strcmp() library
+** routine is used for the comparison. For a pure text comparison
+** use OP_StrNe.
+**
+** If P2 is zero, do not jump. Instead, push an integer 1 onto the
+** stack if the jump would have been taken, or a 0 if not. Push a
+** NULL if either operand was NULL.
+*/
+/* Opcode: Lt P1 P2 *
+**
+** Pop the top two elements from the stack. If second element (the
+** next on stack) is less than the first (the top of stack), then
+** jump to instruction P2. Otherwise, continue to the next instruction.
+** In other words, jump if NOS<TOS.
+**
+** If either operand is NULL (and thus if the result is unknown) then
+** take the jump if P1 is true.
+**
+** If both values are numeric, they are converted to doubles using atof()
+** and compared in that format. Numeric values are always less than
+** non-numeric values. If both operands are non-numeric, the strcmp() library
+** routine is used for the comparison. For a pure text comparison
+** use OP_StrLt.
+**
+** If P2 is zero, do not jump. Instead, push an integer 1 onto the
+** stack if the jump would have been taken, or a 0 if not. Push a
+** NULL if either operand was NULL.
+*/
+/* Opcode: Le P1 P2 *
+**
+** Pop the top two elements from the stack. If second element (the
+** next on stack) is less than or equal to the first (the top of stack),
+** then jump to instruction P2. In other words, jump if NOS<=TOS.
+**
+** If either operand is NULL (and thus if the result is unknown) then
+** take the jump if P1 is true.
+**
+** If both values are numeric, they are converted to doubles using atof()
+** and compared in that format. Numeric values are always less than
+** non-numeric values. If both operands are non-numeric, the strcmp() library
+** routine is used for the comparison. For a pure text comparison
+** use OP_StrLe.
+**
+** If P2 is zero, do not jump. Instead, push an integer 1 onto the
+** stack if the jump would have been taken, or a 0 if not. Push a
+** NULL if either operand was NULL.
+*/
+/* Opcode: Gt P1 P2 *
+**
+** Pop the top two elements from the stack. If second element (the
+** next on stack) is greater than the first (the top of stack),
+** then jump to instruction P2. In other words, jump if NOS>TOS.
+**
+** If either operand is NULL (and thus if the result is unknown) then
+** take the jump if P1 is true.
+**
+** If both values are numeric, they are converted to doubles using atof()
+** and compared in that format. Numeric values are always less than
+** non-numeric values. If both operands are non-numeric, the strcmp() library
+** routine is used for the comparison. For a pure text comparison
+** use OP_StrGt.
+**
+** If P2 is zero, do not jump. Instead, push an integer 1 onto the
+** stack if the jump would have been taken, or a 0 if not. Push a
+** NULL if either operand was NULL.
+*/
+/* Opcode: Ge P1 P2 *
+**
+** Pop the top two elements from the stack. If second element (the next
+** on stack) is greater than or equal to the first (the top of stack),
+** then jump to instruction P2. In other words, jump if NOS>=TOS.
+**
+** If either operand is NULL (and thus if the result is unknown) then
+** take the jump if P1 is true.
+**
+** If both values are numeric, they are converted to doubles using atof()
+** and compared in that format. Numeric values are always less than
+** non-numeric values. If both operands are non-numeric, the strcmp() library
+** routine is used for the comparison. For a pure text comparison
+** use OP_StrGe.
+**
+** If P2 is zero, do not jump. Instead, push an integer 1 onto the
+** stack if the jump would have been taken, or a 0 if not. Push a
+** NULL if either operand was NULL.
+*/
+case OP_Eq:
+case OP_Ne:
+case OP_Lt:
+case OP_Le:
+case OP_Gt:
+case OP_Ge: {
+ Mem *pNos = &pTos[-1];
+ int c, v;
+ int ft, fn;
+ assert( pNos>=p->aStack );
+ ft = pTos->flags;
+ fn = pNos->flags;
+ if( (ft | fn) & MEM_Null ){
+ popStack(&pTos, 2);
+ if( pOp->p2 ){
+ if( pOp->p1 ) pc = pOp->p2-1;
+ }else{
+ pTos++;
+ pTos->flags = MEM_Null;
+ }
+ break;
+ }else if( (ft & fn & MEM_Int)==MEM_Int ){
+ c = pNos->i - pTos->i;
+ }else if( (ft & MEM_Int)!=0 && (fn & MEM_Str)!=0 && toInt(pNos->z,&v) ){
+ c = v - pTos->i;
+ }else if( (fn & MEM_Int)!=0 && (ft & MEM_Str)!=0 && toInt(pTos->z,&v) ){
+ c = pNos->i - v;
+ }else{
+ Stringify(pTos);
+ Stringify(pNos);
+ c = sqliteCompare(pNos->z, pTos->z);
+ }
+ switch( pOp->opcode ){
+ case OP_Eq: c = c==0; break;
+ case OP_Ne: c = c!=0; break;
+ case OP_Lt: c = c<0; break;
+ case OP_Le: c = c<=0; break;
+ case OP_Gt: c = c>0; break;
+ default: c = c>=0; break;
+ }
+ popStack(&pTos, 2);
+ if( pOp->p2 ){
+ if( c ) pc = pOp->p2-1;
+ }else{
+ pTos++;
+ pTos->i = c;
+ pTos->flags = MEM_Int;
+ }
+ break;
+}
+/* INSERT NO CODE HERE!
+**
+** The opcode numbers are extracted from this source file by doing
+**
+** grep '^case OP_' vdbe.c | ... >opcodes.h
+**
+** The opcodes are numbered in the order that they appear in this file.
+** But in order for the expression generating code to work right, the
+** string comparison operators that follow must be numbered exactly 6
+** greater than the numeric comparison opcodes above. So no other
+** cases can appear between the two.
+*/
+/* Opcode: StrEq P1 P2 *
+**
+** Pop the top two elements from the stack. If they are equal, then
+** jump to instruction P2. Otherwise, continue to the next instruction.
+**
+** If either operand is NULL (and thus if the result is unknown) then
+** take the jump if P1 is true.
+**
+** The strcmp() library routine is used for the comparison. For a
+** numeric comparison, use OP_Eq.
+**
+** If P2 is zero, do not jump. Instead, push an integer 1 onto the
+** stack if the jump would have been taken, or a 0 if not. Push a
+** NULL if either operand was NULL.
+*/
+/* Opcode: StrNe P1 P2 *
+**
+** Pop the top two elements from the stack. If they are not equal, then
+** jump to instruction P2. Otherwise, continue to the next instruction.
+**
+** If either operand is NULL (and thus if the result is unknown) then
+** take the jump if P1 is true.
+**
+** The strcmp() library routine is used for the comparison. For a
+** numeric comparison, use OP_Ne.
+**
+** If P2 is zero, do not jump. Instead, push an integer 1 onto the
+** stack if the jump would have been taken, or a 0 if not. Push a
+** NULL if either operand was NULL.
+*/
+/* Opcode: StrLt P1 P2 *
+**
+** Pop the top two elements from the stack. If second element (the
+** next on stack) is less than the first (the top of stack), then
+** jump to instruction P2. Otherwise, continue to the next instruction.
+** In other words, jump if NOS<TOS.
+**
+** If either operand is NULL (and thus if the result is unknown) then
+** take the jump if P1 is true.
+**
+** The strcmp() library routine is used for the comparison. For a
+** numeric comparison, use OP_Lt.
+**
+** If P2 is zero, do not jump. Instead, push an integer 1 onto the
+** stack if the jump would have been taken, or a 0 if not. Push a
+** NULL if either operand was NULL.
+*/
+/* Opcode: StrLe P1 P2 *
+**
+** Pop the top two elements from the stack. If second element (the
+** next on stack) is less than or equal to the first (the top of stack),
+** then jump to instruction P2. In other words, jump if NOS<=TOS.
+**
+** If either operand is NULL (and thus if the result is unknown) then
+** take the jump if P1 is true.
+**
+** The strcmp() library routine is used for the comparison. For a
+** numeric comparison, use OP_Le.
+**
+** If P2 is zero, do not jump. Instead, push an integer 1 onto the
+** stack if the jump would have been taken, or a 0 if not. Push a
+** NULL if either operand was NULL.
+*/
+/* Opcode: StrGt P1 P2 *
+**
+** Pop the top two elements from the stack. If second element (the
+** next on stack) is greater than the first (the top of stack),
+** then jump to instruction P2. In other words, jump if NOS>TOS.
+**
+** If either operand is NULL (and thus if the result is unknown) then
+** take the jump if P1 is true.
+**
+** The strcmp() library routine is used for the comparison. For a
+** numeric comparison, use OP_Gt.
+**
+** If P2 is zero, do not jump. Instead, push an integer 1 onto the
+** stack if the jump would have been taken, or a 0 if not. Push a
+** NULL if either operand was NULL.
+*/
+/* Opcode: StrGe P1 P2 *
+**
+** Pop the top two elements from the stack. If second element (the next
+** on stack) is greater than or equal to the first (the top of stack),
+** then jump to instruction P2. In other words, jump if NOS>=TOS.
+**
+** If either operand is NULL (and thus if the result is unknown) then
+** take the jump if P1 is true.
+**
+** The strcmp() library routine is used for the comparison. For a
+** numeric comparison, use OP_Ge.
+**
+** If P2 is zero, do not jump. Instead, push an integer 1 onto the
+** stack if the jump would have been taken, or a 0 if not. Push a
+** NULL if either operand was NULL.
+*/
+case OP_StrEq:
+case OP_StrNe:
+case OP_StrLt:
+case OP_StrLe:
+case OP_StrGt:
+case OP_StrGe: {
+ Mem *pNos = &pTos[-1];
+ int c;
+ assert( pNos>=p->aStack );
+ if( (pNos->flags | pTos->flags) & MEM_Null ){
+ popStack(&pTos, 2);
+ if( pOp->p2 ){
+ if( pOp->p1 ) pc = pOp->p2-1;
+ }else{
+ pTos++;
+ pTos->flags = MEM_Null;
+ }
+ break;
+ }else{
+ Stringify(pTos);
+ Stringify(pNos);
+ c = strcmp(pNos->z, pTos->z);
+ }
+ /* The asserts on each case of the following switch are there to verify
+ ** that string comparison opcodes are always exactly 6 greater than the
+ ** corresponding numeric comparison opcodes. The code generator depends
+ ** on this fact.
+ */
+ switch( pOp->opcode ){
+ case OP_StrEq: c = c==0; assert( pOp->opcode-6==OP_Eq ); break;
+ case OP_StrNe: c = c!=0; assert( pOp->opcode-6==OP_Ne ); break;
+ case OP_StrLt: c = c<0; assert( pOp->opcode-6==OP_Lt ); break;
+ case OP_StrLe: c = c<=0; assert( pOp->opcode-6==OP_Le ); break;
+ case OP_StrGt: c = c>0; assert( pOp->opcode-6==OP_Gt ); break;
+ default: c = c>=0; assert( pOp->opcode-6==OP_Ge ); break;
+ }
+ popStack(&pTos, 2);
+ if( pOp->p2 ){
+ if( c ) pc = pOp->p2-1;
+ }else{
+ pTos++;
+ pTos->flags = MEM_Int;
+ pTos->i = c;
+ }
+ break;
+}
+
+/* Opcode: And * * *
+**
+** Pop two values off the stack. Take the logical AND of the
+** two values and push the resulting boolean value back onto the
+** stack.
+*/
+/* Opcode: Or * * *
+**
+** Pop two values off the stack. Take the logical OR of the
+** two values and push the resulting boolean value back onto the
+** stack.
+*/
+case OP_And:
+case OP_Or: {
+ Mem *pNos = &pTos[-1];
+ int v1, v2; /* 0==TRUE, 1==FALSE, 2==UNKNOWN or NULL */
+
+ assert( pNos>=p->aStack );
+ if( pTos->flags & MEM_Null ){
+ v1 = 2;
+ }else{
+ Integerify(pTos);
+ v1 = pTos->i==0;
+ }
+ if( pNos->flags & MEM_Null ){
+ v2 = 2;
+ }else{
+ Integerify(pNos);
+ v2 = pNos->i==0;
+ }
+ if( pOp->opcode==OP_And ){
+ static const unsigned char and_logic[] = { 0, 1, 2, 1, 1, 1, 2, 1, 2 };
+ v1 = and_logic[v1*3+v2];
+ }else{
+ static const unsigned char or_logic[] = { 0, 0, 0, 0, 1, 2, 0, 2, 2 };
+ v1 = or_logic[v1*3+v2];
+ }
+ popStack(&pTos, 2);
+ pTos++;
+ if( v1==2 ){
+ pTos->flags = MEM_Null;
+ }else{
+ pTos->i = v1==0;
+ pTos->flags = MEM_Int;
+ }
+ break;
+}
+
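+/* The lookup tables above implement SQL's three-valued logic.  Spelled out
+** with T=true, F=false and N=NULL/unknown:
+**
+**     AND | T  F  N          OR | T  F  N
+**     ----+---------         ---+---------
+**      T  | T  F  N           T | T  T  T
+**      F  | F  F  F           F | T  F  N
+**      N  | N  F  N           N | T  N  N
+**
+** In the encoding used above 0 stands for true, 1 for false and 2 for
+** unknown, which is why the final integer result is computed as v1==0.
+*/
+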
+/* Opcode: Negative * * *
+**
+** Treat the top of the stack as a numeric quantity. Replace it
+** with its additive inverse. If the top of the stack is NULL
+** its value is unchanged.
+*/
+/* Opcode: AbsValue * * *
+**
+** Treat the top of the stack as a numeric quantity. Replace it
+** with its absolute value. If the top of the stack is NULL
+** its value is unchanged.
+*/
+case OP_Negative:
+case OP_AbsValue: {
+ assert( pTos>=p->aStack );
+ if( pTos->flags & MEM_Real ){
+ Release(pTos);
+ if( pOp->opcode==OP_Negative || pTos->r<0.0 ){
+ pTos->r = -pTos->r;
+ }
+ pTos->flags = MEM_Real;
+ }else if( pTos->flags & MEM_Int ){
+ Release(pTos);
+ if( pOp->opcode==OP_Negative || pTos->i<0 ){
+ pTos->i = -pTos->i;
+ }
+ pTos->flags = MEM_Int;
+ }else if( pTos->flags & MEM_Null ){
+ /* Do nothing */
+ }else{
+ Realify(pTos);
+ Release(pTos);
+ if( pOp->opcode==OP_Negative || pTos->r<0.0 ){
+ pTos->r = -pTos->r;
+ }
+ pTos->flags = MEM_Real;
+ }
+ break;
+}
+
+/* Opcode: Not * * *
+**
+** Interpret the top of the stack as a boolean value. Replace it
+** with its complement. If the top of the stack is NULL its value
+** is unchanged.
+*/
+case OP_Not: {
+ assert( pTos>=p->aStack );
+ if( pTos->flags & MEM_Null ) break; /* Do nothing to NULLs */
+ Integerify(pTos);
+ Release(pTos);
+ pTos->i = !pTos->i;
+ pTos->flags = MEM_Int;
+ break;
+}
+
+/* Opcode: BitNot * * *
+**
+** Interpret the top of the stack as an integer value. Replace it
+** with its ones-complement. If the top of the stack is NULL its
+** value is unchanged.
+*/
+case OP_BitNot: {
+ assert( pTos>=p->aStack );
+ if( pTos->flags & MEM_Null ) break; /* Do nothing to NULLs */
+ Integerify(pTos);
+ Release(pTos);
+ pTos->i = ~pTos->i;
+ pTos->flags = MEM_Int;
+ break;
+}
+
+/* Opcode: Noop * * *
+**
+** Do nothing. This instruction is often useful as a jump
+** destination.
+*/
+case OP_Noop: {
+ break;
+}
+
+/* Opcode: If P1 P2 *
+**
+** Pop a single boolean from the stack. If the boolean popped is
+** true, then jump to p2. Otherwise continue to the next instruction.
+** An integer is false if zero and true otherwise. A string is
+** false if it has zero length and true otherwise.
+**
+** If the value popped off the stack is NULL, then take the jump if P1
+** is true and fall through if P1 is false.
+*/
+/* Opcode: IfNot P1 P2 *
+**
+** Pop a single boolean from the stack. If the boolean popped is
+** false, then jump to p2. Otherwise continue to the next instruction.
+** An integer is false if zero and true otherwise. A string is
+** false if it has zero length and true otherwise.
+**
+** If the value popped off the stack is NULL, then take the jump if P1
+** is true and fall through if P1 is false.
+*/
+case OP_If:
+case OP_IfNot: {
+ int c;
+ assert( pTos>=p->aStack );
+ if( pTos->flags & MEM_Null ){
+ c = pOp->p1;
+ }else{
+ Integerify(pTos);
+ c = pTos->i;
+ if( pOp->opcode==OP_IfNot ) c = !c;
+ }
+ assert( (pTos->flags & MEM_Dyn)==0 );
+ pTos--;
+ if( c ) pc = pOp->p2-1;
+ break;
+}
+
+/* Opcode: IsNull P1 P2 *
+**
+** If any of the top abs(P1) values on the stack are NULL, then jump
+** to P2. Pop the stack P1 times if P1>0. If P1<0 leave the stack
+** unchanged.
+*/
+case OP_IsNull: {
+ int i, cnt;
+ Mem *pTerm;
+ cnt = pOp->p1;
+ if( cnt<0 ) cnt = -cnt;
+ pTerm = &pTos[1-cnt];
+ assert( pTerm>=p->aStack );
+ for(i=0; i<cnt; i++, pTerm++){
+ if( pTerm->flags & MEM_Null ){
+ pc = pOp->p2-1;
+ break;
+ }
+ }
+ if( pOp->p1>0 ) popStack(&pTos, cnt);
+ break;
+}
+
+/* Opcode: NotNull P1 P2 *
+**
+** Jump to P2 if the top P1 values on the stack are all not NULL. Pop the
+** stack P1 times if P1 is greater than zero. If P1 is less than
+** zero then leave the stack unchanged.
+*/
+case OP_NotNull: {
+ int i, cnt;
+ cnt = pOp->p1;
+ if( cnt<0 ) cnt = -cnt;
+ assert( &pTos[1-cnt] >= p->aStack );
+ for(i=0; i<cnt && (pTos[1+i-cnt].flags & MEM_Null)==0; i++){}
+ if( i>=cnt ) pc = pOp->p2-1;
+ if( pOp->p1>0 ) popStack(&pTos, cnt);
+ break;
+}
+
+/* Opcode: MakeRecord P1 P2 *
+**
+** Convert the top P1 entries of the stack into a single entry
+** suitable for use as a data record in a database table. The
+** details of the format are irrelevant as long as the OP_Column
+** opcode can decode the record later. Refer to source code
+** comments for the details of the record format.
+**
+** If P2 is true (non-zero) and one or more of the P1 entries
+** that go into building the record is NULL, then add some extra
+** bytes to the record to make it distinct from other entries created
+** during the same run of the VDBE. The extra bytes added are a
+** counter that is reset with each run of the VDBE, so records
+** created this way will not necessarily be distinct across runs.
+** But they should be distinct for transient tables (created using
+** OP_OpenTemp) which is what they are intended for.
+**
+** (Later:) The P2==1 option was intended to make NULLs distinct
+** for the UNION operator. But I have since discovered that NULLs
+** are indistinct for UNION. So this option is never used.
+*/
+case OP_MakeRecord: {
+ char *zNewRecord;
+ int nByte;
+ int nField;
+ int i, j;
+ int idxWidth;
+ u32 addr;
+ Mem *pRec;
+ int addUnique = 0; /* True to cause bytes to be added to make the
+ ** generated record distinct */
+ char zTemp[NBFS]; /* Temp space for small records */
+
+ /* Assuming the record contains N fields, the record format looks
+ ** like this:
+ **
+ ** -------------------------------------------------------------------
+ ** | idx0 | idx1 | ... | idx(N-1) | idx(N) | data0 | ... | data(N-1) |
+ ** -------------------------------------------------------------------
+ **
+ ** All data fields are converted to strings before being stored and
+ ** are stored with their null terminators. NULL entries omit the
+ ** null terminator. Thus an empty string uses 1 byte and a NULL uses
+ ** zero bytes. Data(0) is taken from the lowest element of the stack
+ ** and data(N-1) is the top of the stack.
+ **
+ ** Each of the idx() entries is either 1, 2, or 3 bytes depending on
+ ** how big the total record is. Idx(0) contains the offset to the start
+ ** of data(0). Idx(k) contains the offset to the start of data(k).
+ ** Idx(N) contains the total number of bytes in the record.
+ */
+ nField = pOp->p1;
+ pRec = &pTos[1-nField];
+ assert( pRec>=p->aStack );
+ nByte = 0;
+ for(i=0; i<nField; i++, pRec++){
+ if( pRec->flags & MEM_Null ){
+ addUnique = pOp->p2;
+ }else{
+ Stringify(pRec);
+ nByte += pRec->n;
+ }
+ }
+ if( addUnique ) nByte += sizeof(p->uniqueCnt);
+ if( nByte + nField + 1 < 256 ){
+ idxWidth = 1;
+ }else if( nByte + 2*nField + 2 < 65536 ){
+ idxWidth = 2;
+ }else{
+ idxWidth = 3;
+ }
+ nByte += idxWidth*(nField + 1);
+ if( nByte>MAX_BYTES_PER_ROW ){
+ rc = SQLITE_TOOBIG;
+ goto abort_due_to_error;
+ }
+ if( nByte<=NBFS ){
+ zNewRecord = zTemp;
+ }else{
+ zNewRecord = sqliteMallocRaw( nByte );
+ if( zNewRecord==0 ) goto no_mem;
+ }
+ j = 0;
+ addr = idxWidth*(nField+1) + addUnique*sizeof(p->uniqueCnt);
+ for(i=0, pRec=&pTos[1-nField]; i<nField; i++, pRec++){
+ zNewRecord[j++] = addr & 0xff;
+ if( idxWidth>1 ){
+ zNewRecord[j++] = (addr>>8)&0xff;
+ if( idxWidth>2 ){
+ zNewRecord[j++] = (addr>>16)&0xff;
+ }
+ }
+ if( (pRec->flags & MEM_Null)==0 ){
+ addr += pRec->n;
+ }
+ }
+ zNewRecord[j++] = addr & 0xff;
+ if( idxWidth>1 ){
+ zNewRecord[j++] = (addr>>8)&0xff;
+ if( idxWidth>2 ){
+ zNewRecord[j++] = (addr>>16)&0xff;
+ }
+ }
+ if( addUnique ){
+ memcpy(&zNewRecord[j], &p->uniqueCnt, sizeof(p->uniqueCnt));
+ p->uniqueCnt++;
+ j += sizeof(p->uniqueCnt);
+ }
+ for(i=0, pRec=&pTos[1-nField]; i<nField; i++, pRec++){
+ if( (pRec->flags & MEM_Null)==0 ){
+ memcpy(&zNewRecord[j], pRec->z, pRec->n);
+ j += pRec->n;
+ }
+ }
+ popStack(&pTos, nField);
+ pTos++;
+ pTos->n = nByte;
+ if( nByte<=NBFS ){
+ assert( zNewRecord==zTemp );
+ memcpy(pTos->zShort, zTemp, nByte);
+ pTos->z = pTos->zShort;
+ pTos->flags = MEM_Str | MEM_Short;
+ }else{
+ assert( zNewRecord!=zTemp );
+ pTos->z = zNewRecord;
+ pTos->flags = MEM_Str | MEM_Dyn;
+ }
+ break;
+}
+
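+/* A worked example of the record format described above: packing the two
+** fields "hi" and NULL (with P2==0) gives idxWidth==1 and the six-byte
+** record
+**
+**     03 06 06 'h' 'i' 00
+**
+** idx(0)==3 is the offset of data(0), idx(1)==6 is the offset of the NULL
+** field (which occupies no bytes), and idx(2)==6 is the total record size.
+*/
+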
+/* Opcode: MakeKey P1 P2 P3
+**
+** Convert the top P1 entries of the stack into a single entry suitable
+** for use as the key in an index. The top P1 records are
+** converted to strings and merged. The null-terminators
+** are retained and used as separators.
+** The lowest entry in the stack is the first field and the top of the
+** stack becomes the last.
+**
+** If P2 is not zero, then the original entries remain on the stack
+** and the new key is pushed on top. If P2 is zero, the original
+** data is popped off the stack first then the new key is pushed
+** back in its place.
+**
+** P3 is a string that is P1 characters long. Each character is either
+** an 'n' or a 't' to indicate whether the argument should be interpreted as
+** numeric or text type. The first character of P3 corresponds to the
+** lowest element on the stack. If P3 is NULL then all arguments are
+** assumed to be of the numeric type.
+**
+** The type makes a difference in that text-type fields may not be
+** introduced by 'b' (as described in the next paragraph). The
+** first character of a text-type field must be either 'a' (if it is NULL)
+** or 'c'. Numeric fields will be introduced by 'b' if their content
+** looks like a well-formed number. Otherwise the 'a' or 'c' will be
+** used.
+**
+** The key is a concatenation of fields. Each field is terminated by
+** a single 0x00 character. A NULL field is introduced by an 'a' and
+** is followed immediately by its 0x00 terminator. A numeric field is
+** introduced by a single character 'b' and is followed by a sequence
+** of characters that represent the number such that a comparison of
+** the character string using memcmp() sorts the numbers in numerical
+** order. The character strings for numbers are generated using the
+** sqliteRealToSortable() function. A text field is introduced by a
+** 'c' character and is followed by the exact text of the field. The
+** use of an 'a', 'b', or 'c' character at the beginning of each field
+** guarantees that NULLs sort before numbers and that numbers sort
+** before text. 0x00 characters do not occur except as separators
+** between fields.
+**
+** See also: MakeIdxKey, SortMakeKey
+*/
+/* Opcode: MakeIdxKey P1 P2 P3
+**
+** Convert the top P1 entries of the stack into a single entry suitable
+** for use as the key in an index. In addition, take one additional integer
+** off of the stack, treat that integer as a four-byte record number, and
+** append the four bytes to the key. Thus a total of P1+1 entries are
+** popped from the stack for this instruction and a single entry is pushed
+** back. The first P1 entries that are popped are strings and the last
+** entry (the lowest on the stack) is an integer record number.
+**
+** The conversion of the first P1 string entries occurs just like in
+** MakeKey. Each entry is separated from the others by a null.
+** The entire concatenation is null-terminated. The lowest entry
+** in the stack is the first field and the top of the stack becomes the
+** last.
+**
+** If P2 is not zero and one or more of the P1 entries that go into the
+** generated key is NULL, then jump to P2 after the new key has been
+** pushed on the stack. In other words, jump to P2 if the key is
+** guaranteed to be unique. This jump can be used to skip a subsequent
+** uniqueness test.
+**
+** P3 is a string that is P1 characters long. Each character is either
+** an 'n' or a 't' to indicate whether the argument should be numeric or
+** text. The first character corresponds to the lowest element on the
+** stack. If P3 is null then all arguments are assumed to be numeric.
+**
+** See also: MakeKey, SortMakeKey
+*/
+case OP_MakeIdxKey:
+case OP_MakeKey: {
+ char *zNewKey;
+ int nByte;
+ int nField;
+ int addRowid;
+ int i, j;
+ int containsNull = 0;
+ Mem *pRec;
+ char zTemp[NBFS];
+
+ addRowid = pOp->opcode==OP_MakeIdxKey;
+ nField = pOp->p1;
+ pRec = &pTos[1-nField];
+ assert( pRec>=p->aStack );
+ nByte = 0;
+ for(j=0, i=0; i<nField; i++, j++, pRec++){
+ int flags = pRec->flags;
+ int len;
+ char *z;
+ if( flags & MEM_Null ){
+ nByte += 2;
+ containsNull = 1;
+ }else if( pOp->p3 && pOp->p3[j]=='t' ){
+ Stringify(pRec);
+ pRec->flags &= ~(MEM_Int|MEM_Real);
+ nByte += pRec->n+1;
+ }else if( (flags & (MEM_Real|MEM_Int))!=0 || sqliteIsNumber(pRec->z) ){
+ if( (flags & (MEM_Real|MEM_Int))==MEM_Int ){
+ pRec->r = pRec->i;
+ }else if( (flags & (MEM_Real|MEM_Int))==0 ){
+ pRec->r = sqliteAtoF(pRec->z, 0);
+ }
+ Release(pRec);
+ z = pRec->zShort;
+ sqliteRealToSortable(pRec->r, z);
+ len = strlen(z);
+ pRec->z = 0;
+ pRec->flags = MEM_Real;
+ pRec->n = len+1;
+ nByte += pRec->n+1;
+ }else{
+ nByte += pRec->n+1;
+ }
+ }
+ if( nByte+sizeof(u32)>MAX_BYTES_PER_ROW ){
+ rc = SQLITE_TOOBIG;
+ goto abort_due_to_error;
+ }
+ if( addRowid ) nByte += sizeof(u32);
+ if( nByte<=NBFS ){
+ zNewKey = zTemp;
+ }else{
+ zNewKey = sqliteMallocRaw( nByte );
+ if( zNewKey==0 ) goto no_mem;
+ }
+ j = 0;
+ pRec = &pTos[1-nField];
+ for(i=0; i<nField; i++, pRec++){
+ if( pRec->flags & MEM_Null ){
+ zNewKey[j++] = 'a';
+ zNewKey[j++] = 0;
+ }else if( pRec->flags==MEM_Real ){
+ zNewKey[j++] = 'b';
+ memcpy(&zNewKey[j], pRec->zShort, pRec->n);
+ j += pRec->n;
+ }else{
+ assert( pRec->flags & MEM_Str );
+ zNewKey[j++] = 'c';
+ memcpy(&zNewKey[j], pRec->z, pRec->n);
+ j += pRec->n;
+ }
+ }
+ if( addRowid ){
+ u32 iKey;
+ pRec = &pTos[-nField];
+ assert( pRec>=p->aStack );
+ Integerify(pRec);
+ iKey = intToKey(pRec->i);
+ memcpy(&zNewKey[j], &iKey, sizeof(u32));
+ popStack(&pTos, nField+1);
+ if( pOp->p2 && containsNull ) pc = pOp->p2 - 1;
+ }else{
+ if( pOp->p2==0 ) popStack(&pTos, nField);
+ }
+ pTos++;
+ pTos->n = nByte;
+ if( nByte<=NBFS ){
+ assert( zNewKey==zTemp );
+ pTos->z = pTos->zShort;
+ memcpy(pTos->zShort, zTemp, nByte);
+ pTos->flags = MEM_Str | MEM_Short;
+ }else{
+ pTos->z = zNewKey;
+ pTos->flags = MEM_Str | MEM_Dyn;
+ }
+ break;
+}
+
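+/* A worked example of the key encoding described above: for MakeKey with
+** P3=="nt" and two stack entries, a NULL (lower) and the text "hi" (top),
+** the generated six-byte key is
+**
+**     'a' 00 'c' 'h' 'i' 00
+**
+** A numeric field would instead contribute a 'b' followed by the opaque
+** string produced by sqliteRealToSortable() and its 0x00 terminator, which
+** is what makes NULLs sort before numbers and numbers before text.
+*/
+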
+/* Opcode: IncrKey * * *
+**
+** The top of the stack should contain an index key generated by
+** the MakeKey opcode. This routine increases the least significant
+** byte of that key by one. This is used so that the MoveTo opcode
+** will move to the first entry greater than the key rather than to
+** the key itself.
+*/
+case OP_IncrKey: {
+ assert( pTos>=p->aStack );
+ /* The IncrKey opcode is only applied to keys generated by
+  ** MakeKey or MakeIdxKey and the results of those operations
+ ** are always dynamic strings or zShort[] strings. So we
+ ** are always free to modify the string in place.
+ */
+ assert( pTos->flags & (MEM_Dyn|MEM_Short) );
+ pTos->z[pTos->n-1]++;
+ break;
+}
+
+/* Opcode: Checkpoint P1 * *
+**
+** Begin a checkpoint. A checkpoint is the beginning of an operation that
+** is part of a larger transaction but which might need to be rolled back
+** itself without affecting the containing transaction. A checkpoint will
+** be automatically committed or rolled back when the VDBE halts.
+**
+** The checkpoint is begun on the database file with index P1. The main
+** database file has an index of 0 and the file used for temporary tables
+** has an index of 1.
+*/
+case OP_Checkpoint: {
+ int i = pOp->p1;
+ if( i>=0 && i<db->nDb && db->aDb[i].pBt && db->aDb[i].inTrans==1 ){
+ rc = sqliteBtreeBeginCkpt(db->aDb[i].pBt);
+ if( rc==SQLITE_OK ) db->aDb[i].inTrans = 2;
+ }
+ break;
+}
+
+/* Opcode: Transaction P1 * *
+**
+** Begin a transaction. The transaction ends when a Commit or Rollback
+** opcode is encountered. Depending on the ON CONFLICT setting, the
+** transaction might also be rolled back if an error is encountered.
+**
+** P1 is the index of the database file on which the transaction is
+** started. Index 0 is the main database file and index 1 is the
+** file used for temporary tables.
+**
+** A write lock is obtained on the database file when a transaction is
+** started. No other process can read or write the file while the
+** transaction is underway. Starting a transaction also creates a
+** rollback journal. A transaction must be started before any changes
+** can be made to the database.
+*/
+case OP_Transaction: {
+ int busy = 1;
+ int i = pOp->p1;
+ assert( i>=0 && i<db->nDb );
+ if( db->aDb[i].inTrans ) break;
+ while( db->aDb[i].pBt!=0 && busy ){
+ rc = sqliteBtreeBeginTrans(db->aDb[i].pBt);
+ switch( rc ){
+ case SQLITE_BUSY: {
+ if( db->xBusyCallback==0 ){
+ p->pc = pc;
+ p->undoTransOnError = 1;
+ p->rc = SQLITE_BUSY;
+ p->pTos = pTos;
+ return SQLITE_BUSY;
+ }else if( (*db->xBusyCallback)(db->pBusyArg, "", busy++)==0 ){
+ sqliteSetString(&p->zErrMsg, sqlite_error_string(rc), (char*)0);
+ busy = 0;
+ }
+ break;
+ }
+ case SQLITE_READONLY: {
+ rc = SQLITE_OK;
+ /* Fall thru into the next case */
+ }
+ case SQLITE_OK: {
+ p->inTempTrans = 0;
+ busy = 0;
+ break;
+ }
+ default: {
+ goto abort_due_to_error;
+ }
+ }
+ }
+ db->aDb[i].inTrans = 1;
+ p->undoTransOnError = 1;
+ break;
+}
+
+/* Opcode: Commit * * *
+**
+** Cause all modifications to the database that have been made since the
+** last Transaction to actually take effect. No additional modifications
+** are allowed until another transaction is started. The Commit instruction
+** deletes the journal file and releases the write lock on the database.
+** A read lock continues to be held if there are still cursors open.
+*/
+case OP_Commit: {
+ int i;
+ if( db->xCommitCallback!=0 ){
+ if( sqliteSafetyOff(db) ) goto abort_due_to_misuse;
+ if( db->xCommitCallback(db->pCommitArg)!=0 ){
+ rc = SQLITE_CONSTRAINT;
+ }
+ if( sqliteSafetyOn(db) ) goto abort_due_to_misuse;
+ }
+ for(i=0; rc==SQLITE_OK && i<db->nDb; i++){
+ if( db->aDb[i].inTrans ){
+ rc = sqliteBtreeCommit(db->aDb[i].pBt);
+ db->aDb[i].inTrans = 0;
+ }
+ }
+ if( rc==SQLITE_OK ){
+ sqliteCommitInternalChanges(db);
+ }else{
+ sqliteRollbackAll(db);
+ }
+ break;
+}
+
+/* Opcode: Rollback P1 * *
+**
+** Cause all modifications to the database that have been made since the
+** last Transaction to be undone. The database is restored to its state
+** before the Transaction opcode was executed. No additional modifications
+** are allowed until another transaction is started.
+**
+** P1 is the index of the database file that is rolled back. An index of 0
+** is used for the main database and an index of 1 is used for the file used
+** to hold temporary tables.
+**
+** This instruction automatically closes all cursors and releases both
+** the read and write locks on the indicated database.
+*/
+case OP_Rollback: {
+ sqliteRollbackAll(db);
+ break;
+}
+
+/* Opcode: ReadCookie P1 P2 *
+**
+** Read cookie number P2 from database P1 and push it onto the stack.
+** P2==0 is the schema version. P2==1 is the database format.
+** P2==2 is the recommended pager cache size, and so forth. P1==0 is
+** the main database file and P1==1 is the database file used to store
+** temporary tables.
+**
+** There must be a read-lock on the database (either a transaction
+** must be started or there must be an open cursor) before
+** executing this instruction.
+*/
+case OP_ReadCookie: {
+ int aMeta[SQLITE_N_BTREE_META];
+ assert( pOp->p2<SQLITE_N_BTREE_META );
+ assert( pOp->p1>=0 && pOp->p1<db->nDb );
+ assert( db->aDb[pOp->p1].pBt!=0 );
+ rc = sqliteBtreeGetMeta(db->aDb[pOp->p1].pBt, aMeta);
+ pTos++;
+ pTos->i = aMeta[1+pOp->p2];
+ pTos->flags = MEM_Int;
+ break;
+}
+
+/* Opcode: SetCookie P1 P2 *
+**
+** Write the top of the stack into cookie number P2 of database P1.
+** P2==0 is the schema version. P2==1 is the database format.
+** P2==2 is the recommended pager cache size, and so forth. P1==0 is
+** the main database file and P1==1 is the database file used to store
+** temporary tables.
+**
+** A transaction must be started before executing this opcode.
+*/
+case OP_SetCookie: {
+ int aMeta[SQLITE_N_BTREE_META];
+ assert( pOp->p2<SQLITE_N_BTREE_META );
+ assert( pOp->p1>=0 && pOp->p1<db->nDb );
+ assert( db->aDb[pOp->p1].pBt!=0 );
+ assert( pTos>=p->aStack );
+  Integerify(pTos);
+ rc = sqliteBtreeGetMeta(db->aDb[pOp->p1].pBt, aMeta);
+ if( rc==SQLITE_OK ){
+ aMeta[1+pOp->p2] = pTos->i;
+ rc = sqliteBtreeUpdateMeta(db->aDb[pOp->p1].pBt, aMeta);
+ }
+ Release(pTos);
+ pTos--;
+ break;
+}
+
+/* Opcode: VerifyCookie P1 P2 *
+**
+** Check the value of global database parameter number 0 (the
+** schema version) and make sure it is equal to P2.
+** P1 is the database number which is 0 for the main database file
+** and 1 for the file holding temporary tables and some higher number
+** for auxiliary databases.
+**
+** The cookie changes its value whenever the database schema changes.
+** This operation is used to detect when the cookie has changed and the
+** current process needs to reread the schema.
+**
+** Either a transaction needs to have been started or an OP_Open needs
+** to be executed (to establish a read lock) before this opcode is
+** invoked.
+*/
+case OP_VerifyCookie: {
+ int aMeta[SQLITE_N_BTREE_META];
+ assert( pOp->p1>=0 && pOp->p1<db->nDb );
+ rc = sqliteBtreeGetMeta(db->aDb[pOp->p1].pBt, aMeta);
+ if( rc==SQLITE_OK && aMeta[1]!=pOp->p2 ){
+ sqliteSetString(&p->zErrMsg, "database schema has changed", (char*)0);
+ rc = SQLITE_SCHEMA;
+ }
+ break;
+}
+
+/* Opcode: OpenRead P1 P2 P3
+**
+** Open a read-only cursor for the database table whose root page is
+** P2 in a database file. The database file is determined by an
+** integer from the top of the stack. 0 means the main database and
+** 1 means the database used for temporary tables. Give the new
+** cursor an identifier of P1. The P1 values need not be contiguous
+** but all P1 values should be small integers. It is an error for
+** P1 to be negative.
+**
+** If P2==0 then take the root page number from the next entry on the stack.
+**
+** There will be a read lock on the database whenever there is an
+** open cursor. If the database was unlocked prior to this instruction
+** then a read lock is acquired as part of this instruction. A read
+** lock allows other processes to read the database but prohibits
+** any other process from modifying the database. The read lock is
+** released when all cursors are closed. If this instruction attempts
+** to get a read lock but fails, the script terminates with an
+** SQLITE_BUSY error code.
+**
+** The P3 value is the name of the table or index being opened.
+** The P3 value is not actually used by this opcode and may be
+** omitted. But the code generator usually inserts the index or
+** table name into P3 to make the code easier to read.
+**
+** See also OpenWrite.
+*/
+/* Opcode: OpenWrite P1 P2 P3
+**
+** Open a read/write cursor named P1 on the table or index whose root
+** page is P2. If P2==0 then take the root page number from the stack.
+**
+** The P3 value is the name of the table or index being opened.
+** The P3 value is not actually used by this opcode and may be
+** omitted. But the code generator usually inserts the index or
+** table name into P3 to make the code easier to read.
+**
+** This instruction works just like OpenRead except that it opens the cursor
+** in read/write mode. For a given table, there can be one or more read-only
+** cursors or a single read/write cursor but not both.
+**
+** See also OpenRead.
+*/
+case OP_OpenRead:
+case OP_OpenWrite: {
+ int busy = 0;
+ int i = pOp->p1;
+ int p2 = pOp->p2;
+ int wrFlag;
+ Btree *pX;
+ int iDb;
+
+ assert( pTos>=p->aStack );
+ Integerify(pTos);
+ iDb = pTos->i;
+ pTos--;
+ assert( iDb>=0 && iDb<db->nDb );
+ pX = db->aDb[iDb].pBt;
+ assert( pX!=0 );
+ wrFlag = pOp->opcode==OP_OpenWrite;
+ if( p2<=0 ){
+ assert( pTos>=p->aStack );
+ Integerify(pTos);
+ p2 = pTos->i;
+ pTos--;
+ if( p2<2 ){
+ sqliteSetString(&p->zErrMsg, "root page number less than 2", (char*)0);
+ rc = SQLITE_INTERNAL;
+ break;
+ }
+ }
+ assert( i>=0 );
+ if( expandCursorArraySize(p, i) ) goto no_mem;
+ sqliteVdbeCleanupCursor(&p->aCsr[i]);
+ memset(&p->aCsr[i], 0, sizeof(Cursor));
+ p->aCsr[i].nullRow = 1;
+ if( pX==0 ) break;
+ do{
+ rc = sqliteBtreeCursor(pX, p2, wrFlag, &p->aCsr[i].pCursor);
+ switch( rc ){
+ case SQLITE_BUSY: {
+ if( db->xBusyCallback==0 ){
+ p->pc = pc;
+ p->rc = SQLITE_BUSY;
+ p->pTos = &pTos[1 + (pOp->p2<=0)]; /* Operands must remain on stack */
+ return SQLITE_BUSY;
+ }else if( (*db->xBusyCallback)(db->pBusyArg, pOp->p3, ++busy)==0 ){
+ sqliteSetString(&p->zErrMsg, sqlite_error_string(rc), (char*)0);
+ busy = 0;
+ }
+ break;
+ }
+ case SQLITE_OK: {
+ busy = 0;
+ break;
+ }
+ default: {
+ goto abort_due_to_error;
+ }
+ }
+ }while( busy );
+ break;
+}
+
+/* Opcode: OpenTemp P1 P2 *
+**
+** Open a new cursor to a transient table.
+** The transient cursor is always opened read/write even if
+** the main database is read-only. The transient table is deleted
+** automatically when the cursor is closed.
+**
+** The cursor points to a BTree table if P2==0 and to a BTree index
+** if P2==1. A BTree table must have an integer key and can have arbitrary
+** data. A BTree index has no data but can have an arbitrary key.
+**
+** This opcode is used for tables that exist for the duration of a single
+** SQL statement only. Tables created using CREATE TEMPORARY TABLE
+** are opened using OP_OpenRead or OP_OpenWrite. "Temporary" in the
+** context of this opcode means for the duration of a single SQL statement
+** whereas "Temporary" in the context of CREATE TABLE means for the duration
+** of the connection to the database. Same word; different meanings.
+*/
+case OP_OpenTemp: {
+ int i = pOp->p1;
+ Cursor *pCx;
+ assert( i>=0 );
+ if( expandCursorArraySize(p, i) ) goto no_mem;
+ pCx = &p->aCsr[i];
+ sqliteVdbeCleanupCursor(pCx);
+ memset(pCx, 0, sizeof(*pCx));
+ pCx->nullRow = 1;
+ rc = sqliteBtreeFactory(db, 0, 1, TEMP_PAGES, &pCx->pBt);
+
+ if( rc==SQLITE_OK ){
+ rc = sqliteBtreeBeginTrans(pCx->pBt);
+ }
+ if( rc==SQLITE_OK ){
+ if( pOp->p2 ){
+ int pgno;
+ rc = sqliteBtreeCreateIndex(pCx->pBt, &pgno);
+ if( rc==SQLITE_OK ){
+ rc = sqliteBtreeCursor(pCx->pBt, pgno, 1, &pCx->pCursor);
+ }
+ }else{
+ rc = sqliteBtreeCursor(pCx->pBt, 2, 1, &pCx->pCursor);
+ }
+ }
+ break;
+}
+
+/* Opcode: OpenPseudo P1 * *
+**
+** Open a new cursor that points to a fake table that contains a single
+** row of data. Any attempt to write a second row of data causes the
+** first row to be deleted. All data is deleted when the cursor is
+** closed.
+**
+** A pseudo-table created by this opcode is useful for holding the
+** NEW or OLD tables in a trigger.
+*/
+case OP_OpenPseudo: {
+ int i = pOp->p1;
+ Cursor *pCx;
+ assert( i>=0 );
+ if( expandCursorArraySize(p, i) ) goto no_mem;
+ pCx = &p->aCsr[i];
+ sqliteVdbeCleanupCursor(pCx);
+ memset(pCx, 0, sizeof(*pCx));
+ pCx->nullRow = 1;
+ pCx->pseudoTable = 1;
+ break;
+}
+
+/* Opcode: Close P1 * *
+**
+** Close a cursor previously opened as P1. If P1 is not
+** currently open, this instruction is a no-op.
+*/
+case OP_Close: {
+ int i = pOp->p1;
+ if( i>=0 && i<p->nCursor ){
+ sqliteVdbeCleanupCursor(&p->aCsr[i]);
+ }
+ break;
+}
+
+/* Opcode: MoveTo P1 P2 *
+**
+** Pop the top of the stack and use its value as a key. Reposition
+** cursor P1 so that it points to an entry with a matching key. If
+** the table contains no record with a matching key, then the cursor
+** is left pointing at the first record that is greater than the key.
+** If there are no records greater than the key and P2 is not zero,
+** then an immediate jump to P2 is made.
+**
+** See also: Found, NotFound, Distinct, MoveLt
+*/
+/* Opcode: MoveLt P1 P2 *
+**
+** Pop the top of the stack and use its value as a key. Reposition
+** cursor P1 so that it points to the entry with the largest key that is
+** less than the key popped from the stack.
+** If there are no records less than the key and P2
+** is not zero then an immediate jump to P2 is made.
+**
+** See also: MoveTo
+*/
+case OP_MoveLt:
+case OP_MoveTo: {
+ int i = pOp->p1;
+ Cursor *pC;
+
+ assert( pTos>=p->aStack );
+ assert( i>=0 && i<p->nCursor );
+ pC = &p->aCsr[i];
+ if( pC->pCursor!=0 ){
+ int res, oc;
+ pC->nullRow = 0;
+ if( pTos->flags & MEM_Int ){
+ int iKey = intToKey(pTos->i);
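+      /* With no jump target and a plain MoveTo, the seek can be deferred:
+      ** just remember the target key here and let the later opcodes that
+      ** call sqliteVdbeCursorMoveto() perform the btree seek only if the
+      ** row is actually used. */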
+ if( pOp->p2==0 && pOp->opcode==OP_MoveTo ){
+ pC->movetoTarget = iKey;
+ pC->deferredMoveto = 1;
+ Release(pTos);
+ pTos--;
+ break;
+ }
+ sqliteBtreeMoveto(pC->pCursor, (char*)&iKey, sizeof(int), &res);
+ pC->lastRecno = pTos->i;
+ pC->recnoIsValid = res==0;
+ }else{
+ Stringify(pTos);
+ sqliteBtreeMoveto(pC->pCursor, pTos->z, pTos->n, &res);
+ pC->recnoIsValid = 0;
+ }
+ pC->deferredMoveto = 0;
+ sqlite_search_count++;
+ oc = pOp->opcode;
+ if( oc==OP_MoveTo && res<0 ){
+ sqliteBtreeNext(pC->pCursor, &res);
+ pC->recnoIsValid = 0;
+ if( res && pOp->p2>0 ){
+ pc = pOp->p2 - 1;
+ }
+ }else if( oc==OP_MoveLt ){
+ if( res>=0 ){
+ sqliteBtreePrevious(pC->pCursor, &res);
+ pC->recnoIsValid = 0;
+ }else{
+ /* res might be negative because the table is empty. Check to
+ ** see if this is the case.
+ */
+ int keysize;
+ res = sqliteBtreeKeySize(pC->pCursor,&keysize)!=0 || keysize==0;
+ }
+ if( res && pOp->p2>0 ){
+ pc = pOp->p2 - 1;
+ }
+ }
+ }
+ Release(pTos);
+ pTos--;
+ break;
+}
+
+/* Opcode: Distinct P1 P2 *
+**
+** Use the top of the stack as a string key. If a record with that key does
+** not exist in the table of cursor P1, then jump to P2. If the record
+** does already exist, then fall thru. The cursor is left pointing
+** at the record if it exists. The key is not popped from the stack.
+**
+** This operation is similar to NotFound except that this operation
+** does not pop the key from the stack.
+**
+** See also: Found, NotFound, MoveTo, IsUnique, NotExists
+*/
+/* Opcode: Found P1 P2 *
+**
+** Use the top of the stack as a string key. If a record with that key
+** does exist in table of P1, then jump to P2. If the record
+** does not exist, then fall thru. The cursor is left pointing
+** to the record if it exists. The key is popped from the stack.
+**
+** See also: Distinct, NotFound, MoveTo, IsUnique, NotExists
+*/
+/* Opcode: NotFound P1 P2 *
+**
+** Use the top of the stack as a string key. If a record with that key
+** does not exist in table of P1, then jump to P2. If the record
+** does exist, then fall thru. The cursor is left pointing to the
+** record if it exists. The key is popped from the stack.
+**
+** The difference between this operation and Distinct is that
+** Distinct does not pop the key from the stack.
+**
+** See also: Distinct, Found, MoveTo, NotExists, IsUnique
+*/
+case OP_Distinct:
+case OP_NotFound:
+case OP_Found: {
+ int i = pOp->p1;
+ int alreadyExists = 0;
+ Cursor *pC;
+ assert( pTos>=p->aStack );
+ assert( i>=0 && i<p->nCursor );
+ if( (pC = &p->aCsr[i])->pCursor!=0 ){
+ int res, rx;
+ Stringify(pTos);
+ rx = sqliteBtreeMoveto(pC->pCursor, pTos->z, pTos->n, &res);
+ alreadyExists = rx==SQLITE_OK && res==0;
+ pC->deferredMoveto = 0;
+ }
+ if( pOp->opcode==OP_Found ){
+ if( alreadyExists ) pc = pOp->p2 - 1;
+ }else{
+ if( !alreadyExists ) pc = pOp->p2 - 1;
+ }
+ if( pOp->opcode!=OP_Distinct ){
+ Release(pTos);
+ pTos--;
+ }
+ break;
+}
+
+/* Opcode: IsUnique P1 P2 *
+**
+** The top of the stack is an integer record number. Call this
+** record number R. The next on the stack is an index key created
+** using MakeIdxKey. Call it K. This instruction pops R from the
+** stack but it leaves K unchanged.
+**
+** P1 is an index. So all but the last four bytes of K are an
+** index string. The last four bytes of K are a record number.
+**
+** This instruction asks if there is an entry in P1 where the
+** index string matches K but the record number is different
+** from R. If there is no such entry, then there is an immediate
+** jump to P2. If any entry does exist where the index string
+** matches K but the record number is not R, then the record
+** number for that entry is pushed onto the stack and control
+** falls through to the next instruction.
+**
+** See also: Distinct, NotFound, NotExists, Found
+*/
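+/* Illustrative sketch of K, following the description above:
+**
+**        <index-field string built by MakeIdxKey><4-byte record number>
+**
+** The searches and comparisons below always stop 4 bytes short of the end
+** of K so that only the index-field portion participates in the match.
+*/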
+case OP_IsUnique: {
+ int i = pOp->p1;
+ Mem *pNos = &pTos[-1];
+ BtCursor *pCrsr;
+ int R;
+
+ /* Pop the value R off the top of the stack
+ */
+ assert( pNos>=p->aStack );
+ Integerify(pTos);
+ R = pTos->i;
+ pTos--;
+  assert( i>=0 && i<p->nCursor );
+ if( (pCrsr = p->aCsr[i].pCursor)!=0 ){
+ int res, rc;
+ int v; /* The record number on the P1 entry that matches K */
+ char *zKey; /* The value of K */
+ int nKey; /* Number of bytes in K */
+
+ /* Make sure K is a string and make zKey point to K
+ */
+ Stringify(pNos);
+ zKey = pNos->z;
+ nKey = pNos->n;
+ assert( nKey >= 4 );
+
+ /* Search for an entry in P1 where all but the last four bytes match K.
+ ** If there is no such entry, jump immediately to P2.
+ */
+ assert( p->aCsr[i].deferredMoveto==0 );
+ rc = sqliteBtreeMoveto(pCrsr, zKey, nKey-4, &res);
+ if( rc!=SQLITE_OK ) goto abort_due_to_error;
+ if( res<0 ){
+ rc = sqliteBtreeNext(pCrsr, &res);
+ if( res ){
+ pc = pOp->p2 - 1;
+ break;
+ }
+ }
+ rc = sqliteBtreeKeyCompare(pCrsr, zKey, nKey-4, 4, &res);
+ if( rc!=SQLITE_OK ) goto abort_due_to_error;
+ if( res>0 ){
+ pc = pOp->p2 - 1;
+ break;
+ }
+
+ /* At this point, pCrsr is pointing to an entry in P1 where all but
+    ** the last four bytes of the key match K. Check to see if the last
+ ** four bytes of the key are different from R. If the last four
+ ** bytes equal R then jump immediately to P2.
+ */
+ sqliteBtreeKey(pCrsr, nKey - 4, 4, (char*)&v);
+ v = keyToInt(v);
+ if( v==R ){
+ pc = pOp->p2 - 1;
+ break;
+ }
+
+ /* The last four bytes of the key are different from R. Convert the
+ ** last four bytes of the key into an integer and push it onto the
+ ** stack. (These bytes are the record number of an entry that
+ ** violates a UNIQUE constraint.)
+ */
+ pTos++;
+ pTos->i = v;
+ pTos->flags = MEM_Int;
+ }
+ break;
+}
+
+/* Opcode: NotExists P1 P2 *
+**
+** Use the top of the stack as an integer key. If a record with that key
+** does not exist in table of P1, then jump to P2. If the record
+** does exist, then fall thru. The cursor is left pointing to the
+** record if it exists. The integer key is popped from the stack.
+**
+** The difference between this operation and NotFound is that this
+** operation assumes the key is an integer and NotFound assumes it
+** is a string.
+**
+** See also: Distinct, Found, MoveTo, NotFound, IsUnique
+*/
+case OP_NotExists: {
+ int i = pOp->p1;
+ BtCursor *pCrsr;
+ assert( pTos>=p->aStack );
+ assert( i>=0 && i<p->nCursor );
+ if( (pCrsr = p->aCsr[i].pCursor)!=0 ){
+ int res, rx, iKey;
+ assert( pTos->flags & MEM_Int );
+ iKey = intToKey(pTos->i);
+ rx = sqliteBtreeMoveto(pCrsr, (char*)&iKey, sizeof(int), &res);
+ p->aCsr[i].lastRecno = pTos->i;
+ p->aCsr[i].recnoIsValid = res==0;
+ p->aCsr[i].nullRow = 0;
+ if( rx!=SQLITE_OK || res!=0 ){
+ pc = pOp->p2 - 1;
+ p->aCsr[i].recnoIsValid = 0;
+ }
+ }
+ Release(pTos);
+ pTos--;
+ break;
+}
+
+/* Opcode: NewRecno P1 * *
+**
+** Get a new integer record number used as the key to a table.
+** The record number has not previously been used as a key in the database
+** table that cursor P1 points to. The new record number is pushed
+** onto the stack.
+*/
+case OP_NewRecno: {
+ int i = pOp->p1;
+ int v = 0;
+ Cursor *pC;
+ assert( i>=0 && i<p->nCursor );
+ if( (pC = &p->aCsr[i])->pCursor==0 ){
+ v = 0;
+ }else{
+ /* The next rowid or record number (different terms for the same
+ ** thing) is obtained in a two-step algorithm.
+ **
+ ** First we attempt to find the largest existing rowid and add one
+ ** to that. But if the largest existing rowid is already the maximum
+ ** positive integer, we have to fall through to the second
+ ** probabilistic algorithm
+ **
+ ** The second algorithm is to select a rowid at random and see if
+ ** it already exists in the table. If it does not exist, we have
+ ** succeeded. If the random rowid does exist, we select a new one
+ ** and try again, up to 1000 times.
+ **
+ ** For a table with less than 2 billion entries, the probability
+    ** of not finding an unused rowid is about 1.0e-300. This is a
+ ** non-zero probability, but it is still vanishingly small and should
+ ** never cause a problem. You are much, much more likely to have a
+ ** hardware failure than for this algorithm to fail.
+ **
+ ** The analysis in the previous paragraph assumes that you have a good
+ ** source of random numbers. Is a library function like lrand48()
+ ** good enough? Maybe. Maybe not. It's hard to know whether there
+    ** might be subtle bugs in some implementations of lrand48() that
+ ** could cause problems. To avoid uncertainty, SQLite uses its own
+ ** random number generator based on the RC4 algorithm.
+ **
+ ** To promote locality of reference for repetitive inserts, the
+    ** first few attempts at choosing a random rowid pick values just a little
+ ** larger than the previous rowid. This has been shown experimentally
+ ** to double the speed of the COPY operation.
+ */
+ int res, rx, cnt, x;
+ cnt = 0;
+ if( !pC->useRandomRowid ){
+ if( pC->nextRowidValid ){
+ v = pC->nextRowid;
+ }else{
+ rx = sqliteBtreeLast(pC->pCursor, &res);
+ if( res ){
+ v = 1;
+ }else{
+ sqliteBtreeKey(pC->pCursor, 0, sizeof(v), (void*)&v);
+ v = keyToInt(v);
+ if( v==0x7fffffff ){
+ pC->useRandomRowid = 1;
+ }else{
+ v++;
+ }
+ }
+ }
+ if( v<0x7fffffff ){
+ pC->nextRowidValid = 1;
+ pC->nextRowid = v+1;
+ }else{
+ pC->nextRowidValid = 0;
+ }
+ }
+ if( pC->useRandomRowid ){
+ v = db->priorNewRowid;
+ cnt = 0;
+ do{
+ if( v==0 || cnt>2 ){
+ sqliteRandomness(sizeof(v), &v);
+ if( cnt<5 ) v &= 0xffffff;
+ }else{
+ unsigned char r;
+ sqliteRandomness(1, &r);
+ v += r + 1;
+ }
+ if( v==0 ) continue;
+ x = intToKey(v);
+ rx = sqliteBtreeMoveto(pC->pCursor, &x, sizeof(int), &res);
+ cnt++;
+ }while( cnt<1000 && rx==SQLITE_OK && res==0 );
+ db->priorNewRowid = v;
+ if( rx==SQLITE_OK && res==0 ){
+ rc = SQLITE_FULL;
+ goto abort_due_to_error;
+ }
+ }
+ pC->recnoIsValid = 0;
+ pC->deferredMoveto = 0;
+ }
+ pTos++;
+ pTos->i = v;
+ pTos->flags = MEM_Int;
+ break;
+}
+
+/* Opcode: PutIntKey P1 P2 *
+**
+** Write an entry into the table of cursor P1. A new entry is
+** created if it doesn't already exist or the data for an existing
+** entry is overwritten. The data is the value on the top of the
+** stack. The key is the next value down on the stack. The key must
+** be an integer. The stack is popped twice by this instruction.
+**
+** If the OPFLAG_NCHANGE flag of P2 is set, then the row change count is
+** incremented (otherwise not). If the OPFLAG_CSCHANGE flag is set,
+** then the current statement change count is incremented (otherwise not).
+** If the OPFLAG_LASTROWID flag of P2 is set, then rowid is
+** stored for subsequent return by the sqlite_last_insert_rowid() function
+** (otherwise it's unmodified).
+*/
+/* Opcode: PutStrKey P1 * *
+**
+** Write an entry into the table of cursor P1. A new entry is
+** created if it doesn't already exist or the data for an existing
+** entry is overwritten. The data is the value on the top of the
+** stack. The key is the next value down on the stack. The key must
+** be a string. The stack is popped twice by this instruction.
+**
+** P1 may not be a pseudo-table opened using the OpenPseudo opcode.
+*/
+case OP_PutIntKey:
+case OP_PutStrKey: {
+ Mem *pNos = &pTos[-1];
+ int i = pOp->p1;
+ Cursor *pC;
+ assert( pNos>=p->aStack );
+ assert( i>=0 && i<p->nCursor );
+ if( ((pC = &p->aCsr[i])->pCursor!=0 || pC->pseudoTable) ){
+ char *zKey;
+ int nKey, iKey;
+ if( pOp->opcode==OP_PutStrKey ){
+ Stringify(pNos);
+ nKey = pNos->n;
+ zKey = pNos->z;
+ }else{
+ assert( pNos->flags & MEM_Int );
+ nKey = sizeof(int);
+ iKey = intToKey(pNos->i);
+ zKey = (char*)&iKey;
+ if( pOp->p2 & OPFLAG_NCHANGE ) db->nChange++;
+ if( pOp->p2 & OPFLAG_LASTROWID ) db->lastRowid = pNos->i;
+ if( pOp->p2 & OPFLAG_CSCHANGE ) db->csChange++;
+      if( pC->nextRowidValid && pNos->i>=pC->nextRowid ){
+ pC->nextRowidValid = 0;
+ }
+ }
+ if( pTos->flags & MEM_Null ){
+ pTos->z = 0;
+ pTos->n = 0;
+ }else{
+ assert( pTos->flags & MEM_Str );
+ }
+ if( pC->pseudoTable ){
+ /* PutStrKey does not work for pseudo-tables.
+ ** The following assert makes sure we are not trying to use
+ ** PutStrKey on a pseudo-table
+ */
+ assert( pOp->opcode==OP_PutIntKey );
+ sqliteFree(pC->pData);
+ pC->iKey = iKey;
+ pC->nData = pTos->n;
+ if( pTos->flags & MEM_Dyn ){
+ pC->pData = pTos->z;
+ pTos->flags = MEM_Null;
+ }else{
+ pC->pData = sqliteMallocRaw( pC->nData );
+ if( pC->pData ){
+ memcpy(pC->pData, pTos->z, pC->nData);
+ }
+ }
+ pC->nullRow = 0;
+ }else{
+ rc = sqliteBtreeInsert(pC->pCursor, zKey, nKey, pTos->z, pTos->n);
+ }
+ pC->recnoIsValid = 0;
+ pC->deferredMoveto = 0;
+ }
+ popStack(&pTos, 2);
+ break;
+}
+
+/* Opcode: Delete P1 P2 *
+**
+** Delete the record at which the P1 cursor is currently pointing.
+**
+** The cursor will be left pointing at either the next or the previous
+** record in the table. If it is left pointing at the next record, then
+** the next Next instruction will be a no-op. Hence it is OK to delete
+** a record from within a Next loop.
+**
+** If the OPFLAG_NCHANGE flag of P2 is set, then the row change count is
+** incremented (otherwise not). If OPFLAG_CSCHANGE flag is set,
+** then the current statement change count is incremented (otherwise not).
+**
+** If P1 is a pseudo-table, then this instruction is a no-op.
+*/
+case OP_Delete: {
+ int i = pOp->p1;
+ Cursor *pC;
+ assert( i>=0 && i<p->nCursor );
+ pC = &p->aCsr[i];
+ if( pC->pCursor!=0 ){
+ sqliteVdbeCursorMoveto(pC);
+ rc = sqliteBtreeDelete(pC->pCursor);
+ pC->nextRowidValid = 0;
+ }
+ if( pOp->p2 & OPFLAG_NCHANGE ) db->nChange++;
+ if( pOp->p2 & OPFLAG_CSCHANGE ) db->csChange++;
+ break;
+}
+
+/* Opcode: SetCounts * * *
+**
+** Called at the end of a statement. Updates lsChange (last statement change count)
+** and resets csChange (current statement change count) to 0.
+*/
+case OP_SetCounts: {
+ db->lsChange=db->csChange;
+ db->csChange=0;
+ break;
+}
+
+/* Opcode: KeyAsData P1 P2 *
+**
+** Turn the key-as-data mode for cursor P1 either on (if P2==1) or
+** off (if P2==0). In key-as-data mode, the OP_Column opcode pulls
+** data off of the key rather than the data. This is used for
+** processing compound selects.
+*/
+case OP_KeyAsData: {
+ int i = pOp->p1;
+ assert( i>=0 && i<p->nCursor );
+ p->aCsr[i].keyAsData = pOp->p2;
+ break;
+}
+
+/* Opcode: RowData P1 * *
+**
+** Push onto the stack the complete row data for cursor P1.
+** There is no interpretation of the data. It is just copied
+** onto the stack exactly as it is found in the database file.
+**
+** If the cursor is not pointing to a valid row, a NULL is pushed
+** onto the stack.
+*/
+/* Opcode: RowKey P1 * *
+**
+** Push onto the stack the complete row key for cursor P1.
+** There is no interpretation of the key. It is just copied
+** onto the stack exactly as it is found in the database file.
+**
+** If the cursor is not pointing to a valid row, a NULL is pushed
+** onto the stack.
+*/
+case OP_RowKey:
+case OP_RowData: {
+ int i = pOp->p1;
+ Cursor *pC;
+ int n;
+
+ pTos++;
+ assert( i>=0 && i<p->nCursor );
+ pC = &p->aCsr[i];
+ if( pC->nullRow ){
+ pTos->flags = MEM_Null;
+ }else if( pC->pCursor!=0 ){
+ BtCursor *pCrsr = pC->pCursor;
+ sqliteVdbeCursorMoveto(pC);
+ if( pC->nullRow ){
+ pTos->flags = MEM_Null;
+ break;
+ }else if( pC->keyAsData || pOp->opcode==OP_RowKey ){
+ sqliteBtreeKeySize(pCrsr, &n);
+ }else{
+ sqliteBtreeDataSize(pCrsr, &n);
+ }
+ pTos->n = n;
+ if( n<=NBFS ){
+ pTos->flags = MEM_Str | MEM_Short;
+ pTos->z = pTos->zShort;
+ }else{
+ char *z = sqliteMallocRaw( n );
+ if( z==0 ) goto no_mem;
+ pTos->flags = MEM_Str | MEM_Dyn;
+ pTos->z = z;
+ }
+ if( pC->keyAsData || pOp->opcode==OP_RowKey ){
+ sqliteBtreeKey(pCrsr, 0, n, pTos->z);
+ }else{
+ sqliteBtreeData(pCrsr, 0, n, pTos->z);
+ }
+ }else if( pC->pseudoTable ){
+ pTos->n = pC->nData;
+ pTos->z = pC->pData;
+ pTos->flags = MEM_Str|MEM_Ephem;
+ }else{
+ pTos->flags = MEM_Null;
+ }
+ break;
+}
+
+/* Opcode: Column P1 P2 *
+**
+** Interpret the data that cursor P1 points to as
+** a structure built using the MakeRecord instruction.
+** (See the MakeRecord opcode for additional information about
+** the format of the data.)
+** Push onto the stack the value of the P2-th column contained
+** in the data.
+**
+** If the KeyAsData opcode has previously executed on this cursor,
+** then the field might be extracted from the key rather than the
+** data.
+**
+** If P1 is negative, then the record is stored on the stack rather
+** than in a table. For P1==-1, the top of the stack is used.
+** For P1==-2, the next on the stack is used. And so forth. The
+** value pushed is always just a pointer into the record which is
+** stored further down on the stack. The column value is not copied.
+*/
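+/* A sketch of the record layout decoded below (derived from the decoding
+** itself, not a normative description): the record starts with a header of
+** idxWidth-byte little-endian offsets, where entry i gives the byte offset
+** at which the i-th column's data begins and entry i+1 gives where it ends.
+** idxWidth is 1, 2 or 3 depending on the total payload size.
+*/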
+case OP_Column: {
+ int amt, offset, end, payloadSize;
+ int i = pOp->p1;
+ int p2 = pOp->p2;
+ Cursor *pC;
+ char *zRec;
+ BtCursor *pCrsr;
+ int idxWidth;
+ unsigned char aHdr[10];
+
+ assert( i<p->nCursor );
+ pTos++;
+ if( i<0 ){
+ assert( &pTos[i]>=p->aStack );
+ assert( pTos[i].flags & MEM_Str );
+ zRec = pTos[i].z;
+ payloadSize = pTos[i].n;
+ }else if( (pC = &p->aCsr[i])->pCursor!=0 ){
+ sqliteVdbeCursorMoveto(pC);
+ zRec = 0;
+ pCrsr = pC->pCursor;
+ if( pC->nullRow ){
+ payloadSize = 0;
+ }else if( pC->keyAsData ){
+ sqliteBtreeKeySize(pCrsr, &payloadSize);
+ }else{
+ sqliteBtreeDataSize(pCrsr, &payloadSize);
+ }
+ }else if( pC->pseudoTable ){
+ payloadSize = pC->nData;
+ zRec = pC->pData;
+ assert( payloadSize==0 || zRec!=0 );
+ }else{
+ payloadSize = 0;
+ }
+
+ /* Figure out how many bytes in the column data and where the column
+ ** data begins.
+ */
+ if( payloadSize==0 ){
+ pTos->flags = MEM_Null;
+ break;
+ }else if( payloadSize<256 ){
+ idxWidth = 1;
+ }else if( payloadSize<65536 ){
+ idxWidth = 2;
+ }else{
+ idxWidth = 3;
+ }
+
+ /* Figure out where the requested column is stored and how big it is.
+ */
+ if( payloadSize < idxWidth*(p2+1) ){
+ rc = SQLITE_CORRUPT;
+ goto abort_due_to_error;
+ }
+ if( zRec ){
+ memcpy(aHdr, &zRec[idxWidth*p2], idxWidth*2);
+ }else if( pC->keyAsData ){
+ sqliteBtreeKey(pCrsr, idxWidth*p2, idxWidth*2, (char*)aHdr);
+ }else{
+ sqliteBtreeData(pCrsr, idxWidth*p2, idxWidth*2, (char*)aHdr);
+ }
+ offset = aHdr[0];
+ end = aHdr[idxWidth];
+ if( idxWidth>1 ){
+ offset |= aHdr[1]<<8;
+ end |= aHdr[idxWidth+1]<<8;
+ if( idxWidth>2 ){
+ offset |= aHdr[2]<<16;
+ end |= aHdr[idxWidth+2]<<16;
+ }
+ }
+ amt = end - offset;
+ if( amt<0 || offset<0 || end>payloadSize ){
+ rc = SQLITE_CORRUPT;
+ goto abort_due_to_error;
+ }
+
+ /* amt and offset now hold the offset to the start of data and the
+ ** amount of data. Go get the data and put it on the stack.
+ */
+ pTos->n = amt;
+ if( amt==0 ){
+ pTos->flags = MEM_Null;
+ }else if( zRec ){
+ pTos->flags = MEM_Str | MEM_Ephem;
+ pTos->z = &zRec[offset];
+ }else{
+ if( amt<=NBFS ){
+ pTos->flags = MEM_Str | MEM_Short;
+ pTos->z = pTos->zShort;
+ }else{
+ char *z = sqliteMallocRaw( amt );
+ if( z==0 ) goto no_mem;
+ pTos->flags = MEM_Str | MEM_Dyn;
+ pTos->z = z;
+ }
+ if( pC->keyAsData ){
+ sqliteBtreeKey(pCrsr, offset, amt, pTos->z);
+ }else{
+ sqliteBtreeData(pCrsr, offset, amt, pTos->z);
+ }
+ }
+ break;
+}
+
+/* Opcode: Recno P1 * *
+**
+** Push onto the stack an integer which is the first 4 bytes of
+** the key to the current entry in a sequential scan of the database
+** file P1. The sequential scan should have been started using the
+** Next opcode.
+*/
+case OP_Recno: {
+ int i = pOp->p1;
+ Cursor *pC;
+ int v;
+
+ assert( i>=0 && i<p->nCursor );
+ pC = &p->aCsr[i];
+ sqliteVdbeCursorMoveto(pC);
+ pTos++;
+ if( pC->recnoIsValid ){
+ v = pC->lastRecno;
+ }else if( pC->pseudoTable ){
+ v = keyToInt(pC->iKey);
+ }else if( pC->nullRow || pC->pCursor==0 ){
+ pTos->flags = MEM_Null;
+ break;
+ }else{
+ assert( pC->pCursor!=0 );
+ sqliteBtreeKey(pC->pCursor, 0, sizeof(u32), (char*)&v);
+ v = keyToInt(v);
+ }
+ pTos->i = v;
+ pTos->flags = MEM_Int;
+ break;
+}
+
+/* Opcode: FullKey P1 * *
+**
+** Extract the complete key from the record that cursor P1 is currently
+** pointing to and push the key onto the stack as a string.
+**
+** Compare this opcode to Recno. The Recno opcode extracts the first
+** 4 bytes of the key and pushes those bytes onto the stack as an
+** integer. This instruction pushes the entire key as a string.
+**
+** This opcode may not be used on a pseudo-table.
+*/
+case OP_FullKey: {
+ int i = pOp->p1;
+ BtCursor *pCrsr;
+
+  assert( i>=0 && i<p->nCursor );
+  assert( p->aCsr[i].keyAsData );
+  assert( !p->aCsr[i].pseudoTable );
+ pTos++;
+ if( (pCrsr = p->aCsr[i].pCursor)!=0 ){
+ int amt;
+ char *z;
+
+ sqliteVdbeCursorMoveto(&p->aCsr[i]);
+ sqliteBtreeKeySize(pCrsr, &amt);
+ if( amt<=0 ){
+ rc = SQLITE_CORRUPT;
+ goto abort_due_to_error;
+ }
+ if( amt>NBFS ){
+ z = sqliteMallocRaw( amt );
+ if( z==0 ) goto no_mem;
+ pTos->flags = MEM_Str | MEM_Dyn;
+ }else{
+ z = pTos->zShort;
+ pTos->flags = MEM_Str | MEM_Short;
+ }
+ sqliteBtreeKey(pCrsr, 0, amt, z);
+ pTos->z = z;
+ pTos->n = amt;
+ }
+ break;
+}
+
+/* Opcode: NullRow P1 * *
+**
+** Move the cursor P1 to a null row. Any OP_Column operations
+** that occur while the cursor is on the null row will always push
+** a NULL onto the stack.
+*/
+case OP_NullRow: {
+ int i = pOp->p1;
+
+ assert( i>=0 && i<p->nCursor );
+ p->aCsr[i].nullRow = 1;
+ p->aCsr[i].recnoIsValid = 0;
+ break;
+}
+
+/* Opcode: Last P1 P2 *
+**
+** The next use of the Recno or Column or Next instruction for P1
+** will refer to the last entry in the database table or index.
+** If the table or index is empty and P2>0, then jump immediately to P2.
+** If P2 is 0 or if the table or index is not empty, fall through
+** to the following instruction.
+*/
+case OP_Last: {
+ int i = pOp->p1;
+ Cursor *pC;
+ BtCursor *pCrsr;
+
+ assert( i>=0 && i<p->nCursor );
+ pC = &p->aCsr[i];
+ if( (pCrsr = pC->pCursor)!=0 ){
+ int res;
+ rc = sqliteBtreeLast(pCrsr, &res);
+ pC->nullRow = res;
+ pC->deferredMoveto = 0;
+ if( res && pOp->p2>0 ){
+ pc = pOp->p2 - 1;
+ }
+ }else{
+ pC->nullRow = 0;
+ }
+ break;
+}
+
+/* Opcode: Rewind P1 P2 *
+**
+** The next use of the Recno or Column or Next instruction for P1
+** will refer to the first entry in the database table or index.
+** If the table or index is empty and P2>0, then jump immediately to P2.
+** If P2 is 0 or if the table or index is not empty, fall through
+** to the following instruction.
+*/
+case OP_Rewind: {
+ int i = pOp->p1;
+ Cursor *pC;
+ BtCursor *pCrsr;
+
+ assert( i>=0 && i<p->nCursor );
+ pC = &p->aCsr[i];
+ if( (pCrsr = pC->pCursor)!=0 ){
+ int res;
+ rc = sqliteBtreeFirst(pCrsr, &res);
+ pC->atFirst = res==0;
+ pC->nullRow = res;
+ pC->deferredMoveto = 0;
+ if( res && pOp->p2>0 ){
+ pc = pOp->p2 - 1;
+ }
+ }else{
+ pC->nullRow = 0;
+ }
+ break;
+}
+
+/* Opcode: Next P1 P2 *
+**
+** Advance cursor P1 so that it points to the next key/data pair in its
+** table or index. If there are no more key/value pairs then fall through
+** to the following instruction. But if the cursor advance was successful,
+** jump immediately to P2.
+**
+** See also: Prev
+*/
+/* Opcode: Prev P1 P2 *
+**
+** Back up cursor P1 so that it points to the previous key/data pair in its
+** table or index. If there are no previous key/value pairs then fall through
+** to the following instruction. But if the cursor backup was successful,
+** jump immediately to P2.
+*/
+case OP_Prev:
+case OP_Next: {
+ Cursor *pC;
+ BtCursor *pCrsr;
+
+ CHECK_FOR_INTERRUPT;
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ pC = &p->aCsr[pOp->p1];
+ if( (pCrsr = pC->pCursor)!=0 ){
+ int res;
+ if( pC->nullRow ){
+ res = 1;
+ }else{
+ assert( pC->deferredMoveto==0 );
+ rc = pOp->opcode==OP_Next ? sqliteBtreeNext(pCrsr, &res) :
+ sqliteBtreePrevious(pCrsr, &res);
+ pC->nullRow = res;
+ }
+ if( res==0 ){
+ pc = pOp->p2 - 1;
+ sqlite_search_count++;
+ }
+ }else{
+ pC->nullRow = 1;
+ }
+ pC->recnoIsValid = 0;
+ break;
+}
+
+/* Opcode: IdxPut P1 P2 P3
+**
+** The top of the stack holds a SQL index key made using the
+** MakeIdxKey instruction. This opcode writes that key into the
+** index P1. Data for the entry is nil.
+**
+** If P2==1, then the key must be unique. If the key is not unique,
+** the program aborts with a SQLITE_CONSTRAINT error and the database
+** is rolled back. If P3 is not null, then it becomes part of the
+** error message returned with the SQLITE_CONSTRAINT.
+*/
+case OP_IdxPut: {
+ int i = pOp->p1;
+ BtCursor *pCrsr;
+ assert( pTos>=p->aStack );
+ assert( i>=0 && i<p->nCursor );
+ assert( pTos->flags & MEM_Str );
+ if( (pCrsr = p->aCsr[i].pCursor)!=0 ){
+ int nKey = pTos->n;
+ const char *zKey = pTos->z;
+ if( pOp->p2 ){
+ int res, n;
+ assert( nKey >= 4 );
+ rc = sqliteBtreeMoveto(pCrsr, zKey, nKey-4, &res);
+ if( rc!=SQLITE_OK ) goto abort_due_to_error;
+ while( res!=0 ){
+ int c;
+ sqliteBtreeKeySize(pCrsr, &n);
+ if( n==nKey
+ && sqliteBtreeKeyCompare(pCrsr, zKey, nKey-4, 4, &c)==SQLITE_OK
+ && c==0
+ ){
+ rc = SQLITE_CONSTRAINT;
+ if( pOp->p3 && pOp->p3[0] ){
+ sqliteSetString(&p->zErrMsg, pOp->p3, (char*)0);
+ }
+ goto abort_due_to_error;
+ }
+ if( res<0 ){
+ sqliteBtreeNext(pCrsr, &res);
+ res = +1;
+ }else{
+ break;
+ }
+ }
+ }
+ rc = sqliteBtreeInsert(pCrsr, zKey, nKey, "", 0);
+ assert( p->aCsr[i].deferredMoveto==0 );
+ }
+ Release(pTos);
+ pTos--;
+ break;
+}
+
+/* Opcode: IdxDelete P1 * *
+**
+** The top of the stack is an index key built using the MakeIdxKey opcode.
+** This opcode removes that entry from the index.
+*/
+case OP_IdxDelete: {
+ int i = pOp->p1;
+ BtCursor *pCrsr;
+ assert( pTos>=p->aStack );
+ assert( pTos->flags & MEM_Str );
+ assert( i>=0 && i<p->nCursor );
+ if( (pCrsr = p->aCsr[i].pCursor)!=0 ){
+ int rx, res;
+ rx = sqliteBtreeMoveto(pCrsr, pTos->z, pTos->n, &res);
+ if( rx==SQLITE_OK && res==0 ){
+ rc = sqliteBtreeDelete(pCrsr);
+ }
+ assert( p->aCsr[i].deferredMoveto==0 );
+ }
+ Release(pTos);
+ pTos--;
+ break;
+}
+
+/* Opcode: IdxRecno P1 * *
+**
+** Push onto the stack an integer which is the last 4 bytes of
+** the key to the current entry in index P1. These 4 bytes should
+** be the record number of the table entry to which this index entry
+** points.
+**
+** See also: Recno, MakeIdxKey.
+*/
+case OP_IdxRecno: {
+ int i = pOp->p1;
+ BtCursor *pCrsr;
+
+ assert( i>=0 && i<p->nCursor );
+ pTos++;
+ if( (pCrsr = p->aCsr[i].pCursor)!=0 ){
+ int v;
+ int sz;
+ assert( p->aCsr[i].deferredMoveto==0 );
+ sqliteBtreeKeySize(pCrsr, &sz);
+ if( sz<sizeof(u32) ){
+ pTos->flags = MEM_Null;
+ }else{
+ sqliteBtreeKey(pCrsr, sz - sizeof(u32), sizeof(u32), (char*)&v);
+ v = keyToInt(v);
+ pTos->i = v;
+ pTos->flags = MEM_Int;
+ }
+ }else{
+ pTos->flags = MEM_Null;
+ }
+ break;
+}
+
+/* Opcode: IdxGT P1 P2 *
+**
+** Compare the top of the stack against the key on the index entry that
+** cursor P1 is currently pointing to. Ignore the last 4 bytes of the
+** index entry. If the index entry is greater than the top of the stack
+** then jump to P2. Otherwise fall through to the next instruction.
+** In either case, the stack is popped once.
+*/
+/* Opcode: IdxGE P1 P2 *
+**
+** Compare the top of the stack against the key on the index entry that
+** cursor P1 is currently pointing to. Ignore the last 4 bytes of the
+** index entry. If the index entry is greater than or equal to
+** the top of the stack
+** then jump to P2. Otherwise fall through to the next instruction.
+** In either case, the stack is popped once.
+*/
+/* Opcode: IdxLT P1 P2 *
+**
+** Compare the top of the stack against the key on the index entry that
+** cursor P1 is currently pointing to. Ignore the last 4 bytes of the
+** index entry. If the index entry is less than the top of the stack
+** then jump to P2. Otherwise fall through to the next instruction.
+** In either case, the stack is popped once.
+*/
+case OP_IdxLT:
+case OP_IdxGT:
+case OP_IdxGE: {
+ int i= pOp->p1;
+ BtCursor *pCrsr;
+
+ assert( i>=0 && i<p->nCursor );
+ assert( pTos>=p->aStack );
+ if( (pCrsr = p->aCsr[i].pCursor)!=0 ){
+ int res, rc;
+
+ Stringify(pTos);
+ assert( p->aCsr[i].deferredMoveto==0 );
+ rc = sqliteBtreeKeyCompare(pCrsr, pTos->z, pTos->n, 4, &res);
+ if( rc!=SQLITE_OK ){
+ break;
+ }
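+    /* res reflects how the index entry's key (ignoring its last 4 bytes)
+    ** compares to the key on the stack: negative, zero or positive for
+    ** less than, equal or greater.  Negating it for IdxLT and incrementing
+    ** it for IdxGE lets the single res>0 test below implement all three
+    ** comparisons. */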
+ if( pOp->opcode==OP_IdxLT ){
+ res = -res;
+ }else if( pOp->opcode==OP_IdxGE ){
+ res++;
+ }
+ if( res>0 ){
+ pc = pOp->p2 - 1 ;
+ }
+ }
+ Release(pTos);
+ pTos--;
+ break;
+}
+
+/* Opcode: IdxIsNull P1 P2 *
+**
+** The top of the stack contains an index entry such as might be generated
+** by the MakeIdxKey opcode. This routine looks at the first P1 fields of
+** that key. If any of the first P1 fields are NULL, then a jump is made
+** to address P2. Otherwise we fall straight through.
+**
+** The index entry is always popped from the stack.
+*/
+case OP_IdxIsNull: {
+ int i = pOp->p1;
+ int k, n;
+ const char *z;
+
+ assert( pTos>=p->aStack );
+ assert( pTos->flags & MEM_Str );
+ z = pTos->z;
+ n = pTos->n;
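+  /* The loop below relies on two properties of keys built by MakeIdxKey:
+  ** fields are separated by \000 bytes and a NULL field is encoded with a
+  ** leading 'a' character. */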
+ for(k=0; k<n && i>0; i--){
+ if( z[k]=='a' ){
+ pc = pOp->p2-1;
+ break;
+ }
+ while( k<n && z[k] ){ k++; }
+ k++;
+ }
+ Release(pTos);
+ pTos--;
+ break;
+}
+
+/* Opcode: Destroy P1 P2 *
+**
+** Delete an entire database table or index whose root page in the database
+** file is given by P1.
+**
+** The table being destroyed is in the main database file if P2==0. If
+** P2==1 then the table being destroyed is in the auxiliary database file
+** that is used to store tables created using CREATE TEMPORARY TABLE.
+**
+** See also: Clear
+*/
+case OP_Destroy: {
+ rc = sqliteBtreeDropTable(db->aDb[pOp->p2].pBt, pOp->p1);
+ break;
+}
+
+/* Opcode: Clear P1 P2 *
+**
+** Delete all contents of the database table or index whose root page
+** in the database file is given by P1. But, unlike Destroy, do not
+** remove the table or index from the database file.
+**
+** The table being cleared is in the main database file if P2==0. If
+** P2==1 then the table being cleared is in the auxiliary database file
+** that is used to store tables created using CREATE TEMPORARY TABLE.
+**
+** See also: Destroy
+*/
+case OP_Clear: {
+ rc = sqliteBtreeClearTable(db->aDb[pOp->p2].pBt, pOp->p1);
+ break;
+}
+
+/* Opcode: CreateTable * P2 P3
+**
+** Allocate a new table in the main database file if P2==0 or in the
+** auxiliary database file if P2==1. Push the page number
+** for the root page of the new table onto the stack.
+**
+** The root page number is also written to a memory location that P3
+** points to. This is the mechanism used to write the root page
+** number into the parser's internal data structures that describe the
+** new table.
+**
+** The difference between a table and an index is this: A table must
+** have a 4-byte integer key and can have arbitrary data. An index
+** has an arbitrary key but no data.
+**
+** See also: CreateIndex
+*/
+/* Opcode: CreateIndex * P2 P3
+**
+** Allocate a new index in the main database file if P2==0 or in the
+** auxiliary database file if P2==1. Push the page number of the
+** root page of the new index onto the stack.
+**
+** See documentation on OP_CreateTable for additional information.
+*/
+case OP_CreateIndex:
+case OP_CreateTable: {
+ int pgno;
+ assert( pOp->p3!=0 && pOp->p3type==P3_POINTER );
+ assert( pOp->p2>=0 && pOp->p2<db->nDb );
+ assert( db->aDb[pOp->p2].pBt!=0 );
+ if( pOp->opcode==OP_CreateTable ){
+ rc = sqliteBtreeCreateTable(db->aDb[pOp->p2].pBt, &pgno);
+ }else{
+ rc = sqliteBtreeCreateIndex(db->aDb[pOp->p2].pBt, &pgno);
+ }
+ pTos++;
+ if( rc==SQLITE_OK ){
+ pTos->i = pgno;
+ pTos->flags = MEM_Int;
+ *(u32*)pOp->p3 = pgno;
+ pOp->p3 = 0;
+ }else{
+ pTos->flags = MEM_Null;
+ }
+ break;
+}
+
+/* Opcode: IntegrityCk P1 P2 *
+**
+** Do an analysis of the currently open database. Push onto the
+** stack the text of an error message describing any problems.
+** If there are no errors, push the string "ok" onto the stack.
+**
+** P1 is the index of a set that contains the root page numbers
+** for all tables and indices in the main database file. The set
+** is cleared by this opcode. In other words, after this opcode
+** has executed, the set will be empty.
+**
+** If P2 is not zero, the check is done on the auxiliary database
+** file, not the main database file.
+**
+** This opcode is used for testing purposes only.
+*/
+case OP_IntegrityCk: {
+ int nRoot;
+ int *aRoot;
+ int iSet = pOp->p1;
+ Set *pSet;
+ int j;
+ HashElem *i;
+ char *z;
+
+ assert( iSet>=0 && iSet<p->nSet );
+ pTos++;
+ pSet = &p->aSet[iSet];
+ nRoot = sqliteHashCount(&pSet->hash);
+ aRoot = sqliteMallocRaw( sizeof(int)*(nRoot+1) );
+ if( aRoot==0 ) goto no_mem;
+ for(j=0, i=sqliteHashFirst(&pSet->hash); i; i=sqliteHashNext(i), j++){
+ toInt((char*)sqliteHashKey(i), &aRoot[j]);
+ }
+ aRoot[j] = 0;
+ sqliteHashClear(&pSet->hash);
+ pSet->prev = 0;
+ z = sqliteBtreeIntegrityCheck(db->aDb[pOp->p2].pBt, aRoot, nRoot);
+ if( z==0 || z[0]==0 ){
+ if( z ) sqliteFree(z);
+ pTos->z = "ok";
+ pTos->n = 3;
+ pTos->flags = MEM_Str | MEM_Static;
+ }else{
+ pTos->z = z;
+ pTos->n = strlen(z) + 1;
+ pTos->flags = MEM_Str | MEM_Dyn;
+ }
+ sqliteFree(aRoot);
+ break;
+}
+
+/* Opcode: ListWrite * * *
+**
+** Write the integer on the top of the stack
+** into the temporary storage list.
+*/
+case OP_ListWrite: {
+ Keylist *pKeylist;
+ assert( pTos>=p->aStack );
+ pKeylist = p->pList;
+ if( pKeylist==0 || pKeylist->nUsed>=pKeylist->nKey ){
+ pKeylist = sqliteMallocRaw( sizeof(Keylist)+999*sizeof(pKeylist->aKey[0]) );
+ if( pKeylist==0 ) goto no_mem;
+ pKeylist->nKey = 1000;
+ pKeylist->nRead = 0;
+ pKeylist->nUsed = 0;
+ pKeylist->pNext = p->pList;
+ p->pList = pKeylist;
+ }
+ Integerify(pTos);
+ pKeylist->aKey[pKeylist->nUsed++] = pTos->i;
+ Release(pTos);
+ pTos--;
+ break;
+}
+
+/* Opcode: ListRewind * * *
+**
+** Rewind the temporary buffer back to the beginning.
+*/
+case OP_ListRewind: {
+  /* What this opcode really does is reverse the order of the
+  ** linked list of Keylist structures so that they are read out
+  ** in the same order that they were written. */
+ Keylist *pRev, *pTop;
+ pRev = 0;
+ while( p->pList ){
+ pTop = p->pList;
+ p->pList = pTop->pNext;
+ pTop->pNext = pRev;
+ pRev = pTop;
+ }
+ p->pList = pRev;
+ break;
+}
+
+/* Opcode: ListRead * P2 *
+**
+** Attempt to read an integer from the temporary storage buffer
+** and push it onto the stack. If the storage buffer is empty,
+** push nothing but instead jump to P2.
+*/
+case OP_ListRead: {
+ Keylist *pKeylist;
+ CHECK_FOR_INTERRUPT;
+ pKeylist = p->pList;
+ if( pKeylist!=0 ){
+ assert( pKeylist->nRead>=0 );
+ assert( pKeylist->nRead<pKeylist->nUsed );
+ assert( pKeylist->nRead<pKeylist->nKey );
+ pTos++;
+ pTos->i = pKeylist->aKey[pKeylist->nRead++];
+ pTos->flags = MEM_Int;
+ if( pKeylist->nRead>=pKeylist->nUsed ){
+ p->pList = pKeylist->pNext;
+ sqliteFree(pKeylist);
+ }
+ }else{
+ pc = pOp->p2 - 1;
+ }
+ break;
+}
+
+/* Opcode: ListReset * * *
+**
+** Reset the temporary storage buffer so that it holds nothing.
+*/
+case OP_ListReset: {
+ if( p->pList ){
+ sqliteVdbeKeylistFree(p->pList);
+ p->pList = 0;
+ }
+ break;
+}
+
+/* Opcode: ListPush * * *
+**
+** Save the current Vdbe list such that it can be restored by a ListPop
+** opcode. The list is empty after this is executed.
+*/
+case OP_ListPush: {
+ p->keylistStackDepth++;
+ assert(p->keylistStackDepth > 0);
+ p->keylistStack = sqliteRealloc(p->keylistStack,
+ sizeof(Keylist *) * p->keylistStackDepth);
+ if( p->keylistStack==0 ) goto no_mem;
+ p->keylistStack[p->keylistStackDepth - 1] = p->pList;
+ p->pList = 0;
+ break;
+}
+
+/* Opcode: ListPop * * *
+**
+** Restore the Vdbe list to the state it was in when ListPush was last
+** executed.
+*/
+case OP_ListPop: {
+ assert(p->keylistStackDepth > 0);
+ p->keylistStackDepth--;
+ sqliteVdbeKeylistFree(p->pList);
+ p->pList = p->keylistStack[p->keylistStackDepth];
+ p->keylistStack[p->keylistStackDepth] = 0;
+ if( p->keylistStackDepth == 0 ){
+ sqliteFree(p->keylistStack);
+ p->keylistStack = 0;
+ }
+ break;
+}
+
+/* Opcode: ContextPush * * *
+**
+** Save the current Vdbe context such that it can be restored by a ContextPop
+** opcode. The context stores the last insert row id, the last statement change
+** count, and the current statement change count.
+*/
+case OP_ContextPush: {
+ p->contextStackDepth++;
+ assert(p->contextStackDepth > 0);
+ p->contextStack = sqliteRealloc(p->contextStack,
+ sizeof(Context) * p->contextStackDepth);
+ if( p->contextStack==0 ) goto no_mem;
+ p->contextStack[p->contextStackDepth - 1].lastRowid = p->db->lastRowid;
+ p->contextStack[p->contextStackDepth - 1].lsChange = p->db->lsChange;
+ p->contextStack[p->contextStackDepth - 1].csChange = p->db->csChange;
+ break;
+}
+
+/* Opcode: ContextPop * * *
+**
+** Restore the Vdbe context to the state it was in when contextPush was last
+** executed. The context stores the last insert row id, the last statement
+** change count, and the current statement change count.
+*/
+case OP_ContextPop: {
+ assert(p->contextStackDepth > 0);
+ p->contextStackDepth--;
+ p->db->lastRowid = p->contextStack[p->contextStackDepth].lastRowid;
+ p->db->lsChange = p->contextStack[p->contextStackDepth].lsChange;
+ p->db->csChange = p->contextStack[p->contextStackDepth].csChange;
+ if( p->contextStackDepth == 0 ){
+ sqliteFree(p->contextStack);
+ p->contextStack = 0;
+ }
+ break;
+}
+
+/* Opcode: SortPut * * *
+**
+** The TOS is the key and the NOS is the data. Pop both from the stack
+** and put them on the sorter. The key and data should have been
+** made using SortMakeKey and SortMakeRec, respectively.
+*/
+case OP_SortPut: {
+ Mem *pNos = &pTos[-1];
+ Sorter *pSorter;
+ assert( pNos>=p->aStack );
+ if( Dynamicify(pTos) || Dynamicify(pNos) ) goto no_mem;
+ pSorter = sqliteMallocRaw( sizeof(Sorter) );
+ if( pSorter==0 ) goto no_mem;
+ pSorter->pNext = p->pSort;
+ p->pSort = pSorter;
+ assert( pTos->flags & MEM_Dyn );
+ pSorter->nKey = pTos->n;
+ pSorter->zKey = pTos->z;
+ assert( pNos->flags & MEM_Dyn );
+ pSorter->nData = pNos->n;
+ pSorter->pData = pNos->z;
+ pTos -= 2;
+ break;
+}
+
+/* Opcode: SortMakeRec P1 * *
+**
+** The top P1 elements are the arguments to a callback. Form these
+** elements into a single data entry that can be stored on a sorter
+** using SortPut and later fed to a callback using SortCallback.
+*/
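+/* The entry built below is laid out argv-style: an array of P1 char*
+** pointers (plus one spare slot) followed by the NUL-terminated argument
+** strings themselves.  A NULL pointer stands for a SQL NULL argument.
+*/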
+case OP_SortMakeRec: {
+ char *z;
+ char **azArg;
+ int nByte;
+ int nField;
+ int i;
+ Mem *pRec;
+
+ nField = pOp->p1;
+ pRec = &pTos[1-nField];
+ assert( pRec>=p->aStack );
+ nByte = 0;
+ for(i=0; i<nField; i++, pRec++){
+ if( (pRec->flags & MEM_Null)==0 ){
+ Stringify(pRec);
+ nByte += pRec->n;
+ }
+ }
+ nByte += sizeof(char*)*(nField+1);
+ azArg = sqliteMallocRaw( nByte );
+ if( azArg==0 ) goto no_mem;
+ z = (char*)&azArg[nField+1];
+ for(pRec=&pTos[1-nField], i=0; i<nField; i++, pRec++){
+ if( pRec->flags & MEM_Null ){
+ azArg[i] = 0;
+ }else{
+ azArg[i] = z;
+ memcpy(z, pRec->z, pRec->n);
+ z += pRec->n;
+ }
+ }
+ popStack(&pTos, nField);
+ pTos++;
+ pTos->n = nByte;
+ pTos->z = (char*)azArg;
+ pTos->flags = MEM_Str | MEM_Dyn;
+ break;
+}
+
+/* Opcode: SortMakeKey * * P3
+**
+** Convert the top few entries of the stack into a sort key. The
+** number of stack entries consumed is the number of characters in
+** the string P3. One character from P3 is prepended to each entry.
+** The first character of P3 is prepended to the element lowest in
+** the stack and the last character of P3 is prepended to the top of
+** the stack. All stack entries are separated by a \000 character
+** in the result. The whole key is terminated by two \000 characters
+** in a row.
+**
+** "N" is substituted in place of the P3 character for NULL values.
+**
+** See also the MakeKey and MakeIdxKey opcodes.
+*/
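+/* For example (illustrative only): with P3=="+-" and the two stack
+** entries "abc" (lower) and "xyz" (top), the key built below would be
+**
+**        +abc\000-xyz\000\000
+**
+** and a NULL in place of "xyz" would contribute "N\000" instead.
+*/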
+case OP_SortMakeKey: {
+ char *zNewKey;
+ int nByte;
+ int nField;
+ int i, j, k;
+ Mem *pRec;
+
+ nField = strlen(pOp->p3);
+ pRec = &pTos[1-nField];
+ nByte = 1;
+ for(i=0; i<nField; i++, pRec++){
+ if( pRec->flags & MEM_Null ){
+ nByte += 2;
+ }else{
+ Stringify(pRec);
+ nByte += pRec->n+2;
+ }
+ }
+ zNewKey = sqliteMallocRaw( nByte );
+ if( zNewKey==0 ) goto no_mem;
+ j = 0;
+ k = 0;
+ for(pRec=&pTos[1-nField], i=0; i<nField; i++, pRec++){
+ if( pRec->flags & MEM_Null ){
+ zNewKey[j++] = 'N';
+ zNewKey[j++] = 0;
+ k++;
+ }else{
+ zNewKey[j++] = pOp->p3[k++];
+ memcpy(&zNewKey[j], pRec->z, pRec->n-1);
+ j += pRec->n-1;
+ zNewKey[j++] = 0;
+ }
+ }
+ zNewKey[j] = 0;
+ assert( j<nByte );
+ popStack(&pTos, nField);
+ pTos++;
+ pTos->n = nByte;
+ pTos->flags = MEM_Str|MEM_Dyn;
+ pTos->z = zNewKey;
+ break;
+}
+
+/* Opcode: Sort * * *
+**
+** Sort all elements on the sorter. The algorithm is a
+** mergesort.
+*/
+case OP_Sort: {
+ int i;
+ Sorter *pElem;
+ Sorter *apSorter[NSORT];
+ for(i=0; i<NSORT; i++){
+ apSorter[i] = 0;
+ }
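+  /* apSorter[i] holds a sorted list of roughly 2**i elements.  Each new
+  ** element is inserted at slot 0 and merged upward on collision, which
+  ** amounts to a bottom-up mergesort over the linked list. */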
+ while( p->pSort ){
+ pElem = p->pSort;
+ p->pSort = pElem->pNext;
+ pElem->pNext = 0;
+ for(i=0; i<NSORT-1; i++){
+ if( apSorter[i]==0 ){
+ apSorter[i] = pElem;
+ break;
+ }else{
+ pElem = Merge(apSorter[i], pElem);
+ apSorter[i] = 0;
+ }
+ }
+ if( i>=NSORT-1 ){
+ apSorter[NSORT-1] = Merge(apSorter[NSORT-1],pElem);
+ }
+ }
+ pElem = 0;
+ for(i=0; i<NSORT; i++){
+ pElem = Merge(apSorter[i], pElem);
+ }
+ p->pSort = pElem;
+ break;
+}
+
+/* Opcode: SortNext * P2 *
+**
+** Push the data for the topmost element in the sorter onto the
+** stack, then remove the element from the sorter. If the sorter
+** is empty, push nothing on the stack and instead jump immediately
+** to instruction P2.
+*/
+case OP_SortNext: {
+ Sorter *pSorter = p->pSort;
+ CHECK_FOR_INTERRUPT;
+ if( pSorter!=0 ){
+ p->pSort = pSorter->pNext;
+ pTos++;
+ pTos->z = pSorter->pData;
+ pTos->n = pSorter->nData;
+ pTos->flags = MEM_Str|MEM_Dyn;
+ sqliteFree(pSorter->zKey);
+ sqliteFree(pSorter);
+ }else{
+ pc = pOp->p2 - 1;
+ }
+ break;
+}
+
+/* Opcode: SortCallback P1 * *
+**
+** The top of the stack contains a callback record built using
+** the SortMakeRec operation with the same P1 value as this
+** instruction. Pop this record from the stack and invoke the
+** callback on it.
+*/
+case OP_SortCallback: {
+ assert( pTos>=p->aStack );
+ assert( pTos->flags & MEM_Str );
+ p->nCallback++;
+ p->pc = pc+1;
+ p->azResColumn = (char**)pTos->z;
+ assert( p->nResColumn==pOp->p1 );
+ p->popStack = 1;
+ p->pTos = pTos;
+ return SQLITE_ROW;
+}
+
+/* Opcode: SortReset * * *
+**
+** Remove any elements that remain on the sorter.
+*/
+case OP_SortReset: {
+ sqliteVdbeSorterReset(p);
+ break;
+}
+
+/* Opcode: FileOpen * * P3
+**
+** Open the file named by P3 for reading using the FileRead opcode.
+** If P3 is "stdin" then open standard input for reading.
+*/
+case OP_FileOpen: {
+ assert( pOp->p3!=0 );
+ if( p->pFile ){
+ if( p->pFile!=stdin ) fclose(p->pFile);
+ p->pFile = 0;
+ }
+ if( sqliteStrICmp(pOp->p3,"stdin")==0 ){
+ p->pFile = stdin;
+ }else{
+ p->pFile = fopen(pOp->p3, "r");
+ }
+ if( p->pFile==0 ){
+ sqliteSetString(&p->zErrMsg,"unable to open file: ", pOp->p3, (char*)0);
+ rc = SQLITE_ERROR;
+ }
+ break;
+}
+
+/* Opcode: FileRead P1 P2 P3
+**
+** Read a single line of input from the open file (the file opened using
+** FileOpen). If we reach end-of-file, jump immediately to P2. If
+** we are able to get another line, split the line apart using P3 as
+** a delimiter. There should be P1 fields. If the input line contains
+** more than P1 fields, ignore the excess. If the input line contains
+** fewer than P1 fields, assume the remaining fields contain NULLs.
+**
+** Input ends if a line consists of just "\.". A field containing only
+** "\N" is a null field. The backslash \ character can be used
+** to escape newlines or the delimiter.
+*/
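+/* For example (illustrative only): with P1==3 and the default tab
+** delimiter, the input line
+**
+**        7\thello\t\N
+**
+** yields the fields "7", "hello" and NULL, while a line consisting of just
+** "\." terminates the input.
+*/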
+case OP_FileRead: {
+ int n, eol, nField, i, c, nDelim;
+ char *zDelim, *z;
+ CHECK_FOR_INTERRUPT;
+ if( p->pFile==0 ) goto fileread_jump;
+ nField = pOp->p1;
+ if( nField<=0 ) goto fileread_jump;
+ if( nField!=p->nField || p->azField==0 ){
+ char **azField = sqliteRealloc(p->azField, sizeof(char*)*nField+1);
+ if( azField==0 ){ goto no_mem; }
+ p->azField = azField;
+ p->nField = nField;
+ }
+ n = 0;
+ eol = 0;
+ while( eol==0 ){
+ if( p->zLine==0 || n+200>p->nLineAlloc ){
+ char *zLine;
+ p->nLineAlloc = p->nLineAlloc*2 + 300;
+ zLine = sqliteRealloc(p->zLine, p->nLineAlloc);
+ if( zLine==0 ){
+ p->nLineAlloc = 0;
+ sqliteFree(p->zLine);
+ p->zLine = 0;
+ goto no_mem;
+ }
+ p->zLine = zLine;
+ }
+ if( vdbe_fgets(&p->zLine[n], p->nLineAlloc-n, p->pFile)==0 ){
+ eol = 1;
+ p->zLine[n] = 0;
+ }else{
+ int c;
+ while( (c = p->zLine[n])!=0 ){
+ if( c=='\\' ){
+ if( p->zLine[n+1]==0 ) break;
+ n += 2;
+ }else if( c=='\n' ){
+ p->zLine[n] = 0;
+ eol = 1;
+ break;
+ }else{
+ n++;
+ }
+ }
+ }
+ }
+ if( n==0 ) goto fileread_jump;
+ z = p->zLine;
+ if( z[0]=='\\' && z[1]=='.' && z[2]==0 ){
+ goto fileread_jump;
+ }
+ zDelim = pOp->p3;
+ if( zDelim==0 ) zDelim = "\t";
+ c = zDelim[0];
+ nDelim = strlen(zDelim);
+ p->azField[0] = z;
+ for(i=1; *z!=0 && i<=nField; i++){
+ int from, to;
+ from = to = 0;
+ if( z[0]=='\\' && z[1]=='N'
+ && (z[2]==0 || strncmp(&z[2],zDelim,nDelim)==0) ){
+ if( i<=nField ) p->azField[i-1] = 0;
+ z += 2 + nDelim;
+ if( i<nField ) p->azField[i] = z;
+ continue;
+ }
+ while( z[from] ){
+ if( z[from]=='\\' && z[from+1]!=0 ){
+ int tx = z[from+1];
+ switch( tx ){
+ case 'b': tx = '\b'; break;
+ case 'f': tx = '\f'; break;
+ case 'n': tx = '\n'; break;
+ case 'r': tx = '\r'; break;
+ case 't': tx = '\t'; break;
+ case 'v': tx = '\v'; break;
+ default: break;
+ }
+ z[to++] = tx;
+ from += 2;
+ continue;
+ }
+ if( z[from]==c && strncmp(&z[from],zDelim,nDelim)==0 ) break;
+ z[to++] = z[from++];
+ }
+ if( z[from] ){
+ z[to] = 0;
+ z += from + nDelim;
+ if( i<nField ) p->azField[i] = z;
+ }else{
+ z[to] = 0;
+ z = "";
+ }
+ }
+ while( i<nField ){
+ p->azField[i++] = 0;
+ }
+ break;
+
+ /* If we reach end-of-file, or if anything goes wrong, jump here.
+ ** This code will cause a jump to P2 */
+fileread_jump:
+ pc = pOp->p2 - 1;
+ break;
+}
+
+/* Opcode: FileColumn P1 * *
+**
+** Push onto the stack the P1-th column of the most recently read line
+** from the input file.
+*/
+case OP_FileColumn: {
+ int i = pOp->p1;
+ char *z;
+ assert( i>=0 && i<p->nField );
+ if( p->azField ){
+ z = p->azField[i];
+ }else{
+ z = 0;
+ }
+ pTos++;
+ if( z ){
+ pTos->n = strlen(z) + 1;
+ pTos->z = z;
+ pTos->flags = MEM_Str | MEM_Ephem;
+ }else{
+ pTos->flags = MEM_Null;
+ }
+ break;
+}
+
+/* Opcode: MemStore P1 P2 *
+**
+** Write the top of the stack into memory location P1.
+** P1 should be a small integer since space is allocated
+** for all memory locations between 0 and P1 inclusive.
+**
+** After the data is stored in the memory location, the
+** stack is popped once if P2 is 1. If P2 is zero, then
+** the original data remains on the stack.
+*/
+case OP_MemStore: {
+ int i = pOp->p1;
+ Mem *pMem;
+ assert( pTos>=p->aStack );
+ if( i>=p->nMem ){
+ int nOld = p->nMem;
+ Mem *aMem;
+ p->nMem = i + 5;
+ aMem = sqliteRealloc(p->aMem, p->nMem*sizeof(p->aMem[0]));
+ if( aMem==0 ) goto no_mem;
+ if( aMem!=p->aMem ){
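+      /* The array moved.  Short strings keep their text in the Mem.zShort
+      ** buffer embedded in the struct, so their z pointers must be re-aimed
+      ** at the copies inside the new array. */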
+ int j;
+ for(j=0; j<nOld; j++){
+ if( aMem[j].flags & MEM_Short ){
+ aMem[j].z = aMem[j].zShort;
+ }
+ }
+ }
+ p->aMem = aMem;
+ if( nOld<p->nMem ){
+ memset(&p->aMem[nOld], 0, sizeof(p->aMem[0])*(p->nMem-nOld));
+ }
+ }
+ Deephemeralize(pTos);
+ pMem = &p->aMem[i];
+ Release(pMem);
+ *pMem = *pTos;
+ if( pMem->flags & MEM_Dyn ){
+ if( pOp->p2 ){
+ pTos->flags = MEM_Null;
+ }else{
+ pMem->z = sqliteMallocRaw( pMem->n );
+ if( pMem->z==0 ) goto no_mem;
+ memcpy(pMem->z, pTos->z, pMem->n);
+ }
+ }else if( pMem->flags & MEM_Short ){
+ pMem->z = pMem->zShort;
+ }
+ if( pOp->p2 ){
+ Release(pTos);
+ pTos--;
+ }
+ break;
+}
+
+/* Opcode: MemLoad P1 * *
+**
+** Push a copy of the value in memory location P1 onto the stack.
+**
+** If the value is a string, then the value pushed is a pointer to
+** the string that is stored in the memory location. If the memory
+** location is subsequently changed (using OP_MemStore) then the
+** value pushed onto the stack will change too.
+*/
+case OP_MemLoad: {
+ int i = pOp->p1;
+ assert( i>=0 && i<p->nMem );
+ pTos++;
+  memcpy(pTos, &p->aMem[i], sizeof(pTos[0])-NBFS);
+ if( pTos->flags & MEM_Str ){
+ pTos->flags |= MEM_Ephem;
+ pTos->flags &= ~(MEM_Dyn|MEM_Static|MEM_Short);
+ }
+ break;
+}
+
+/* Opcode: MemIncr P1 P2 *
+**
+** Increment the integer valued memory cell P1 by 1. If P2 is not zero
+** and the result after the increment is greater than zero, then jump
+** to P2.
+**
+** This instruction throws an error if the memory cell is not initially
+** an integer.
+*/
+case OP_MemIncr: {
+ int i = pOp->p1;
+ Mem *pMem;
+ assert( i>=0 && i<p->nMem );
+ pMem = &p->aMem[i];
+ assert( pMem->flags==MEM_Int );
+ pMem->i++;
+ if( pOp->p2>0 && pMem->i>0 ){
+ pc = pOp->p2 - 1;
+ }
+ break;
+}
+
+/* Opcode: AggReset * P2 *
+**
+** Reset the aggregator so that it no longer contains any data.
+** Future aggregator elements will contain P2 values each.
+*/
+case OP_AggReset: {
+ sqliteVdbeAggReset(&p->agg);
+ p->agg.nMem = pOp->p2;
+ p->agg.apFunc = sqliteMalloc( p->agg.nMem*sizeof(p->agg.apFunc[0]) );
+ if( p->agg.apFunc==0 ) goto no_mem;
+ break;
+}
+
+/* Opcode: AggInit * P2 P3
+**
+** Initialize the function parameters for an aggregate function.
+** The aggregate will operate out of aggregate column P2.
+** P3 is a pointer to the FuncDef structure for the function.
+*/
+case OP_AggInit: {
+ int i = pOp->p2;
+ assert( i>=0 && i<p->agg.nMem );
+ p->agg.apFunc[i] = (FuncDef*)pOp->p3;
+ break;
+}
+
+/* Opcode: AggFunc * P2 P3
+**
+** Execute the step function for an aggregate. The
+** function has P2 arguments. P3 is a pointer to the FuncDef
+** structure that specifies the function.
+**
+** The top of the stack must be an integer which is the index of
+** the aggregate column that corresponds to this aggregate function.
+** Ideally, this index would be another parameter, but there are
+** no free parameters left. The integer is popped from the stack.
+*/
+case OP_AggFunc: {
+ int n = pOp->p2;
+ int i;
+ Mem *pMem, *pRec;
+ char **azArgv = p->zArgv;
+ sqlite_func ctx;
+
+ assert( n>=0 );
+ assert( pTos->flags==MEM_Int );
+ pRec = &pTos[-n];
+ assert( pRec>=p->aStack );
+ for(i=0; i<n; i++, pRec++){
+ if( pRec->flags & MEM_Null ){
+ azArgv[i] = 0;
+ }else{
+ Stringify(pRec);
+ azArgv[i] = pRec->z;
+ }
+ }
+ i = pTos->i;
+ assert( i>=0 && i<p->agg.nMem );
+ ctx.pFunc = (FuncDef*)pOp->p3;
+ pMem = &p->agg.pCurrent->aMem[i];
+ ctx.s.z = pMem->zShort; /* Space used for small aggregate contexts */
+ ctx.pAgg = pMem->z;
+ ctx.cnt = ++pMem->i;
+ ctx.isError = 0;
+ ctx.isStep = 1;
+ (ctx.pFunc->xStep)(&ctx, n, (const char**)azArgv);
+ pMem->z = ctx.pAgg;
+ pMem->flags = MEM_AggCtx;
+ popStack(&pTos, n+1);
+ if( ctx.isError ){
+ rc = SQLITE_ERROR;
+ }
+ break;
+}
+
+/* Opcode: AggFocus * P2 *
+**
+** Pop the top of the stack and use that as an aggregator key. If
+** an aggregator with that same key already exists, then make the
+** aggregator the current aggregator and jump to P2. If no aggregator
+** with the given key exists, create one and make it current but
+** do not jump.
+**
+** The order of aggregator opcodes is important. The order is:
+** AggReset AggFocus AggNext. In other words, you must execute
+** AggReset first, then zero or more AggFocus operations, then
+** zero or more AggNext operations. You must not execute an AggFocus
+** in between an AggNext and an AggReset.
+*/
+case OP_AggFocus: {
+ AggElem *pElem;
+ char *zKey;
+ int nKey;
+
+ assert( pTos>=p->aStack );
+ Stringify(pTos);
+ zKey = pTos->z;
+ nKey = pTos->n;
+ pElem = sqliteHashFind(&p->agg.hash, zKey, nKey);
+ if( pElem ){
+ p->agg.pCurrent = pElem;
+ pc = pOp->p2 - 1;
+ }else{
+ AggInsert(&p->agg, zKey, nKey);
+ if( sqlite_malloc_failed ) goto no_mem;
+ }
+ Release(pTos);
+ pTos--;
+ break;
+}
+
+/* Opcode: AggSet * P2 *
+**
+** Move the top of the stack into the P2-th field of the current
+** aggregate. String values are duplicated into new memory.
+*/
+case OP_AggSet: {
+ AggElem *pFocus = AggInFocus(p->agg);
+ Mem *pMem;
+ int i = pOp->p2;
+ assert( pTos>=p->aStack );
+ if( pFocus==0 ) goto no_mem;
+ assert( i>=0 && i<p->agg.nMem );
+ Deephemeralize(pTos);
+ pMem = &pFocus->aMem[i];
+ Release(pMem);
+ *pMem = *pTos;
+ if( pMem->flags & MEM_Dyn ){
+ pTos->flags = MEM_Null;
+ }else if( pMem->flags & MEM_Short ){
+ pMem->z = pMem->zShort;
+ }
+ Release(pTos);
+ pTos--;
+ break;
+}
+
+/* Opcode: AggGet * P2 *
+**
+** Push a new entry onto the stack which is a copy of the P2-th field
+** of the current aggregate. Strings are not duplicated so
+** string values will be ephemeral.
+*/
+case OP_AggGet: {
+ AggElem *pFocus = AggInFocus(p->agg);
+ Mem *pMem;
+ int i = pOp->p2;
+ if( pFocus==0 ) goto no_mem;
+ assert( i>=0 && i<p->agg.nMem );
+ pTos++;
+ pMem = &pFocus->aMem[i];
+ *pTos = *pMem;
+ if( pTos->flags & MEM_Str ){
+ pTos->flags &= ~(MEM_Dyn|MEM_Static|MEM_Short);
+ pTos->flags |= MEM_Ephem;
+ }
+ break;
+}
+
+/* Opcode: AggNext * P2 *
+**
+** Make the next aggregate value the current aggregate. The prior
+** aggregate is deleted. If all aggregate values have been consumed,
+** jump to P2.
+**
+** The order of aggregator opcodes is important. The order is:
+** AggReset AggFocus AggNext. In other words, you must execute
+** AggReset first, then zero or more AggFocus operations, then
+** zero or more AggNext operations. You must not execute an AggFocus
+** in between an AggNext and an AggReset.
+*/
+case OP_AggNext: {
+ CHECK_FOR_INTERRUPT;
+ if( p->agg.pSearch==0 ){
+ p->agg.pSearch = sqliteHashFirst(&p->agg.hash);
+ }else{
+ p->agg.pSearch = sqliteHashNext(p->agg.pSearch);
+ }
+ if( p->agg.pSearch==0 ){
+ pc = pOp->p2 - 1;
+ } else {
+ int i;
+ sqlite_func ctx;
+ Mem *aMem;
+ p->agg.pCurrent = sqliteHashData(p->agg.pSearch);
+ aMem = p->agg.pCurrent->aMem;
+ for(i=0; i<p->agg.nMem; i++){
+ int freeCtx;
+ if( p->agg.apFunc[i]==0 ) continue;
+ if( p->agg.apFunc[i]->xFinalize==0 ) continue;
+ ctx.s.flags = MEM_Null;
+ ctx.s.z = aMem[i].zShort;
+ ctx.pAgg = (void*)aMem[i].z;
+ freeCtx = aMem[i].z && aMem[i].z!=aMem[i].zShort;
+ ctx.cnt = aMem[i].i;
+ ctx.isStep = 0;
+ ctx.pFunc = p->agg.apFunc[i];
+ (*p->agg.apFunc[i]->xFinalize)(&ctx);
+ if( freeCtx ){
+ sqliteFree( aMem[i].z );
+ }
+ aMem[i] = ctx.s;
+ if( aMem[i].flags & MEM_Short ){
+ aMem[i].z = aMem[i].zShort;
+ }
+ }
+ }
+ break;
+}
+
+/* Opcode: SetInsert P1 * P3
+**
+** If Set P1 does not exist then create it. Then insert value
+** P3 into that set. If P3 is NULL, then insert the top of the
+** stack into the set.
+*/
+case OP_SetInsert: {
+ int i = pOp->p1;
+ if( p->nSet<=i ){
+ int k;
+ Set *aSet = sqliteRealloc(p->aSet, (i+1)*sizeof(p->aSet[0]) );
+ if( aSet==0 ) goto no_mem;
+ p->aSet = aSet;
+ for(k=p->nSet; k<=i; k++){
+ sqliteHashInit(&p->aSet[k].hash, SQLITE_HASH_BINARY, 1);
+ }
+ p->nSet = i+1;
+ }
+ if( pOp->p3 ){
+ sqliteHashInsert(&p->aSet[i].hash, pOp->p3, strlen(pOp->p3)+1, p);
+ }else{
+ assert( pTos>=p->aStack );
+ Stringify(pTos);
+ sqliteHashInsert(&p->aSet[i].hash, pTos->z, pTos->n, p);
+ Release(pTos);
+ pTos--;
+ }
+ if( sqlite_malloc_failed ) goto no_mem;
+ break;
+}
+
+/* Opcode: SetFound P1 P2 *
+**
+** Pop the stack once and compare the value popped off with the
+** contents of set P1. If the element popped exists in set P1,
+** then jump to P2. Otherwise fall through.
+*/
+case OP_SetFound: {
+ int i = pOp->p1;
+ assert( pTos>=p->aStack );
+ Stringify(pTos);
+ if( i>=0 && i<p->nSet && sqliteHashFind(&p->aSet[i].hash, pTos->z, pTos->n)){
+ pc = pOp->p2 - 1;
+ }
+ Release(pTos);
+ pTos--;
+ break;
+}
+
+/* Opcode: SetNotFound P1 P2 *
+**
+** Pop the stack once and compare the value popped off with the
+** contents of set P1. If the element popped does not exist in
+** set P1, then jump to P2. Otherwise fall through.
+*/
+case OP_SetNotFound: {
+ int i = pOp->p1;
+ assert( pTos>=p->aStack );
+ Stringify(pTos);
+ if( i<0 || i>=p->nSet ||
+ sqliteHashFind(&p->aSet[i].hash, pTos->z, pTos->n)==0 ){
+ pc = pOp->p2 - 1;
+ }
+ Release(pTos);
+ pTos--;
+ break;
+}
+
+/* Opcode: SetFirst P1 P2 *
+**
+** Read the first element from set P1 and push it onto the stack. If the
+** set is empty, push nothing and jump immediately to P2. This opcode is
+** used in combination with OP_SetNext to loop over all elements of a set.
+*/
+/* Opcode: SetNext P1 P2 *
+**
+** Read the next element from set P1 and push it onto the stack. If there
+** are no more elements in the set, do not do the push and fall through.
+** Otherwise, jump to P2 after pushing the next set element.
+*/
+case OP_SetFirst:
+case OP_SetNext: {
+ Set *pSet;
+ CHECK_FOR_INTERRUPT;
+ if( pOp->p1<0 || pOp->p1>=p->nSet ){
+ if( pOp->opcode==OP_SetFirst ) pc = pOp->p2 - 1;
+ break;
+ }
+ pSet = &p->aSet[pOp->p1];
+ if( pOp->opcode==OP_SetFirst ){
+ pSet->prev = sqliteHashFirst(&pSet->hash);
+ if( pSet->prev==0 ){
+ pc = pOp->p2 - 1;
+ break;
+ }
+ }else{
+ if( pSet->prev ){
+ pSet->prev = sqliteHashNext(pSet->prev);
+ }
+ if( pSet->prev==0 ){
+ break;
+ }else{
+ pc = pOp->p2 - 1;
+ }
+ }
+ pTos++;
+ pTos->z = sqliteHashKey(pSet->prev);
+ pTos->n = sqliteHashKeysize(pSet->prev);
+ pTos->flags = MEM_Str | MEM_Ephem;
+ break;
+}
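+
+/* A hypothetical loop over the elements of a set (sketched here only to
+** illustrate the jump behavior of the two opcodes above) would be coded as:
+**
+**         SetFirst  P1  end      -- push first element, or jump to "end"
+**   top:  ...                    -- consume the element on the stack
+**         SetNext   P1  top      -- push next element and jump back to "top"
+**   end:  ...
+**
+** The labels "top" and "end" are illustrative names, not real opcodes.
+*/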
+
+/* Opcode: Vacuum * * *
+**
+** Vacuum the entire database. This opcode will cause other virtual
+** machines to be created and run. It may not be called from within
+** a transaction.
+*/
+case OP_Vacuum: {
+ if( sqliteSafetyOff(db) ) goto abort_due_to_misuse;
+ rc = sqliteRunVacuum(&p->zErrMsg, db);
+ if( sqliteSafetyOn(db) ) goto abort_due_to_misuse;
+ break;
+}
+
+/* Opcode: StackDepth * * *
+**
+** Push an integer onto the stack which is the depth of the stack prior
+** to that integer being pushed.
+*/
+case OP_StackDepth: {
+ int depth = (&pTos[1]) - p->aStack;
+ pTos++;
+ pTos->i = depth;
+ pTos->flags = MEM_Int;
+ break;
+}
+
+/* Opcode: StackReset * * *
+**
+** Pop a single integer off of the stack. Then pop the stack
+** as many times as necessary to get the depth of the stack down
+** to the value of the integer that was popped.
+*/
+case OP_StackReset: {
+ int depth, goal;
+ assert( pTos>=p->aStack );
+ Integerify(pTos);
+ goal = pTos->i;
+ depth = (&pTos[1]) - p->aStack;
+ assert( goal<depth );
+ popStack(&pTos, depth-goal);
+ break;
+}
+
+/* Any other opcode is illegal...
+*/
+default: {
+ sqlite_snprintf(sizeof(zBuf),zBuf,"%d",pOp->opcode);
+ sqliteSetString(&p->zErrMsg, "unknown opcode ", zBuf, (char*)0);
+ rc = SQLITE_INTERNAL;
+ break;
+}
+
+/*****************************************************************************
+** The cases of the switch statement above this line should all be indented
+** by 6 spaces. But the left-most 6 spaces have been removed to improve the
+** readability. From this point on down, the normal indentation rules are
+** restored.
+*****************************************************************************/
+ }
+
+#ifdef VDBE_PROFILE
+ {
+ long long elapse = hwtime() - start;
+ pOp->cycles += elapse;
+ pOp->cnt++;
+#if 0
+ fprintf(stdout, "%10lld ", elapse);
+ sqliteVdbePrintOp(stdout, origPc, &p->aOp[origPc]);
+#endif
+ }
+#endif
+
+ /* The following code adds nothing to the actual functionality
+ ** of the program. It is only here for testing and debugging.
+ ** On the other hand, it does burn CPU cycles every time through
+ ** the evaluator loop. So we can leave it out when NDEBUG is defined.
+ */
+#ifndef NDEBUG
+ /* Sanity checking on the top element of the stack */
+ if( pTos>=p->aStack ){
+ assert( pTos->flags!=0 ); /* Must define some type */
+ if( pTos->flags & MEM_Str ){
+ int x = pTos->flags & (MEM_Static|MEM_Dyn|MEM_Ephem|MEM_Short);
+ assert( x!=0 ); /* Strings must define a string subtype */
+ assert( (x & (x-1))==0 ); /* Only one string subtype can be defined */
+ assert( pTos->z!=0 ); /* Strings must have a value */
+ /* Mem.z points to Mem.zShort iff the subtype is MEM_Short */
+ assert( (pTos->flags & MEM_Short)==0 || pTos->z==pTos->zShort );
+ assert( (pTos->flags & MEM_Short)!=0 || pTos->z!=pTos->zShort );
+ }else{
+ /* Cannot define a string subtype for non-string objects */
+ assert( (pTos->flags & (MEM_Static|MEM_Dyn|MEM_Ephem|MEM_Short))==0 );
+ }
+ /* MEM_Null excludes all other types */
+ assert( pTos->flags==MEM_Null || (pTos->flags&MEM_Null)==0 );
+ }
+ if( pc<-1 || pc>=p->nOp ){
+ sqliteSetString(&p->zErrMsg, "jump destination out of range", (char*)0);
+ rc = SQLITE_INTERNAL;
+ }
+ if( p->trace && pTos>=p->aStack ){
+ int i;
+ fprintf(p->trace, "Stack:");
+ for(i=0; i>-5 && &pTos[i]>=p->aStack; i--){
+ if( pTos[i].flags & MEM_Null ){
+ fprintf(p->trace, " NULL");
+ }else if( (pTos[i].flags & (MEM_Int|MEM_Str))==(MEM_Int|MEM_Str) ){
+ fprintf(p->trace, " si:%d", pTos[i].i);
+ }else if( pTos[i].flags & MEM_Int ){
+ fprintf(p->trace, " i:%d", pTos[i].i);
+ }else if( pTos[i].flags & MEM_Real ){
+ fprintf(p->trace, " r:%g", pTos[i].r);
+ }else if( pTos[i].flags & MEM_Str ){
+ int j, k;
+ char zBuf[100];
+ zBuf[0] = ' ';
+ if( pTos[i].flags & MEM_Dyn ){
+ zBuf[1] = 'z';
+ assert( (pTos[i].flags & (MEM_Static|MEM_Ephem))==0 );
+ }else if( pTos[i].flags & MEM_Static ){
+ zBuf[1] = 't';
+ assert( (pTos[i].flags & (MEM_Dyn|MEM_Ephem))==0 );
+ }else if( pTos[i].flags & MEM_Ephem ){
+ zBuf[1] = 'e';
+ assert( (pTos[i].flags & (MEM_Static|MEM_Dyn))==0 );
+ }else{
+ zBuf[1] = 's';
+ }
+ zBuf[2] = '[';
+ k = 3;
+ for(j=0; j<20 && j<pTos[i].n; j++){
+ int c = pTos[i].z[j];
+ if( c==0 && j==pTos[i].n-1 ) break;
+ if( isprint(c) && !isspace(c) ){
+ zBuf[k++] = c;
+ }else{
+ zBuf[k++] = '.';
+ }
+ }
+ zBuf[k++] = ']';
+ zBuf[k++] = 0;
+ fprintf(p->trace, "%s", zBuf);
+ }else{
+ fprintf(p->trace, " ???");
+ }
+ }
+ if( rc!=0 ) fprintf(p->trace," rc=%d",rc);
+ fprintf(p->trace,"\n");
+ }
+#endif
+ } /* The end of the for(;;) loop that loops through opcodes */
+
+ /* If we reach this point, it means that execution is finished.
+ */
+vdbe_halt:
+ CHECK_FOR_INTERRUPT
+ if( rc ){
+ p->rc = rc;
+ rc = SQLITE_ERROR;
+ }else{
+ rc = SQLITE_DONE;
+ }
+ p->magic = VDBE_MAGIC_HALT;
+ p->pTos = pTos;
+ return rc;
+
+ /* Jump to here if a malloc() fails. It's hard to get a malloc()
+ ** to fail on a modern VM computer, so this code is untested.
+ */
+no_mem:
+ sqliteSetString(&p->zErrMsg, "out of memory", (char*)0);
+ rc = SQLITE_NOMEM;
+ goto vdbe_halt;
+
+ /* Jump to here for an SQLITE_MISUSE error.
+ */
+abort_due_to_misuse:
+ rc = SQLITE_MISUSE;
+ /* Fall thru into abort_due_to_error */
+
+ /* Jump to here for any other kind of fatal error. The "rc" variable
+ ** should hold the error number.
+ */
+abort_due_to_error:
+ if( p->zErrMsg==0 ){
+ if( sqlite_malloc_failed ) rc = SQLITE_NOMEM;
+ sqliteSetString(&p->zErrMsg, sqlite_error_string(rc), (char*)0);
+ }
+ goto vdbe_halt;
+
+ /* Jump to here if the sqlite_interrupt() API sets the interrupt
+ ** flag.
+ */
+abort_due_to_interrupt:
+ assert( db->flags & SQLITE_Interrupt );
+ db->flags &= ~SQLITE_Interrupt;
+ if( db->magic!=SQLITE_MAGIC_BUSY ){
+ rc = SQLITE_MISUSE;
+ }else{
+ rc = SQLITE_INTERRUPT;
+ }
+ sqliteSetString(&p->zErrMsg, sqlite_error_string(rc), (char*)0);
+ goto vdbe_halt;
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/src/vdbe.h b/usr/src/cmd/svc/configd/sqlite/src/vdbe.h
new file mode 100644
index 0000000000..c95a69e0e7
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/vdbe.h
@@ -0,0 +1,115 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** Header file for the Virtual DataBase Engine (VDBE)
+**
+** This header defines the interface to the virtual database engine
+** or VDBE. The VDBE implements an abstract machine that runs a
+** simple program to access and modify the underlying database.
+**
+** $Id: vdbe.h,v 1.71 2004/02/22 20:05:02 drh Exp $
+*/
+#ifndef _SQLITE_VDBE_H_
+#define _SQLITE_VDBE_H_
+#include <stdio.h>
+
+/*
+** A single VDBE is an opaque structure named "Vdbe". Only routines
+** in the source file sqliteVdbe.c are allowed to see the insides
+** of this structure.
+*/
+typedef struct Vdbe Vdbe;
+
+/*
+** A single instruction of the virtual machine has an opcode
+** and as many as three operands. The instruction is recorded
+** as an instance of the following structure:
+*/
+struct VdbeOp {
+ u8 opcode; /* What operation to perform */
+ int p1; /* First operand */
+ int p2; /* Second parameter (often the jump destination) */
+ char *p3; /* Third parameter */
+ int p3type; /* P3_STATIC, P3_DYNAMIC or P3_POINTER */
+#ifdef VDBE_PROFILE
+ int cnt; /* Number of times this instruction was executed */
+ long long cycles; /* Total time spent executing this instruction */
+#endif
+};
+typedef struct VdbeOp VdbeOp;
+
+/*
+** A smaller version of VdbeOp used for the VdbeAddOpList() function because
+** it takes up less space.
+*/
+struct VdbeOpList {
+ u8 opcode; /* What operation to perform */
+ signed char p1; /* First operand */
+ short int p2; /* Second parameter (often the jump destination) */
+ char *p3; /* Third parameter */
+};
+typedef struct VdbeOpList VdbeOpList;
+
+/*
+** Allowed values of VdbeOp.p3type
+*/
+#define P3_NOTUSED 0 /* The P3 parameter is not used */
+#define P3_DYNAMIC (-1) /* Pointer to a string obtained from sqliteMalloc() */
+#define P3_STATIC (-2) /* Pointer to a static string */
+#define P3_POINTER (-3) /* P3 is a pointer to some structure or object */
+
+/*
+** The following macro converts a relative address in the p2 field
+** of a VdbeOp structure into a negative number so that
+** sqliteVdbeAddOpList() knows that the address is relative. Calling
+** the macro again restores the address.
+*/
+#define ADDR(X) (-1-(X))
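+
+/*
+** As a sketch of how ADDR() is used (the array name below is purely
+** illustrative), a static program with a backwards jump could be written:
+**
+**    static const VdbeOpList dummyProg[] = {
+**      { OP_Integer, 1, 0,       0 },   /* address 0 */
+**      { OP_Goto,    0, ADDR(0), 0 },   /* address 1: jump back to 0 */
+**    };
+**
+** sqliteVdbeAddOpList() recognizes the negative P2 produced by ADDR()
+** and rewrites it as an address relative to where the list is inserted.
+*/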
+
+/*
+** The makefile scans the vdbe.c source file and creates the "opcodes.h"
+** header file that defines a number for each opcode used by the VDBE.
+*/
+#include "opcodes.h"
+
+/*
+** Prototypes for the VDBE interface. See comments on the implementation
+** for a description of what each of these routines does.
+*/
+Vdbe *sqliteVdbeCreate(sqlite*);
+void sqliteVdbeCreateCallback(Vdbe*, int*);
+int sqliteVdbeAddOp(Vdbe*,int,int,int);
+int sqliteVdbeOp3(Vdbe*,int,int,int,const char *zP3,int);
+int sqliteVdbeCode(Vdbe*,...);
+int sqliteVdbeAddOpList(Vdbe*, int nOp, VdbeOpList const *aOp);
+void sqliteVdbeChangeP1(Vdbe*, int addr, int P1);
+void sqliteVdbeChangeP2(Vdbe*, int addr, int P2);
+void sqliteVdbeChangeP3(Vdbe*, int addr, const char *zP1, int N);
+void sqliteVdbeDequoteP3(Vdbe*, int addr);
+int sqliteVdbeFindOp(Vdbe*, int, int);
+VdbeOp *sqliteVdbeGetOp(Vdbe*, int);
+int sqliteVdbeMakeLabel(Vdbe*);
+void sqliteVdbeDelete(Vdbe*);
+void sqliteVdbeMakeReady(Vdbe*,int,int);
+int sqliteVdbeExec(Vdbe*);
+int sqliteVdbeList(Vdbe*);
+int sqliteVdbeFinalize(Vdbe*,char**);
+void sqliteVdbeResolveLabel(Vdbe*, int);
+int sqliteVdbeCurrentAddr(Vdbe*);
+void sqliteVdbeTrace(Vdbe*,FILE*);
+void sqliteVdbeCompressSpace(Vdbe*,int);
+int sqliteVdbeReset(Vdbe*,char **);
+int sqliteVdbeSetVariables(Vdbe*,int,const char**);
+
+#endif
diff --git a/usr/src/cmd/svc/configd/sqlite/src/vdbeInt.h b/usr/src/cmd/svc/configd/sqlite/src/vdbeInt.h
new file mode 100644
index 0000000000..0d16eb23e8
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/vdbeInt.h
@@ -0,0 +1,306 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2003 September 6
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This is the header file for information that is private to the
+** VDBE. This information used to all be at the top of the single
+** source code file "vdbe.c". When that file became too big (over
+** 6000 lines long) it was split up into several smaller files and
+** this header information was factored out.
+*/
+
+/*
+** When converting from the native format to the key format and back
+** again, in addition to changing the byte order we invert the high-order
+** bit of the most significant byte. This causes negative numbers to
+** sort before positive numbers in the memcmp() function.
+*/
+#define keyToInt(X) (sqliteVdbeByteSwap(X) ^ 0x80000000)
+#define intToKey(X) (sqliteVdbeByteSwap((X) ^ 0x80000000))
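+
+/*
+** For example (the values are illustrative), on a 32-bit little-endian
+** machine:
+**
+**    intToKey(5)              == sqliteVdbeByteSwap(5 ^ 0x80000000)
+**    keyToInt(intToKey(5))    == 5
+**
+** Because the sign bit is inverted before the bytes are swapped, the
+** keys generated for -1, 0 and 1 compare in that order under memcmp().
+*/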
+
+/*
+** The makefile scans this source file and creates the following
+** array of string constants which are the names of all VDBE opcodes.
+** This array is defined in a separate source code file named opcode.c
+** which is automatically generated by the makefile.
+*/
+extern char *sqliteOpcodeNames[];
+
+/*
+** SQL is translated into a sequence of instructions to be
+** executed by a virtual machine. Each instruction is an instance
+** of the following structure.
+*/
+typedef struct VdbeOp Op;
+
+/*
+** Boolean values
+*/
+typedef unsigned char Bool;
+
+/*
+** A cursor is a pointer into a single BTree within a database file.
+** The cursor can seek to a BTree entry with a particular key, or
+** loop over all entries of the Btree. You can also insert new BTree
+** entries or retrieve the key or data from the entry that the cursor
+** is currently pointing to.
+**
+** Every cursor that the virtual machine has open is represented by an
+** instance of the following structure.
+**
+** If the Cursor.pseudoTable flag is set it means that this cursor is
+** really a single row that represents the NEW or OLD pseudo-table of
+** a row trigger. The data for the row is stored in Cursor.pData and
+** the rowid is in Cursor.iKey.
+*/
+struct Cursor {
+ BtCursor *pCursor; /* The cursor structure of the backend */
+ int lastRecno; /* Last recno from a Next or NextIdx operation */
+ int nextRowid; /* Next rowid returned by OP_NewRowid */
+ Bool recnoIsValid; /* True if lastRecno is valid */
+ Bool keyAsData; /* The OP_Column command works on key instead of data */
+ Bool atFirst; /* True if pointing to first entry */
+ Bool useRandomRowid; /* Generate new record numbers semi-randomly */
+ Bool nullRow; /* True if pointing to a row with no data */
+ Bool nextRowidValid; /* True if the nextRowid field is valid */
+ Bool pseudoTable; /* This is a NEW or OLD pseudo-table of a trigger */
+ Bool deferredMoveto; /* A call to sqliteBtreeMoveto() is needed */
+ int movetoTarget; /* Argument to the deferred sqliteBtreeMoveto() */
+ Btree *pBt; /* Separate file holding temporary table */
+ int nData; /* Number of bytes in pData */
+ char *pData; /* Data for a NEW or OLD pseudo-table */
+ int iKey; /* Key for the NEW or OLD pseudo-table row */
+};
+typedef struct Cursor Cursor;
+
+/*
+** A sorter builds a list of elements to be sorted. Each element of
+** the list is an instance of the following structure.
+*/
+typedef struct Sorter Sorter;
+struct Sorter {
+ int nKey; /* Number of bytes in the key */
+ char *zKey; /* The key by which we will sort */
+ int nData; /* Number of bytes in the data */
+ char *pData; /* The data associated with this key */
+ Sorter *pNext; /* Next in the list */
+};
+
+/*
+** Number of buckets used for merge-sort.
+*/
+#define NSORT 30
+
+/*
+** Number of bytes of string storage space available to each stack
+** layer without having to malloc. NBFS is short for Number of Bytes
+** For Strings.
+*/
+#define NBFS 32
+
+/*
+** A single level of the stack or a single memory cell
+** is an instance of the following structure.
+*/
+struct Mem {
+ int i; /* Integer value */
+ int n; /* Number of characters in string value, including '\0' */
+ int flags; /* Some combination of MEM_Null, MEM_Str, MEM_Dyn, etc. */
+ double r; /* Real value */
+ char *z; /* String value */
+ char zShort[NBFS]; /* Space for short strings */
+};
+typedef struct Mem Mem;
+
+/*
+** Allowed values for Mem.flags
+*/
+#define MEM_Null 0x0001 /* Value is NULL */
+#define MEM_Str 0x0002 /* Value is a string */
+#define MEM_Int 0x0004 /* Value is an integer */
+#define MEM_Real 0x0008 /* Value is a real number */
+#define MEM_Dyn 0x0010 /* Need to call sqliteFree() on Mem.z */
+#define MEM_Static 0x0020 /* Mem.z points to a static string */
+#define MEM_Ephem 0x0040 /* Mem.z points to an ephemeral string */
+#define MEM_Short 0x0080 /* Mem.z points to Mem.zShort */
+
+/* The following MEM_ value appears only in the flags of AggElem.aMem[] entries.
+** It indicates that the corresponding AggElem.aMem[].z points to an
+** aggregate function context that needs to be finalized.
+*/
+#define MEM_AggCtx 0x0100 /* Mem.z points to an agg function context */
+
+/*
+** The "context" argument for a installable function. A pointer to an
+** instance of this structure is the first argument to the routines used
+** implement the SQL functions.
+**
+** There is a typedef for this structure in sqlite.h. So all routines,
+** even the public interface to SQLite, can use a pointer to this structure.
+** But this file is the only place where the internal details of this
+** structure are known.
+**
+** This structure is defined in this header because it uses substructures
+** (Mem) which are also defined here.
+*/
+struct sqlite_func {
+ FuncDef *pFunc; /* Pointer to function information. MUST BE FIRST */
+ Mem s; /* The return value is stored here */
+ void *pAgg; /* Aggregate context */
+ u8 isError; /* Set to true for an error */
+ u8 isStep; /* Currently inside the step function */
+ int cnt; /* Number of times that the step function has been called */
+};
+
+/*
+** An Agg structure describes an Aggregator. Each Agg consists of
+** zero or more Aggregator elements (AggElem). Each AggElem contains
+** a key and one or more values. The values are used in processing
+** aggregate functions in a SELECT. The key is used to implement
+** the GROUP BY clause of a select.
+*/
+typedef struct Agg Agg;
+typedef struct AggElem AggElem;
+struct Agg {
+ int nMem; /* Number of values stored in each AggElem */
+ AggElem *pCurrent; /* The AggElem currently in focus */
+ HashElem *pSearch; /* The hash element for pCurrent */
+ Hash hash; /* Hash table of all aggregate elements */
+ FuncDef **apFunc; /* Information about aggregate functions */
+};
+struct AggElem {
+ char *zKey; /* The key to this AggElem */
+ int nKey; /* Number of bytes in the key, including '\0' at end */
+ Mem aMem[1]; /* The values for this AggElem */
+};
+
+/*
+** A Set structure is used for quick testing to see if a value
+** is part of a small set. Sets are used to implement code like
+** this:
+** x.y IN ('hi','hoo','hum')
+*/
+typedef struct Set Set;
+struct Set {
+ Hash hash; /* A set is just a hash table */
+ HashElem *prev; /* Previously accessed hash element */
+};
+
+/*
+** A Keylist is a bunch of keys into a table. The keylist can
+** grow without bound. The keylist stores the ROWIDs of database
+** records that need to be deleted or updated.
+*/
+typedef struct Keylist Keylist;
+struct Keylist {
+ int nKey; /* Number of slots in aKey[] */
+ int nUsed; /* Next unwritten slot in aKey[] */
+ int nRead; /* Next unread slot in aKey[] */
+ Keylist *pNext; /* Next block of keys */
+ int aKey[1]; /* One or more keys. Extra space allocated as needed */
+};
+
+/*
+** A Context stores the last insert rowid, the last statement change count,
+** and the current statement change count (i.e. changes since last statement).
+** Elements of Context structure type make up the ContextStack, which is
+** updated by the ContextPush and ContextPop opcodes (used by triggers)
+*/
+typedef struct Context Context;
+struct Context {
+ int lastRowid; /* Last insert rowid (from db->lastRowid) */
+ int lsChange; /* Last statement change count (from db->lsChange) */
+ int csChange; /* Current statement change count (from db->csChange) */
+};
+
+/*
+** An instance of the virtual machine. This structure contains the complete
+** state of the virtual machine.
+**
+** The "sqlite_vm" structure pointer that is returned by sqlite_compile()
+** is really a pointer to an instance of this structure.
+*/
+struct Vdbe {
+ sqlite *db; /* The whole database */
+ Vdbe *pPrev,*pNext; /* Linked list of VDBEs with the same Vdbe.db */
+ FILE *trace; /* Write an execution trace here, if not NULL */
+ int nOp; /* Number of instructions in the program */
+ int nOpAlloc; /* Number of slots allocated for aOp[] */
+ Op *aOp; /* Space to hold the virtual machine's program */
+ int nLabel; /* Number of labels used */
+ int nLabelAlloc; /* Number of slots allocated in aLabel[] */
+ int *aLabel; /* Space to hold the labels */
+ Mem *aStack; /* The operand stack, except string values */
+ Mem *pTos; /* Top entry in the operand stack */
+ char **zArgv; /* Text values used by the callback */
+ char **azColName; /* Becomes the 4th parameter to callbacks */
+ int nCursor; /* Number of slots in aCsr[] */
+ Cursor *aCsr; /* One element of this array for each open cursor */
+ Sorter *pSort; /* A linked list of objects to be sorted */
+ FILE *pFile; /* At most one open file handler */
+ int nField; /* Number of file fields */
+ char **azField; /* Data for each file field */
+ int nVar; /* Number of entries in azVariable[] */
+ char **azVar; /* Values for the OP_Variable opcode */
+ int *anVar; /* Length of each value in azVariable[] */
+ u8 *abVar; /* TRUE if azVariable[i] needs to be sqliteFree()ed */
+ char *zLine; /* A single line from the input file */
+ int nLineAlloc; /* Number of spaces allocated for zLine */
+ int magic; /* Magic number for sanity checking */
+ int nMem; /* Number of memory locations currently allocated */
+ Mem *aMem; /* The memory locations */
+ Agg agg; /* Aggregate information */
+ int nSet; /* Number of sets allocated */
+ Set *aSet; /* An array of sets */
+ int nCallback; /* Number of callbacks invoked so far */
+ Keylist *pList; /* A list of ROWIDs */
+ int keylistStackDepth; /* The size of the "keylist" stack */
+ Keylist **keylistStack; /* The stack used by opcodes ListPush & ListPop */
+ int contextStackDepth; /* The size of the "context" stack */
+ Context *contextStack; /* Stack used by opcodes ContextPush & ContextPop*/
+ int pc; /* The program counter */
+ int rc; /* Value to return */
+ unsigned uniqueCnt; /* Used by OP_MakeRecord when P2!=0 */
+ int errorAction; /* Recovery action to do in case of an error */
+ int undoTransOnError; /* If error, either ROLLBACK or COMMIT */
+ int inTempTrans; /* True if temp database is transactioned */
+ int returnStack[100]; /* Return address stack for OP_Gosub & OP_Return */
+ int returnDepth; /* Next unused element in returnStack[] */
+ int nResColumn; /* Number of columns in one row of the result set */
+ char **azResColumn; /* Values for one row of result */
+ int popStack; /* Pop the stack this much on entry to VdbeExec() */
+ char *zErrMsg; /* Error message written here */
+ u8 explain; /* True if EXPLAIN present on SQL command */
+};
+
+/*
+** The following are allowed values for Vdbe.magic
+*/
+#define VDBE_MAGIC_INIT 0x26bceaa5 /* Building a VDBE program */
+#define VDBE_MAGIC_RUN 0xbdf20da3 /* VDBE is ready to execute */
+#define VDBE_MAGIC_HALT 0x519c2973 /* VDBE has completed execution */
+#define VDBE_MAGIC_DEAD 0xb606c3c8 /* The VDBE has been deallocated */
+
+/*
+** Function prototypes
+*/
+void sqliteVdbeCleanupCursor(Cursor*);
+void sqliteVdbeSorterReset(Vdbe*);
+void sqliteVdbeAggReset(Agg*);
+void sqliteVdbeKeylistFree(Keylist*);
+void sqliteVdbePopStack(Vdbe*,int);
+int sqliteVdbeCursorMoveto(Cursor*);
+int sqliteVdbeByteSwap(int);
+#if !defined(NDEBUG) || defined(VDBE_PROFILE)
+void sqliteVdbePrintOp(FILE*, int, Op*);
+#endif
diff --git a/usr/src/cmd/svc/configd/sqlite/src/vdbeaux.c b/usr/src/cmd/svc/configd/sqlite/src/vdbeaux.c
new file mode 100644
index 0000000000..aea32dd8f8
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/vdbeaux.c
@@ -0,0 +1,1064 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2003 September 6
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code used for creating, destroying, and populating
+** a VDBE (or an "sqlite_vm" as it is known to the outside world.) Prior
+** to version 2.8.7, all this code was combined into the vdbe.c source file.
+** But that file was getting too big so these subroutines were split out.
+*/
+#include "sqliteInt.h"
+#include "os.h"
+#include <ctype.h>
+#include "vdbeInt.h"
+
+
+/*
+** When debugging the code generator in a symbolic debugger, one can
+** set the sqlite_vdbe_addop_trace to 1 and all opcodes will be printed
+** as they are added to the instruction stream.
+*/
+#ifndef NDEBUG
+int sqlite_vdbe_addop_trace = 0;
+#endif
+
+
+/*
+** Create a new virtual database engine.
+*/
+Vdbe *sqliteVdbeCreate(sqlite *db){
+ Vdbe *p;
+ p = sqliteMalloc( sizeof(Vdbe) );
+ if( p==0 ) return 0;
+ p->db = db;
+ if( db->pVdbe ){
+ db->pVdbe->pPrev = p;
+ }
+ p->pNext = db->pVdbe;
+ p->pPrev = 0;
+ db->pVdbe = p;
+ p->magic = VDBE_MAGIC_INIT;
+ return p;
+}
+
+/*
+** Turn tracing on or off
+*/
+void sqliteVdbeTrace(Vdbe *p, FILE *trace){
+ p->trace = trace;
+}
+
+/*
+** Add a new instruction to the list of instructions currently in the
+** VDBE. Return the address of the new instruction.
+**
+** Parameters:
+**
+** p Pointer to the VDBE
+**
+** op The opcode for this instruction
+**
+** p1, p2 First two of the three possible operands.
+**
+** Use the sqliteVdbeResolveLabel() function to fix an address and
+** the sqliteVdbeChangeP3() function to change the value of the P3
+** operand.
+*/
+int sqliteVdbeAddOp(Vdbe *p, int op, int p1, int p2){
+ int i;
+ VdbeOp *pOp;
+
+ i = p->nOp;
+ p->nOp++;
+ assert( p->magic==VDBE_MAGIC_INIT );
+ if( i>=p->nOpAlloc ){
+ int oldSize = p->nOpAlloc;
+ Op *aNew;
+ p->nOpAlloc = p->nOpAlloc*2 + 100;
+ aNew = sqliteRealloc(p->aOp, p->nOpAlloc*sizeof(Op));
+ if( aNew==0 ){
+ p->nOpAlloc = oldSize;
+ return 0;
+ }
+ p->aOp = aNew;
+ memset(&p->aOp[oldSize], 0, (p->nOpAlloc-oldSize)*sizeof(Op));
+ }
+ pOp = &p->aOp[i];
+ pOp->opcode = op;
+ pOp->p1 = p1;
+ if( p2<0 && (-1-p2)<p->nLabel && p->aLabel[-1-p2]>=0 ){
+ p2 = p->aLabel[-1-p2];
+ }
+ pOp->p2 = p2;
+ pOp->p3 = 0;
+ pOp->p3type = P3_NOTUSED;
+#ifndef NDEBUG
+ if( sqlite_vdbe_addop_trace ) sqliteVdbePrintOp(0, i, &p->aOp[i]);
+#endif
+ return i;
+}
+
+/*
+** Add an opcode that includes the p3 value.
+*/
+int sqliteVdbeOp3(Vdbe *p, int op, int p1, int p2, const char *zP3, int p3type){
+ int addr = sqliteVdbeAddOp(p, op, p1, p2);
+ sqliteVdbeChangeP3(p, addr, zP3, p3type);
+ return addr;
+}
+
+/*
+** Add multiple opcodes. The list is terminated by an opcode of 0.
+*/
+int sqliteVdbeCode(Vdbe *p, ...){
+ int addr;
+ va_list ap;
+ int opcode, p1, p2;
+ va_start(ap, p);
+ addr = p->nOp;
+ while( (opcode = va_arg(ap,int))!=0 ){
+ p1 = va_arg(ap,int);
+ p2 = va_arg(ap,int);
+ sqliteVdbeAddOp(p, opcode, p1, p2);
+ }
+ va_end(ap);
+ return addr;
+}
+
+
+
+/*
+** Create a new symbolic label for an instruction that has yet to be
+** coded. The symbolic label is really just a negative number. The
+** label can be used as the P2 value of an operation. Later, when
+** the label is resolved to a specific address, the VDBE will scan
+** through its operation list and change all values of P2 which match
+** the label into the resolved address.
+**
+** The VDBE knows that a P2 value is a label because labels are
+** always negative and P2 values are supposed to be non-negative.
+** Hence, a negative P2 value is a label that has yet to be resolved.
+*/
+int sqliteVdbeMakeLabel(Vdbe *p){
+ int i;
+ i = p->nLabel++;
+ assert( p->magic==VDBE_MAGIC_INIT );
+ if( i>=p->nLabelAlloc ){
+ int *aNew;
+ p->nLabelAlloc = p->nLabelAlloc*2 + 10;
+ aNew = sqliteRealloc( p->aLabel, p->nLabelAlloc*sizeof(p->aLabel[0]));
+ if( aNew==0 ){
+ sqliteFree(p->aLabel);
+ }
+ p->aLabel = aNew;
+ }
+ if( p->aLabel==0 ){
+ p->nLabel = 0;
+ p->nLabelAlloc = 0;
+ return 0;
+ }
+ p->aLabel[i] = -1;
+ return -1-i;
+}
+
+/*
+** Resolve label "x" to be the address of the next instruction to
+** be inserted. The parameter "x" must have been obtained from
+** a prior call to sqliteVdbeMakeLabel().
+*/
+void sqliteVdbeResolveLabel(Vdbe *p, int x){
+ int j;
+ assert( p->magic==VDBE_MAGIC_INIT );
+ if( x<0 && (-x)<=p->nLabel && p->aOp ){
+ if( p->aLabel[-1-x]==p->nOp ) return;
+ assert( p->aLabel[-1-x]<0 );
+ p->aLabel[-1-x] = p->nOp;
+ for(j=0; j<p->nOp; j++){
+ if( p->aOp[j].p2==x ) p->aOp[j].p2 = p->nOp;
+ }
+ }
+}
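+
+/*
+** A typical use of labels while generating code looks roughly like the
+** following sketch (the variable names are illustrative only):
+**
+**    int lbl = sqliteVdbeMakeLabel(v);
+**    sqliteVdbeAddOp(v, OP_If, 0, lbl);   /* forward jump, target unknown */
+**    ...                                  /* code for the fall-through case */
+**    sqliteVdbeResolveLabel(v, lbl);      /* "lbl" now means this address */
+**
+** Every instruction whose P2 was coded as "lbl" is patched to the
+** resolved address.
+*/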
+
+/*
+** Return the address of the next instruction to be inserted.
+*/
+int sqliteVdbeCurrentAddr(Vdbe *p){
+ assert( p->magic==VDBE_MAGIC_INIT );
+ return p->nOp;
+}
+
+/*
+** Add a whole list of operations to the program. Return the
+** address of the first operation added.
+*/
+int sqliteVdbeAddOpList(Vdbe *p, int nOp, VdbeOpList const *aOp){
+ int addr;
+ assert( p->magic==VDBE_MAGIC_INIT );
+ if( p->nOp + nOp >= p->nOpAlloc ){
+ int oldSize = p->nOpAlloc;
+ Op *aNew;
+ p->nOpAlloc = p->nOpAlloc*2 + nOp + 10;
+ aNew = sqliteRealloc(p->aOp, p->nOpAlloc*sizeof(Op));
+ if( aNew==0 ){
+ p->nOpAlloc = oldSize;
+ return 0;
+ }
+ p->aOp = aNew;
+ memset(&p->aOp[oldSize], 0, (p->nOpAlloc-oldSize)*sizeof(Op));
+ }
+ addr = p->nOp;
+ if( nOp>0 ){
+ int i;
+ VdbeOpList const *pIn = aOp;
+ for(i=0; i<nOp; i++, pIn++){
+ int p2 = pIn->p2;
+ VdbeOp *pOut = &p->aOp[i+addr];
+ pOut->opcode = pIn->opcode;
+ pOut->p1 = pIn->p1;
+ pOut->p2 = p2<0 ? addr + ADDR(p2) : p2;
+ pOut->p3 = pIn->p3;
+ pOut->p3type = pIn->p3 ? P3_STATIC : P3_NOTUSED;
+#ifndef NDEBUG
+ if( sqlite_vdbe_addop_trace ){
+ sqliteVdbePrintOp(0, i+addr, &p->aOp[i+addr]);
+ }
+#endif
+ }
+ p->nOp += nOp;
+ }
+ return addr;
+}
+
+/*
+** Change the value of the P1 operand for a specific instruction.
+** This routine is useful when a large program is loaded from a
+** static array using sqliteVdbeAddOpList but we want to make a
+** few minor changes to the program.
+*/
+void sqliteVdbeChangeP1(Vdbe *p, int addr, int val){
+ assert( p->magic==VDBE_MAGIC_INIT );
+ if( p && addr>=0 && p->nOp>addr && p->aOp ){
+ p->aOp[addr].p1 = val;
+ }
+}
+
+/*
+** Change the value of the P2 operand for a specific instruction.
+** This routine is useful for setting a jump destination.
+*/
+void sqliteVdbeChangeP2(Vdbe *p, int addr, int val){
+ assert( val>=0 );
+ assert( p->magic==VDBE_MAGIC_INIT );
+ if( p && addr>=0 && p->nOp>addr && p->aOp ){
+ p->aOp[addr].p2 = val;
+ }
+}
+
+/*
+** Change the value of the P3 operand for a specific instruction.
+** This routine is useful when a large program is loaded from a
+** static array using sqliteVdbeAddOpList but we want to make a
+** few minor changes to the program.
+**
+** If n>=0 then the P3 operand is dynamic, meaning that a copy of
+** the string is made into memory obtained from sqliteMalloc().
+** A value of n==0 means copy bytes of zP3 up to and including the
+** first null byte. If n>0 then copy n+1 bytes of zP3.
+**
+** If n==P3_STATIC it means that zP3 is a pointer to a constant static
+** string and we can just copy the pointer. n==P3_POINTER means zP3 is
+** a pointer to some object other than a string.
+**
+** If addr<0 then change P3 on the most recently inserted instruction.
+*/
+void sqliteVdbeChangeP3(Vdbe *p, int addr, const char *zP3, int n){
+ Op *pOp;
+ assert( p->magic==VDBE_MAGIC_INIT );
+ if( p==0 || p->aOp==0 ) return;
+ if( addr<0 || addr>=p->nOp ){
+ addr = p->nOp - 1;
+ if( addr<0 ) return;
+ }
+ pOp = &p->aOp[addr];
+ if( pOp->p3 && pOp->p3type==P3_DYNAMIC ){
+ sqliteFree(pOp->p3);
+ pOp->p3 = 0;
+ }
+ if( zP3==0 ){
+ pOp->p3 = 0;
+ pOp->p3type = P3_NOTUSED;
+ }else if( n<0 ){
+ pOp->p3 = (char*)zP3;
+ pOp->p3type = n;
+ }else{
+ sqliteSetNString(&pOp->p3, zP3, n, 0);
+ pOp->p3type = P3_DYNAMIC;
+ }
+}
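+
+/*
+** The calling conventions described above can be sketched with a few
+** hypothetical calls (the values and the pObj name are examples only):
+**
+**    sqliteVdbeChangeP3(p, addr, zName, strlen(zName));    /* private copy */
+**    sqliteVdbeChangeP3(p, addr, "rowid", P3_STATIC);      /* keep pointer */
+**    sqliteVdbeChangeP3(p, addr, (char*)pObj, P3_POINTER); /* non-string   */
+*/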
+
+/*
+** If the P3 operand to the specified instruction appears
+** to be a quoted string token, then this procedure removes
+** the quotes.
+**
+** The quoting character can be either a single quote (ASCII 0x27)
+** or a double quote character (ASCII 0x22). Two quotes in a row
+** resolve to be a single actual quote character within the string.
+*/
+void sqliteVdbeDequoteP3(Vdbe *p, int addr){
+ Op *pOp;
+ assert( p->magic==VDBE_MAGIC_INIT );
+ if( p->aOp==0 ) return;
+ if( addr<0 || addr>=p->nOp ){
+ addr = p->nOp - 1;
+ if( addr<0 ) return;
+ }
+ pOp = &p->aOp[addr];
+ if( pOp->p3==0 || pOp->p3[0]==0 ) return;
+ if( pOp->p3type==P3_POINTER ) return;
+ if( pOp->p3type!=P3_DYNAMIC ){
+ pOp->p3 = sqliteStrDup(pOp->p3);
+ pOp->p3type = P3_DYNAMIC;
+ }
+ sqliteDequote(pOp->p3);
+}
+
+/*
+** On the P3 argument of the given instruction, change all
+** strings of whitespace characters into a single space and
+** delete leading and trailing whitespace.
+*/
+void sqliteVdbeCompressSpace(Vdbe *p, int addr){
+ unsigned char *z;
+ int i, j;
+ Op *pOp;
+ assert( p->magic==VDBE_MAGIC_INIT );
+ if( p->aOp==0 || addr<0 || addr>=p->nOp ) return;
+ pOp = &p->aOp[addr];
+ if( pOp->p3type==P3_POINTER ){
+ return;
+ }
+ if( pOp->p3type!=P3_DYNAMIC ){
+ pOp->p3 = sqliteStrDup(pOp->p3);
+ pOp->p3type = P3_DYNAMIC;
+ }
+ z = (unsigned char*)pOp->p3;
+ if( z==0 ) return;
+ i = j = 0;
+ while( isspace(z[i]) ){ i++; }
+ while( z[i] ){
+ if( isspace(z[i]) ){
+ z[j++] = ' ';
+ while( isspace(z[++i]) ){}
+ }else{
+ z[j++] = z[i++];
+ }
+ }
+ while( j>0 && isspace(z[j-1]) ){ j--; }
+ z[j] = 0;
+}
+
+/*
+** Search the current program for the given opcode and P2
+** value. Return the address plus 1 if found and 0 if not found.
+*/
+int sqliteVdbeFindOp(Vdbe *p, int op, int p2){
+ int i;
+ assert( p->magic==VDBE_MAGIC_INIT );
+ for(i=0; i<p->nOp; i++){
+ if( p->aOp[i].opcode==op && p->aOp[i].p2==p2 ) return i+1;
+ }
+ return 0;
+}
+
+/*
+** Return the opcode for a given address.
+*/
+VdbeOp *sqliteVdbeGetOp(Vdbe *p, int addr){
+ assert( p->magic==VDBE_MAGIC_INIT );
+ assert( addr>=0 && addr<p->nOp );
+ return &p->aOp[addr];
+}
+
+/*
+** The following group of routines are employed by installable functions
+** to return their results.
+**
+** The sqlite_set_result_string() routine can be used to return a string
+** value or to return a NULL. To return a NULL, pass in NULL for zResult.
+** A copy is made of the string before this routine returns so it is safe
+** to pass in an ephemeral string.
+**
+** sqlite_set_result_error() works like sqlite_set_result_string() except
+** that it signals a fatal error. The string argument, if any, is the
+** error message. If the argument is NULL a generic substitute error message
+** is used.
+**
+** The sqlite_set_result_int() and sqlite_set_result_double() set the return
+** value of the user function to an integer or a double.
+**
+** These routines are defined here in vdbeaux.c because they depend on knowing
+** the internals of the sqlite_func structure, which is defined in
+** vdbeInt.h.
+*/
+char *sqlite_set_result_string(sqlite_func *p, const char *zResult, int n){
+ assert( !p->isStep );
+ if( p->s.flags & MEM_Dyn ){
+ sqliteFree(p->s.z);
+ }
+ if( zResult==0 ){
+ p->s.flags = MEM_Null;
+ n = 0;
+ p->s.z = 0;
+ p->s.n = 0;
+ }else{
+ if( n<0 ) n = strlen(zResult);
+ if( n<NBFS-1 ){
+ memcpy(p->s.zShort, zResult, n);
+ p->s.zShort[n] = 0;
+ p->s.flags = MEM_Str | MEM_Short;
+ p->s.z = p->s.zShort;
+ }else{
+ p->s.z = sqliteMallocRaw( n+1 );
+ if( p->s.z ){
+ memcpy(p->s.z, zResult, n);
+ p->s.z[n] = 0;
+ }
+ p->s.flags = MEM_Str | MEM_Dyn;
+ }
+ p->s.n = n+1;
+ }
+ return p->s.z;
+}
+void sqlite_set_result_int(sqlite_func *p, int iResult){
+ assert( !p->isStep );
+ if( p->s.flags & MEM_Dyn ){
+ sqliteFree(p->s.z);
+ }
+ p->s.i = iResult;
+ p->s.flags = MEM_Int;
+}
+void sqlite_set_result_double(sqlite_func *p, double rResult){
+ assert( !p->isStep );
+ if( p->s.flags & MEM_Dyn ){
+ sqliteFree(p->s.z);
+ }
+ p->s.r = rResult;
+ p->s.flags = MEM_Real;
+}
+void sqlite_set_result_error(sqlite_func *p, const char *zMsg, int n){
+ assert( !p->isStep );
+ sqlite_set_result_string(p, zMsg, n);
+ p->isError = 1;
+}
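+
+/*
+** As an illustrative sketch, an installable function that returns the
+** length of its first argument might use these routines as follows
+** (the function name is hypothetical):
+**
+**    static void lengthFunc(sqlite_func *ctx, int argc, const char **argv){
+**      if( argv[0]==0 ){
+**        sqlite_set_result_string(ctx, 0, 0);          /* return SQL NULL */
+**      }else{
+**        sqlite_set_result_int(ctx, strlen(argv[0]));
+**      }
+**    }
+*/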
+
+/*
+** Extract the user data from a sqlite_func structure and return a
+** pointer to it.
+*/
+void *sqlite_user_data(sqlite_func *p){
+ assert( p && p->pFunc );
+ return p->pFunc->pUserData;
+}
+
+/*
+** Allocate or return the aggregate context for a user function. A new
+** context is allocated on the first call. Subsequent calls return the
+** same context that was returned on prior calls.
+**
+** This routine is defined here in vdbeaux.c because it depends on knowing
+** the internals of the sqlite_func structure, which is defined in
+** vdbeInt.h.
+*/
+void *sqlite_aggregate_context(sqlite_func *p, int nByte){
+ assert( p && p->pFunc && p->pFunc->xStep );
+ if( p->pAgg==0 ){
+ if( nByte<=NBFS ){
+ p->pAgg = (void*)p->s.z;
+ memset(p->pAgg, 0, nByte);
+ }else{
+ p->pAgg = sqliteMalloc( nByte );
+ }
+ }
+ return p->pAgg;
+}
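+
+/*
+** A hypothetical aggregate step function (the names are illustrative)
+** would typically call this routine once per row and rely on the
+** zero-initialized allocation:
+**
+**    typedef struct CountCtx { int n; } CountCtx;
+**    static void countStep(sqlite_func *ctx, int argc, const char **argv){
+**      CountCtx *pCtx = sqlite_aggregate_context(ctx, sizeof(*pCtx));
+**      if( pCtx ) pCtx->n++;
+**    }
+**
+** The same pointer is returned on every call and is also visible to the
+** finalizer, so the running count survives across rows.
+*/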
+
+/*
+** Return the number of times the Step function of an aggregate has been
+** called.
+**
+** This routine is defined here in vdbeaux.c because it depends on knowing
+** the internals of the sqlite_func structure, which is defined in
+** vdbeInt.h.
+*/
+int sqlite_aggregate_count(sqlite_func *p){
+ assert( p && p->pFunc && p->pFunc->xStep );
+ return p->cnt;
+}
+
+#if !defined(NDEBUG) || defined(VDBE_PROFILE)
+/*
+** Print a single opcode. This routine is used for debugging only.
+*/
+void sqliteVdbePrintOp(FILE *pOut, int pc, Op *pOp){
+ char *zP3;
+ char zPtr[40];
+ if( pOp->p3type==P3_POINTER ){
+ sprintf(zPtr, "ptr(%#lx)", (long)pOp->p3);
+ zP3 = zPtr;
+ }else{
+ zP3 = pOp->p3;
+ }
+ if( pOut==0 ) pOut = stdout;
+ fprintf(pOut,"%4d %-12s %4d %4d %s\n",
+ pc, sqliteOpcodeNames[pOp->opcode], pOp->p1, pOp->p2, zP3 ? zP3 : "");
+ fflush(pOut);
+}
+#endif
+
+/*
+** Give a listing of the program in the virtual machine.
+**
+** The interface is the same as sqliteVdbeExec(). But instead of
+** running the code, it invokes the callback once for each instruction.
+** This feature is used to implement "EXPLAIN".
+*/
+int sqliteVdbeList(
+ Vdbe *p /* The VDBE */
+){
+ sqlite *db = p->db;
+ int i;
+ int rc = SQLITE_OK;
+ static char *azColumnNames[] = {
+ "addr", "opcode", "p1", "p2", "p3",
+ "int", "text", "int", "int", "text",
+ 0
+ };
+
+ assert( p->popStack==0 );
+ assert( p->explain );
+ p->azColName = azColumnNames;
+ p->azResColumn = p->zArgv;
+ for(i=0; i<5; i++) p->zArgv[i] = p->aStack[i].zShort;
+ i = p->pc;
+ if( i>=p->nOp ){
+ p->rc = SQLITE_OK;
+ rc = SQLITE_DONE;
+ }else if( db->flags & SQLITE_Interrupt ){
+ db->flags &= ~SQLITE_Interrupt;
+ if( db->magic!=SQLITE_MAGIC_BUSY ){
+ p->rc = SQLITE_MISUSE;
+ }else{
+ p->rc = SQLITE_INTERRUPT;
+ }
+ rc = SQLITE_ERROR;
+ sqliteSetString(&p->zErrMsg, sqlite_error_string(p->rc), (char*)0);
+ }else{
+ sprintf(p->zArgv[0],"%d",i);
+ sprintf(p->zArgv[2],"%d", p->aOp[i].p1);
+ sprintf(p->zArgv[3],"%d", p->aOp[i].p2);
+ if( p->aOp[i].p3type==P3_POINTER ){
+ sprintf(p->aStack[4].zShort, "ptr(%#lx)", (long)p->aOp[i].p3);
+ p->zArgv[4] = p->aStack[4].zShort;
+ }else{
+ p->zArgv[4] = p->aOp[i].p3;
+ }
+ p->zArgv[1] = sqliteOpcodeNames[p->aOp[i].opcode];
+ p->pc = i+1;
+ p->azResColumn = p->zArgv;
+ p->nResColumn = 5;
+ p->rc = SQLITE_OK;
+ rc = SQLITE_ROW;
+ }
+ return rc;
+}
+
+/*
+** Prepare a virtual machine for execution. This involves things such
+** as allocating stack space and initializing the program counter.
+** After the VDBE has been prepped, it can be executed by one or more
+** calls to sqliteVdbeExec().
+*/
+void sqliteVdbeMakeReady(
+ Vdbe *p, /* The VDBE */
+ int nVar, /* Number of '?' seen in the SQL statement */
+ int isExplain /* True if the EXPLAIN keyword is present */
+){
+ int n;
+
+ assert( p!=0 );
+ assert( p->magic==VDBE_MAGIC_INIT );
+
+ /* Add a HALT instruction to the very end of the program.
+ */
+ if( p->nOp==0 || (p->aOp && p->aOp[p->nOp-1].opcode!=OP_Halt) ){
+ sqliteVdbeAddOp(p, OP_Halt, 0, 0);
+ }
+
+ /* No instruction ever pushes more than a single element onto the
+ ** stack. And the stack never grows on successive executions of the
+ ** same loop. So the total number of instructions is an upper bound
+ ** on the maximum stack depth required.
+ **
+ ** Allocate all the stack space we will ever need.
+ */
+ if( p->aStack==0 ){
+ p->nVar = nVar;
+ assert( nVar>=0 );
+ n = isExplain ? 10 : p->nOp;
+ p->aStack = sqliteMalloc(
+ n*(sizeof(p->aStack[0]) + 2*sizeof(char*)) /* aStack and zArgv */
+ + p->nVar*(sizeof(char*)+sizeof(int)+1) /* azVar, anVar, abVar */
+ );
+ p->zArgv = (char**)&p->aStack[n];
+ p->azColName = (char**)&p->zArgv[n];
+ p->azVar = (char**)&p->azColName[n];
+ p->anVar = (int*)&p->azVar[p->nVar];
+ p->abVar = (u8*)&p->anVar[p->nVar];
+ }
+
+ sqliteHashInit(&p->agg.hash, SQLITE_HASH_BINARY, 0);
+ p->agg.pSearch = 0;
+#ifdef MEMORY_DEBUG
+ if( sqliteOsFileExists("vdbe_trace") ){
+ p->trace = stdout;
+ }
+#endif
+ p->pTos = &p->aStack[-1];
+ p->pc = 0;
+ p->rc = SQLITE_OK;
+ p->uniqueCnt = 0;
+ p->returnDepth = 0;
+ p->errorAction = OE_Abort;
+ p->undoTransOnError = 0;
+ p->popStack = 0;
+ p->explain |= isExplain;
+ p->magic = VDBE_MAGIC_RUN;
+#ifdef VDBE_PROFILE
+ {
+ int i;
+ for(i=0; i<p->nOp; i++){
+ p->aOp[i].cnt = 0;
+ p->aOp[i].cycles = 0;
+ }
+ }
+#endif
+}
+
+
+/*
+** Remove any elements that remain on the sorter for the VDBE given.
+*/
+void sqliteVdbeSorterReset(Vdbe *p){
+ while( p->pSort ){
+ Sorter *pSorter = p->pSort;
+ p->pSort = pSorter->pNext;
+ sqliteFree(pSorter->zKey);
+ sqliteFree(pSorter->pData);
+ sqliteFree(pSorter);
+ }
+}
+
+/*
+** Reset an Agg structure. Delete all its contents.
+**
+** For installable aggregate functions, if the step function has been
+** called, make sure the finalizer function has also been called. The
+** finalizer might need to free memory that was allocated as part of its
+** private context. If the finalizer has not been called yet, call it
+** now.
+*/
+void sqliteVdbeAggReset(Agg *pAgg){
+ int i;
+ HashElem *p;
+ for(p = sqliteHashFirst(&pAgg->hash); p; p = sqliteHashNext(p)){
+ AggElem *pElem = sqliteHashData(p);
+ assert( pAgg->apFunc!=0 );
+ for(i=0; i<pAgg->nMem; i++){
+ Mem *pMem = &pElem->aMem[i];
+ if( pAgg->apFunc[i] && (pMem->flags & MEM_AggCtx)!=0 ){
+ sqlite_func ctx;
+ ctx.pFunc = pAgg->apFunc[i];
+ ctx.s.flags = MEM_Null;
+ ctx.pAgg = pMem->z;
+ ctx.cnt = pMem->i;
+ ctx.isStep = 0;
+ ctx.isError = 0;
+ (*pAgg->apFunc[i]->xFinalize)(&ctx);
+ if( pMem->z!=0 && pMem->z!=pMem->zShort ){
+ sqliteFree(pMem->z);
+ }
+ if( ctx.s.flags & MEM_Dyn ){
+ sqliteFree(ctx.s.z);
+ }
+ }else if( pMem->flags & MEM_Dyn ){
+ sqliteFree(pMem->z);
+ }
+ }
+ sqliteFree(pElem);
+ }
+ sqliteHashClear(&pAgg->hash);
+ sqliteFree(pAgg->apFunc);
+ pAgg->apFunc = 0;
+ pAgg->pCurrent = 0;
+ pAgg->pSearch = 0;
+ pAgg->nMem = 0;
+}
+
+/*
+** Delete a keylist
+*/
+void sqliteVdbeKeylistFree(Keylist *p){
+ while( p ){
+ Keylist *pNext = p->pNext;
+ sqliteFree(p);
+ p = pNext;
+ }
+}
+
+/*
+** Close a cursor and release all the resources that cursor happens
+** to hold.
+*/
+void sqliteVdbeCleanupCursor(Cursor *pCx){
+ if( pCx->pCursor ){
+ sqliteBtreeCloseCursor(pCx->pCursor);
+ }
+ if( pCx->pBt ){
+ sqliteBtreeClose(pCx->pBt);
+ }
+ sqliteFree(pCx->pData);
+ memset(pCx, 0, sizeof(Cursor));
+}
+
+/*
+** Close all cursors
+*/
+static void closeAllCursors(Vdbe *p){
+ int i;
+ for(i=0; i<p->nCursor; i++){
+ sqliteVdbeCleanupCursor(&p->aCsr[i]);
+ }
+ sqliteFree(p->aCsr);
+ p->aCsr = 0;
+ p->nCursor = 0;
+}
+
+/*
+** Clean up the VM after execution.
+**
+** This routine will automatically close any cursors, lists, and/or
+** sorters that were left open. It also deletes the values of
+** variables in the azVariable[] array.
+*/
+static void Cleanup(Vdbe *p){
+ int i;
+ if( p->aStack ){
+ Mem *pTos = p->pTos;
+ while( pTos>=p->aStack ){
+ if( pTos->flags & MEM_Dyn ){
+ sqliteFree(pTos->z);
+ }
+ pTos--;
+ }
+ p->pTos = pTos;
+ }
+ closeAllCursors(p);
+ if( p->aMem ){
+ for(i=0; i<p->nMem; i++){
+ if( p->aMem[i].flags & MEM_Dyn ){
+ sqliteFree(p->aMem[i].z);
+ }
+ }
+ }
+ sqliteFree(p->aMem);
+ p->aMem = 0;
+ p->nMem = 0;
+ if( p->pList ){
+ sqliteVdbeKeylistFree(p->pList);
+ p->pList = 0;
+ }
+ sqliteVdbeSorterReset(p);
+ if( p->pFile ){
+ if( p->pFile!=stdin ) fclose(p->pFile);
+ p->pFile = 0;
+ }
+ if( p->azField ){
+ sqliteFree(p->azField);
+ p->azField = 0;
+ }
+ p->nField = 0;
+ if( p->zLine ){
+ sqliteFree(p->zLine);
+ p->zLine = 0;
+ }
+ p->nLineAlloc = 0;
+ sqliteVdbeAggReset(&p->agg);
+ if( p->aSet ){
+ for(i=0; i<p->nSet; i++){
+ sqliteHashClear(&p->aSet[i].hash);
+ }
+ }
+ sqliteFree(p->aSet);
+ p->aSet = 0;
+ p->nSet = 0;
+ if( p->keylistStack ){
+ int ii;
+ for(ii = 0; ii < p->keylistStackDepth; ii++){
+ sqliteVdbeKeylistFree(p->keylistStack[ii]);
+ }
+ sqliteFree(p->keylistStack);
+ p->keylistStackDepth = 0;
+ p->keylistStack = 0;
+ }
+ sqliteFree(p->contextStack);
+ p->contextStack = 0;
+ sqliteFree(p->zErrMsg);
+ p->zErrMsg = 0;
+}
+
+/*
+** Clean up a VDBE after execution but do not delete the VDBE just yet.
+** Write any error messages into *pzErrMsg. Return the result code.
+**
+** After this routine is run, the VDBE should be ready to be executed
+** again.
+*/
+int sqliteVdbeReset(Vdbe *p, char **pzErrMsg){
+ sqlite *db = p->db;
+ int i;
+
+ if( p->magic!=VDBE_MAGIC_RUN && p->magic!=VDBE_MAGIC_HALT ){
+ sqliteSetString(pzErrMsg, sqlite_error_string(SQLITE_MISUSE), (char*)0);
+ return SQLITE_MISUSE;
+ }
+ if( p->zErrMsg ){
+ if( pzErrMsg && *pzErrMsg==0 ){
+ *pzErrMsg = p->zErrMsg;
+ }else{
+ sqliteFree(p->zErrMsg);
+ }
+ p->zErrMsg = 0;
+ }else if( p->rc ){
+ sqliteSetString(pzErrMsg, sqlite_error_string(p->rc), (char*)0);
+ }
+ Cleanup(p);
+ if( p->rc!=SQLITE_OK ){
+ switch( p->errorAction ){
+ case OE_Abort: {
+ if( !p->undoTransOnError ){
+ for(i=0; i<db->nDb; i++){
+ if( db->aDb[i].pBt ){
+ sqliteBtreeRollbackCkpt(db->aDb[i].pBt);
+ }
+ }
+ break;
+ }
+ /* Fall through to ROLLBACK */
+ }
+ case OE_Rollback: {
+ sqliteRollbackAll(db);
+ db->flags &= ~SQLITE_InTrans;
+ db->onError = OE_Default;
+ break;
+ }
+ default: {
+ if( p->undoTransOnError ){
+ sqliteRollbackAll(db);
+ db->flags &= ~SQLITE_InTrans;
+ db->onError = OE_Default;
+ }
+ break;
+ }
+ }
+ sqliteRollbackInternalChanges(db);
+ }
+ for(i=0; i<db->nDb; i++){
+ if( db->aDb[i].pBt && db->aDb[i].inTrans==2 ){
+ sqliteBtreeCommitCkpt(db->aDb[i].pBt);
+ db->aDb[i].inTrans = 1;
+ }
+ }
+ assert( p->pTos<&p->aStack[p->pc] || sqlite_malloc_failed==1 );
+#ifdef VDBE_PROFILE
+ {
+ FILE *out = fopen("vdbe_profile.out", "a");
+ if( out ){
+ int i;
+ fprintf(out, "---- ");
+ for(i=0; i<p->nOp; i++){
+ fprintf(out, "%02x", p->aOp[i].opcode);
+ }
+ fprintf(out, "\n");
+ for(i=0; i<p->nOp; i++){
+ fprintf(out, "%6d %10lld %8lld ",
+ p->aOp[i].cnt,
+ p->aOp[i].cycles,
+ p->aOp[i].cnt>0 ? p->aOp[i].cycles/p->aOp[i].cnt : 0
+ );
+ sqliteVdbePrintOp(out, i, &p->aOp[i]);
+ }
+ fclose(out);
+ }
+ }
+#endif
+ p->magic = VDBE_MAGIC_INIT;
+ return p->rc;
+}
+
+/*
+** Clean up and delete a VDBE after execution. Return an integer which is
+** the result code. Write any error message text into *pzErrMsg.
+*/
+int sqliteVdbeFinalize(Vdbe *p, char **pzErrMsg){
+ int rc;
+ sqlite *db;
+
+ if( p->magic!=VDBE_MAGIC_RUN && p->magic!=VDBE_MAGIC_HALT ){
+ sqliteSetString(pzErrMsg, sqlite_error_string(SQLITE_MISUSE), (char*)0);
+ return SQLITE_MISUSE;
+ }
+ db = p->db;
+ rc = sqliteVdbeReset(p, pzErrMsg);
+ sqliteVdbeDelete(p);
+ if( db->want_to_close && db->pVdbe==0 ){
+ sqlite_close(db);
+ }
+ if( rc==SQLITE_SCHEMA ){
+ sqliteResetInternalSchema(db, 0);
+ }
+ return rc;
+}
+
+/*
+** Bind a value to variable number i. Variable $1 in the original SQL
+** corresponds to i==1, $2 corresponds to i==2, and so forth. Passing a
+** NULL zVal binds the variable to NULL; variables that are never bound
+** also evaluate to NULL.
+**
+** This routine overrides any prior binding of the same variable.
+*/
+int sqlite_bind(sqlite_vm *pVm, int i, const char *zVal, int len, int copy){
+ Vdbe *p = (Vdbe*)pVm;
+ if( p->magic!=VDBE_MAGIC_RUN || p->pc!=0 ){
+ return SQLITE_MISUSE;
+ }
+ if( i<1 || i>p->nVar ){
+ return SQLITE_RANGE;
+ }
+ i--;
+ if( p->abVar[i] ){
+ sqliteFree(p->azVar[i]);
+ }
+ if( zVal==0 ){
+ copy = 0;
+ len = 0;
+ }
+ if( len<0 ){
+ len = strlen(zVal)+1;
+ }
+ if( copy ){
+ p->azVar[i] = sqliteMalloc( len );
+ if( p->azVar[i] ) memcpy(p->azVar[i], zVal, len);
+ }else{
+ p->azVar[i] = (char*)zVal;
+ }
+ p->abVar[i] = copy;
+ p->anVar[i] = len;
+ return SQLITE_OK;
+}
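+
+/*
+** A hypothetical caller that compiled "SELECT * FROM tbl WHERE name = $1"
+** could supply the parameter with (the values are examples only):
+**
+**    sqlite_bind(pVm, 1, "alice", -1, 1);   /* copy the string, compute len */
+**
+** Passing copy==0 instead stores the caller's pointer directly, so the
+** string must remain valid until the virtual machine is finalized.
+*/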
+
+
+/*
+** Delete an entire VDBE.
+*/
+void sqliteVdbeDelete(Vdbe *p){
+ int i;
+ if( p==0 ) return;
+ Cleanup(p);
+ if( p->pPrev ){
+ p->pPrev->pNext = p->pNext;
+ }else{
+ assert( p->db->pVdbe==p );
+ p->db->pVdbe = p->pNext;
+ }
+ if( p->pNext ){
+ p->pNext->pPrev = p->pPrev;
+ }
+ p->pPrev = p->pNext = 0;
+ if( p->nOpAlloc==0 ){
+ p->aOp = 0;
+ p->nOp = 0;
+ }
+ for(i=0; i<p->nOp; i++){
+ if( p->aOp[i].p3type==P3_DYNAMIC ){
+ sqliteFree(p->aOp[i].p3);
+ }
+ }
+ for(i=0; i<p->nVar; i++){
+ if( p->abVar[i] ) sqliteFree(p->azVar[i]);
+ }
+ sqliteFree(p->aOp);
+ sqliteFree(p->aLabel);
+ sqliteFree(p->aStack);
+ p->magic = VDBE_MAGIC_DEAD;
+ sqliteFree(p);
+}
+
+/*
+** Convert an integer between the native integer format and
+** the big-endian format used as the record number for tables.
+**
+** The big-endian format (most significant byte first) is used for
+** record numbers so that records will sort into the correct order
+** even though memcmp() is used to compare the keys. On machines
+** whose native integer format is little endian (ex: i486) the
+** order of bytes is reversed. On native big-endian machines
+** (ex: Alpha, Sparc, Motorola) the byte order is the same.
+**
+** This function is its own inverse. In other words
+**
+** X == byteSwap(byteSwap(X))
+*/
+int sqliteVdbeByteSwap(int x){
+ union {
+ char zBuf[sizeof(int)];
+ int i;
+ } ux;
+ ux.zBuf[3] = x&0xff;
+ ux.zBuf[2] = (x>>8)&0xff;
+ ux.zBuf[1] = (x>>16)&0xff;
+ ux.zBuf[0] = (x>>24)&0xff;
+ return ux.i;
+}
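+
+/*
+** For example (illustrative values), on a little-endian host:
+**
+**    sqliteVdbeByteSwap(0x00000001) == 0x01000000
+**    sqliteVdbeByteSwap(sqliteVdbeByteSwap(0x12345678)) == 0x12345678
+**
+** On a big-endian host the routine is effectively an identity, since the
+** bytes are already stored most-significant first.
+*/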
+
+/*
+** If a MoveTo operation is pending on the given cursor, then do that
+** MoveTo now. Return an error code. If no MoveTo is pending, this
+** routine does nothing and returns SQLITE_OK.
+*/
+int sqliteVdbeCursorMoveto(Cursor *p){
+ if( p->deferredMoveto ){
+ int res;
+ extern int sqlite_search_count;
+ sqliteBtreeMoveto(p->pCursor, (char*)&p->movetoTarget, sizeof(int), &res);
+ p->lastRecno = keyToInt(p->movetoTarget);
+ p->recnoIsValid = res==0;
+ if( res<0 ){
+ sqliteBtreeNext(p->pCursor, &res);
+ }
+ sqlite_search_count++;
+ p->deferredMoveto = 0;
+ }
+ return SQLITE_OK;
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/src/where.c b/usr/src/cmd/svc/configd/sqlite/src/where.c
new file mode 100644
index 0000000000..dd2affec73
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/src/where.c
@@ -0,0 +1,1238 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This module contains C code that generates VDBE code used to process
+** the WHERE clause of SQL statements.
+**
+** $Id: where.c,v 1.89.2.2 2004/07/19 19:30:50 drh Exp $
+*/
+#include "sqliteInt.h"
+
+/*
+** The query generator uses an array of instances of this structure to
+** help it analyze the subexpressions of the WHERE clause. Each WHERE
+** clause subexpression is separated from the others by an AND operator.
+*/
+typedef struct ExprInfo ExprInfo;
+struct ExprInfo {
+ Expr *p; /* Pointer to the subexpression */
+ u8 indexable; /* True if this subexpression is usable by an index */
+ short int idxLeft; /* p->pLeft is a column in this table number. -1 if
+ ** p->pLeft is not the column of any table */
+ short int idxRight; /* p->pRight is a column in this table number. -1 if
+ ** p->pRight is not the column of any table */
+ unsigned prereqLeft; /* Bitmask of tables referenced by p->pLeft */
+ unsigned prereqRight; /* Bitmask of tables referenced by p->pRight */
+ unsigned prereqAll; /* Bitmask of tables referenced by p */
+};
+
+/*
+** An instance of the following structure keeps track of a mapping
+** between VDBE cursor numbers and bitmasks. The VDBE cursor numbers
+** are small integers contained in SrcList_item.iCursor and Expr.iTable
+** fields. For any given WHERE clause, we want to track which cursors
+** are being used, so we assign a single bit in a 32-bit word to track
+** that cursor. Then a 32-bit integer is able to show the set of all
+** cursors being used.
+*/
+typedef struct ExprMaskSet ExprMaskSet;
+struct ExprMaskSet {
+ int n; /* Number of assigned cursor values */
+ int ix[31]; /* Cursor assigned to each bit */
+};
+
+/*
+** Determine the number of elements in an array.
+*/
+#define ARRAYSIZE(X) (sizeof(X)/sizeof(X[0]))
+
+/*
+** This routine is used to divide the WHERE expression into subexpressions
+** separated by the AND operator.
+**
+** aSlot[] is an array of subexpression structures.
+** There are nSlot spaces left in this array. This routine attempts to
+** split pExpr into subexpressions and fills aSlot[] with those subexpressions.
+** The return value is the number of slots filled.
+*/
+static int exprSplit(int nSlot, ExprInfo *aSlot, Expr *pExpr){
+ int cnt = 0;
+ if( pExpr==0 || nSlot<1 ) return 0;
+ if( nSlot==1 || pExpr->op!=TK_AND ){
+ aSlot[0].p = pExpr;
+ return 1;
+ }
+ if( pExpr->pLeft->op!=TK_AND ){
+ aSlot[0].p = pExpr->pLeft;
+ cnt = 1 + exprSplit(nSlot-1, &aSlot[1], pExpr->pRight);
+ }else{
+ cnt = exprSplit(nSlot, aSlot, pExpr->pLeft);
+ cnt += exprSplit(nSlot-cnt, &aSlot[cnt], pExpr->pRight);
+ }
+ return cnt;
+}
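+
+/*
+** For example (illustrative only), the WHERE expression
+**
+**    a=1 AND b>2 AND c IN (3,4)
+**
+** is split into three ExprInfo slots, one holding a=1, one holding b>2,
+** and one holding c IN (3,4); the AND operators themselves disappear.
+*/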
+
+/*
+** Initialize an expression mask set
+*/
+#define initMaskSet(P) memset(P, 0, sizeof(*P))
+
+/*
+** Return the bitmask for the given cursor. Assign a new bitmask
+** if this is the first time the cursor has been seen.
+*/
+static int getMask(ExprMaskSet *pMaskSet, int iCursor){
+ int i;
+ for(i=0; i<pMaskSet->n; i++){
+ if( pMaskSet->ix[i]==iCursor ) return 1<<i;
+ }
+ if( i==pMaskSet->n && i<ARRAYSIZE(pMaskSet->ix) ){
+ pMaskSet->n++;
+ pMaskSet->ix[i] = iCursor;
+ return 1<<i;
+ }
+ return 0;
+}
+
+/*
+** Destroy an expression mask set
+*/
+#define freeMaskSet(P) /* NO-OP */
+
+/*
+** This routine walks (recursively) an expression tree and generates
+** a bitmask indicating which tables are used in that expression
+** tree.
+**
+** In order for this routine to work, the calling function must have
+** previously invoked sqliteExprResolveIds() on the expression. See
+** the header comment on that routine for additional information.
+** The sqliteExprResolveIds() routines looks for column names and
+** sets their opcodes to TK_COLUMN and their Expr.iTable fields to
+** the VDBE cursor number of the table.
+*/
+static int exprTableUsage(ExprMaskSet *pMaskSet, Expr *p){
+ unsigned int mask = 0;
+ if( p==0 ) return 0;
+ if( p->op==TK_COLUMN ){
+ mask = getMask(pMaskSet, p->iTable);
+ if( mask==0 ) mask = -1;
+ return mask;
+ }
+ if( p->pRight ){
+ mask = exprTableUsage(pMaskSet, p->pRight);
+ }
+ if( p->pLeft ){
+ mask |= exprTableUsage(pMaskSet, p->pLeft);
+ }
+ if( p->pList ){
+ int i;
+ for(i=0; i<p->pList->nExpr; i++){
+ mask |= exprTableUsage(pMaskSet, p->pList->a[i].pExpr);
+ }
+ }
+ return mask;
+}
+
+/*
+** Return TRUE if the given operator is one of the operators that is
+** allowed for an indexable WHERE clause. The allowed operators are
+** "=", "<", ">", "<=", ">=", and "IN".
+*/
+static int allowedOp(int op){
+ switch( op ){
+ case TK_LT:
+ case TK_LE:
+ case TK_GT:
+ case TK_GE:
+ case TK_EQ:
+ case TK_IN:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/*
+** The input to this routine is an ExprInfo structure with only the
+** "p" field filled in. The job of this routine is to analyze the
+** subexpression and populate all the other fields of the ExprInfo
+** structure.
+*/
+static void exprAnalyze(ExprMaskSet *pMaskSet, ExprInfo *pInfo){
+ Expr *pExpr = pInfo->p;
+ pInfo->prereqLeft = exprTableUsage(pMaskSet, pExpr->pLeft);
+ pInfo->prereqRight = exprTableUsage(pMaskSet, pExpr->pRight);
+ pInfo->prereqAll = exprTableUsage(pMaskSet, pExpr);
+ pInfo->indexable = 0;
+ pInfo->idxLeft = -1;
+ pInfo->idxRight = -1;
+ if( allowedOp(pExpr->op) && (pInfo->prereqRight & pInfo->prereqLeft)==0 ){
+ if( pExpr->pRight && pExpr->pRight->op==TK_COLUMN ){
+ pInfo->idxRight = pExpr->pRight->iTable;
+ pInfo->indexable = 1;
+ }
+ if( pExpr->pLeft->op==TK_COLUMN ){
+ pInfo->idxLeft = pExpr->pLeft->iTable;
+ pInfo->indexable = 1;
+ }
+ }
+}
+
+/*
+** pOrderBy is an ORDER BY clause from a SELECT statement. pTab is the
+** left-most table in the FROM clause of that same SELECT statement and
+** the table has a cursor number of "base".
+**
+** This routine attempts to find an index for pTab that generates the
+** correct record sequence for the given ORDER BY clause. The return value
+** is a pointer to an index that does the job. NULL is returned if the
+** table has no index that will generate the correct sort order.
+**
+** If there are two or more indices that generate the correct sort order
+** and pPreferredIdx is one of those indices, then return pPreferredIdx.
+**
+** nEqCol is the number of columns of pPreferredIdx that are used as
+** equality constraints. Any index returned must have exactly this same
+** set of columns. The ORDER BY clause only matches index columns beyond
+** the first nEqCol columns.
+**
+** All terms of the ORDER BY clause must be either ASC or DESC. The
+** *pbRev value is set to 1 if the ORDER BY clause is all DESC and it is
+** set to 0 if the ORDER BY clause is all ASC.
+*/
+static Index *findSortingIndex(
+ Table *pTab, /* The table to be sorted */
+ int base, /* Cursor number for pTab */
+ ExprList *pOrderBy, /* The ORDER BY clause */
+ Index *pPreferredIdx, /* Use this index, if possible and not NULL */
+ int nEqCol, /* Number of index columns used with == constraints */
+ int *pbRev /* Set to 1 if ORDER BY is DESC */
+){
+ int i, j;
+ Index *pMatch;
+ Index *pIdx;
+ int sortOrder;
+
+ assert( pOrderBy!=0 );
+ assert( pOrderBy->nExpr>0 );
+ sortOrder = pOrderBy->a[0].sortOrder & SQLITE_SO_DIRMASK;
+ for(i=0; i<pOrderBy->nExpr; i++){
+ Expr *p;
+ if( (pOrderBy->a[i].sortOrder & SQLITE_SO_DIRMASK)!=sortOrder ){
+ /* Indices can only be used if all ORDER BY terms are either
+ ** DESC or ASC. Indices cannot be used on a mixture. */
+ return 0;
+ }
+ if( (pOrderBy->a[i].sortOrder & SQLITE_SO_TYPEMASK)!=SQLITE_SO_UNK ){
+ /* Do not sort by index if there is a COLLATE clause */
+ return 0;
+ }
+ p = pOrderBy->a[i].pExpr;
+ if( p->op!=TK_COLUMN || p->iTable!=base ){
+ /* Can not use an index sort on anything that is not a column in the
+ ** left-most table of the FROM clause */
+ return 0;
+ }
+ }
+
+ /* If we get this far, it means the ORDER BY clause consists only of
+ ** ascending columns in the left-most table of the FROM clause. Now
+ ** check for a matching index.
+ */
+ pMatch = 0;
+ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+ int nExpr = pOrderBy->nExpr;
+ if( pIdx->nColumn < nEqCol || pIdx->nColumn < nExpr ) continue;
+ for(i=j=0; i<nEqCol; i++){
+ if( pPreferredIdx->aiColumn[i]!=pIdx->aiColumn[i] ) break;
+ if( j<nExpr && pOrderBy->a[j].pExpr->iColumn==pIdx->aiColumn[i] ){ j++; }
+ }
+ if( i<nEqCol ) continue;
+ for(i=0; i+j<nExpr; i++){
+ if( pOrderBy->a[i+j].pExpr->iColumn!=pIdx->aiColumn[i+nEqCol] ) break;
+ }
+ if( i+j>=nExpr ){
+ pMatch = pIdx;
+ if( pIdx==pPreferredIdx ) break;
+ }
+ }
+ if( pMatch && pbRev ){
+ *pbRev = sortOrder==SQLITE_SO_DESC;
+ }
+ return pMatch;
+}
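+
+/*
+** For example (hypothetical index): given an index on (x,y,z), nEqCol==1
+** because "x" is fixed by an equality constraint, and an ORDER BY of "y,z"
+** with both terms ASC, the loops above skip the first nEqCol column and
+** match y and z against aiColumn[1] and aiColumn[2], so that index is
+** returned and *pbRev is set to 0.  A mixed ASC/DESC ORDER BY always
+** yields NULL.
+*/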
+
+/*
+** Disable a term in the WHERE clause. Except, do not disable the term
+** if it controls a LEFT OUTER JOIN and it did not originate in the ON
+** or USING clause of that join.
+**
+** Consider the term t2.z='ok' in the following queries:
+**
+** (1) SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.x WHERE t2.z='ok'
+** (2) SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.x AND t2.z='ok'
+** (3) SELECT * FROM t1, t2 WHERE t1.a=t2.x AND t2.z='ok'
+**
+** The t2.z='ok' term is disabled in (2) because it did not originate
+** in the ON clause. The term is disabled in (3) because it is not part
+** of a LEFT OUTER JOIN. In (1), the term is not disabled.
+**
+** Disabling a term causes that term to not be tested in the inner loop
+** of the join. Disabling is an optimization. We would get the correct
+** results if nothing were ever disabled, but joins might run a little
+** slower. The trick is to disable as much as we can without disabling
+** too much. If we disabled in (1), we'd get the wrong answer.
+** See ticket #813.
+*/
+static void disableTerm(WhereLevel *pLevel, Expr **ppExpr){
+ Expr *pExpr = *ppExpr;
+ if( pLevel->iLeftJoin==0 || ExprHasProperty(pExpr, EP_FromJoin) ){
+ *ppExpr = 0;
+ }
+}
+
+/*
+** Generate the beginning of the loop used for WHERE clause processing.
+** The return value is a pointer to an (opaque) structure that contains
+** information needed to terminate the loop. Later, the calling routine
+** should invoke sqliteWhereEnd() with the return value of this function
+** in order to complete the WHERE clause processing.
+**
+** If an error occurs, this routine returns NULL.
+**
+** The basic idea is to do a nested loop, one loop for each table in
+** the FROM clause of a select. (INSERT and UPDATE statements are the
+** same as a SELECT with only a single table in the FROM clause.) For
+** example, if the SQL is this:
+**
+** SELECT * FROM t1, t2, t3 WHERE ...;
+**
+** Then the code generated is conceptually like the following:
+**
+** foreach row1 in t1 do \ Code generated
+** foreach row2 in t2 do |-- by sqliteWhereBegin()
+** foreach row3 in t3 do /
+** ...
+** end \ Code generated
+** end |-- by sqliteWhereEnd()
+** end /
+**
+** There are Btree cursors associated with each table. t1 uses cursor
+** number pTabList->a[0].iCursor. t2 uses the cursor pTabList->a[1].iCursor.
+** And so forth. This routine generates code to open those VDBE cursors
+** and sqliteWhereEnd() generates the code to close them.
+**
+** If the WHERE clause is empty, the foreach loops must each scan their
+** entire tables. Thus a three-way join is an O(N^3) operation. But if
+** the tables have indices and there are terms in the WHERE clause that
+** refer to those indices, a complete table scan can be avoided and the
+** code will run much faster. Most of the work of this routine is checking
+** to see if there are indices that can be used to speed up the loop.
+**
+** Terms of the WHERE clause are also used to limit which rows actually
+** make it to the "..." in the middle of the loop. After each "foreach",
+** terms of the WHERE clause that use only terms in that loop and outer
+** loops are evaluated and if false a jump is made around all subsequent
+** inner loops (or around the "..." if the test occurs within the inner-
+** most loop).
+**
+** OUTER JOINS
+**
+** An outer join of tables t1 and t2 is conceptually coded as follows:
+**
+** foreach row1 in t1 do
+** flag = 0
+** foreach row2 in t2 do
+** start:
+** ...
+** flag = 1
+** end
+** if flag==0 then
+** move the row2 cursor to a null row
+** goto start
+** fi
+** end
+**
+** ORDER BY CLAUSE PROCESSING
+**
+** *ppOrderBy is a pointer to the ORDER BY clause of a SELECT statement,
+** if there is one. If there is no ORDER BY clause or if this routine
+** is called from an UPDATE or DELETE statement, then ppOrderBy is NULL.
+**
+** If an index can be used so that the natural output order of the table
+** scan is correct for the ORDER BY clause, then that index is used and
+** *ppOrderBy is set to NULL. This is an optimization that prevents an
+** unnecessary sort of the result set if an index appropriate for the
+** ORDER BY clause already exists.
+**
+** If the WHERE clause loops cannot be arranged to provide the correct
+** output order, then the *ppOrderBy is unchanged.
+*/
+WhereInfo *sqliteWhereBegin(
+ Parse *pParse, /* The parser context */
+ SrcList *pTabList, /* A list of all tables to be scanned */
+ Expr *pWhere, /* The WHERE clause */
+ int pushKey, /* If TRUE, leave the table key on the stack */
+ ExprList **ppOrderBy /* An ORDER BY clause, or NULL */
+){
+ int i; /* Loop counter */
+ WhereInfo *pWInfo; /* Will become the return value of this function */
+ Vdbe *v = pParse->pVdbe; /* The virtual database engine */
+ int brk, cont = 0; /* Addresses used during code generation */
+ int nExpr; /* Number of subexpressions in the WHERE clause */
+ int loopMask; /* One bit set for each outer loop */
+ int haveKey; /* True if KEY is on the stack */
+ ExprMaskSet maskSet; /* The expression mask set */
+ int iDirectEq[32]; /* Term of the form ROWID==X for the N-th table */
+ int iDirectLt[32]; /* Term of the form ROWID<X or ROWID<=X */
+ int iDirectGt[32]; /* Term of the form ROWID>X or ROWID>=X */
+ ExprInfo aExpr[101]; /* The WHERE clause is divided into these expressions */
+
+ /* pushKey is only allowed if there is a single table (as in an INSERT or
+ ** UPDATE statement)
+ */
+ assert( pushKey==0 || pTabList->nSrc==1 );
+
+ /* Split the WHERE clause into separate subexpressions where each
+ ** subexpression is separated by an AND operator. If the aExpr[]
+ ** array fills up, the last entry might point to an expression which
+ ** contains additional unfactored AND operators.
+ */
+ initMaskSet(&maskSet);
+ memset(aExpr, 0, sizeof(aExpr));
+ nExpr = exprSplit(ARRAYSIZE(aExpr), aExpr, pWhere);
+ if( nExpr==ARRAYSIZE(aExpr) ){
+ sqliteErrorMsg(pParse, "WHERE clause too complex - no more "
+ "than %d terms allowed", (int)ARRAYSIZE(aExpr)-1);
+ return 0;
+ }
+
+ /* Allocate and initialize the WhereInfo structure that will become the
+ ** return value.
+ */
+ pWInfo = sqliteMalloc( sizeof(WhereInfo) + pTabList->nSrc*sizeof(WhereLevel));
+ if( sqlite_malloc_failed ){
+ sqliteFree(pWInfo);
+ return 0;
+ }
+ pWInfo->pParse = pParse;
+ pWInfo->pTabList = pTabList;
+ pWInfo->peakNTab = pWInfo->savedNTab = pParse->nTab;
+ pWInfo->iBreak = sqliteVdbeMakeLabel(v);
+
+ /* Special case: a WHERE clause that is constant. Evaluate the
+ ** expression and either jump over all of the code or fall thru.
+ */
+ if( pWhere && (pTabList->nSrc==0 || sqliteExprIsConstant(pWhere)) ){
+ sqliteExprIfFalse(pParse, pWhere, pWInfo->iBreak, 1);
+ pWhere = 0;
+ }
+
+ /* Analyze all of the subexpressions.
+ */
+ for(i=0; i<nExpr; i++){
+ exprAnalyze(&maskSet, &aExpr[i]);
+
+ /* If we are executing a trigger body, remove all references to
+ ** new.* and old.* tables from the prerequisite masks.
+ */
+ if( pParse->trigStack ){
+ int x;
+ if( (x = pParse->trigStack->newIdx) >= 0 ){
+ int mask = ~getMask(&maskSet, x);
+ aExpr[i].prereqRight &= mask;
+ aExpr[i].prereqLeft &= mask;
+ aExpr[i].prereqAll &= mask;
+ }
+ if( (x = pParse->trigStack->oldIdx) >= 0 ){
+ int mask = ~getMask(&maskSet, x);
+ aExpr[i].prereqRight &= mask;
+ aExpr[i].prereqLeft &= mask;
+ aExpr[i].prereqAll &= mask;
+ }
+ }
+ }
+
+ /* Figure out what index to use (if any) for each nested loop.
+ ** Make pWInfo->a[i].pIdx point to the index to use for the i-th nested
+ ** loop where i==0 is the outer loop and i==pTabList->nSrc-1 is the inner
+ ** loop.
+ **
+ ** If terms exist that use the ROWID of any table, then set the
+ ** iDirectEq[], iDirectLt[], or iDirectGt[] elements for that table
+ ** to the index of the term containing the ROWID. We always prefer
+  ** to use a ROWID, which can directly access a table, rather than an
+  ** index, which requires reading the index first to get the rowid and
+  ** then doing a second read of the actual database table.
+ **
+ ** Actually, if there are more than 32 tables in the join, only the
+ ** first 32 tables are candidates for indices. This is (again) due
+ ** to the limit of 32 bits in an integer bitmask.
+ */
+ loopMask = 0;
+ for(i=0; i<pTabList->nSrc && i<ARRAYSIZE(iDirectEq); i++){
+ int j;
+ int iCur = pTabList->a[i].iCursor; /* The cursor for this table */
+ int mask = getMask(&maskSet, iCur); /* Cursor mask for this table */
+ Table *pTab = pTabList->a[i].pTab;
+ Index *pIdx;
+ Index *pBestIdx = 0;
+ int bestScore = 0;
+
+ /* Check to see if there is an expression that uses only the
+ ** ROWID field of this table. For terms of the form ROWID==expr
+ ** set iDirectEq[i] to the index of the term. For terms of the
+ ** form ROWID<expr or ROWID<=expr set iDirectLt[i] to the term index.
+ ** For terms like ROWID>expr or ROWID>=expr set iDirectGt[i].
+ **
+ ** (Added:) Treat ROWID IN expr like ROWID=expr.
+ */
+ pWInfo->a[i].iCur = -1;
+ iDirectEq[i] = -1;
+ iDirectLt[i] = -1;
+ iDirectGt[i] = -1;
+ for(j=0; j<nExpr; j++){
+ if( aExpr[j].idxLeft==iCur && aExpr[j].p->pLeft->iColumn<0
+ && (aExpr[j].prereqRight & loopMask)==aExpr[j].prereqRight ){
+ switch( aExpr[j].p->op ){
+ case TK_IN:
+ case TK_EQ: iDirectEq[i] = j; break;
+ case TK_LE:
+ case TK_LT: iDirectLt[i] = j; break;
+ case TK_GE:
+ case TK_GT: iDirectGt[i] = j; break;
+ }
+ }
+ if( aExpr[j].idxRight==iCur && aExpr[j].p->pRight->iColumn<0
+ && (aExpr[j].prereqLeft & loopMask)==aExpr[j].prereqLeft ){
+ switch( aExpr[j].p->op ){
+ case TK_EQ: iDirectEq[i] = j; break;
+ case TK_LE:
+ case TK_LT: iDirectGt[i] = j; break;
+ case TK_GE:
+ case TK_GT: iDirectLt[i] = j; break;
+ }
+ }
+ }
+ if( iDirectEq[i]>=0 ){
+ loopMask |= mask;
+ pWInfo->a[i].pIdx = 0;
+ continue;
+ }
+
+ /* Do a search for usable indices. Leave pBestIdx pointing to
+ ** the "best" index. pBestIdx is left set to NULL if no indices
+ ** are usable.
+ **
+ ** The best index is determined as follows. For each of the
+ ** left-most terms that is fixed by an equality operator, add
+ ** 8 to the score. The right-most term of the index may be
+    ** constrained by an inequality. Add 1 for an "x<..." constraint
+    ** and add 2 for an "x>..." constraint. Choose the index that
+ ** gives the best score.
+ **
+ ** This scoring system is designed so that the score can later be
+ ** used to determine how the index is used. If the score&7 is 0
+ ** then all constraints are equalities. If score&1 is not 0 then
+ ** there is an inequality used as a termination key. (ex: "x<...")
+ ** If score&2 is not 0 then there is an inequality used as the
+    ** start key. (ex: "x>..."). A score of 4 is the special case
+ ** of an IN operator constraint. (ex: "x IN ...").
+ **
+ ** The IN operator (as in "<expr> IN (...)") is treated the same as
+ ** an equality comparison except that it can only be used on the
+ ** left-most column of an index and other terms of the WHERE clause
+ ** cannot be used in conjunction with the IN operator to help satisfy
+ ** other columns of the index.
+ */
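+    /*
+    ** Worked example (hypothetical index and WHERE clause): for an index
+    ** on (x,y,z) and the terms "x=5 AND y=7 AND z>3", the loop below sets
+    ** eqMask to 0x3 and gtMask to 0x4, so nEq==2 and the score is
+    ** 2*8 + 2 == 18.  score&7==2 then tells the code generator that the
+    ** ">" term supplies the start key after the two equality columns.
+    */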
+ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+ int eqMask = 0; /* Index columns covered by an x=... term */
+ int ltMask = 0; /* Index columns covered by an x<... term */
+ int gtMask = 0; /* Index columns covered by an x>... term */
+ int inMask = 0; /* Index columns covered by an x IN .. term */
+ int nEq, m, score;
+
+      if( pIdx->nColumn>32 ) continue;  /* Ignore indices with too many columns */
+ for(j=0; j<nExpr; j++){
+ if( aExpr[j].idxLeft==iCur
+ && (aExpr[j].prereqRight & loopMask)==aExpr[j].prereqRight ){
+ int iColumn = aExpr[j].p->pLeft->iColumn;
+ int k;
+ for(k=0; k<pIdx->nColumn; k++){
+ if( pIdx->aiColumn[k]==iColumn ){
+ switch( aExpr[j].p->op ){
+ case TK_IN: {
+ if( k==0 ) inMask |= 1;
+ break;
+ }
+ case TK_EQ: {
+ eqMask |= 1<<k;
+ break;
+ }
+ case TK_LE:
+ case TK_LT: {
+ ltMask |= 1<<k;
+ break;
+ }
+ case TK_GE:
+ case TK_GT: {
+ gtMask |= 1<<k;
+ break;
+ }
+ default: {
+ /* CANT_HAPPEN */
+ assert( 0 );
+ break;
+ }
+ }
+ break;
+ }
+ }
+ }
+ if( aExpr[j].idxRight==iCur
+ && (aExpr[j].prereqLeft & loopMask)==aExpr[j].prereqLeft ){
+ int iColumn = aExpr[j].p->pRight->iColumn;
+ int k;
+ for(k=0; k<pIdx->nColumn; k++){
+ if( pIdx->aiColumn[k]==iColumn ){
+ switch( aExpr[j].p->op ){
+ case TK_EQ: {
+ eqMask |= 1<<k;
+ break;
+ }
+ case TK_LE:
+ case TK_LT: {
+ gtMask |= 1<<k;
+ break;
+ }
+ case TK_GE:
+ case TK_GT: {
+ ltMask |= 1<<k;
+ break;
+ }
+ default: {
+ /* CANT_HAPPEN */
+ assert( 0 );
+ break;
+ }
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ /* The following loop ends with nEq set to the number of columns
+ ** on the left of the index with == constraints.
+ */
+ for(nEq=0; nEq<pIdx->nColumn; nEq++){
+ m = (1<<(nEq+1))-1;
+ if( (m & eqMask)!=m ) break;
+ }
+ score = nEq*8; /* Base score is 8 times number of == constraints */
+ m = 1<<nEq;
+ if( m & ltMask ) score++; /* Increase score for a < constraint */
+ if( m & gtMask ) score+=2; /* Increase score for a > constraint */
+ if( score==0 && inMask ) score = 4; /* Default score for IN constraint */
+ if( score>bestScore ){
+ pBestIdx = pIdx;
+ bestScore = score;
+ }
+ }
+ pWInfo->a[i].pIdx = pBestIdx;
+ pWInfo->a[i].score = bestScore;
+ pWInfo->a[i].bRev = 0;
+ loopMask |= mask;
+ if( pBestIdx ){
+ pWInfo->a[i].iCur = pParse->nTab++;
+ pWInfo->peakNTab = pParse->nTab;
+ }
+ }
+
+ /* Check to see if the ORDER BY clause is or can be satisfied by the
+ ** use of an index on the first table.
+ */
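+  /*
+  ** For example (hypothetical schema): for "SELECT * FROM t1 WHERE a=5
+  ** ORDER BY b" with an index on (a,b), the index chosen above scores 8,
+  ** nEqCol is therefore 1, findSortingIndex() returns the same index, and
+  ** *ppOrderBy is cleared so that no separate sorting pass is generated.
+  */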
+ if( ppOrderBy && *ppOrderBy && pTabList->nSrc>0 ){
+ Index *pSortIdx;
+ Index *pIdx;
+ Table *pTab;
+ int bRev = 0;
+
+ pTab = pTabList->a[0].pTab;
+ pIdx = pWInfo->a[0].pIdx;
+ if( pIdx && pWInfo->a[0].score==4 ){
+ /* If there is already an IN index on the left-most table,
+ ** it will not give the correct sort order.
+ ** So, pretend that no suitable index is found.
+ */
+ pSortIdx = 0;
+ }else if( iDirectEq[0]>=0 || iDirectLt[0]>=0 || iDirectGt[0]>=0 ){
+ /* If the left-most column is accessed using its ROWID, then do
+ ** not try to sort by index.
+ */
+ pSortIdx = 0;
+ }else{
+ int nEqCol = (pWInfo->a[0].score+4)/8;
+ pSortIdx = findSortingIndex(pTab, pTabList->a[0].iCursor,
+ *ppOrderBy, pIdx, nEqCol, &bRev);
+ }
+ if( pSortIdx && (pIdx==0 || pIdx==pSortIdx) ){
+ if( pIdx==0 ){
+ pWInfo->a[0].pIdx = pSortIdx;
+ pWInfo->a[0].iCur = pParse->nTab++;
+ pWInfo->peakNTab = pParse->nTab;
+ }
+ pWInfo->a[0].bRev = bRev;
+ *ppOrderBy = 0;
+ }
+ }
+
+ /* Open all tables in the pTabList and all indices used by those tables.
+ */
+ for(i=0; i<pTabList->nSrc; i++){
+ Table *pTab;
+ Index *pIx;
+
+ pTab = pTabList->a[i].pTab;
+ if( pTab->isTransient || pTab->pSelect ) continue;
+ sqliteVdbeAddOp(v, OP_Integer, pTab->iDb, 0);
+ sqliteVdbeOp3(v, OP_OpenRead, pTabList->a[i].iCursor, pTab->tnum,
+ pTab->zName, P3_STATIC);
+ sqliteCodeVerifySchema(pParse, pTab->iDb);
+ if( (pIx = pWInfo->a[i].pIdx)!=0 ){
+ sqliteVdbeAddOp(v, OP_Integer, pIx->iDb, 0);
+ sqliteVdbeOp3(v, OP_OpenRead, pWInfo->a[i].iCur, pIx->tnum, pIx->zName,0);
+ }
+ }
+
+ /* Generate the code to do the search
+ */
+ loopMask = 0;
+ for(i=0; i<pTabList->nSrc; i++){
+ int j, k;
+ int iCur = pTabList->a[i].iCursor;
+ Index *pIdx;
+ WhereLevel *pLevel = &pWInfo->a[i];
+
+ /* If this is the right table of a LEFT OUTER JOIN, allocate and
+ ** initialize a memory cell that records if this table matches any
+ ** row of the left table of the join.
+ */
+ if( i>0 && (pTabList->a[i-1].jointype & JT_LEFT)!=0 ){
+ if( !pParse->nMem ) pParse->nMem++;
+ pLevel->iLeftJoin = pParse->nMem++;
+ sqliteVdbeAddOp(v, OP_String, 0, 0);
+ sqliteVdbeAddOp(v, OP_MemStore, pLevel->iLeftJoin, 1);
+ }
+
+ pIdx = pLevel->pIdx;
+ pLevel->inOp = OP_Noop;
+ if( i<ARRAYSIZE(iDirectEq) && iDirectEq[i]>=0 ){
+ /* Case 1: We can directly reference a single row using an
+ ** equality comparison against the ROWID field. Or
+ ** we reference multiple rows using a "rowid IN (...)"
+ ** construct.
+ */
+ k = iDirectEq[i];
+ assert( k<nExpr );
+ assert( aExpr[k].p!=0 );
+ assert( aExpr[k].idxLeft==iCur || aExpr[k].idxRight==iCur );
+ brk = pLevel->brk = sqliteVdbeMakeLabel(v);
+ if( aExpr[k].idxLeft==iCur ){
+ Expr *pX = aExpr[k].p;
+ if( pX->op!=TK_IN ){
+ sqliteExprCode(pParse, aExpr[k].p->pRight);
+ }else if( pX->pList ){
+ sqliteVdbeAddOp(v, OP_SetFirst, pX->iTable, brk);
+ pLevel->inOp = OP_SetNext;
+ pLevel->inP1 = pX->iTable;
+ pLevel->inP2 = sqliteVdbeCurrentAddr(v);
+ }else{
+ assert( pX->pSelect );
+ sqliteVdbeAddOp(v, OP_Rewind, pX->iTable, brk);
+ sqliteVdbeAddOp(v, OP_KeyAsData, pX->iTable, 1);
+ pLevel->inP2 = sqliteVdbeAddOp(v, OP_FullKey, pX->iTable, 0);
+ pLevel->inOp = OP_Next;
+ pLevel->inP1 = pX->iTable;
+ }
+ }else{
+ sqliteExprCode(pParse, aExpr[k].p->pLeft);
+ }
+ disableTerm(pLevel, &aExpr[k].p);
+ cont = pLevel->cont = sqliteVdbeMakeLabel(v);
+ sqliteVdbeAddOp(v, OP_MustBeInt, 1, brk);
+ haveKey = 0;
+ sqliteVdbeAddOp(v, OP_NotExists, iCur, brk);
+ pLevel->op = OP_Noop;
+ }else if( pIdx!=0 && pLevel->score>0 && pLevel->score%4==0 ){
+ /* Case 2: There is an index and all terms of the WHERE clause that
+ ** refer to the index use the "==" or "IN" operators.
+ */
+ int start;
+ int testOp;
+ int nColumn = (pLevel->score+4)/8;
+ brk = pLevel->brk = sqliteVdbeMakeLabel(v);
+ for(j=0; j<nColumn; j++){
+ for(k=0; k<nExpr; k++){
+ Expr *pX = aExpr[k].p;
+ if( pX==0 ) continue;
+ if( aExpr[k].idxLeft==iCur
+ && (aExpr[k].prereqRight & loopMask)==aExpr[k].prereqRight
+ && pX->pLeft->iColumn==pIdx->aiColumn[j]
+ ){
+ if( pX->op==TK_EQ ){
+ sqliteExprCode(pParse, pX->pRight);
+ disableTerm(pLevel, &aExpr[k].p);
+ break;
+ }
+ if( pX->op==TK_IN && nColumn==1 ){
+ if( pX->pList ){
+ sqliteVdbeAddOp(v, OP_SetFirst, pX->iTable, brk);
+ pLevel->inOp = OP_SetNext;
+ pLevel->inP1 = pX->iTable;
+ pLevel->inP2 = sqliteVdbeCurrentAddr(v);
+ }else{
+ assert( pX->pSelect );
+ sqliteVdbeAddOp(v, OP_Rewind, pX->iTable, brk);
+ sqliteVdbeAddOp(v, OP_KeyAsData, pX->iTable, 1);
+ pLevel->inP2 = sqliteVdbeAddOp(v, OP_FullKey, pX->iTable, 0);
+ pLevel->inOp = OP_Next;
+ pLevel->inP1 = pX->iTable;
+ }
+ disableTerm(pLevel, &aExpr[k].p);
+ break;
+ }
+ }
+ if( aExpr[k].idxRight==iCur
+ && aExpr[k].p->op==TK_EQ
+ && (aExpr[k].prereqLeft & loopMask)==aExpr[k].prereqLeft
+ && aExpr[k].p->pRight->iColumn==pIdx->aiColumn[j]
+ ){
+ sqliteExprCode(pParse, aExpr[k].p->pLeft);
+ disableTerm(pLevel, &aExpr[k].p);
+ break;
+ }
+ }
+ }
+ pLevel->iMem = pParse->nMem++;
+ cont = pLevel->cont = sqliteVdbeMakeLabel(v);
+ sqliteVdbeAddOp(v, OP_NotNull, -nColumn, sqliteVdbeCurrentAddr(v)+3);
+ sqliteVdbeAddOp(v, OP_Pop, nColumn, 0);
+ sqliteVdbeAddOp(v, OP_Goto, 0, brk);
+ sqliteVdbeAddOp(v, OP_MakeKey, nColumn, 0);
+ sqliteAddIdxKeyType(v, pIdx);
+ if( nColumn==pIdx->nColumn || pLevel->bRev ){
+ sqliteVdbeAddOp(v, OP_MemStore, pLevel->iMem, 0);
+ testOp = OP_IdxGT;
+ }else{
+ sqliteVdbeAddOp(v, OP_Dup, 0, 0);
+ sqliteVdbeAddOp(v, OP_IncrKey, 0, 0);
+ sqliteVdbeAddOp(v, OP_MemStore, pLevel->iMem, 1);
+ testOp = OP_IdxGE;
+ }
+ if( pLevel->bRev ){
+ /* Scan in reverse order */
+ sqliteVdbeAddOp(v, OP_IncrKey, 0, 0);
+ sqliteVdbeAddOp(v, OP_MoveLt, pLevel->iCur, brk);
+ start = sqliteVdbeAddOp(v, OP_MemLoad, pLevel->iMem, 0);
+ sqliteVdbeAddOp(v, OP_IdxLT, pLevel->iCur, brk);
+ pLevel->op = OP_Prev;
+ }else{
+ /* Scan in the forward order */
+ sqliteVdbeAddOp(v, OP_MoveTo, pLevel->iCur, brk);
+ start = sqliteVdbeAddOp(v, OP_MemLoad, pLevel->iMem, 0);
+ sqliteVdbeAddOp(v, testOp, pLevel->iCur, brk);
+ pLevel->op = OP_Next;
+ }
+ sqliteVdbeAddOp(v, OP_RowKey, pLevel->iCur, 0);
+ sqliteVdbeAddOp(v, OP_IdxIsNull, nColumn, cont);
+ sqliteVdbeAddOp(v, OP_IdxRecno, pLevel->iCur, 0);
+ if( i==pTabList->nSrc-1 && pushKey ){
+ haveKey = 1;
+ }else{
+ sqliteVdbeAddOp(v, OP_MoveTo, iCur, 0);
+ haveKey = 0;
+ }
+ pLevel->p1 = pLevel->iCur;
+ pLevel->p2 = start;
+ }else if( i<ARRAYSIZE(iDirectLt) && (iDirectLt[i]>=0 || iDirectGt[i]>=0) ){
+ /* Case 3: We have an inequality comparison against the ROWID field.
+ */
+ int testOp = OP_Noop;
+ int start;
+
+ brk = pLevel->brk = sqliteVdbeMakeLabel(v);
+ cont = pLevel->cont = sqliteVdbeMakeLabel(v);
+ if( iDirectGt[i]>=0 ){
+ k = iDirectGt[i];
+ assert( k<nExpr );
+ assert( aExpr[k].p!=0 );
+ assert( aExpr[k].idxLeft==iCur || aExpr[k].idxRight==iCur );
+ if( aExpr[k].idxLeft==iCur ){
+ sqliteExprCode(pParse, aExpr[k].p->pRight);
+ }else{
+ sqliteExprCode(pParse, aExpr[k].p->pLeft);
+ }
+ sqliteVdbeAddOp(v, OP_ForceInt,
+ aExpr[k].p->op==TK_LT || aExpr[k].p->op==TK_GT, brk);
+ sqliteVdbeAddOp(v, OP_MoveTo, iCur, brk);
+ disableTerm(pLevel, &aExpr[k].p);
+ }else{
+ sqliteVdbeAddOp(v, OP_Rewind, iCur, brk);
+ }
+ if( iDirectLt[i]>=0 ){
+ k = iDirectLt[i];
+ assert( k<nExpr );
+ assert( aExpr[k].p!=0 );
+ assert( aExpr[k].idxLeft==iCur || aExpr[k].idxRight==iCur );
+ if( aExpr[k].idxLeft==iCur ){
+ sqliteExprCode(pParse, aExpr[k].p->pRight);
+ }else{
+ sqliteExprCode(pParse, aExpr[k].p->pLeft);
+ }
+ /* sqliteVdbeAddOp(v, OP_MustBeInt, 0, sqliteVdbeCurrentAddr(v)+1); */
+ pLevel->iMem = pParse->nMem++;
+ sqliteVdbeAddOp(v, OP_MemStore, pLevel->iMem, 1);
+ if( aExpr[k].p->op==TK_LT || aExpr[k].p->op==TK_GT ){
+ testOp = OP_Ge;
+ }else{
+ testOp = OP_Gt;
+ }
+ disableTerm(pLevel, &aExpr[k].p);
+ }
+ start = sqliteVdbeCurrentAddr(v);
+ pLevel->op = OP_Next;
+ pLevel->p1 = iCur;
+ pLevel->p2 = start;
+ if( testOp!=OP_Noop ){
+ sqliteVdbeAddOp(v, OP_Recno, iCur, 0);
+ sqliteVdbeAddOp(v, OP_MemLoad, pLevel->iMem, 0);
+ sqliteVdbeAddOp(v, testOp, 0, brk);
+ }
+ haveKey = 0;
+ }else if( pIdx==0 ){
+ /* Case 4: There is no usable index. We must do a complete
+ ** scan of the entire database table.
+ */
+ int start;
+
+ brk = pLevel->brk = sqliteVdbeMakeLabel(v);
+ cont = pLevel->cont = sqliteVdbeMakeLabel(v);
+ sqliteVdbeAddOp(v, OP_Rewind, iCur, brk);
+ start = sqliteVdbeCurrentAddr(v);
+ pLevel->op = OP_Next;
+ pLevel->p1 = iCur;
+ pLevel->p2 = start;
+ haveKey = 0;
+ }else{
+ /* Case 5: The WHERE clause term that refers to the right-most
+ ** column of the index is an inequality. For example, if
+ ** the index is on (x,y,z) and the WHERE clause is of the
+ ** form "x=5 AND y<10" then this case is used. Only the
+ ** right-most column can be an inequality - the rest must
+ ** use the "==" operator.
+ **
+ ** This case is also used when there are no WHERE clause
+ ** constraints but an index is selected anyway, in order
+ ** to force the output order to conform to an ORDER BY.
+ */
+ int score = pLevel->score;
+ int nEqColumn = score/8;
+ int start;
+ int leFlag, geFlag;
+ int testOp;
+
+ /* Evaluate the equality constraints
+ */
+ for(j=0; j<nEqColumn; j++){
+ for(k=0; k<nExpr; k++){
+ if( aExpr[k].p==0 ) continue;
+ if( aExpr[k].idxLeft==iCur
+ && aExpr[k].p->op==TK_EQ
+ && (aExpr[k].prereqRight & loopMask)==aExpr[k].prereqRight
+ && aExpr[k].p->pLeft->iColumn==pIdx->aiColumn[j]
+ ){
+ sqliteExprCode(pParse, aExpr[k].p->pRight);
+ disableTerm(pLevel, &aExpr[k].p);
+ break;
+ }
+ if( aExpr[k].idxRight==iCur
+ && aExpr[k].p->op==TK_EQ
+ && (aExpr[k].prereqLeft & loopMask)==aExpr[k].prereqLeft
+ && aExpr[k].p->pRight->iColumn==pIdx->aiColumn[j]
+ ){
+ sqliteExprCode(pParse, aExpr[k].p->pLeft);
+ disableTerm(pLevel, &aExpr[k].p);
+ break;
+ }
+ }
+ }
+
+ /* Duplicate the equality term values because they will all be
+ ** used twice: once to make the termination key and once to make the
+ ** start key.
+ */
+ for(j=0; j<nEqColumn; j++){
+ sqliteVdbeAddOp(v, OP_Dup, nEqColumn-1, 0);
+ }
+
+ /* Labels for the beginning and end of the loop
+ */
+ cont = pLevel->cont = sqliteVdbeMakeLabel(v);
+ brk = pLevel->brk = sqliteVdbeMakeLabel(v);
+
+ /* Generate the termination key. This is the key value that
+ ** will end the search. There is no termination key if there
+ ** are no equality terms and no "X<..." term.
+ **
+ ** 2002-Dec-04: On a reverse-order scan, the so-called "termination"
+ ** key computed here really ends up being the start key.
+ */
+ if( (score & 1)!=0 ){
+ for(k=0; k<nExpr; k++){
+ Expr *pExpr = aExpr[k].p;
+ if( pExpr==0 ) continue;
+ if( aExpr[k].idxLeft==iCur
+ && (pExpr->op==TK_LT || pExpr->op==TK_LE)
+ && (aExpr[k].prereqRight & loopMask)==aExpr[k].prereqRight
+ && pExpr->pLeft->iColumn==pIdx->aiColumn[j]
+ ){
+ sqliteExprCode(pParse, pExpr->pRight);
+ leFlag = pExpr->op==TK_LE;
+ disableTerm(pLevel, &aExpr[k].p);
+ break;
+ }
+ if( aExpr[k].idxRight==iCur
+ && (pExpr->op==TK_GT || pExpr->op==TK_GE)
+ && (aExpr[k].prereqLeft & loopMask)==aExpr[k].prereqLeft
+ && pExpr->pRight->iColumn==pIdx->aiColumn[j]
+ ){
+ sqliteExprCode(pParse, pExpr->pLeft);
+ leFlag = pExpr->op==TK_GE;
+ disableTerm(pLevel, &aExpr[k].p);
+ break;
+ }
+ }
+ testOp = OP_IdxGE;
+ }else{
+ testOp = nEqColumn>0 ? OP_IdxGE : OP_Noop;
+ leFlag = 1;
+ }
+ if( testOp!=OP_Noop ){
+ int nCol = nEqColumn + (score & 1);
+ pLevel->iMem = pParse->nMem++;
+ sqliteVdbeAddOp(v, OP_NotNull, -nCol, sqliteVdbeCurrentAddr(v)+3);
+ sqliteVdbeAddOp(v, OP_Pop, nCol, 0);
+ sqliteVdbeAddOp(v, OP_Goto, 0, brk);
+ sqliteVdbeAddOp(v, OP_MakeKey, nCol, 0);
+ sqliteAddIdxKeyType(v, pIdx);
+ if( leFlag ){
+ sqliteVdbeAddOp(v, OP_IncrKey, 0, 0);
+ }
+ if( pLevel->bRev ){
+ sqliteVdbeAddOp(v, OP_MoveLt, pLevel->iCur, brk);
+ }else{
+ sqliteVdbeAddOp(v, OP_MemStore, pLevel->iMem, 1);
+ }
+ }else if( pLevel->bRev ){
+ sqliteVdbeAddOp(v, OP_Last, pLevel->iCur, brk);
+ }
+
+ /* Generate the start key. This is the key that defines the lower
+ ** bound on the search. There is no start key if there are no
+ ** equality terms and if there is no "X>..." term. In
+ ** that case, generate a "Rewind" instruction in place of the
+ ** start key search.
+ **
+ ** 2002-Dec-04: In the case of a reverse-order search, the so-called
+ ** "start" key really ends up being used as the termination key.
+ */
+ if( (score & 2)!=0 ){
+ for(k=0; k<nExpr; k++){
+ Expr *pExpr = aExpr[k].p;
+ if( pExpr==0 ) continue;
+ if( aExpr[k].idxLeft==iCur
+ && (pExpr->op==TK_GT || pExpr->op==TK_GE)
+ && (aExpr[k].prereqRight & loopMask)==aExpr[k].prereqRight
+ && pExpr->pLeft->iColumn==pIdx->aiColumn[j]
+ ){
+ sqliteExprCode(pParse, pExpr->pRight);
+ geFlag = pExpr->op==TK_GE;
+ disableTerm(pLevel, &aExpr[k].p);
+ break;
+ }
+ if( aExpr[k].idxRight==iCur
+ && (pExpr->op==TK_LT || pExpr->op==TK_LE)
+ && (aExpr[k].prereqLeft & loopMask)==aExpr[k].prereqLeft
+ && pExpr->pRight->iColumn==pIdx->aiColumn[j]
+ ){
+ sqliteExprCode(pParse, pExpr->pLeft);
+ geFlag = pExpr->op==TK_LE;
+ disableTerm(pLevel, &aExpr[k].p);
+ break;
+ }
+ }
+ }else{
+ geFlag = 1;
+ }
+ if( nEqColumn>0 || (score&2)!=0 ){
+ int nCol = nEqColumn + ((score&2)!=0);
+ sqliteVdbeAddOp(v, OP_NotNull, -nCol, sqliteVdbeCurrentAddr(v)+3);
+ sqliteVdbeAddOp(v, OP_Pop, nCol, 0);
+ sqliteVdbeAddOp(v, OP_Goto, 0, brk);
+ sqliteVdbeAddOp(v, OP_MakeKey, nCol, 0);
+ sqliteAddIdxKeyType(v, pIdx);
+ if( !geFlag ){
+ sqliteVdbeAddOp(v, OP_IncrKey, 0, 0);
+ }
+ if( pLevel->bRev ){
+ pLevel->iMem = pParse->nMem++;
+ sqliteVdbeAddOp(v, OP_MemStore, pLevel->iMem, 1);
+ testOp = OP_IdxLT;
+ }else{
+ sqliteVdbeAddOp(v, OP_MoveTo, pLevel->iCur, brk);
+ }
+ }else if( pLevel->bRev ){
+ testOp = OP_Noop;
+ }else{
+ sqliteVdbeAddOp(v, OP_Rewind, pLevel->iCur, brk);
+ }
+
+      /* Generate the top of the loop. If there is a termination
+ ** key we have to test for that key and abort at the top of the
+ ** loop.
+ */
+ start = sqliteVdbeCurrentAddr(v);
+ if( testOp!=OP_Noop ){
+ sqliteVdbeAddOp(v, OP_MemLoad, pLevel->iMem, 0);
+ sqliteVdbeAddOp(v, testOp, pLevel->iCur, brk);
+ }
+ sqliteVdbeAddOp(v, OP_RowKey, pLevel->iCur, 0);
+ sqliteVdbeAddOp(v, OP_IdxIsNull, nEqColumn + (score & 1), cont);
+ sqliteVdbeAddOp(v, OP_IdxRecno, pLevel->iCur, 0);
+ if( i==pTabList->nSrc-1 && pushKey ){
+ haveKey = 1;
+ }else{
+ sqliteVdbeAddOp(v, OP_MoveTo, iCur, 0);
+ haveKey = 0;
+ }
+
+ /* Record the instruction used to terminate the loop.
+ */
+ pLevel->op = pLevel->bRev ? OP_Prev : OP_Next;
+ pLevel->p1 = pLevel->iCur;
+ pLevel->p2 = start;
+ }
+ loopMask |= getMask(&maskSet, iCur);
+
+ /* Insert code to test every subexpression that can be completely
+ ** computed using the current set of tables.
+ */
+ for(j=0; j<nExpr; j++){
+ if( aExpr[j].p==0 ) continue;
+ if( (aExpr[j].prereqAll & loopMask)!=aExpr[j].prereqAll ) continue;
+ if( pLevel->iLeftJoin && !ExprHasProperty(aExpr[j].p,EP_FromJoin) ){
+ continue;
+ }
+ if( haveKey ){
+ haveKey = 0;
+ sqliteVdbeAddOp(v, OP_MoveTo, iCur, 0);
+ }
+ sqliteExprIfFalse(pParse, aExpr[j].p, cont, 1);
+ aExpr[j].p = 0;
+ }
+ brk = cont;
+
+ /* For a LEFT OUTER JOIN, generate code that will record the fact that
+ ** at least one row of the right table has matched the left table.
+ */
+ if( pLevel->iLeftJoin ){
+ pLevel->top = sqliteVdbeCurrentAddr(v);
+ sqliteVdbeAddOp(v, OP_Integer, 1, 0);
+ sqliteVdbeAddOp(v, OP_MemStore, pLevel->iLeftJoin, 1);
+ for(j=0; j<nExpr; j++){
+ if( aExpr[j].p==0 ) continue;
+ if( (aExpr[j].prereqAll & loopMask)!=aExpr[j].prereqAll ) continue;
+ if( haveKey ){
+ /* Cannot happen. "haveKey" can only be true if pushKey is true
+          ** and pushKey can only be true for DELETE and UPDATE and there are
+ ** no outer joins with DELETE and UPDATE.
+ */
+ haveKey = 0;
+ sqliteVdbeAddOp(v, OP_MoveTo, iCur, 0);
+ }
+ sqliteExprIfFalse(pParse, aExpr[j].p, cont, 1);
+ aExpr[j].p = 0;
+ }
+ }
+ }
+ pWInfo->iContinue = cont;
+ if( pushKey && !haveKey ){
+ sqliteVdbeAddOp(v, OP_Recno, pTabList->a[0].iCursor, 0);
+ }
+ freeMaskSet(&maskSet);
+ return pWInfo;
+}
+
+/*
+** Generate the end of the WHERE loop. See comments on
+** sqliteWhereBegin() for additional information.
+*/
+void sqliteWhereEnd(WhereInfo *pWInfo){
+ Vdbe *v = pWInfo->pParse->pVdbe;
+ int i;
+ WhereLevel *pLevel;
+ SrcList *pTabList = pWInfo->pTabList;
+
+ for(i=pTabList->nSrc-1; i>=0; i--){
+ pLevel = &pWInfo->a[i];
+ sqliteVdbeResolveLabel(v, pLevel->cont);
+ if( pLevel->op!=OP_Noop ){
+ sqliteVdbeAddOp(v, pLevel->op, pLevel->p1, pLevel->p2);
+ }
+ sqliteVdbeResolveLabel(v, pLevel->brk);
+ if( pLevel->inOp!=OP_Noop ){
+ sqliteVdbeAddOp(v, pLevel->inOp, pLevel->inP1, pLevel->inP2);
+ }
+ if( pLevel->iLeftJoin ){
+ int addr;
+ addr = sqliteVdbeAddOp(v, OP_MemLoad, pLevel->iLeftJoin, 0);
+ sqliteVdbeAddOp(v, OP_NotNull, 1, addr+4 + (pLevel->iCur>=0));
+ sqliteVdbeAddOp(v, OP_NullRow, pTabList->a[i].iCursor, 0);
+ if( pLevel->iCur>=0 ){
+ sqliteVdbeAddOp(v, OP_NullRow, pLevel->iCur, 0);
+ }
+ sqliteVdbeAddOp(v, OP_Goto, 0, pLevel->top);
+ }
+ }
+ sqliteVdbeResolveLabel(v, pWInfo->iBreak);
+ for(i=0; i<pTabList->nSrc; i++){
+ Table *pTab = pTabList->a[i].pTab;
+ assert( pTab!=0 );
+ if( pTab->isTransient || pTab->pSelect ) continue;
+ pLevel = &pWInfo->a[i];
+ sqliteVdbeAddOp(v, OP_Close, pTabList->a[i].iCursor, 0);
+ if( pLevel->pIdx!=0 ){
+ sqliteVdbeAddOp(v, OP_Close, pLevel->iCur, 0);
+ }
+ }
+#if 0 /* Never reuse a cursor */
+ if( pWInfo->pParse->nTab==pWInfo->peakNTab ){
+ pWInfo->pParse->nTab = pWInfo->savedNTab;
+ }
+#endif
+ sqliteFree(pWInfo);
+ return;
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/test/all.test b/usr/src/cmd/svc/configd/sqlite/test/all.test
new file mode 100644
index 0000000000..2cdd89c2bf
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/all.test
@@ -0,0 +1,112 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file runs all tests.
+#
+# $Id: all.test,v 1.19 2003/02/16 22:21:33 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+rename finish_test really_finish_test
+proc finish_test {} {memleak_check}
+
+if {[file exists ./sqlite_test_count]} {
+ set COUNT [exec cat ./sqlite_test_count]
+} else {
+ set COUNT 4
+}
+
+if {[llength $argv]>0} {
+ foreach {name value} $argv {
+ switch -- $name {
+ -count {
+ set COUNT $value
+ }
+ -quick {
+ set ISQUICK $value
+ }
+ default {
+ puts stderr "Unknown option: $name"
+ exit
+ }
+ }
+ }
+}
+set argv {}
+
+# LeakList will hold a list of the number of unfreed mallocs after
+# each round of the test. This number should be constant. If it
+# grows, it may mean there is a memory leak in the library.
+#
+set LeakList {}
+
+set EXCLUDE {
+ all.test
+ quick.test
+ malloc.test
+ misuse.test
+ memleak.test
+}
+# btree2.test
+
+for {set Counter 0} {$Counter<$COUNT && $nErr==0} {incr Counter} {
+ set btree_native_byte_order [expr {($Counter>>1)&0x1}]
+ if {$Counter%2} {
+ set ::SETUP_SQL {PRAGMA default_synchronous=off;}
+ } else {
+ catch {unset ::SETUP_SQL}
+ }
+ foreach testfile [lsort -dictionary [glob $testdir/*.test]] {
+ set tail [file tail $testfile]
+ if {[lsearch -exact $EXCLUDE $tail]>=0} continue
+ source $testfile
+ catch {db close}
+ if {$sqlite_open_file_count>0} {
+ puts "$tail did not close all files: $sqlite_open_file_count"
+ incr nErr
+ lappend ::failList $tail
+ }
+ }
+ if {[info exists Leak]} {
+ lappend LeakList $Leak
+ }
+}
+
+# Do one last test to look for a memory leak in the library. This will
+# only work if SQLite is compiled with the -DMEMORY_DEBUG=1 flag.
+#
+if {$LeakList!=""} {
+ puts -nonewline memory-leak-test...
+ incr ::nTest
+ foreach x $LeakList {
+ if {$x!=[lindex $LeakList 0]} {
+ puts " failed!"
+ puts "Expected: all values to be the same"
+ puts " Got: $LeakList"
+ incr ::nErr
+ lappend ::failList memory-leak-test
+ break
+ }
+ }
+ puts " Ok"
+}
+
+# Run the malloc tests and the misuse test after memory leak detection.
+# Both tests leak memory.
+#
+catch {source $testdir/misuse.test}
+catch {source $testdir/malloc.test}
+
+catch {db close}
+set sqlite_open_file_count 0
+really_finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/attach.test b/usr/src/cmd/svc/configd/sqlite/test/attach.test
new file mode 100644
index 0000000000..67521eaa85
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/attach.test
@@ -0,0 +1,589 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2003 April 4
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this script is testing the ATTACH and DETACH commands
+# and related functionality.
+#
+# $Id: attach.test,v 1.13 2004/02/14 01:39:50 drh Exp $
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+for {set i 2} {$i<=15} {incr i} {
+ file delete -force test$i.db
+ file delete -force test$i.db-journal
+}
+
+do_test attach-1.1 {
+ execsql {
+ CREATE TABLE t1(a,b);
+ INSERT INTO t1 VALUES(1,2);
+ INSERT INTO t1 VALUES(3,4);
+ SELECT * FROM t1;
+ }
+} {1 2 3 4}
+do_test attach-1.2 {
+ sqlite db2 test2.db
+ execsql {
+ CREATE TABLE t2(x,y);
+ INSERT INTO t2 VALUES(1,'x');
+ INSERT INTO t2 VALUES(2,'y');
+ SELECT * FROM t2;
+ } db2
+} {1 x 2 y}
+do_test attach-1.3 {
+ execsql {
+ ATTACH DATABASE 'test2.db' AS two;
+ SELECT * FROM two.t2;
+ }
+} {1 x 2 y}
+do_test attach-1.4 {
+ execsql {
+ SELECT * FROM t2;
+ }
+} {1 x 2 y}
+do_test attach-1.5 {
+ execsql {
+ DETACH DATABASE two;
+ SELECT * FROM t1;
+ }
+} {1 2 3 4}
+do_test attach-1.6 {
+ catchsql {
+ SELECT * FROM t2;
+ }
+} {1 {no such table: t2}}
+do_test attach-1.7 {
+ catchsql {
+ SELECT * FROM two.t2;
+ }
+} {1 {no such table: two.t2}}
+do_test attach-1.8 {
+ catchsql {
+ ATTACH DATABASE 'test3.db' AS three;
+ }
+} {1 {cannot attach empty database: three}}
+do_test attach-1.9 {
+ catchsql {
+ SELECT * FROM three.sqlite_master;
+ }
+} {1 {no such table: three.sqlite_master}}
+do_test attach-1.10 {
+ catchsql {
+ DETACH DATABASE three;
+ }
+} {1 {no such database: three}}
+do_test attach-1.11 {
+ execsql {
+ ATTACH 'test.db' AS db2;
+ ATTACH 'test.db' AS db3;
+ ATTACH 'test.db' AS db4;
+ ATTACH 'test.db' AS db5;
+ ATTACH 'test.db' AS db6;
+ ATTACH 'test.db' AS db7;
+ ATTACH 'test.db' AS db8;
+ ATTACH 'test.db' AS db9;
+ }
+} {}
+proc db_list {db} {
+ set list {}
+ foreach {idx name file} [execsql {PRAGMA database_list} $db] {
+ lappend list $idx $name
+ }
+ return $list
+}
+do_test attach-1.11b {
+ db_list db
+} {0 main 1 temp 2 db2 3 db3 4 db4 5 db5 6 db6 7 db7 8 db8 9 db9}
+do_test attach-1.12 {
+ catchsql {
+ ATTACH 'test.db' as db2;
+ }
+} {1 {database db2 is already in use}}
+do_test attach-1.13 {
+ catchsql {
+ ATTACH 'test.db' as db5;
+ }
+} {1 {database db5 is already in use}}
+do_test attach-1.14 {
+ catchsql {
+ ATTACH 'test.db' as db9;
+ }
+} {1 {database db9 is already in use}}
+do_test attach-1.15 {
+ catchsql {
+ ATTACH 'test.db' as main;
+ }
+} {1 {database main is already in use}}
+do_test attach-1.16 {
+ catchsql {
+ ATTACH 'test.db' as temp;
+ }
+} {1 {database temp is already in use}}
+do_test attach-1.17 {
+ catchsql {
+ ATTACH 'test.db' as MAIN;
+ }
+} {1 {database MAIN is already in use}}
+do_test attach-1.18 {
+ catchsql {
+ ATTACH 'test.db' as db10;
+ ATTACH 'test.db' as db11;
+ }
+} {0 {}}
+do_test attach-1.19 {
+ catchsql {
+ ATTACH 'test.db' as db12;
+ }
+} {1 {too many attached databases - max 10}}
+do_test attach-1.20.1 {
+ execsql {
+ DETACH db5;
+ }
+ db_list db
+} {0 main 1 temp 2 db2 3 db3 4 db4 5 db11 6 db6 7 db7 8 db8 9 db9 10 db10}
+integrity_check attach-1.20.2
+do_test attach-1.21 {
+ catchsql {
+ ATTACH 'test.db' as db12;
+ }
+} {0 {}}
+do_test attach-1.22 {
+ catchsql {
+ ATTACH 'test.db' as db13;
+ }
+} {1 {too many attached databases - max 10}}
+do_test attach-1.23 {
+ catchsql {
+ DETACH db14;
+ }
+} {1 {no such database: db14}}
+do_test attach-1.24 {
+ catchsql {
+ DETACH db12;
+ }
+} {0 {}}
+do_test attach-1.25 {
+ catchsql {
+ DETACH db12;
+ }
+} {1 {no such database: db12}}
+do_test attach-1.26 {
+ catchsql {
+ DETACH main;
+ }
+} {1 {cannot detach database main}}
+do_test attach-1.27 {
+ catchsql {
+ DETACH Temp;
+ }
+} {1 {cannot detach database Temp}}
+do_test attach-1.28 {
+ catchsql {
+ DETACH db11;
+ DETACH db10;
+ DETACH db9;
+ DETACH db8;
+ DETACH db7;
+ DETACH db6;
+ DETACH db4;
+ DETACH db3;
+ DETACH db2;
+ }
+} {0 {}}
+do_test attach-1.29 {
+ db_list db
+} {0 main 1 temp}
+
+do_test attach-2.1 {
+ execsql {
+ CREATE TABLE tx(x1,x2,y1,y2);
+ CREATE TRIGGER r1 AFTER UPDATE ON t2 FOR EACH ROW BEGIN
+ INSERT INTO tx(x1,x2,y1,y2) VALUES(OLD.x,NEW.x,OLD.y,NEW.y);
+ END;
+ SELECT * FROM tx;
+ } db2;
+} {}
+do_test attach-2.2 {
+ execsql {
+ UPDATE t2 SET x=x+10;
+ SELECT * FROM tx;
+ } db2;
+} {1 11 x x 2 12 y y}
+do_test attach-2.3 {
+ execsql {
+ CREATE TABLE tx(x1,x2,y1,y2);
+ SELECT * FROM tx;
+ }
+} {}
+do_test attach-2.4 {
+ execsql {
+ ATTACH 'test2.db' AS db2;
+ }
+} {}
+do_test attach-2.5 {
+ execsql {
+ UPDATE db2.t2 SET x=x+10;
+ SELECT * FROM db2.tx;
+ }
+} {1 11 x x 2 12 y y 11 21 x x 12 22 y y}
+do_test attach-2.6 {
+ execsql {
+ SELECT * FROM main.tx;
+ }
+} {}
+do_test attach-2.7 {
+ execsql {
+ SELECT type, name, tbl_name FROM db2.sqlite_master;
+ }
+} {table t2 t2 table tx tx trigger r1 t2}
+do_test attach-2.8 {
+ db_list db
+} {0 main 1 temp 2 db2}
+do_test attach-2.9 {
+ execsql {
+ CREATE INDEX i2 ON t2(x);
+ SELECT * FROM t2 WHERE x>5;
+ } db2
+} {21 x 22 y}
+do_test attach-2.10 {
+ execsql {
+ SELECT type, name, tbl_name FROM sqlite_master;
+ } db2
+} {table t2 t2 table tx tx trigger r1 t2 index i2 t2}
+#do_test attach-2.11 {
+# catchsql {
+# SELECT * FROM t2 WHERE x>5;
+# }
+#} {1 {database schema has changed}}
+do_test attach-2.12 {
+ db_list db
+} {0 main 1 temp 2 db2}
+do_test attach-2.13 {
+ catchsql {
+ SELECT * FROM t2 WHERE x>5;
+ }
+} {0 {21 x 22 y}}
+do_test attach-2.14 {
+ execsql {
+ SELECT type, name, tbl_name FROM sqlite_master;
+ }
+} {table t1 t1 table tx tx}
+do_test attach-2.15 {
+ execsql {
+ SELECT type, name, tbl_name FROM db2.sqlite_master;
+ }
+} {table t2 t2 table tx tx trigger r1 t2 index i2 t2}
+do_test attach-2.16 {
+ db close
+ sqlite db test.db
+ execsql {
+ ATTACH 'test2.db' AS db2;
+ SELECT type, name, tbl_name FROM db2.sqlite_master;
+ }
+} {table t2 t2 table tx tx trigger r1 t2 index i2 t2}
+
+do_test attach-3.1 {
+ db close
+ db2 close
+ sqlite db test.db
+ sqlite db2 test2.db
+ execsql {
+ SELECT * FROM t1
+ }
+} {1 2 3 4}
+do_test attach-3.2 {
+ catchsql {
+ SELECT * FROM t2
+ }
+} {1 {no such table: t2}}
+do_test attach-3.3 {
+ catchsql {
+ ATTACH DATABASE 'test2.db' AS db2;
+ SELECT * FROM t2
+ }
+} {0 {21 x 22 y}}
+
+# Even though main has a transaction, test2.db should not be locked.
+do_test attach-3.4 {
+ execsql BEGIN
+ catchsql {
+ SELECT * FROM t2;
+ } db2;
+} {0 {21 x 22 y}}
+
+# Reading from db2 should not lock test2.db
+do_test attach-3.5 {
+ execsql {SELECT * FROM t2}
+ catchsql {
+ SELECT * FROM t2;
+ } db2;
+} {0 {21 x 22 y}}
+
+# Making a change to db2 causes test2.db to become locked.
+do_test attach-3.6 {
+ execsql {
+ UPDATE t2 SET x=x+1 WHERE x=50;
+ }
+ catchsql {
+ SELECT * FROM t2;
+ } db2;
+} {1 {database is locked}}
+
+do_test attach-3.7 {
+ execsql ROLLBACK
+ execsql {SELECT * FROM t2} db2
+} {21 x 22 y}
+do_test attach-3.8 {
+ execsql BEGIN
+ execsql BEGIN db2
+ catchsql {SELECT * FROM t2}
+} {1 {database is locked}}
+do_test attach-3.9 {
+ catchsql {SELECT * FROM t2} db2
+} {0 {21 x 22 y}}
+do_test attach-3.10 {
+ execsql {SELECT * FROM t1}
+} {1 2 3 4}
+do_test attach-3.11 {
+ catchsql {UPDATE t1 SET a=a+1}
+} {0 {}}
+do_test attach-3.12 {
+ execsql {SELECT * FROM t1}
+} {2 2 4 4}
+do_test attach-3.13 {
+ catchsql {UPDATE t2 SET x=x+1 WHERE x=50}
+} {1 {database is locked}}
+do_test attach-3.14 {
+ # Unable to reinitialize the schema tables because the aux database
+ # is still locked.
+ catchsql {SELECT * FROM t1}
+} {1 {database is locked}}
+do_test attach-3.15 {
+ execsql COMMIT db2
+ execsql {SELECT * FROM t1}
+} {1 2 3 4}
+
+# Ticket #323
+do_test attach-4.1 {
+ execsql {DETACH db2}
+ db2 close
+ sqlite db2 test2.db
+ execsql {
+ CREATE TABLE t3(x,y);
+ CREATE UNIQUE INDEX t3i1 ON t3(x);
+ INSERT INTO t3 VALUES(1,2);
+ SELECT * FROM t3;
+ } db2;
+} {1 2}
+do_test attach-4.2 {
+ execsql {
+ CREATE TABLE t3(a,b);
+ CREATE UNIQUE INDEX t3i1b ON t3(a);
+ INSERT INTO t3 VALUES(9,10);
+ SELECT * FROM t3;
+ }
+} {9 10}
+do_test attach-4.3 {
+ execsql {
+ ATTACH DATABASE 'test2.db' AS db2;
+ SELECT * FROM db2.t3;
+ }
+} {1 2}
+do_test attach-4.4 {
+ execsql {
+ SELECT * FROM main.t3;
+ }
+} {9 10}
+do_test attach-4.5 {
+ execsql {
+ INSERT INTO db2.t3 VALUES(9,10);
+ SELECT * FROM db2.t3;
+ }
+} {1 2 9 10}
+do_test attach-4.6 {
+ execsql {
+ DETACH db2;
+ }
+ execsql {
+ CREATE TABLE t4(x);
+ CREATE TRIGGER t3r3 AFTER INSERT ON t3 BEGIN
+ INSERT INTO t4 VALUES('db2.' || NEW.x);
+ END;
+ INSERT INTO t3 VALUES(6,7);
+ SELECT * FROM t4;
+ } db2
+} {db2.6}
+do_test attach-4.7 {
+ execsql {
+ CREATE TABLE t4(y);
+ CREATE TRIGGER t3r3 AFTER INSERT ON t3 BEGIN
+ INSERT INTO t4 VALUES('main.' || NEW.a);
+ END;
+ INSERT INTO main.t3 VALUES(11,12);
+ SELECT * FROM main.t4;
+ }
+} {main.11}
+do_test attach-4.8 {
+ execsql {
+ ATTACH DATABASE 'test2.db' AS db2;
+ INSERT INTO db2.t3 VALUES(13,14);
+ SELECT * FROM db2.t4 UNION ALL SELECT * FROM main.t4;
+ }
+} {db2.6 db2.13 main.11}
+do_test attach-4.9 {
+ execsql {
+ INSERT INTO main.t3 VALUES(15,16);
+ SELECT * FROM db2.t4 UNION ALL SELECT * FROM main.t4;
+ }
+} {db2.6 db2.13 main.11 main.15}
+do_test attach-4.10 {
+ execsql {
+ DETACH DATABASE db2;
+ }
+ execsql {
+ CREATE VIEW v3 AS SELECT x*100+y FROM t3;
+ SELECT * FROM v3;
+ } db2
+} {102 910 607 1314}
+do_test attach-4.11 {
+ execsql {
+ CREATE VIEW v3 AS SELECT a*100+b FROM t3;
+ SELECT * FROM v3;
+ }
+} {910 1112 1516}
+do_test attach-4.12 {
+ execsql {
+ ATTACH DATABASE 'test2.db' AS db2;
+ SELECT * FROM db2.v3;
+ }
+} {102 910 607 1314}
+do_test attach-4.13 {
+ execsql {
+ SELECT * FROM main.v3;
+ }
+} {910 1112 1516}
+
+# Tests for the sqliteFix...() routines in attach.c
+#
+do_test attach-5.1 {
+ db close
+ sqlite db test.db
+ db2 close
+ file delete -force test2.db
+ sqlite db2 test2.db
+ catchsql {
+ ATTACH DATABASE 'test.db' AS orig;
+ CREATE TRIGGER r1 AFTER INSERT ON orig.t1 BEGIN;
+ SELECT 'no-op';
+ END;
+ } db2
+} {1 {triggers may not be added to auxiliary database orig}}
+do_test attach-5.2 {
+ catchsql {
+ CREATE TABLE t5(x,y);
+ CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN
+ SELECT 'no-op';
+ END;
+ } db2
+} {0 {}}
+do_test attach-5.3 {
+ catchsql {
+ DROP TRIGGER r5;
+ CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN
+ SELECT 'no-op' FROM orig.t1;
+ END;
+ } db2
+} {1 {trigger r5 cannot reference objects in database orig}}
+do_test attach-5.4 {
+ catchsql {
+ CREATE TEMP TABLE t6(p,q,r);
+ CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN
+ SELECT 'no-op' FROM temp.t6;
+ END;
+ } db2
+} {1 {trigger r5 cannot reference objects in database temp}}
+do_test attach-5.5 {
+ catchsql {
+ CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN
+ SELECT 'no-op' || (SELECT * FROM temp.t6);
+ END;
+ } db2
+} {1 {trigger r5 cannot reference objects in database temp}}
+do_test attach-5.6 {
+ catchsql {
+ CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN
+ SELECT 'no-op' FROM t1 WHERE x<(SELECT min(x) FROM temp.t6);
+ END;
+ } db2
+} {1 {trigger r5 cannot reference objects in database temp}}
+do_test attach-5.7 {
+ catchsql {
+ CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN
+ SELECT 'no-op' FROM t1 GROUP BY 1 HAVING x<(SELECT min(x) FROM temp.t6);
+ END;
+ } db2
+} {1 {trigger r5 cannot reference objects in database temp}}
+do_test attach-5.7 {
+ catchsql {
+ CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN
+ SELECT max(1,x,(SELECT min(x) FROM temp.t6)) FROM t1;
+ END;
+ } db2
+} {1 {trigger r5 cannot reference objects in database temp}}
+do_test attach-5.8 {
+ catchsql {
+ CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN
+ INSERT INTO t1 VALUES((SELECT min(x) FROM temp.t6),5);
+ END;
+ } db2
+} {1 {trigger r5 cannot reference objects in database temp}}
+do_test attach-5.9 {
+ catchsql {
+ CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN
+ DELETE FROM t1 WHERE x<(SELECT min(x) FROM temp.t6);
+ END;
+ } db2
+} {1 {trigger r5 cannot reference objects in database temp}}
+
+# Check to make sure we get a sensible error if unable to open
+# the file that we are trying to attach.
+#
+do_test attach-6.1 {
+ catchsql {
+ ATTACH DATABASE 'no-such-file' AS nosuch;
+ }
+} {1 {cannot attach empty database: nosuch}}
+file delete -force no-such-file
+if {$tcl_platform(platform)=="unix"} {
+ do_test attach-6.2 {
+ sqlite dbx cannot-read
+ dbx eval {CREATE TABLE t1(a,b,c)}
+ dbx close
+ file attributes cannot-read -permission 0000
+ catchsql {
+ ATTACH DATABASE 'cannot-read' AS noread;
+ }
+ } {1 {unable to open database: cannot-read}}
+ file delete -force cannot-read
+}
+
+for {set i 2} {$i<=15} {incr i} {
+ catch {db$i close}
+}
+file delete -force test2.db
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/attach2.test b/usr/src/cmd/svc/configd/sqlite/test/attach2.test
new file mode 100644
index 0000000000..2ed427205a
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/attach2.test
@@ -0,0 +1,149 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2003 July 1
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this script is testing the ATTACH and DETACH commands
+# and related functionality.
+#
+# $Id: attach2.test,v 1.5 2004/02/12 15:31:22 drh Exp $
+#
+
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Ticket #354
+#
+do_test attach2-1.1 {
+ db eval {
+ CREATE TABLE t1(a,b);
+ CREATE INDEX x1 ON t1(a);
+ }
+ file delete -force test2.db
+ file delete -force test2.db-journal
+ sqlite db2 test2.db
+ db2 eval {
+ CREATE TABLE t1(a,b);
+ CREATE INDEX x1 ON t1(a);
+ }
+ catchsql {
+ ATTACH 'test2.db' AS t2;
+ }
+} {0 {}}
+
+# Ticket #514
+#
+proc db_list {db} {
+ set list {}
+ foreach {idx name file} [execsql {PRAGMA database_list} $db] {
+ lappend list $idx $name
+ }
+ return $list
+}
+db eval {DETACH t2}
+do_test attach2-2.1 {
+ # lock test2.db then try to attach it. Should get an error.
+ db2 eval {BEGIN}
+ catchsql {
+ ATTACH 'test2.db' AS t2;
+ }
+} {1 {database is locked}}
+do_test attach2-2.2 {
+ # make sure test2.db did not get attached.
+ db_list db
+} {0 main 1 temp}
+do_test attach2-2.3 {
+ # unlock test2.db and try to attach again. should work this time.
+ db2 eval {COMMIT}
+ catchsql {
+ ATTACH 'test2.db' AS t2;
+ }
+} {0 {}}
+do_test attach2-2.4 {
+ db_list db
+} {0 main 1 temp 2 t2}
+do_test attach2-2.5 {
+ catchsql {
+ SELECT name FROM t2.sqlite_master;
+ }
+} {0 {t1 x1}}
+do_test attach2-2.6 {
+ # lock test2.db and try to read from it. should get an error.
+ db2 eval BEGIN
+ catchsql {
+ SELECT name FROM t2.sqlite_master;
+ }
+} {1 {database is locked}}
+do_test attach2-2.7 {
+  # but we can still read from test.db even though test2.db is locked.
+ catchsql {
+ SELECT name FROM main.sqlite_master;
+ }
+} {0 {t1 x1}}
+do_test attach2-2.8 {
+ # start a transaction on test.db even though test2.db is locked.
+ catchsql {
+ BEGIN;
+ INSERT INTO t1 VALUES(8,9);
+ }
+} {0 {}}
+do_test attach2-2.9 {
+ execsql {
+ SELECT * FROM t1
+ }
+} {8 9}
+do_test attach2-2.10 {
+ # now try to write to test2.db. the write should fail
+ catchsql {
+ INSERT INTO t2.t1 VALUES(1,2);
+ }
+} {1 {database is locked}}
+do_test attach2-2.11 {
+ # when the write failed in the previous test, the transaction should
+ # have rolled back.
+ db2 eval ROLLBACK
+ execsql {
+ SELECT * FROM t1
+ }
+} {}
+do_test attach2-2.12 {
+ catchsql {
+ COMMIT
+ }
+} {1 {cannot commit - no transaction is active}}
+
+# Ticket #574: Make sure it works using the non-callback API
+#
+do_test attach2-3.1 {
+ db close
+ set DB [sqlite db test.db]
+ set rc [catch {sqlite_compile $DB "ATTACH 'test2.db' AS t2" TAIL} VM]
+ if {$rc} {lappend rc $VM}
+ sqlite_finalize $VM
+ set rc
+} {0}
+do_test attach2-3.2 {
+ set rc [catch {sqlite_compile $DB "DETACH t2" TAIL} VM]
+ if {$rc} {lappend rc $VM}
+ sqlite_finalize $VM
+ set rc
+} {0}
+
+db close
+for {set i 2} {$i<=15} {incr i} {
+ catch {db$i close}
+}
+file delete -force test2.db
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/auth.test b/usr/src/cmd/svc/configd/sqlite/test/auth.test
new file mode 100644
index 0000000000..1719ec7ad1
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/auth.test
@@ -0,0 +1,1895 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2003 April 4
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this script is testing the ATTACH and DETACH commands
+# and related functionality.
+#
+# $Id: auth.test,v 1.12 2003/12/07 00:24:35 drh Exp $
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# disable this test if the SQLITE_OMIT_AUTHORIZATION macro is
+# defined during compilation.
+
+do_test auth-1.1.1 {
+ db close
+ set ::DB [sqlite db test.db]
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_INSERT" && $arg1=="sqlite_master"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ db authorizer ::auth
+ catchsql {CREATE TABLE t1(a,b,c)}
+} {1 {not authorized}}
+do_test auth-1.1.2 {
+ db errorcode
+} {23}
+do_test auth-1.2 {
+ execsql {SELECT name FROM sqlite_master}
+} {}
+do_test auth-1.3.1 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_CREATE_TABLE"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE TABLE t1(a,b,c)}
+} {1 {not authorized}}
+do_test auth-1.3.2 {
+ db errorcode
+} {23}
+do_test auth-1.3.3 {
+ set ::authargs
+} {t1 {} main {}}
+do_test auth-1.4 {
+ execsql {SELECT name FROM sqlite_master}
+} {}
+
+do_test auth-1.5 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_INSERT" && $arg1=="sqlite_temp_master"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE TEMP TABLE t1(a,b,c)}
+} {1 {not authorized}}
+do_test auth-1.6 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {}
+do_test auth-1.7.1 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_CREATE_TEMP_TABLE"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE TEMP TABLE t1(a,b,c)}
+} {1 {not authorized}}
+do_test auth-1.7.2 {
+ set ::authargs
+} {t1 {} temp {}}
+do_test auth-1.8 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {}
+
+do_test auth-1.9 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_INSERT" && $arg1=="sqlite_master"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE TABLE t1(a,b,c)}
+} {0 {}}
+do_test auth-1.10 {
+ execsql {SELECT name FROM sqlite_master}
+} {}
+do_test auth-1.11 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_CREATE_TABLE"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE TABLE t1(a,b,c)}
+} {0 {}}
+do_test auth-1.12 {
+ execsql {SELECT name FROM sqlite_master}
+} {}
+do_test auth-1.13 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_INSERT" && $arg1=="sqlite_temp_master"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE TEMP TABLE t1(a,b,c)}
+} {0 {}}
+do_test auth-1.14 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {}
+do_test auth-1.15 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_CREATE_TEMP_TABLE"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE TEMP TABLE t1(a,b,c)}
+} {0 {}}
+do_test auth-1.16 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {}
+
+do_test auth-1.17 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_CREATE_TABLE"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE TEMP TABLE t1(a,b,c)}
+} {0 {}}
+do_test auth-1.18 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1}
+do_test auth-1.19.1 {
+ set ::authargs {}
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_CREATE_TEMP_TABLE"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE TABLE t2(a,b,c)}
+} {0 {}}
+do_test auth-1.19.2 {
+ set ::authargs
+} {}
+do_test auth-1.20 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2}
+
+do_test auth-1.21.1 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DROP_TABLE"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP TABLE t2}
+} {1 {not authorized}}
+do_test auth-1.21.2 {
+ set ::authargs
+} {t2 {} main {}}
+do_test auth-1.22 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2}
+do_test auth-1.23.1 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DROP_TABLE"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP TABLE t2}
+} {0 {}}
+do_test auth-1.23.2 {
+ set ::authargs
+} {t2 {} main {}}
+do_test auth-1.24 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2}
+
+do_test auth-1.25 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DROP_TEMP_TABLE"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP TABLE t1}
+} {1 {not authorized}}
+do_test auth-1.26 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1}
+do_test auth-1.27 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DROP_TEMP_TABLE"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP TABLE t1}
+} {0 {}}
+do_test auth-1.28 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1}
+
+do_test auth-1.29 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_INSERT" && $arg1=="t2"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {INSERT INTO t2 VALUES(1,2,3)}
+} {1 {not authorized}}
+do_test auth-1.30 {
+ execsql {SELECT * FROM t2}
+} {}
+do_test auth-1.31 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_INSERT" && $arg1=="t2"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {INSERT INTO t2 VALUES(1,2,3)}
+} {0 {}}
+do_test auth-1.32 {
+ execsql {SELECT * FROM t2}
+} {}
+do_test auth-1.33 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_INSERT" && $arg1=="t1"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {INSERT INTO t2 VALUES(1,2,3)}
+} {0 {}}
+do_test auth-1.34 {
+ execsql {SELECT * FROM t2}
+} {1 2 3}
+
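+# For SQLITE_READ the two rejection codes differ per column: SQLITE_DENY
+# aborts the whole query with an "access ... is prohibited" error, while
+# SQLITE_IGNORE lets the query run but makes the protected column read
+# as NULL.
+#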
+do_test auth-1.35.1 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_READ" && $arg1=="t2" && $arg2=="b"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {SELECT * FROM t2}
+} {1 {access to t2.b is prohibited}}
+do_test auth-1.35.2 {
+ execsql {ATTACH DATABASE 'test.db' AS two}
+ catchsql {SELECT * FROM two.t2}
+} {1 {access to two.t2.b is prohibited}}
+execsql {DETACH DATABASE two}
+do_test auth-1.36 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_READ" && $arg1=="t2" && $arg2=="b"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {SELECT * FROM t2}
+} {0 {1 {} 3}}
+do_test auth-1.37 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_READ" && $arg1=="t2" && $arg2=="b"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {SELECT * FROM t2 WHERE b=2}
+} {0 {}}
+do_test auth-1.38 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_READ" && $arg1=="t2" && $arg2=="a"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {SELECT * FROM t2 WHERE b=2}
+} {0 {{} 2 3}}
+do_test auth-1.39 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_READ" && $arg1=="t2" && $arg2=="b"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {SELECT * FROM t2 WHERE b IS NULL}
+} {0 {1 {} 3}}
+do_test auth-1.40 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_READ" && $arg1=="t2" && $arg2=="b"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {SELECT a,c FROM t2 WHERE b IS NULL}
+} {1 {access to t2.b is prohibited}}
+
+do_test auth-1.41 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_UPDATE" && $arg1=="t2" && $arg2=="b"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {UPDATE t2 SET a=11}
+} {0 {}}
+do_test auth-1.42 {
+ execsql {SELECT * FROM t2}
+} {11 2 3}
+do_test auth-1.43 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_UPDATE" && $arg1=="t2" && $arg2=="b"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {UPDATE t2 SET b=22, c=33}
+} {1 {not authorized}}
+do_test auth-1.44 {
+ execsql {SELECT * FROM t2}
+} {11 2 3}
+do_test auth-1.45 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_UPDATE" && $arg1=="t2" && $arg2=="b"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {UPDATE t2 SET b=22, c=33}
+} {0 {}}
+do_test auth-1.46 {
+ execsql {SELECT * FROM t2}
+} {11 2 33}
+
+do_test auth-1.47 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DELETE" && $arg1=="t2"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {DELETE FROM t2 WHERE a=11}
+} {1 {not authorized}}
+do_test auth-1.48 {
+ execsql {SELECT * FROM t2}
+} {11 2 33}
+do_test auth-1.49 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DELETE" && $arg1=="t2"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {DELETE FROM t2 WHERE a=11}
+} {0 {}}
+do_test auth-1.50 {
+ execsql {SELECT * FROM t2}
+} {11 2 33}
+
+do_test auth-1.51 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_SELECT"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {SELECT * FROM t2}
+} {1 {not authorized}}
+do_test auth-1.52 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_SELECT"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {SELECT * FROM t2}
+} {0 {}}
+do_test auth-1.53 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_SELECT"} {
+ return SQLITE_OK
+ }
+ return SQLITE_OK
+ }
+ catchsql {SELECT * FROM t2}
+} {0 {11 2 33}}
+
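+# The COPY command of this SQLite version is authorized with the
+# SQLITE_COPY code; arg1 names the target table and arg2 the source file.
+#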
+set f [open data1.txt w]
+puts $f "7:8:9"
+close $f
+do_test auth-1.54 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_COPY"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {COPY t2 FROM 'data1.txt' USING DELIMITERS ':'}
+} {1 {not authorized}}
+do_test auth-1.55 {
+ set ::authargs
+} {t2 data1.txt main {}}
+do_test auth-1.56 {
+ execsql {SELECT * FROM t2}
+} {11 2 33}
+do_test auth-1.57 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_COPY"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {COPY t2 FROM 'data1.txt' USING DELIMITERS ':'}
+} {0 {}}
+do_test auth-1.58 {
+ set ::authargs
+} {t2 data1.txt main {}}
+do_test auth-1.59 {
+ execsql {SELECT * FROM t2}
+} {11 2 33}
+do_test auth-1.60 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_COPY"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_OK
+ }
+ return SQLITE_OK
+ }
+ catchsql {COPY t2 FROM 'data1.txt' USING DELIMITERS ':'}
+} {0 {}}
+do_test auth-1.61 {
+ set ::authargs
+} {t2 data1.txt main {}}
+do_test auth-1.62 {
+ execsql {SELECT * FROM t2}
+} {11 2 33 7 8 9}
+
+do_test auth-1.63 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DELETE" && $arg1=="sqlite_master"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP TABLE t2}
+} {1 {not authorized}}
+do_test auth-1.64 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2}
+do_test auth-1.65 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DELETE" && $arg1=="t2"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP TABLE t2}
+} {1 {not authorized}}
+do_test auth-1.66 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2}
+do_test auth-1.67 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DELETE" && $arg1=="sqlite_temp_master"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP TABLE t1}
+} {1 {not authorized}}
+do_test auth-1.68 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1}
+do_test auth-1.69 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DELETE" && $arg1=="t1"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP TABLE t1}
+} {1 {not authorized}}
+do_test auth-1.70 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1}
+
+do_test auth-1.71 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DELETE" && $arg1=="sqlite_master"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP TABLE t2}
+} {0 {}}
+do_test auth-1.72 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2}
+do_test auth-1.73 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DELETE" && $arg1=="t2"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP TABLE t2}
+} {0 {}}
+do_test auth-1.74 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2}
+do_test auth-1.75 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DELETE" && $arg1=="sqlite_temp_master"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP TABLE t1}
+} {0 {}}
+do_test auth-1.76 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1}
+do_test auth-1.77 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DELETE" && $arg1=="t1"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP TABLE t1}
+} {0 {}}
+do_test auth-1.78 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1}
+
+do_test auth-1.79 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_CREATE_VIEW"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE VIEW v1 AS SELECT a+1,b+1 FROM t2}
+} {1 {not authorized}}
+do_test auth-1.80 {
+ set ::authargs
+} {v1 {} main {}}
+do_test auth-1.81 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2}
+do_test auth-1.82 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_CREATE_VIEW"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE VIEW v1 AS SELECT a+1,b+1 FROM t2}
+} {0 {}}
+do_test auth-1.83 {
+ set ::authargs
+} {v1 {} main {}}
+do_test auth-1.84 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2}
+
+do_test auth-1.85 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_CREATE_TEMP_VIEW"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE TEMPORARY VIEW v1 AS SELECT a+1,b+1 FROM t2}
+} {1 {not authorized}}
+do_test auth-1.86 {
+ set ::authargs
+} {v1 {} temp {}}
+do_test auth-1.87 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1}
+do_test auth-1.88 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_CREATE_TEMP_VIEW"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE TEMPORARY VIEW v1 AS SELECT a+1,b+1 FROM t2}
+} {0 {}}
+do_test auth-1.89 {
+ set ::authargs
+} {v1 {} temp {}}
+do_test auth-1.90 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1}
+
+do_test auth-1.91 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_INSERT" && $arg1=="sqlite_master"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE VIEW v1 AS SELECT a+1,b+1 FROM t2}
+} {1 {not authorized}}
+do_test auth-1.92 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2}
+do_test auth-1.93 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_INSERT" && $arg1=="sqlite_master"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE VIEW v1 AS SELECT a+1,b+1 FROM t2}
+} {0 {}}
+do_test auth-1.94 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2}
+
+do_test auth-1.95 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_INSERT" && $arg1=="sqlite_temp_master"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE TEMPORARY VIEW v1 AS SELECT a+1,b+1 FROM t2}
+} {1 {not authorized}}
+do_test auth-1.96 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1}
+do_test auth-1.97 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_INSERT" && $arg1=="sqlite_temp_master"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE TEMPORARY VIEW v1 AS SELECT a+1,b+1 FROM t2}
+} {0 {}}
+do_test auth-1.98 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1}
+
+do_test auth-1.99 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DELETE" && $arg1=="sqlite_master"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {
+ CREATE VIEW v2 AS SELECT a+1,b+1 FROM t2;
+ DROP VIEW v2
+ }
+} {1 {not authorized}}
+do_test auth-1.100 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2 v2}
+do_test auth-1.101 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DROP_VIEW"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP VIEW v2}
+} {1 {not authorized}}
+do_test auth-1.102 {
+ set ::authargs
+} {v2 {} main {}}
+do_test auth-1.103 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2 v2}
+do_test auth-1.104 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DELETE" && $arg1=="sqlite_master"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP VIEW v2}
+} {0 {}}
+do_test auth-1.105 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2 v2}
+do_test auth-1.106 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DROP_VIEW"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP VIEW v2}
+} {0 {}}
+do_test auth-1.107 {
+ set ::authargs
+} {v2 {} main {}}
+do_test auth-1.108 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2 v2}
+do_test auth-1.109 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DROP_VIEW"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_OK
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP VIEW v2}
+} {0 {}}
+do_test auth-1.110 {
+ set ::authargs
+} {v2 {} main {}}
+do_test auth-1.111 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2}
+
+
+do_test auth-1.112 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DELETE" && $arg1=="sqlite_temp_master"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {
+ CREATE TEMP VIEW v1 AS SELECT a+1,b+1 FROM t1;
+ DROP VIEW v1
+ }
+} {1 {not authorized}}
+do_test auth-1.113 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1 v1}
+do_test auth-1.114 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DROP_TEMP_VIEW"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP VIEW v1}
+} {1 {not authorized}}
+do_test auth-1.115 {
+ set ::authargs
+} {v1 {} temp {}}
+do_test auth-1.116 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1 v1}
+do_test auth-1.117 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DELETE" && $arg1=="sqlite_temp_master"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP VIEW v1}
+} {0 {}}
+do_test auth-1.118 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1 v1}
+do_test auth-1.119 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DROP_TEMP_VIEW"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP VIEW v1}
+} {0 {}}
+do_test auth-1.120 {
+ set ::authargs
+} {v1 {} temp {}}
+do_test auth-1.121 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1 v1}
+do_test auth-1.122 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DROP_TEMP_VIEW"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_OK
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP VIEW v1}
+} {0 {}}
+do_test auth-1.123 {
+ set ::authargs
+} {v1 {} temp {}}
+do_test auth-1.124 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1}
+
+do_test auth-1.125 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_CREATE_TRIGGER"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {
+ CREATE TRIGGER r2 DELETE on t2 BEGIN
+ SELECT NULL;
+ END;
+ }
+} {1 {not authorized}}
+do_test auth-1.126 {
+ set ::authargs
+} {r2 t2 main {}}
+do_test auth-1.127 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2}
+do_test auth-1.128 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_INSERT" && $arg1=="sqlite_master"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {
+ CREATE TRIGGER r2 DELETE on t2 BEGIN
+ SELECT NULL;
+ END;
+ }
+} {1 {not authorized}}
+do_test auth-1.129 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2}
+do_test auth-1.130 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_CREATE_TRIGGER"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {
+ CREATE TRIGGER r2 DELETE on t2 BEGIN
+ SELECT NULL;
+ END;
+ }
+} {0 {}}
+do_test auth-1.131 {
+ set ::authargs
+} {r2 t2 main {}}
+do_test auth-1.132 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2}
+do_test auth-1.133 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_INSERT" && $arg1=="sqlite_master"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {
+ CREATE TRIGGER r2 DELETE on t2 BEGIN
+ SELECT NULL;
+ END;
+ }
+} {0 {}}
+do_test auth-1.134 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2}
+do_test auth-1.135 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_CREATE_TRIGGER"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_OK
+ }
+ return SQLITE_OK
+ }
+ catchsql {
+ CREATE TABLE tx(id);
+ CREATE TRIGGER r2 AFTER INSERT ON t2 BEGIN
+ INSERT INTO tx VALUES(NEW.rowid);
+ END;
+ }
+} {0 {}}
+do_test auth-1.136.1 {
+ set ::authargs
+} {r2 t2 main {}}
+do_test auth-1.136.2 {
+ execsql {
+ SELECT name FROM sqlite_master WHERE type='trigger'
+ }
+} {r2}
+do_test auth-1.136.3 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ lappend ::authargs $code $arg1 $arg2 $arg3 $arg4
+ return SQLITE_OK
+ }
+ set ::authargs {}
+ execsql {
+ INSERT INTO t2 VALUES(1,2,3);
+ }
+ set ::authargs
+} {SQLITE_INSERT t2 {} main {} SQLITE_INSERT tx {} main r2 SQLITE_READ t2 ROWID main r2}
+do_test auth-1.136.4 {
+ execsql {
+ SELECT * FROM tx;
+ }
+} {3}
+do_test auth-1.137 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2 tx r2}
+do_test auth-1.138 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_CREATE_TEMP_TRIGGER"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {
+ CREATE TRIGGER r1 DELETE on t1 BEGIN
+ SELECT NULL;
+ END;
+ }
+} {1 {not authorized}}
+do_test auth-1.139 {
+ set ::authargs
+} {r1 t1 temp {}}
+do_test auth-1.140 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1}
+do_test auth-1.141 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_INSERT" && $arg1=="sqlite_temp_master"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {
+ CREATE TRIGGER r1 DELETE on t1 BEGIN
+ SELECT NULL;
+ END;
+ }
+} {1 {not authorized}}
+do_test auth-1.142 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1}
+do_test auth-1.143 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_CREATE_TEMP_TRIGGER"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {
+ CREATE TRIGGER r1 DELETE on t1 BEGIN
+ SELECT NULL;
+ END;
+ }
+} {0 {}}
+do_test auth-1.144 {
+ set ::authargs
+} {r1 t1 temp {}}
+do_test auth-1.145 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1}
+do_test auth-1.146 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_INSERT" && $arg1=="sqlite_temp_master"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {
+ CREATE TRIGGER r1 DELETE on t1 BEGIN
+ SELECT NULL;
+ END;
+ }
+} {0 {}}
+do_test auth-1.147 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1}
+do_test auth-1.148 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_CREATE_TEMP_TRIGGER"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_OK
+ }
+ return SQLITE_OK
+ }
+ catchsql {
+ CREATE TRIGGER r1 DELETE on t1 BEGIN
+ SELECT NULL;
+ END;
+ }
+} {0 {}}
+do_test auth-1.149 {
+ set ::authargs
+} {r1 t1 temp {}}
+do_test auth-1.150 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1 r1}
+
+do_test auth-1.151 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DELETE" && $arg1=="sqlite_master"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP TRIGGER r2}
+} {1 {not authorized}}
+do_test auth-1.152 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2 tx r2}
+do_test auth-1.153 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DROP_TRIGGER"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP TRIGGER r2}
+} {1 {not authorized}}
+do_test auth-1.154 {
+ set ::authargs
+} {r2 t2 main {}}
+do_test auth-1.155 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2 tx r2}
+do_test auth-1.156 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DELETE" && $arg1=="sqlite_master"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP TRIGGER r2}
+} {0 {}}
+do_test auth-1.157 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2 tx r2}
+do_test auth-1.158 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DROP_TRIGGER"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP TRIGGER r2}
+} {0 {}}
+do_test auth-1.159 {
+ set ::authargs
+} {r2 t2 main {}}
+do_test auth-1.160 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2 tx r2}
+do_test auth-1.161 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DROP_TRIGGER"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_OK
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP TRIGGER r2}
+} {0 {}}
+do_test auth-1.162 {
+ set ::authargs
+} {r2 t2 main {}}
+do_test auth-1.163 {
+ execsql {
+ DROP TABLE tx;
+ DELETE FROM t2 WHERE a=1 AND b=2 AND c=3;
+ SELECT name FROM sqlite_master;
+ }
+} {t2}
+
+do_test auth-1.164 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DELETE" && $arg1=="sqlite_temp_master"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP TRIGGER r1}
+} {1 {not authorized}}
+do_test auth-1.165 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1 r1}
+do_test auth-1.166 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DROP_TEMP_TRIGGER"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP TRIGGER r1}
+} {1 {not authorized}}
+do_test auth-1.167 {
+ set ::authargs
+} {r1 t1 temp {}}
+do_test auth-1.168 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1 r1}
+do_test auth-1.169 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DELETE" && $arg1=="sqlite_temp_master"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP TRIGGER r1}
+} {0 {}}
+do_test auth-1.170 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1 r1}
+do_test auth-1.171 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DROP_TEMP_TRIGGER"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP TRIGGER r1}
+} {0 {}}
+do_test auth-1.172 {
+ set ::authargs
+} {r1 t1 temp {}}
+do_test auth-1.173 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1 r1}
+do_test auth-1.174 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DROP_TEMP_TRIGGER"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_OK
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP TRIGGER r1}
+} {0 {}}
+do_test auth-1.175 {
+ set ::authargs
+} {r1 t1 temp {}}
+do_test auth-1.176 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1}
+
+do_test auth-1.177 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_CREATE_INDEX"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE INDEX i2 ON t2(a)}
+} {1 {not authorized}}
+do_test auth-1.178 {
+ set ::authargs
+} {i2 t2 main {}}
+do_test auth-1.179 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2}
+do_test auth-1.180 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_INSERT" && $arg1=="sqlite_master"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE INDEX i2 ON t2(a)}
+} {1 {not authorized}}
+do_test auth-1.181 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2}
+do_test auth-1.182 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_CREATE_INDEX"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE INDEX i2 ON t2(b)}
+} {0 {}}
+do_test auth-1.183 {
+ set ::authargs
+} {i2 t2 main {}}
+do_test auth-1.184 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2}
+do_test auth-1.185 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_INSERT" && $arg1=="sqlite_master"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE INDEX i2 ON t2(b)}
+} {0 {}}
+do_test auth-1.186 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2}
+do_test auth-1.187 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_CREATE_INDEX"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_OK
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE INDEX i2 ON t2(a)}
+} {0 {}}
+do_test auth-1.188 {
+ set ::authargs
+} {i2 t2 main {}}
+do_test auth-1.189 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2 i2}
+
+do_test auth-1.190 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_CREATE_TEMP_INDEX"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE INDEX i1 ON t1(a)}
+} {1 {not authorized}}
+do_test auth-1.191 {
+ set ::authargs
+} {i1 t1 temp {}}
+do_test auth-1.192 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1}
+do_test auth-1.193 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_INSERT" && $arg1=="sqlite_temp_master"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE INDEX i1 ON t1(b)}
+} {1 {not authorized}}
+do_test auth-1.194 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1}
+do_test auth-1.195 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_CREATE_TEMP_INDEX"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE INDEX i1 ON t1(b)}
+} {0 {}}
+do_test auth-1.196 {
+ set ::authargs
+} {i1 t1 temp {}}
+do_test auth-1.197 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1}
+do_test auth-1.198 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_INSERT" && $arg1=="sqlite_temp_master"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE INDEX i1 ON t1(c)}
+} {0 {}}
+do_test auth-1.199 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1}
+do_test auth-1.200 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_CREATE_TEMP_INDEX"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_OK
+ }
+ return SQLITE_OK
+ }
+ catchsql {CREATE INDEX i1 ON t1(a)}
+} {0 {}}
+do_test auth-1.201 {
+ set ::authargs
+} {i1 t1 temp {}}
+do_test auth-1.202 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1 i1}
+
+do_test auth-1.203 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DELETE" && $arg1=="sqlite_master"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP INDEX i2}
+} {1 {not authorized}}
+do_test auth-1.204 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2 i2}
+do_test auth-1.205 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DROP_INDEX"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP INDEX i2}
+} {1 {not authorized}}
+do_test auth-1.206 {
+ set ::authargs
+} {i2 t2 main {}}
+do_test auth-1.207 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2 i2}
+do_test auth-1.208 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DELETE" && $arg1=="sqlite_master"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP INDEX i2}
+} {0 {}}
+do_test auth-1.209 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2 i2}
+do_test auth-1.210 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DROP_INDEX"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP INDEX i2}
+} {0 {}}
+do_test auth-1.211 {
+ set ::authargs
+} {i2 t2 main {}}
+do_test auth-1.212 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2 i2}
+do_test auth-1.213 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DROP_INDEX"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_OK
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP INDEX i2}
+} {0 {}}
+do_test auth-1.214 {
+ set ::authargs
+} {i2 t2 main {}}
+do_test auth-1.215 {
+ execsql {SELECT name FROM sqlite_master}
+} {t2}
+
+do_test auth-1.216 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DELETE" && $arg1=="sqlite_temp_master"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP INDEX i1}
+} {1 {not authorized}}
+do_test auth-1.217 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1 i1}
+do_test auth-1.218 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DROP_TEMP_INDEX"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP INDEX i1}
+} {1 {not authorized}}
+do_test auth-1.219 {
+ set ::authargs
+} {i1 t1 temp {}}
+do_test auth-1.220 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1 i1}
+do_test auth-1.221 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DELETE" && $arg1=="sqlite_temp_master"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP INDEX i1}
+} {0 {}}
+do_test auth-1.222 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1 i1}
+do_test auth-1.223 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DROP_TEMP_INDEX"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP INDEX i1}
+} {0 {}}
+do_test auth-1.224 {
+ set ::authargs
+} {i1 t1 temp {}}
+do_test auth-1.225 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1 i1}
+do_test auth-1.226 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DROP_TEMP_INDEX"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_OK
+ }
+ return SQLITE_OK
+ }
+ catchsql {DROP INDEX i1}
+} {0 {}}
+do_test auth-1.227 {
+ set ::authargs
+} {i1 t1 temp {}}
+do_test auth-1.228 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {t1}
+
+do_test auth-1.229 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_PRAGMA"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {PRAGMA full_column_names=on}
+} {1 {not authorized}}
+do_test auth-1.230 {
+ set ::authargs
+} {full_column_names on {} {}}
+do_test auth-1.231 {
+ execsql2 {SELECT a FROM t2}
+} {a 11 a 7}
+do_test auth-1.232 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_PRAGMA"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {PRAGMA full_column_names=on}
+} {0 {}}
+do_test auth-1.233 {
+ set ::authargs
+} {full_column_names on {} {}}
+do_test auth-1.234 {
+ execsql2 {SELECT a FROM t2}
+} {a 11 a 7}
+do_test auth-1.235 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_PRAGMA"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_OK
+ }
+ return SQLITE_OK
+ }
+ catchsql {PRAGMA full_column_names=on}
+} {0 {}}
+do_test auth-1.236 {
+ execsql2 {SELECT a FROM t2}
+} {t2.a 11 t2.a 7}
+do_test auth-1.237 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_PRAGMA"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_OK
+ }
+ return SQLITE_OK
+ }
+ catchsql {PRAGMA full_column_names=OFF}
+} {0 {}}
+do_test auth-1.238 {
+ set ::authargs
+} {full_column_names OFF {} {}}
+do_test auth-1.239 {
+ execsql2 {SELECT a FROM t2}
+} {a 11 a 7}
+
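+# Transaction control statements are authorized with SQLITE_TRANSACTION,
+# with arg1 carrying the operation name (BEGIN, COMMIT, or ROLLBACK), as
+# the ::authargs values recorded below show.
+#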
+do_test auth-1.240 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_TRANSACTION"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {BEGIN}
+} {1 {not authorized}}
+do_test auth-1.241 {
+ set ::authargs
+} {BEGIN {} {} {}}
+do_test auth-1.242 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_TRANSACTION" && $arg1!="BEGIN"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {BEGIN; INSERT INTO t2 VALUES(44,55,66); COMMIT}
+} {1 {not authorized}}
+do_test auth-1.243 {
+ set ::authargs
+} {COMMIT {} {} {}}
+do_test auth-1.244 {
+ execsql {SELECT * FROM t2}
+} {11 2 33 7 8 9 44 55 66}
+do_test auth-1.245 {
+ catchsql {ROLLBACK}
+} {1 {not authorized}}
+do_test auth-1.246 {
+ set ::authargs
+} {ROLLBACK {} {} {}}
+do_test auth-1.247 {
+ catchsql {END TRANSACTION}
+} {1 {not authorized}}
+do_test auth-1.248 {
+ set ::authargs
+} {COMMIT {} {} {}}
+do_test auth-1.249 {
+ db authorizer {}
+ catchsql {ROLLBACK}
+} {0 {}}
+do_test auth-1.250 {
+ execsql {SELECT * FROM t2}
+} {11 2 33 7 8 9}
+
+# ticket #340 - authorization for ATTACH and DETACH.
+#
+do_test auth-1.251 {
+ db authorizer ::auth
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_ATTACH"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ }
+ return SQLITE_OK
+ }
+ catchsql {
+ ATTACH DATABASE ':memory:' AS test1
+ }
+} {0 {}}
+do_test auth-1.252 {
+ set ::authargs
+} {:memory: {} {} {}}
+do_test auth-1.253 {
+ catchsql {DETACH DATABASE test1}
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_ATTACH"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {
+ ATTACH DATABASE ':memory:' AS test1;
+ }
+} {1 {not authorized}}
+do_test auth-1.254 {
+ lindex [execsql {PRAGMA database_list}] 7
+} {}
+do_test auth-1.255 {
+ catchsql {DETACH DATABASE test1}
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_ATTACH"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {
+ ATTACH DATABASE ':memory:' AS test1;
+ }
+} {0 {}}
+do_test auth-1.256 {
+ lindex [execsql {PRAGMA database_list}] 7
+} {}
+do_test auth-1.257 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DETACH"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_OK
+ }
+ return SQLITE_OK
+ }
+ execsql {ATTACH DATABASE ':memory:' AS test1}
+ catchsql {
+ DETACH DATABASE test1;
+ }
+} {0 {}}
+do_test auth-1.258 {
+ lindex [execsql {PRAGMA database_list}] 7
+} {}
+do_test auth-1.259 {
+ execsql {ATTACH DATABASE ':memory:' AS test1}
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DETACH"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {
+ DETACH DATABASE test1;
+ }
+} {0 {}}
+do_test auth-1.260 {
+ lindex [execsql {PRAGMA database_list}] 7
+} {test1}
+do_test auth-1.261 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_DETACH"} {
+ set ::authargs [list $arg1 $arg2 $arg3 $arg4]
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ catchsql {
+ DETACH DATABASE test1;
+ }
+} {1 {not authorized}}
+do_test auth-1.262 {
+ lindex [execsql {PRAGMA database_list}] 7
+} {test1}
+db authorizer {}
+execsql {DETACH DATABASE test1}
+
+
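+# The auth-2.* tests cover authorization of the ROWID.  Column x of t3 is
+# an INTEGER PRIMARY KEY and therefore an alias for the ROWID, so reading
+# the ROWID (explicitly or via SELECT *) is subject to the same READ
+# checks as x.
+#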
+do_test auth-2.1 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_READ" && $arg1=="t3" && $arg2=="x"} {
+ return SQLITE_DENY
+ }
+ return SQLITE_OK
+ }
+ db authorizer ::auth
+ execsql {CREATE TABLE t3(x INTEGER PRIMARY KEY, y, z)}
+ catchsql {SELECT * FROM t3}
+} {1 {access to t3.x is prohibited}}
+do_test auth-2.1 {
+ catchsql {SELECT y,z FROM t3}
+} {0 {}}
+do_test auth-2.2 {
+ catchsql {SELECT ROWID,y,z FROM t3}
+} {1 {access to t3.x is prohibited}}
+do_test auth-2.3 {
+ catchsql {SELECT OID,y,z FROM t3}
+} {1 {access to t3.x is prohibited}}
+do_test auth-2.4 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_READ" && $arg1=="t3" && $arg2=="x"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ execsql {INSERT INTO t3 VALUES(44,55,66)}
+ catchsql {SELECT * FROM t3}
+} {0 {{} 55 66}}
+do_test auth-2.5 {
+ catchsql {SELECT rowid,y,z FROM t3}
+} {0 {{} 55 66}}
+do_test auth-2.6 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_READ" && $arg1=="t3" && $arg2=="ROWID"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {SELECT * FROM t3}
+} {0 {44 55 66}}
+do_test auth-2.7 {
+ catchsql {SELECT ROWID,y,z FROM t3}
+} {0 {44 55 66}}
+do_test auth-2.8 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_READ" && $arg1=="t2" && $arg2=="ROWID"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {SELECT ROWID,b,c FROM t2}
+} {0 {{} 2 33 {} 8 9}}
+do_test auth-2.9.1 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_READ" && $arg1=="t2" && $arg2=="ROWID"} {
+ return bogus
+ }
+ return SQLITE_OK
+ }
+ catchsql {SELECT ROWID,b,c FROM t2}
+} {1 {illegal return value (999) from the authorization function - should be SQLITE_OK, SQLITE_IGNORE, or SQLITE_DENY}}
+do_test auth-2.9.2 {
+ db errorcode
+} {21}
+do_test auth-2.10 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_SELECT"} {
+ return bogus
+ }
+ return SQLITE_OK
+ }
+ catchsql {SELECT ROWID,b,c FROM t2}
+} {1 {illegal return value (1) from the authorization function - should be SQLITE_OK, SQLITE_IGNORE, or SQLITE_DENY}}
+do_test auth-2.11.1 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_READ" && $arg2=="a"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {SELECT * FROM t2, t3}
+} {0 {{} 2 33 44 55 66 {} 8 9 44 55 66}}
+do_test auth-2.11.2 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_READ" && $arg2=="x"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ catchsql {SELECT * FROM t2, t3}
+} {0 {11 2 33 {} 55 66 7 8 9 {} 55 66}}
+
+# Make sure the OLD and NEW pseudo-tables of a trigger get authorized.
+#
+do_test auth-3.1 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ return SQLITE_OK
+ }
+ execsql {
+ CREATE TABLE tx(a1,a2,b1,b2,c1,c2);
+ CREATE TRIGGER r1 AFTER UPDATE ON t2 FOR EACH ROW BEGIN
+ INSERT INTO tx VALUES(OLD.a,NEW.a,OLD.b,NEW.b,OLD.c,NEW.c);
+ END;
+ UPDATE t2 SET a=a+1;
+ SELECT * FROM tx;
+ }
+} {11 12 2 2 33 33 7 8 8 8 9 9}
+do_test auth-3.2 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ if {$code=="SQLITE_READ" && $arg1=="t2" && $arg2=="c"} {
+ return SQLITE_IGNORE
+ }
+ return SQLITE_OK
+ }
+ execsql {
+ DELETE FROM tx;
+ UPDATE t2 SET a=a+100;
+ SELECT * FROM tx;
+ }
+} {12 112 2 2 {} {} 8 108 8 8 {} {}}
+
+# Make sure the names of views and triggers are passed on in arg4.
+#
+do_test auth-4.1 {
+ proc auth {code arg1 arg2 arg3 arg4} {
+ lappend ::authargs $code $arg1 $arg2 $arg3 $arg4
+ return SQLITE_OK
+ }
+ set authargs {}
+ execsql {
+ UPDATE t2 SET a=a+1;
+ }
+ set authargs
+} [list \
+ SQLITE_READ t2 a main {} \
+ SQLITE_UPDATE t2 a main {} \
+ SQLITE_INSERT tx {} main r1 \
+ SQLITE_READ t2 a main r1 \
+ SQLITE_READ t2 a main r1 \
+ SQLITE_READ t2 b main r1 \
+ SQLITE_READ t2 b main r1 \
+ SQLITE_READ t2 c main r1 \
+ SQLITE_READ t2 c main r1]
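+# (The first two entries come from the top-level UPDATE itself; every
+# entry generated inside trigger r1 carries the trigger name in arg4.)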
+do_test auth-4.2 {
+ execsql {
+ CREATE VIEW v1 AS SELECT a+b AS x FROM t2;
+ CREATE TABLE v1chng(x1,x2);
+ CREATE TRIGGER r2 INSTEAD OF UPDATE ON v1 BEGIN
+ INSERT INTO v1chng VALUES(OLD.x,NEW.x);
+ END;
+ SELECT * FROM v1;
+ }
+} {115 117}
+do_test auth-4.3 {
+ set authargs {}
+ execsql {
+ UPDATE v1 SET x=1 WHERE x=117
+ }
+ set authargs
+} [list \
+ SQLITE_UPDATE v1 x main {} \
+ SQLITE_READ v1 x main {} \
+ SQLITE_SELECT {} {} {} v1 \
+ SQLITE_READ t2 a main v1 \
+ SQLITE_READ t2 b main v1 \
+ SQLITE_INSERT v1chng {} main r2 \
+ SQLITE_READ v1 x main r2 \
+ SQLITE_READ v1 x main r2]
+do_test auth-4.4 {
+ execsql {
+ CREATE TRIGGER r3 INSTEAD OF DELETE ON v1 BEGIN
+ INSERT INTO v1chng VALUES(OLD.x,NULL);
+ END;
+ SELECT * FROM v1;
+ }
+} {115 117}
+do_test auth-4.5 {
+ set authargs {}
+ execsql {
+ DELETE FROM v1 WHERE x=117
+ }
+ set authargs
+} [list \
+ SQLITE_DELETE v1 {} main {} \
+ SQLITE_READ v1 x main {} \
+ SQLITE_SELECT {} {} {} v1 \
+ SQLITE_READ t2 a main v1 \
+ SQLITE_READ t2 b main v1 \
+ SQLITE_INSERT v1chng {} main r3 \
+ SQLITE_READ v1 x main r3]
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/bigfile.test b/usr/src/cmd/svc/configd/sqlite/test/bigfile.test
new file mode 100644
index 0000000000..d3d0d21fb9
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/bigfile.test
@@ -0,0 +1,180 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2002 November 30
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this script is testing the ability of SQLite to handle database
+# files larger than 4GB.
+#
+# $Id: bigfile.test,v 1.3 2003/12/19 12:31:22 drh Exp $
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# These tests only work for Tcl version 8.4 and later. Prior to 8.4,
+# Tcl was unable to handle large files.
+#
+scan $::tcl_version %f vx
+if {$vx<8.4} return
+
+# This is the md5 checksum of all the data in table t1 as created
+# by the first test. We will use this number to make sure that data
+# never changes.
+#
+set MAGIC_SUM {593f1efcfdbe698c28b4b1b693f7e4cf}
+
+do_test bigfile-1.1 {
+ execsql {
+ BEGIN;
+ CREATE TABLE t1(x);
+ INSERT INTO t1 VALUES('abcdefghijklmnopqrstuvwxyz');
+ INSERT INTO t1 SELECT rowid || ' ' || x FROM t1;
+ INSERT INTO t1 SELECT rowid || ' ' || x FROM t1;
+ INSERT INTO t1 SELECT rowid || ' ' || x FROM t1;
+ INSERT INTO t1 SELECT rowid || ' ' || x FROM t1;
+ INSERT INTO t1 SELECT rowid || ' ' || x FROM t1;
+ INSERT INTO t1 SELECT rowid || ' ' || x FROM t1;
+ INSERT INTO t1 SELECT rowid || ' ' || x FROM t1;
+ COMMIT;
+ }
+ execsql {
+ SELECT md5sum(x) FROM t1;
+ }
+} $::MAGIC_SUM
+
+# Try to create a large file - a file that is larger than 2^32 bytes.
+# If this fails, it means that the system being tested does not support
+# large files. So skip all of the remaining tests in this file.
+#
+db close
+if {[catch {fake_big_file 4096 test.db}]} {
+ puts "**** Unable to create a file larger than 4096 MB. *****"
+ finish_test
+ return
+}
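+# (fake_big_file is a helper supplied by the test harness; judging from
+# the message above, its first argument is a size in megabytes, so this
+# call stretches test.db past the 4GB mark.)
+#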
+
+do_test bigfile-1.2 {
+ sqlite db test.db
+ execsql {
+ SELECT md5sum(x) FROM t1;
+ }
+} $::MAGIC_SUM
+
+# The previous test may fail on some systems because they are unable
+# to handle large files. If that is so, then skip all of the following
+# tests. We will know the above test failed because the "db" command
+# does not exist.
+#
+if {[llength [info command db]]>0} {
+
+do_test bigfile-1.3 {
+ execsql {
+ CREATE TABLE t2 AS SELECT * FROM t1;
+ SELECT md5sum(x) FROM t2;
+ }
+} $::MAGIC_SUM
+do_test bigfile-1.4 {
+ db close
+ sqlite db test.db
+ execsql {
+ SELECT md5sum(x) FROM t1;
+ }
+} $::MAGIC_SUM
+do_test bigfile-1.5 {
+ execsql {
+ SELECT md5sum(x) FROM t2;
+ }
+} $::MAGIC_SUM
+
+db close
+if {[catch {fake_big_file 8192 test.db}]} {
+ puts "**** Unable to create a file larger than 8192 MB. *****"
+ finish_test
+ return
+}
+
+do_test bigfile-1.6 {
+ sqlite db test.db
+ execsql {
+ SELECT md5sum(x) FROM t1;
+ }
+} $::MAGIC_SUM
+do_test bigfile-1.7 {
+ execsql {
+ CREATE TABLE t3 AS SELECT * FROM t1;
+ SELECT md5sum(x) FROM t3;
+ }
+} $::MAGIC_SUM
+do_test bigfile-1.8 {
+ db close
+ sqlite db test.db
+ execsql {
+ SELECT md5sum(x) FROM t1;
+ }
+} $::MAGIC_SUM
+do_test bigfile-1.9 {
+ execsql {
+ SELECT md5sum(x) FROM t2;
+ }
+} $::MAGIC_SUM
+do_test bigfile-1.10 {
+ execsql {
+ SELECT md5sum(x) FROM t3;
+ }
+} $::MAGIC_SUM
+
+db close
+if {[catch {fake_big_file 16384 test.db}]} {
+ puts "**** Unable to create a file larger than 16384 MB. *****"
+ finish_test
+ return
+}
+
+do_test bigfile-1.11 {
+ sqlite db test.db
+ execsql {
+ SELECT md5sum(x) FROM t1;
+ }
+} $::MAGIC_SUM
+do_test bigfile-1.12 {
+ execsql {
+ CREATE TABLE t4 AS SELECT * FROM t1;
+ SELECT md5sum(x) FROM t4;
+ }
+} $::MAGIC_SUM
+do_test bigfile-1.13 {
+ db close
+ sqlite db test.db
+ execsql {
+ SELECT md5sum(x) FROM t1;
+ }
+} $::MAGIC_SUM
+do_test bigfile-1.14 {
+ execsql {
+ SELECT md5sum(x) FROM t2;
+ }
+} $::MAGIC_SUM
+do_test bigfile-1.15 {
+ execsql {
+ SELECT md5sum(x) FROM t3;
+ }
+} $::MAGIC_SUM
+do_test bigfile-1.16 {
+ execsql {
+    SELECT md5sum(x) FROM t4;
+ }
+} $::MAGIC_SUM
+
+} ;# End of the "if( db command exists )"
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/bigrow.test b/usr/src/cmd/svc/configd/sqlite/test/bigrow.test
new file mode 100644
index 0000000000..b9aed8e7e1
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/bigrow.test
@@ -0,0 +1,218 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 23
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is stressing the library by putting large amounts
+# of data in a single row of a table.
+#
+# $Id: bigrow.test,v 1.4 2001/11/24 00:31:47 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Make a big string that we can use for test data
+#
+do_test bigrow-1.0 {
+ set ::bigstr {}
+ for {set i 1} {$i<=9999} {incr i} {
+ set sep [string index "abcdefghijklmnopqrstuvwxyz" [expr {$i%26}]]
+ append ::bigstr "$sep [format %04d $i] "
+ }
+ string length $::bigstr
+} {69993}
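+# (Each pass through the loop appends a one-character separator, a space,
+# a four-digit number, and a trailing space, i.e. 7 characters per pass,
+# so 9999 iterations give the expected 9999*7 = 69993 characters.)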
+
+# Make a table into which we can insert some big records.
+#
+do_test bigrow-1.1 {
+ execsql {
+ CREATE TABLE t1(a text, b text, c text);
+ SELECT name FROM sqlite_master
+ WHERE type='table' OR type='index'
+ ORDER BY name
+ }
+} {t1}
+
+do_test bigrow-1.2 {
+ set ::big1 [string range $::bigstr 0 65519]
+ set sql "INSERT INTO t1 VALUES('abc',"
+ append sql "'$::big1', 'xyz');"
+ execsql $sql
+ execsql {SELECT a, c FROM t1}
+} {abc xyz}
+do_test bigrow-1.3 {
+ execsql {SELECT b FROM t1}
+} [list $::big1]
+do_test bigrow-1.4 {
+ set ::big2 [string range $::bigstr 0 65520]
+ set sql "INSERT INTO t1 VALUES('abc2',"
+ append sql "'$::big2', 'xyz2');"
+ set r [catch {execsql $sql} msg]
+ lappend r $msg
+} {0 {}}
+do_test bigrow-1.4.1 {
+ execsql {SELECT b FROM t1 ORDER BY c}
+} [list $::big1 $::big2]
+do_test bigrow-1.4.2 {
+ execsql {SELECT c FROM t1 ORDER BY c}
+} {xyz xyz2}
+do_test bigrow-1.4.3 {
+ execsql {DELETE FROM t1 WHERE a='abc2'}
+ execsql {SELECT c FROM t1}
+} {xyz}
+
+do_test bigrow-1.5 {
+ execsql {
+ UPDATE t1 SET a=b, b=a;
+ SELECT b,c FROM t1
+ }
+} {abc xyz}
+do_test bigrow-1.6 {
+ execsql {
+ SELECT * FROM t1
+ }
+} [list $::big1 abc xyz]
+do_test bigrow-1.7 {
+ execsql {
+ INSERT INTO t1 VALUES('1','2','3');
+ INSERT INTO t1 VALUES('A','B','C');
+ SELECT b FROM t1 WHERE a=='1';
+ }
+} {2}
+do_test bigrow-1.8 {
+ execsql "SELECT b FROM t1 WHERE a=='$::big1'"
+} {abc}
+do_test bigrow-1.9 {
+ execsql "SELECT b FROM t1 WHERE a!='$::big1' ORDER BY a"
+} {2 B}
+
+# Try doing some indexing on big columns
+#
+do_test bigrow-2.1 {
+ execsql {
+ CREATE INDEX i1 ON t1(a)
+ }
+ execsql "SELECT b FROM t1 WHERE a=='$::big1'"
+} {abc}
+do_test bigrow-2.2 {
+ execsql {
+ UPDATE t1 SET a=b, b=a
+ }
+ execsql "SELECT b FROM t1 WHERE a=='abc'"
+} [list $::big1]
+do_test bigrow-2.3 {
+ execsql {
+ UPDATE t1 SET a=b, b=a
+ }
+ execsql "SELECT b FROM t1 WHERE a=='$::big1'"
+} {abc}
+catch {unset ::bigstr}
+catch {unset ::big1}
+catch {unset ::big2}
+
+# Most of the tests above were created back when rows were limited in
+# size to 64K. Now rows can be much bigger. Test that logic. Also
+# make sure things work correctly at the transition boundaries between
+# row sizes of 256 to 257 bytes and from 65536 to 65537 bytes.
+#
+# We begin by testing the 256..257 transition.
+#
+do_test bigrow-3.1 {
+ execsql {
+ DELETE FROM t1;
+ INSERT INTO t1(a,b,c) VALUES('one','abcdefghijklmnopqrstuvwxyz0123','hi');
+ }
+ execsql {SELECT a,length(b),c FROM t1}
+} {one 30 hi}
+do_test bigrow-3.2 {
+ execsql {
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ }
+ execsql {SELECT a,length(b),c FROM t1}
+} {one 240 hi}
+for {set i 1} {$i<10} {incr i} {
+ do_test bigrow-3.3.$i {
+ execsql "UPDATE t1 SET b=b||'$i'"
+ execsql {SELECT a,length(b),c FROM t1}
+ } "one [expr {240+$i}] hi"
+}
+
+# Now test the 65536..65537 row-size transition.
+#
+do_test bigrow-4.1 {
+ execsql {
+ DELETE FROM t1;
+ INSERT INTO t1(a,b,c) VALUES('one','abcdefghijklmnopqrstuvwxyz0123','hi');
+ }
+ execsql {SELECT a,length(b),c FROM t1}
+} {one 30 hi}
+do_test bigrow-4.2 {
+ execsql {
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ }
+ execsql {SELECT a,length(b),c FROM t1}
+} {one 122880 hi}
+do_test bigrow-4.3 {
+ execsql {
+ UPDATE t1 SET b=substr(b,1,65515)
+ }
+ execsql {SELECT a,length(b),c FROM t1}
+} {one 65515 hi}
+for {set i 1} {$i<10} {incr i} {
+ do_test bigrow-4.4.$i {
+ execsql "UPDATE t1 SET b=b||'$i'"
+ execsql {SELECT a,length(b),c FROM t1}
+ } "one [expr {65515+$i}] hi"
+}
+
+# Check to make sure the library recovers safely if a row contains
+# too much data.
+#
+do_test bigrow-5.1 {
+ execsql {
+ DELETE FROM t1;
+ INSERT INTO t1(a,b,c) VALUES('one','abcdefghijklmnopqrstuvwxyz0123','hi');
+ }
+ execsql {SELECT a,length(b),c FROM t1}
+} {one 30 hi}
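+# The loop below doubles column b on each pass and checks that the row
+# survives at every size, stopping just under the 1MB row limit;
+# bigrow-5.3 then confirms that one further doubling is rejected with a
+# clean error rather than corrupting the table.
+#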
+set i 1
+for {set sz 60} {$sz<1048560} {incr sz $sz} {
+ do_test bigrow-5.2.$i {
+ execsql {
+ UPDATE t1 SET b=b||b;
+ SELECT a,length(b),c FROM t1;
+ }
+ } "one $sz hi"
+ incr i
+}
+do_test bigrow-5.3 {
+ set r [catch {execsql {UPDATE t1 SET b=b||b}} msg]
+ lappend r $msg
+} {1 {too much data for one table row}}
+do_test bigrow-5.4 {
+ execsql {DROP TABLE t1}
+} {}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/bind.test b/usr/src/cmd/svc/configd/sqlite/test/bind.test
new file mode 100644
index 0000000000..0f87255666
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/bind.test
@@ -0,0 +1,75 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2003 September 6
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this script is testing the sqlite_bind API.
+#
+# $Id: bind.test,v 1.1 2003/09/06 22:45:21 drh Exp $
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
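+# The general pattern below: sqlite_compile turns SQL containing '?'
+# markers into a virtual machine, sqlite_bind fills in the Nth marker,
+# sqlite_step executes the statement, and sqlite_reset rewinds the VM so
+# it can be run again with new bindings.
+#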
+do_test bind-1.1 {
+ db close
+ set DB [sqlite db test.db]
+ execsql {CREATE TABLE t1(a,b,c)}
+ set VM [sqlite_compile $DB {INSERT INTO t1 VALUES(?,?,?)} TAIL]
+ set TAIL
+} {}
+do_test bind-1.2 {
+ sqlite_step $VM N VALUES COLNAMES
+} {SQLITE_DONE}
+do_test bind-1.3 {
+ execsql {SELECT rowid, * FROM t1}
+} {1 {} {} {}}
+do_test bind-1.4 {
+ sqlite_reset $VM
+ sqlite_bind $VM 1 {test value 1} normal
+ sqlite_step $VM N VALUES COLNAMES
+} SQLITE_DONE
+do_test bind-1.5 {
+ execsql {SELECT rowid, * FROM t1}
+} {1 {} {} {} 2 {test value 1} {} {}}
+do_test bind-1.6 {
+ sqlite_reset $VM
+ sqlite_bind $VM 3 {'test value 2'} normal
+ sqlite_step $VM N VALUES COLNAMES
+} SQLITE_DONE
+do_test bind-1.7 {
+ execsql {SELECT rowid, * FROM t1}
+} {1 {} {} {} 2 {test value 1} {} {} 3 {test value 1} {} {'test value 2'}}
+do_test bind-1.8 {
+ sqlite_reset $VM
+ set sqlite_static_bind_value 123
+ sqlite_bind $VM 1 {} static
+ sqlite_bind $VM 2 {abcdefg} normal
+ sqlite_bind $VM 3 {} null
+ execsql {DELETE FROM t1}
+ sqlite_step $VM N VALUES COLNAMES
+ execsql {SELECT rowid, * FROM t1}
+} {1 123 abcdefg {}}
+do_test bind-1.9 {
+ sqlite_reset $VM
+ sqlite_bind $VM 1 {456} normal
+ sqlite_step $VM N VALUES COLNAMES
+ execsql {SELECT rowid, * FROM t1}
+} {1 123 abcdefg {} 2 456 abcdefg {}}
+
+
+do_test bind-1.99 {
+ sqlite_finalize $VM
+} {}
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/btree.test b/usr/src/cmd/svc/configd/sqlite/test/btree.test
new file mode 100644
index 0000000000..9d1c4153d7
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/btree.test
@@ -0,0 +1,1023 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this script is the btree database backend.
+#
+# $Id: btree.test,v 1.15 2004/02/10 01:54:28 drh Exp $
+
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+if {[info commands btree_open]!="" && $SQLITE_PAGE_SIZE==1024
+ && $SQLITE_USABLE_SIZE==1024} {
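+# (These low-level tests run only when the btree test commands are
+# compiled in and the page size is the default 1024 bytes; the expected
+# file sizes below, such as 2048 after the first commit, assume exactly
+# that page size.)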
+
+# Basic functionality. Open and close a database.
+#
+do_test btree-1.1 {
+ file delete -force test1.bt
+ file delete -force test1.bt-journal
+ set rc [catch {btree_open test1.bt} ::b1]
+} {0}
+
+# The second element of the list returned by btree_pager_stats is the
+# number of pages currently checked out. We'll be checking this value
+# frequently during this test script, to make sure the btree library
+# is properly releasing the pages it checks out, and thus avoiding
+# page leaks.
+#
+do_test btree-1.1.1 {
+ lindex [btree_pager_stats $::b1] 1
+} {0}
+do_test btree-1.2 {
+ set rc [catch {btree_open test1.bt} ::b2]
+} {0}
+do_test btree-1.3 {
+ set rc [catch {btree_close $::b2} msg]
+ lappend rc $msg
+} {0 {}}
+
+# Do an insert and verify that the database file grows in size.
+#
+do_test btree-1.4 {
+ set rc [catch {btree_begin_transaction $::b1} msg]
+ lappend rc $msg
+} {0 {}}
+do_test btree-1.4.1 {
+ lindex [btree_pager_stats $::b1] 1
+} {1}
+do_test btree-1.5 {
+ set rc [catch {btree_cursor $::b1 2 1} ::c1]
+ if {$rc} {lappend rc $::c1}
+ set rc
+} {0}
+do_test btree-1.6 {
+ set rc [catch {btree_insert $::c1 one 1.00} msg]
+ lappend rc $msg
+} {0 {}}
+do_test btree-1.7 {
+ btree_key $::c1
+} {one}
+do_test btree-1.8 {
+ btree_data $::c1
+} {1.00}
+do_test btree-1.9 {
+ set rc [catch {btree_close_cursor $::c1} msg]
+ lappend rc $msg
+} {0 {}}
+do_test btree-1.10 {
+ set rc [catch {btree_commit $::b1} msg]
+ lappend rc $msg
+} {0 {}}
+do_test btree-1.11 {
+ file size test1.bt
+} {2048}
+do_test btree-1.12 {
+ lindex [btree_pager_stats $::b1] 1
+} {0}
+
+# Open a new cursor and attempt to read back the record that we wrote.
+#
+do_test btree-2.1 {
+ set rc [catch {btree_cursor $::b1 2 1} ::c1]
+ if {$rc} {lappend rc $::c1}
+ set rc
+} {0}
+do_test btree-2.2 {
+ btree_move_to $::c1 abc
+} {1}
+do_test btree-2.3 {
+ btree_move_to $::c1 xyz
+} {-1}
+do_test btree-2.4 {
+ btree_move_to $::c1 one
+} {0}
+do_test btree-2.5 {
+ btree_key $::c1
+} {one}
+do_test btree-2.6 {
+ btree_data $::c1
+} {1.00}
+do_test btree-2.7 {
+ lindex [btree_pager_stats $::b1] 1
+} {2}
+
+# Do some additional inserts
+#
+do_test btree-3.1 {
+ btree_begin_transaction $::b1
+ btree_insert $::c1 two 2.00
+ btree_key $::c1
+} {two}
+do_test btree-3.1.1 {
+ lindex [btree_pager_stats $::b1] 1
+} {2}
+do_test btree-3.2 {
+ btree_insert $::c1 three 3.00
+ btree_key $::c1
+} {three}
+do_test btree-3.4 {
+ btree_insert $::c1 four 4.00
+ btree_key $::c1
+} {four}
+do_test btree-3.5 {
+ btree_insert $::c1 five 5.00
+ btree_key $::c1
+} {five}
+do_test btree-3.6 {
+ btree_insert $::c1 six 6.00
+ btree_key $::c1
+} {six}
+#btree_page_dump $::b1 2
+do_test btree-3.7 {
+ set rc [btree_move_to $::c1 {}]
+ expr {$rc>0}
+} {1}
+do_test btree-3.8 {
+ btree_key $::c1
+} {five}
+do_test btree-3.9 {
+ btree_data $::c1
+} {5.00}
+do_test btree-3.10 {
+ btree_next $::c1
+ btree_key $::c1
+} {four}
+do_test btree-3.11 {
+ btree_data $::c1
+} {4.00}
+do_test btree-3.12 {
+ btree_next $::c1
+ btree_key $::c1
+} {one}
+do_test btree-3.13 {
+ btree_data $::c1
+} {1.00}
+do_test btree-3.14 {
+ btree_next $::c1
+ btree_key $::c1
+} {six}
+do_test btree-3.15 {
+ btree_data $::c1
+} {6.00}
+do_test btree-3.16 {
+ btree_next $::c1
+ btree_key $::c1
+} {three}
+do_test btree-3.17 {
+ btree_data $::c1
+} {3.00}
+do_test btree-3.18 {
+ btree_next $::c1
+ btree_key $::c1
+} {two}
+do_test btree-3.19 {
+ btree_data $::c1
+} {2.00}
+do_test btree-3.20 {
+ btree_next $::c1
+ btree_key $::c1
+} {}
+do_test btree-3.21 {
+ btree_data $::c1
+} {}
+
+# Commit the changes, reopen and reread the data
+#
+do_test btree-3.22 {
+ set rc [catch {btree_close_cursor $::c1} msg]
+ lappend rc $msg
+} {0 {}}
+do_test btree-3.22.1 {
+ lindex [btree_pager_stats $::b1] 1
+} {1}
+do_test btree-3.23 {
+ set rc [catch {btree_commit $::b1} msg]
+ lappend rc $msg
+} {0 {}}
+do_test btree-3.23.1 {
+ lindex [btree_pager_stats $::b1] 1
+} {0}
+do_test btree-3.24 {
+ file size test1.bt
+} {2048}
+do_test btree-3.25 {
+ set rc [catch {btree_cursor $::b1 2 1} ::c1]
+ if {$rc} {lappend rc $::c1}
+ set rc
+} {0}
+do_test btree-3.25.1 {
+ lindex [btree_pager_stats $::b1] 1
+} {2}
+do_test btree-3.26 {
+ set rc [btree_move_to $::c1 {}]
+ expr {$rc>0}
+} {1}
+do_test btree-3.27 {
+ btree_key $::c1
+} {five}
+do_test btree-3.28 {
+ btree_data $::c1
+} {5.00}
+do_test btree-3.29 {
+ btree_next $::c1
+ btree_key $::c1
+} {four}
+do_test btree-3.30 {
+ btree_data $::c1
+} {4.00}
+do_test btree-3.31 {
+ btree_next $::c1
+ btree_key $::c1
+} {one}
+do_test btree-3.32 {
+ btree_data $::c1
+} {1.00}
+do_test btree-3.33 {
+ btree_next $::c1
+ btree_key $::c1
+} {six}
+do_test btree-3.34 {
+ btree_data $::c1
+} {6.00}
+do_test btree-3.35 {
+ btree_next $::c1
+ btree_key $::c1
+} {three}
+do_test btree-3.36 {
+ btree_data $::c1
+} {3.00}
+do_test btree-3.37 {
+ btree_next $::c1
+ btree_key $::c1
+} {two}
+do_test btree-3.38 {
+ btree_data $::c1
+} {2.00}
+do_test btree-3.39 {
+ btree_next $::c1
+ btree_key $::c1
+} {}
+do_test btree-3.40 {
+ btree_data $::c1
+} {}
+do_test btree-3.41 {
+ lindex [btree_pager_stats $::b1] 1
+} {2}
+
+
+# Now try a delete
+#
+do_test btree-4.1 {
+ btree_begin_transaction $::b1
+ btree_move_to $::c1 one
+ btree_key $::c1
+} {one}
+do_test btree-4.1.1 {
+ lindex [btree_pager_stats $::b1] 1
+} {2}
+do_test btree-4.2 {
+ btree_delete $::c1
+} {}
+do_test btree-4.3 {
+ btree_key $::c1
+} {six}
+do_test btree-4.4 {
+ btree_next $::c1
+ btree_key $::c1
+} {six}
+do_test btree-4.5 {
+ btree_next $::c1
+ btree_key $::c1
+} {three}
+do_test btree-4.4 {
+ btree_move_to $::c1 {}
+ set r {}
+ while 1 {
+ set key [btree_key $::c1]
+ if {$key==""} break
+ lappend r $key
+ lappend r [btree_data $::c1]
+ btree_next $::c1
+ }
+ set r
+} {five 5.00 four 4.00 six 6.00 three 3.00 two 2.00}
+
+# Commit and make sure the delete is still there.
+#
+do_test btree-4.5 {
+ btree_commit $::b1
+ btree_move_to $::c1 {}
+ set r {}
+ while 1 {
+ set key [btree_key $::c1]
+ if {$key==""} break
+ lappend r $key
+ lappend r [btree_data $::c1]
+ btree_next $::c1
+ }
+ set r
+} {five 5.00 four 4.00 six 6.00 three 3.00 two 2.00}
+
+# Completely close the database and reopen it. Then check
+# the data again.
+#
+do_test btree-4.6 {
+ lindex [btree_pager_stats $::b1] 1
+} {2}
+do_test btree-4.7 {
+ btree_close_cursor $::c1
+ lindex [btree_pager_stats $::b1] 1
+} {0}
+do_test btree-4.8 {
+ btree_close $::b1
+ set ::b1 [btree_open test1.bt]
+ set ::c1 [btree_cursor $::b1 2 1]
+ lindex [btree_pager_stats $::b1] 1
+} {2}
+do_test btree-4.9 {
+ set r {}
+ btree_first $::c1
+ while 1 {
+ set key [btree_key $::c1]
+ if {$key==""} break
+ lappend r $key
+ lappend r [btree_data $::c1]
+ btree_next $::c1
+ }
+ set r
+} {five 5.00 four 4.00 six 6.00 three 3.00 two 2.00}
+
+# Try to read and write meta data
+#
+do_test btree-5.1 {
+ btree_get_meta $::b1
+} {0 0 0 0 0 0 0 0 0 0}
+do_test btree-5.2 {
+ set rc [catch {btree_update_meta $::b1 1 2 3 4 5 6 7 8 9 10} msg]
+ lappend rc $msg
+} {1 SQLITE_ERROR}
+do_test btree-5.3 {
+ btree_begin_transaction $::b1
+ set rc [catch {btree_update_meta $::b1 1 2 3 4 5 6 7 8 9 10} msg]
+ lappend rc $msg
+} {0 {}}
+do_test btree-5.4 {
+ btree_get_meta $::b1
+} {0 2 3 4 5 6 7 8 9 10}
+do_test btree-5.5 {
+ btree_close_cursor $::c1
+ btree_rollback $::b1
+ btree_get_meta $::b1
+} {0 0 0 0 0 0 0 0 0 0}
+do_test btree-5.6 {
+ btree_begin_transaction $::b1
+ btree_update_meta $::b1 999 10 20 30 40 50 60 70 80 90
+ btree_commit $::b1
+ btree_get_meta $::b1
+} {0 10 20 30 40 50 60 70 80 90}
+
+proc select_all {cursor} {
+ set r {}
+ btree_move_to $cursor {}
+ while 1 {
+ set key [btree_key $cursor]
+ if {$key==""} break
+ lappend r $key
+ lappend r [btree_data $cursor]
+ btree_next $cursor
+ }
+ return $r
+}
+proc select_keys {cursor} {
+ set r {}
+ btree_move_to $cursor {}
+ while 1 {
+ set key [btree_key $cursor]
+ if {$key==""} break
+ lappend r $key
+ btree_next $cursor
+ }
+ return $r
+}
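+# Hedged usage note: both helpers walk the whole table from the start and
+# return flat Tcl lists; they are exercised for real in btree-6.3.1, btree-6.4
+# and btree-9.2 below. For example, with the single-row table built in
+# section 6:
+#
+#   select_keys $::c2   ;# => ten
+#   select_all  $::c2   ;# => ten 10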
+
+# Try to create a new table in the database file
+#
+do_test btree-6.1 {
+ set rc [catch {btree_create_table $::b1} msg]
+ lappend rc $msg
+} {1 SQLITE_ERROR}
+do_test btree-6.2 {
+ btree_begin_transaction $::b1
+ set ::t2 [btree_create_table $::b1]
+} {3}
+do_test btree-6.2.1 {
+ lindex [btree_pager_stats $::b1] 1
+} {1}
+do_test btree-6.2.2 {
+ set ::c2 [btree_cursor $::b1 $::t2 1]
+ lindex [btree_pager_stats $::b1] 1
+} {2}
+do_test btree-6.2.3 {
+ btree_insert $::c2 ten 10
+ btree_key $::c2
+} {ten}
+do_test btree-6.3 {
+ btree_commit $::b1
+ set ::c1 [btree_cursor $::b1 2 1]
+ lindex [btree_pager_stats $::b1] 1
+} {3}
+do_test btree-6.3.1 {
+ select_all $::c1
+} {five 5.00 four 4.00 six 6.00 three 3.00 two 2.00}
+#btree_page_dump $::b1 3
+do_test btree-6.4 {
+ select_all $::c2
+} {ten 10}
+
+# Drop the new table, then create it again anew.
+#
+do_test btree-6.5 {
+ btree_begin_transaction $::b1
+} {}
+do_test btree-6.6 {
+ btree_close_cursor $::c2
+} {}
+do_test btree-6.6.1 {
+ lindex [btree_pager_stats $::b1] 1
+} {2}
+do_test btree-6.7 {
+ btree_drop_table $::b1 $::t2
+} {}
+do_test btree-6.7.1 {
+ lindex [btree_get_meta $::b1] 0
+} {1}
+do_test btree-6.8 {
+ set ::t2 [btree_create_table $::b1]
+} {3}
+do_test btree-6.8.1 {
+ lindex [btree_get_meta $::b1] 0
+} {0}
+do_test btree-6.9 {
+ set ::c2 [btree_cursor $::b1 $::t2 1]
+ lindex [btree_pager_stats $::b1] 1
+} {3}
+
+do_test btree-6.9.1 {
+ btree_move_to $::c2 {}
+ btree_key $::c2
+} {}
+
+# If we drop table 2 it just clears the table. Table 2 always exists.
+#
+do_test btree-6.10 {
+ btree_close_cursor $::c1
+ btree_drop_table $::b1 2
+ set ::c1 [btree_cursor $::b1 2 1]
+ btree_move_to $::c1 {}
+ btree_key $::c1
+} {}
+do_test btree-6.11 {
+ btree_commit $::b1
+ select_all $::c1
+} {}
+do_test btree-6.12 {
+ select_all $::c2
+} {}
+do_test btree-6.13 {
+ btree_close_cursor $::c2
+ lindex [btree_pager_stats $::b1] 1
+} {2}
+
+# Check to see that pages defragment properly. To do this test we will
+#
+# 1. Fill the first page of table 2 with data.
+# 2. Delete every other entry of table 2.
+# 3. Insert a single entry that requires more contiguous
+# space than is available.
+#
+do_test btree-7.1 {
+ btree_begin_transaction $::b1
+} {}
+catch {unset key}
+catch {unset data}
+do_test btree-7.2 {
+ for {set i 0} {$i<36} {incr i} {
+ set key [format %03d $i]
+ set data "*** $key ***"
+ btree_insert $::c1 $key $data
+ }
+ lrange [btree_cursor_dump $::c1] 4 5
+} {8 1}
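+# Hedged note: the expected values in this section read elements 4 and 5 of
+# btree_cursor_dump. Judging only from those values, element 4 looks like the
+# number of free bytes on the cursor's page and element 5 like the number of
+# free blocks (holes); the interpretation is inferred, not documented here.
+#
+#   set dump [btree_cursor_dump $::c1]
+#   puts "free bytes:  [lindex $dump 4]"
+#   puts "free blocks: [lindex $dump 5]"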
+do_test btree-7.3 {
+ btree_move_to $::c1 000
+ while {[btree_key $::c1]!=""} {
+ btree_delete $::c1
+ btree_next $::c1
+ btree_next $::c1
+ }
+ lrange [btree_cursor_dump $::c1] 4 5
+} {512 19}
+#btree_page_dump $::b1 2
+do_test btree-7.4 {
+ btree_insert $::c1 018 {*** 018 ***+++}
+ btree_key $::c1
+} {018}
+do_test btree-7.5 {
+ lrange [btree_cursor_dump $::c1] 4 5
+} {480 1}
+#btree_page_dump $::b1 2
+
+# Delete an entry to make a hole of a known size, then immediately recreate
+# that entry. This tests the path into allocateSpace where the hole exactly
+# matches the size of the desired space.
+#
+do_test btree-7.6 {
+ btree_move_to $::c1 007
+ btree_delete $::c1
+ btree_move_to $::c1 011
+ btree_delete $::c1
+} {}
+do_test btree-7.7 {
+ lindex [btree_cursor_dump $::c1] 5
+} {3}
+#btree_page_dump $::b1 2
+do_test btree-7.8 {
+ btree_insert $::c1 007 {*** 007 ***}
+ lindex [btree_cursor_dump $::c1] 5
+} {2}
+#btree_page_dump $::b1 2
+
+# Make sure the freeSpace() routine properly coalesces adjacent memory blocks
+#
+do_test btree-7.9 {
+ btree_move_to $::c1 013
+ btree_delete $::c1
+ lrange [btree_cursor_dump $::c1] 4 5
+} {536 2}
+do_test btree-7.10 {
+ btree_move_to $::c1 009
+ btree_delete $::c1
+ lrange [btree_cursor_dump $::c1] 4 5
+} {564 2}
+do_test btree-7.11 {
+ btree_move_to $::c1 018
+ btree_delete $::c1
+ lrange [btree_cursor_dump $::c1] 4 5
+} {596 2}
+do_test btree-7.13 {
+ btree_move_to $::c1 033
+ btree_delete $::c1
+ lrange [btree_cursor_dump $::c1] 4 5
+} {624 3}
+do_test btree-7.14 {
+ btree_move_to $::c1 035
+ btree_delete $::c1
+ lrange [btree_cursor_dump $::c1] 4 5
+} {652 2}
+#btree_page_dump $::b1 2
+do_test btree-7.15 {
+ lindex [btree_pager_stats $::b1] 1
+} {2}
+
+# Check to see that data on overflow pages works correctly.
+#
+do_test btree-8.1 {
+ set data "*** This is a very long key "
+ while {[string length $data]<256} {append data $data}
+ set ::data $data
+ btree_insert $::c1 020 $data
+} {}
+#btree_page_dump $::b1 2
+do_test btree-8.1.1 {
+ lindex [btree_pager_stats $::b1] 1
+} {2}
+#btree_pager_ref_dump $::b1
+do_test btree-8.2 {
+ string length [btree_data $::c1]
+} [string length $::data]
+do_test btree-8.3 {
+ btree_data $::c1
+} $::data
+do_test btree-8.4 {
+ btree_delete $::c1
+} {}
+do_test btree-8.4.1 {
+ lindex [btree_get_meta $::b1] 0
+} [expr {int(([string length $::data]-238+1019)/1020)}]
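+# Hedged arithmetic note on the expected value above: after the delete, the
+# free-page count in the meta data appears to equal the number of overflow
+# pages the record occupied. Assuming roughly 238 payload bytes stay on the
+# main page and each overflow page holds 1020 usable bytes (which is what the
+# formula implies for these 1024-byte pages), the 448-byte string built in
+# btree-8.1 needs int((448-238+1019)/1020) = 1 overflow page.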
+do_test btree-8.5 {
+ set data "*** This is an even longer key"
+ while {[string length $data]<2000} {append data $data}
+ set ::data $data
+ btree_insert $::c1 020 $data
+} {}
+do_test btree-8.6 {
+ string length [btree_data $::c1]
+} [string length $::data]
+do_test btree-8.7 {
+ btree_data $::c1
+} $::data
+do_test btree-8.8 {
+ btree_commit $::b1
+ btree_data $::c1
+} $::data
+do_test btree-8.9 {
+ btree_close_cursor $::c1
+ btree_close $::b1
+ set ::b1 [btree_open test1.bt]
+ set ::c1 [btree_cursor $::b1 2 1]
+ btree_move_to $::c1 020
+ btree_data $::c1
+} $::data
+do_test btree-8.10 {
+ btree_begin_transaction $::b1
+ btree_delete $::c1
+} {}
+do_test btree-8.11 {
+ lindex [btree_get_meta $::b1] 0
+} [expr {int(([string length $::data]-238+1019)/1020)}]
+
+# Now check out keys on overflow pages.
+#
+do_test btree-8.12 {
+ set ::keyprefix "This is a long prefix to a key "
+ while {[string length $::keyprefix]<256} {append ::keyprefix $::keyprefix}
+ btree_close_cursor $::c1
+ btree_drop_table $::b1 2
+ lindex [btree_get_meta $::b1] 0
+} {4}
+do_test btree-8.12.1 {
+ set ::c1 [btree_cursor $::b1 2 1]
+ btree_insert $::c1 ${::keyprefix}1 1
+ btree_data $::c1
+} {1}
+do_test btree-8.13 {
+ btree_key $::c1
+} ${keyprefix}1
+do_test btree-8.14 {
+ btree_insert $::c1 ${::keyprefix}2 2
+ btree_insert $::c1 ${::keyprefix}3 3
+ btree_key $::c1
+} ${keyprefix}3
+do_test btree-8.15 {
+ btree_move_to $::c1 ${::keyprefix}2
+ btree_data $::c1
+} {2}
+do_test btree-8.16 {
+ btree_move_to $::c1 ${::keyprefix}1
+ btree_data $::c1
+} {1}
+do_test btree-8.17 {
+ btree_move_to $::c1 ${::keyprefix}3
+ btree_data $::c1
+} {3}
+do_test btree-8.18 {
+ lindex [btree_get_meta $::b1] 0
+} {1}
+do_test btree-8.19 {
+ btree_move_to $::c1 ${::keyprefix}2
+ btree_key $::c1
+} ${::keyprefix}2
+#btree_page_dump $::b1 2
+do_test btree-8.20 {
+ btree_delete $::c1
+ btree_next $::c1
+ btree_key $::c1
+} ${::keyprefix}3
+#btree_page_dump $::b1 2
+do_test btree-8.21 {
+ lindex [btree_get_meta $::b1] 0
+} {2}
+do_test btree-8.22 {
+ lindex [btree_pager_stats $::b1] 1
+} {2}
+do_test btree-8.23 {
+ btree_close_cursor $::c1
+ btree_drop_table $::b1 2
+ set ::c1 [btree_cursor $::b1 2 1]
+ lindex [btree_get_meta $::b1] 0
+} {4}
+do_test btree-8.24 {
+ lindex [btree_pager_stats $::b1] 1
+} {2}
+#btree_pager_ref_dump $::b1
+
+# Check page splitting logic
+#
+do_test btree-9.1 {
+ for {set i 1} {$i<=19} {incr i} {
+ set key [format %03d $i]
+ set data "*** $key *** $key *** $key *** $key ***"
+ btree_insert $::c1 $key $data
+ }
+} {}
+#btree_tree_dump $::b1 2
+#btree_pager_ref_dump $::b1
+#set pager_refinfo_enable 1
+do_test btree-9.2 {
+ btree_insert $::c1 020 {*** 020 *** 020 *** 020 *** 020 ***}
+ select_keys $::c1
+} {001 002 003 004 005 006 007 008 009 010 011 012 013 014 015 016 017 018 019 020}
+#btree_page_dump $::b1 5
+#btree_page_dump $::b1 2
+#btree_page_dump $::b1 7
+#btree_pager_ref_dump $::b1
+#set pager_refinfo_enable 0
+
+# The previous "select_keys" command left the cursor pointing at the root
+# page. So there should be only two pages checked out: page 2 (the root) and
+# page 1.
+do_test btree-9.2.1 {
+ lindex [btree_pager_stats $::b1] 1
+} {2}
+for {set i 1} {$i<=20} {incr i} {
+ do_test btree-9.3.$i.1 [subst {
+ btree_move_to $::c1 [format %03d $i]
+ btree_key $::c1
+ }] [format %03d $i]
+ do_test btree-9.3.$i.2 [subst {
+ btree_move_to $::c1 [format %03d $i]
+ string range \[btree_data $::c1\] 0 10
+ }] "*** [format %03d $i] ***"
+}
+do_test btree-9.4.1 {
+ lindex [btree_pager_stats $::b1] 1
+} {3}
+
+# Check the page joining logic.
+#
+#btree_page_dump $::b1 2
+#btree_pager_ref_dump $::b1
+do_test btree-9.4.2 {
+ btree_move_to $::c1 005
+ btree_delete $::c1
+} {}
+#btree_page_dump $::b1 2
+for {set i 1} {$i<=19} {incr i} {
+ if {$i==5} continue
+ do_test btree-9.5.$i.1 [subst {
+ btree_move_to $::c1 [format %03d $i]
+ btree_key $::c1
+ }] [format %03d $i]
+ do_test btree-9.5.$i.2 [subst {
+ btree_move_to $::c1 [format %03d $i]
+ string range \[btree_data $::c1\] 0 10
+ }] "*** [format %03d $i] ***"
+}
+#btree_pager_ref_dump $::b1
+do_test btree-9.6 {
+ btree_close_cursor $::c1
+ lindex [btree_pager_stats $::b1] 1
+} {1}
+do_test btree-9.7 {
+ btree_rollback $::b1
+ lindex [btree_pager_stats $::b1] 1
+} {0}
+
+# Create a tree of depth two. That is, there is a single divider entry
+# on the root page and two leaf pages. Then delete the divider entry and
+# see what happens.
+#
+do_test btree-10.1 {
+ btree_begin_transaction $::b1
+ btree_drop_table $::b1 2
+ lindex [btree_pager_stats $::b1] 1
+} {1}
+do_test btree-10.2 {
+ set ::c1 [btree_cursor $::b1 2 1]
+ lindex [btree_pager_stats $::b1] 1
+} {2}
+do_test btree-10.3 {
+ for {set i 1} {$i<=20} {incr i} {
+ set key [format %03d $i]
+ set data "*** $key *** $key *** $key *** $key ***"
+ btree_insert $::c1 $key $data
+ }
+ select_keys $::c1
+} {001 002 003 004 005 006 007 008 009 010 011 012 013 014 015 016 017 018 019 020}
+#btree_page_dump $::b1 7
+#btree_page_dump $::b1 2
+#btree_page_dump $::b1 6
+do_test btree-10.4 {
+ btree_move_to $::c1 011
+ btree_delete $::c1
+ select_keys $::c1
+} {001 002 003 004 005 006 007 008 009 010 012 013 014 015 016 017 018 019 020}
+#btree_tree_dump $::b1 2
+#btree_pager_ref_dump $::b1
+for {set i 1} {$i<=20} {incr i} {
+ do_test btree-10.5.$i {
+ btree_move_to $::c1 [format %03d $i]
+ lindex [btree_pager_stats $::b1] 1
+ } {2}
+ #btree_pager_ref_dump $::b1
+ #btree_tree_dump $::b1 2
+}
+
+# Create a tree with lots more pages
+#
+catch {unset ::data}
+catch {unset ::key}
+for {set i 21} {$i<=1000} {incr i} {
+ do_test btree-11.1.$i.1 {
+ set key [format %03d $i]
+ set ::data "*** $key *** $key *** $key *** $key ***"
+ btree_insert $::c1 $key $data
+ btree_key $::c1
+ } [format %03d $i]
+ do_test btree-11.1.$i.2 {
+ btree_data $::c1
+ } $::data
+ set ::key [format %03d [expr {$i/2}]]
+ if {$::key=="011"} {set ::key 010}
+ do_test btree-11.1.$i.3 {
+ btree_move_to $::c1 $::key
+ btree_key $::c1
+ } $::key
+}
+catch {unset ::data}
+catch {unset ::key}
+
+# Make sure our reference count is still correct.
+#
+do_test btree-11.2 {
+ btree_close_cursor $::c1
+ lindex [btree_pager_stats $::b1] 1
+} {1}
+do_test btree-11.3 {
+ set ::c1 [btree_cursor $::b1 2 1]
+ lindex [btree_pager_stats $::b1] 1
+} {2}
+#btree_page_dump $::b1 2
+
+# Delete the dividers on the root page
+#
+do_test btree-11.4 {
+ btree_move_to $::c1 257
+ btree_delete $::c1
+ btree_next $::c1
+ btree_key $::c1
+} {258}
+do_test btree-11.4.1 {
+ btree_move_to $::c1 256
+ btree_key $::c1
+} {256}
+do_test btree-11.4.2 {
+ btree_move_to $::c1 258
+ btree_key $::c1
+} {258}
+do_test btree-11.4.3 {
+ btree_move_to $::c1 259
+ btree_key $::c1
+} {259}
+do_test btree-11.4.4 {
+ btree_move_to $::c1 257
+ set n [btree_key $::c1]
+ expr {$n==256||$n==258}
+} {1}
+do_test btree-11.5 {
+ btree_move_to $::c1 513
+ btree_delete $::c1
+ btree_next $::c1
+ btree_key $::c1
+} {514}
+do_test btree-11.5.1 {
+ btree_move_to $::c1 512
+ btree_key $::c1
+} {512}
+do_test btree-11.5.2 {
+ btree_move_to $::c1 514
+ btree_key $::c1
+} {514}
+do_test btree-11.5.3 {
+ btree_move_to $::c1 515
+ btree_key $::c1
+} {515}
+do_test btree-11.5.4 {
+ btree_move_to $::c1 513
+ set n [btree_key $::c1]
+ expr {$n==512||$n==514}
+} {1}
+do_test btree-11.6 {
+ btree_move_to $::c1 769
+ btree_delete $::c1
+ btree_next $::c1
+ btree_key $::c1
+} {770}
+do_test btree-11.6.1 {
+ btree_move_to $::c1 768
+ btree_key $::c1
+} {768}
+do_test btree-11.6.2 {
+ btree_move_to $::c1 771
+ btree_key $::c1
+} {771}
+do_test btree-11.6.3 {
+ btree_move_to $::c1 770
+ btree_key $::c1
+} {770}
+do_test btree-11.6.4 {
+ btree_move_to $::c1 769
+ set n [btree_key $::c1]
+ expr {$n==768||$n==770}
+} {1}
+#btree_page_dump $::b1 2
+#btree_page_dump $::b1 25
+
+# Change the data on an intermediate node such that the node becomes overfull
+# and has to split. We happen to know from the btree_page_dump output above
+# that intermediate nodes exist at keys 337, 401 and 465.
+#
+catch {unset ::data}
+set ::data {This is going to be a very long data segment}
+append ::data $::data
+append ::data $::data
+do_test btree-12.1 {
+ btree_insert $::c1 337 $::data
+ btree_data $::c1
+} $::data
+do_test btree-12.2 {
+ btree_insert $::c1 401 $::data
+ btree_data $::c1
+} $::data
+do_test btree-12.3 {
+ btree_insert $::c1 465 $::data
+ btree_data $::c1
+} $::data
+do_test btree-12.4 {
+ btree_move_to $::c1 337
+ btree_key $::c1
+} {337}
+do_test btree-12.5 {
+ btree_data $::c1
+} $::data
+do_test btree-12.6 {
+ btree_next $::c1
+ btree_key $::c1
+} {338}
+do_test btree-12.7 {
+ btree_move_to $::c1 464
+ btree_key $::c1
+} {464}
+do_test btree-12.8 {
+ btree_next $::c1
+ btree_data $::c1
+} $::data
+do_test btree-12.9 {
+ btree_next $::c1
+ btree_key $::c1
+} {466}
+do_test btree-12.10 {
+ btree_move_to $::c1 400
+ btree_key $::c1
+} {400}
+do_test btree-12.11 {
+ btree_next $::c1
+ btree_data $::c1
+} $::data
+do_test btree-12.12 {
+ btree_next $::c1
+ btree_key $::c1
+} {402}
+do_test btree-13.1 {
+ btree_integrity_check $::b1 2 3
+} {}
+
+# To Do:
+#
+# 1. Do some deletes from the 3-layer tree
+# 2. Commit and reopen the database
+# 3. Read every 15th entry and make sure it works
+# 4. Implement btree_sanity and put it throughout this script
+#
+
+do_test btree-15.98 {
+ btree_close_cursor $::c1
+ lindex [btree_pager_stats $::b1] 1
+} {1}
+do_test btree-15.99 {
+ btree_rollback $::b1
+ lindex [btree_pager_stats $::b1] 1
+} {0}
+btree_pager_ref_dump $::b1
+
+do_test btree-99.1 {
+ btree_close $::b1
+} {}
+catch {unset data}
+catch {unset key}
+
+} ;# end if( has btree_open command and 1024-byte pages );
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/btree2.test b/usr/src/cmd/svc/configd/sqlite/test/btree2.test
new file mode 100644
index 0000000000..45c0203a52
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/btree2.test
@@ -0,0 +1,449 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for the SQLite library. The
+# focus of this script is the btree database backend.
+#
+# $Id: btree2.test,v 1.10 2002/02/19 13:39:23 drh Exp $
+
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+if {[info commands btree_open]!=""} {
+
+# Create a new database file containing no entries. The database should
+# contain 5 tables:
+#
+# 2 The descriptor table
+# 3 The foreground table
+# 4 The background table
+# 5 The long key table
+# 6 The long data table
+#
+# An explanation for what all these tables are used for is provided below.
+#
+do_test btree2-1.1 {
+ expr srand(1)
+ file delete -force test2.bt
+ file delete -force test2.bt-journal
+ set ::b [btree_open test2.bt]
+ btree_begin_transaction $::b
+ btree_create_table $::b
+} {3}
+do_test btree2-1.2 {
+ btree_create_table $::b
+} {4}
+do_test btree2-1.3 {
+ btree_create_table $::b
+} {5}
+do_test btree2-1.4 {
+ btree_create_table $::b
+} {6}
+do_test btree2-1.5 {
+ set ::c2 [btree_cursor $::b 2 1]
+ btree_insert $::c2 {one} {1}
+ btree_delete $::c2
+ btree_close_cursor $::c2
+ btree_commit $::b
+ btree_integrity_check $::b 2 3 4 5 6
+} {}
+
+# This test module works by making lots of pseudo-random changes to a
+# database while simultaneously maintaining an invariant on that database.
+# Periodically, the script does a sanity check on the database and verifies
+# that the invariant is satisfied.
+#
+# The invariant is as follows:
+#
+# 1. The descriptor table always contains 2 entries. An entry keyed by
+# "N" is the number of elements in the foreground and background tables
+# combined. The entry keyed by "L" is the number of digits in the keys
+# for the foreground and background tables.
+#
+# 2. The union of the foreground and background tables consists of N entries
+# where each entry has an L-digit key. (Actually, some keys can be longer
+# than L characters, but they always start with L digits.) The keys
+# cover all integers between 1 and N. Whenever an entry is added to
+# the foreground it is removed from the background and vice versa.
+#
+# 3. Some entries in the foreground and background tables have keys that
+# begin with an L-digit number but are followed by additional characters.
+# For each such entry there is a corresponding entry in the long key
+# table. The long key table entry has a key which is just the L-digit
+# number and data which is the length of the key in the foreground and
+# background tables.
+#
+# 4. The data for both foreground and background entries is usually a
+# short string. But some entries have long data strings. For each
+# such entry there is an entry in the long data table. The key to the
+# long data table is an L-digit number. (The extension on long keys
+# is omitted.) The data is the number of characters in the data of the
+# foreground or background entry.
+#
+# The following function builds a database that satisfies all of the above
+# invariants.
+#
+proc build_db {N L} {
+ for {set i 2} {$i<=6} {incr i} {
+ catch {btree_close_cursor [set ::c$i]}
+ btree_clear_table $::b $i
+ set ::c$i [btree_cursor $::b $i 1]
+ }
+ btree_insert $::c2 N $N
+ btree_insert $::c2 L $L
+ set format %0${L}d
+ for {set i 1} {$i<=$N} {incr i} {
+ set key [format $format $i]
+ set data $key
+ btree_insert $::c3 $key $data
+ }
+}
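+# Hedged example of the state build_db leaves behind: build_db 10 2 stores
+# N=10 and L=2 in the descriptor table and fills the foreground table with
+# entries 01..10, each entry's data equal to its key; the background,
+# long-key and long-data tables start out empty.
+#
+#   build_db 10 2
+#   btree_move_to $::c2 N  ; btree_data $::c2   ;# => 10
+#   btree_move_to $::c3 01 ; btree_data $::c3   ;# => 01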
+
+# Given a base key number and a length, construct the full text of the key
+# or data.
+#
+proc make_payload {keynum L len} {
+ set key [format %0${L}d $keynum]
+ set r $key
+ set i 1
+ while {[string length $r]<$len} {
+ append r " ($i) $key"
+ incr i
+ }
+ return [string range $r 0 [expr {$len-1}]]
+}
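+# Hedged worked example: make_payload 7 2 12 formats the base key as "07",
+# appends " ($i) 07" until the string is long enough, and then truncates to
+# the requested length:
+#
+#   make_payload 7 2 12   ;# => "07 (1) 07 (2"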
+
+# Verify the invariants on the database. Return an empty string on
+# success or an error message if something is amiss.
+#
+proc check_invariants {} {
+ set ck [btree_integrity_check $::b 2 3 4 5 6]
+ if {$ck!=""} {
+ puts "\n*** SANITY:\n$ck"
+ exit
+ return $ck
+ }
+ btree_move_to $::c3 {}
+ btree_move_to $::c4 {}
+ btree_move_to $::c2 N
+ set N [btree_data $::c2]
+ btree_move_to $::c2 L
+ set L [btree_data $::c2]
+ set LM1 [expr {$L-1}]
+ for {set i 1} {$i<=$N} {incr i} {
+ set key [btree_key $::c3]
+ if {[scan $key %d k]<1} {set k 0}
+ if {$k!=$i} {
+ set key [btree_key $::c4]
+ if {[scan $key %d k]<1} {set k 0}
+ if {$k!=$i} {
+ # puts "MISSING $i"
+ # puts {Page 3:}; btree_page_dump $::b 3
+ # puts {Page 4:}; btree_page_dump $::b 4
+ # exit
+ return "Key $i is missing from both foreground and background"
+ }
+ set data [btree_data $::c4]
+ btree_next $::c4
+ } else {
+ set data [btree_data $::c3]
+ btree_next $::c3
+ }
+ set skey [string range $key 0 $LM1]
+ if {[btree_move_to $::c5 $skey]==0} {
+ set keylen [btree_data $::c5]
+ } else {
+ set keylen $L
+ }
+ if {[string length $key]!=$keylen} {
+ return "Key $i is the wrong size.\
+ Is \"$key\" but should be \"[make_payload $k $L $keylen]\""
+ }
+ if {[make_payload $k $L $keylen]!=$key} {
+ return "Key $i has an invalid extension"
+ }
+ if {[btree_move_to $::c6 $skey]==0} {
+ set datalen [btree_data $::c6]
+ } else {
+ set datalen $L
+ }
+ if {[string length $data]!=$datalen} {
+ return "Data for $i is the wrong size.\
+ Is [string length $data] but should be $datalen"
+ }
+ if {[make_payload $k $L $datalen]!=$data} {
+ return "Entry $i has an incorrect data"
+ }
+ }
+}
+
+# Make random changes to the database such that each change preserves
+# the invariants. The number of changes is $n*N where N is the parameter
+# from the descriptor table. Each change begins with a random key. The
+# entry with that key is put in the foreground table with probability
+# $I and in the background table with probability (1.0-$I). It gets
+# a long key with probability $K and long data with probability $D.
+#
+set chngcnt 0
+proc random_changes {n I K D} {
+ btree_move_to $::c2 N
+ set N [btree_data $::c2]
+ btree_move_to $::c2 L
+ set L [btree_data $::c2]
+ set LM1 [expr {$L-1}]
+ set total [expr {int($N*$n)}]
+ set format %0${L}d
+ for {set i 0} {$i<$total} {incr i} {
+ set k [expr {int(rand()*$N)+1}]
+ set insert [expr {rand()<=$I}]
+ set longkey [expr {rand()<=$K}]
+ set longdata [expr {rand()<=$D}]
+ # incr ::chngcnt
+ # if {$::chngcnt==251} {btree_tree_dump $::b 3}
+ # puts "CHANGE $::chngcnt: $k $insert $longkey $longdata"
+ if {$longkey} {
+ set x [expr {rand()}]
+ set keylen [expr {int($x*$x*$x*$x*3000)+10}]
+ } else {
+ set keylen $L
+ }
+ set key [make_payload $k $L $keylen]
+ if {$longdata} {
+ set x [expr {rand()}]
+ set datalen [expr {int($x*$x*$x*$x*3000)+10}]
+ } else {
+ set datalen $L
+ }
+ set data [make_payload $k $L $datalen]
+ set basekey [format $format $k]
+ if {[set c [btree_move_to $::c3 $basekey]]==0} {
+ btree_delete $::c3
+ } else {
+ if {$c<0} {btree_next $::c3}
+ if {[string match $basekey* [btree_key $::c3]]} {
+ btree_delete $::c3
+ }
+ }
+ if {[set c [btree_move_to $::c4 $basekey]]==0} {
+ btree_delete $::c4
+ } else {
+ if {$c<0} {btree_next $::c4}
+ if {[string match $basekey* [btree_key $::c4]]} {
+ btree_delete $::c4
+ }
+ }
+ if {[scan [btree_key $::c4] %d kx]<1} {set kx -1}
+ if {$kx==$k} {
+ btree_delete $::c4
+ }
+ if {$insert} {
+ btree_insert $::c3 $key $data
+ } else {
+ btree_insert $::c4 $key $data
+ }
+ if {$longkey} {
+ btree_insert $::c5 $basekey $keylen
+ } elseif {[btree_move_to $::c5 $basekey]==0} {
+ btree_delete $::c5
+ }
+ if {$longdata} {
+ btree_insert $::c6 $basekey $datalen
+ } elseif {[btree_move_to $::c6 $basekey]==0} {
+ btree_delete $::c6
+ }
+ # set ck [btree_integrity_check $::b 2 3 4 5 6]
+ # if {$ck!=""} {
+ # puts "\nSANITY CHECK FAILED!\n$ck"
+ # exit
+ # }
+ # puts "PAGE 3:"; btree_page_dump $::b 3
+ # puts "PAGE 4:"; btree_page_dump $::b 4
+ }
+}
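+# Hedged usage note: random_changes 0.5 0.5 0.1 0.1 (the first parameter row
+# used below) performs int(N*0.5) changes; each touched key is reinserted into
+# the foreground table with probability 0.5 (otherwise the background table),
+# and gets a long key and/or long data with probability 0.1 each, all while
+# keeping the invariants described above.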
+
+# Repeat this test sequence on database of various sizes
+#
+set testno 2
+foreach {N L} {
+ 10 2
+ 50 2
+ 200 3
+ 2000 5
+} {
+ puts "**** N=$N L=$L ****"
+ set hash [md5file test2.bt]
+ do_test btree2-$testno.1 [subst -nocommands {
+ set ::c2 [btree_cursor $::b 2 1]
+ set ::c3 [btree_cursor $::b 3 1]
+ set ::c4 [btree_cursor $::b 4 1]
+ set ::c5 [btree_cursor $::b 5 1]
+ set ::c6 [btree_cursor $::b 6 1]
+ btree_begin_transaction $::b
+ build_db $N $L
+ check_invariants
+ }] {}
+ do_test btree2-$testno.2 {
+ btree_close_cursor $::c2
+ btree_close_cursor $::c3
+ btree_close_cursor $::c4
+ btree_close_cursor $::c5
+ btree_close_cursor $::c6
+ btree_rollback $::b
+ md5file test2.bt
+ } $hash
+ do_test btree2-$testno.3 [subst -nocommands {
+ btree_begin_transaction $::b
+ set ::c2 [btree_cursor $::b 2 1]
+ set ::c3 [btree_cursor $::b 3 1]
+ set ::c4 [btree_cursor $::b 4 1]
+ set ::c5 [btree_cursor $::b 5 1]
+ set ::c6 [btree_cursor $::b 6 1]
+ build_db $N $L
+ check_invariants
+ }] {}
+ do_test btree2-$testno.4 {
+ btree_commit $::b
+ check_invariants
+ } {}
+ do_test btree2-$testno.5 {
+ lindex [btree_pager_stats $::b] 1
+ } {6}
+ do_test btree2-$testno.6 {
+ btree_close_cursor $::c2
+ btree_close_cursor $::c3
+ btree_close_cursor $::c4
+ btree_close_cursor $::c5
+ btree_close_cursor $::c6
+ lindex [btree_pager_stats $::b] 1
+ } {0}
+ do_test btree2-$testno.7 {
+ btree_close $::b
+ } {}
+after 100
+ # For each database size, run various change tests.
+ #
+ set num2 1
+ foreach {n I K D} {
+ 0.5 0.5 0.1 0.1
+ 1.0 0.2 0.1 0.1
+ 1.0 0.8 0.1 0.1
+ 2.0 0.0 0.1 0.1
+ 2.0 1.0 0.1 0.1
+ 2.0 0.0 0.0 0.0
+ 2.0 1.0 0.0 0.0
+ } {
+ set testid btree2-$testno.8.$num2
+ set hash [md5file test2.bt]
+ do_test $testid.0 {
+ set ::b [btree_open test2.bt]
+ set ::c2 [btree_cursor $::b 2 1]
+ set ::c3 [btree_cursor $::b 3 1]
+ set ::c4 [btree_cursor $::b 4 1]
+ set ::c5 [btree_cursor $::b 5 1]
+ set ::c6 [btree_cursor $::b 6 1]
+ check_invariants
+ } {}
+ set cnt 6
+ for {set i 2} {$i<=6} {incr i} {
+ if {[lindex [btree_cursor_dump [set ::c$i]] 0]!=$i} {incr cnt}
+ }
+ do_test $testid.1 {
+ btree_begin_transaction $::b
+ lindex [btree_pager_stats $::b] 1
+ } $cnt
+ # exec cp test2.bt test2.bt.bu1
+ do_test $testid.2 [subst {
+ random_changes $n $I $K $D
+ }] {}
+ do_test $testid.3 {
+ check_invariants
+ } {}
+ do_test $testid.4 {
+ btree_close_cursor $::c2
+ btree_close_cursor $::c3
+ btree_close_cursor $::c4
+ btree_close_cursor $::c5
+ btree_close_cursor $::c6
+ btree_rollback $::b
+ md5file test2.bt
+ } $hash
+ # exec cp test2.bt test2.bt.bu2
+ btree_begin_transaction $::b
+ set ::c2 [btree_cursor $::b 2 1]
+ set ::c3 [btree_cursor $::b 3 1]
+ set ::c4 [btree_cursor $::b 4 1]
+ set ::c5 [btree_cursor $::b 5 1]
+ set ::c6 [btree_cursor $::b 6 1]
+ do_test $testid.5 [subst {
+ random_changes $n $I $K $D
+ }] {}
+ do_test $testid.6 {
+ check_invariants
+ } {}
+ do_test $testid.7 {
+ btree_commit $::b
+ check_invariants
+ } {}
+ set hash [md5file test2.bt]
+ do_test $testid.8 {
+ btree_close_cursor $::c2
+ btree_close_cursor $::c3
+ btree_close_cursor $::c4
+ btree_close_cursor $::c5
+ btree_close_cursor $::c6
+ lindex [btree_pager_stats $::b] 1
+ } {0}
+ do_test $testid.9 {
+ btree_close $::b
+ set ::b [btree_open test2.bt]
+ set ::c2 [btree_cursor $::b 2 1]
+ set ::c3 [btree_cursor $::b 3 1]
+ set ::c4 [btree_cursor $::b 4 1]
+ set ::c5 [btree_cursor $::b 5 1]
+ set ::c6 [btree_cursor $::b 6 1]
+ check_invariants
+ } {}
+ do_test $testid.10 {
+ btree_close_cursor $::c2
+ btree_close_cursor $::c3
+ btree_close_cursor $::c4
+ btree_close_cursor $::c5
+ btree_close_cursor $::c6
+ lindex [btree_pager_stats $::b] 1
+ } {0}
+ do_test $testid.11 {
+ btree_close $::b
+ } {}
+ incr num2
+ }
+ incr testno
+ set ::b [btree_open test2.bt]
+}
+
+# Testing is complete. Shut everything down.
+#
+do_test btree-999.1 {
+ lindex [btree_pager_stats $::b] 1
+} {0}
+do_test btree-999.2 {
+ btree_close $::b
+} {}
+do_test btree-999.3 {
+ file delete -force test2.bt
+ file exists test2.bt-journal
+} {0}
+
+} ;# end if( has btree_open command );
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/btree3.test b/usr/src/cmd/svc/configd/sqlite/test/btree3.test
new file mode 100644
index 0000000000..784759f176
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/btree3.test
@@ -0,0 +1,89 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 November 22
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for the SQLite library. The
+# focus of this script is the btree database backend.
+#
+# In particular, this file tests a small part of the Delete logic
+# for the BTree backend. When a row is deleted from a table, the
+# cursor is supposed to be left pointing at either the previous or
+# next entry in that table. If the cursor is left pointing at the
+# next entry, then the next Next operation is ignored. So the
+# sequence of operations (Delete, Next) should always leave the
+# cursor pointing at the first entry past the one that was deleted.
+# This test is designed to verify that behavior.
+#
+# $Id: btree3.test,v 1.2 2002/12/04 13:40:27 drh Exp $
+
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+if {[info commands btree_open]!=""} {
+
+# Open a test database.
+#
+file delete -force test1.bt
+file delete -force test1.bt-journal
+set b1 [btree_open test1.bt]
+btree_begin_transaction $::b1
+
+# Insert a few records
+#
+set data {abcdefghijklmnopqrstuvwxyz0123456789}
+append data $data
+append data $data
+append data $data
+append data $data
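+
+# Hedged illustration of the (Delete, Next) contract being tested: with keys
+# 01..05 in table 2, deleting "03" and then calling btree_next should leave
+# the cursor on "04" (the first entry past the deleted one), and btree_prev
+# from there should land on "02".
+#
+#   btree_move_to $::c1 03
+#   btree_delete  $::c1
+#   btree_next    $::c1
+#   btree_key     $::c1   ;# => 04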
+for {set k 2} {$k<=10} {incr k} {
+ for {set j 1} {$j<=$k} {incr j} {
+ set jkey [format %02d $j]
+ btree_clear_table $::b1 2
+ set ::c1 [btree_cursor $::b1 2 1]
+ for {set i 1} {$i<=$k} {incr i} {
+ set key [format %02d $i]
+ do_test btree3-$k.$j.1.$i {
+ btree_insert $::c1 $::key $::data
+ } {}
+ # btree_tree_dump $::b1 2
+ }
+ do_test btree3-$k.$j.2 {
+ btree_move_to $::c1 $::jkey
+ btree_key $::c1
+ } $::jkey
+ do_test btree3-$k.$j.3 {
+ btree_delete $::c1
+ } {}
+ if {$j<$k} {
+ do_test btree3-$k.$j.4 {
+ btree_next $::c1
+ btree_key $::c1
+ } [format %02d [expr $j+1]]
+ }
+ if {$j>1} {
+ do_test btree3-$k.$j.5 {
+ btree_prev $::c1
+ btree_key $::c1
+ } [format %02d [expr $j-1]]
+ }
+ btree_close_cursor $::c1
+ }
+}
+
+btree_rollback $::b1
+btree_pager_ref_dump $::b1
+btree_close $::b1
+
+} ;# end if( has btree_open command );
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/btree3rb.test b/usr/src/cmd/svc/configd/sqlite/test/btree3rb.test
new file mode 100644
index 0000000000..ab249a6ca0
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/btree3rb.test
@@ -0,0 +1,87 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 November 22
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for the SQLite library. The
+# focus of this script is the btree database backend.
+#
+# In particular, this file tests a small part of the Delete logic
+# for the BTree backend. When a row is deleted from a table, the
+# cursor is supposed to be left pointing at either the previous or
+# next entry in that table. If the cursor is left pointing at the
+# next entry, then the next Next operation is ignored. So the
+# sequence of operations (Delete, Next) should always leave the
+# cursor pointing at the first entry past the one that was deleted.
+# This test is designed to verify that behavior.
+#
+# $Id: btree3rb.test,v 1.1 2003/04/20 23:45:23 drh Exp $
+
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+if {[info commands btree_open]!=""} {
+
+# Open a test database.
+#
+set b1 [btree_open :memory:]
+btree_begin_transaction $::b1
+
+# Insert a few records
+#
+set data {abcdefghijklmnopqrstuvwxyz0123456789}
+append data $data
+append data $data
+append data $data
+append data $data
+for {set k 2} {$k<=20} {incr k} {
+ for {set j 1} {$j<=$k} {incr j} {
+ set jkey [format %02d $j]
+ btree_clear_table $::b1 2
+ set ::c1 [btree_cursor $::b1 2 1]
+ for {set i 1} {$i<=$k} {incr i} {
+ set key [format %02d $i]
+ do_test btree3rb-$k.$j.1.$i {
+ btree_insert $::c1 $::key $::data
+ } {}
+ # btree_tree_dump $::b1 2
+ }
+ do_test btree3rb-$k.$j.2 {
+ btree_move_to $::c1 $::jkey
+ btree_key $::c1
+ } $::jkey
+ do_test btree3rb-$k.$j.3 {
+ btree_delete $::c1
+ } {}
+ if {$j<$k} {
+ do_test btree3rb-$k.$j.4 {
+ btree_next $::c1
+ btree_key $::c1
+ } [format %02d [expr $j+1]]
+ }
+ if {$j>1} {
+ do_test btree3rb-$k.$j.5 {
+ btree_prev $::c1
+ btree_key $::c1
+ } [format %02d [expr $j-1]]
+ }
+ btree_close_cursor $::c1
+ }
+}
+
+btree_rollback $::b1
+#btree_pager_ref_dump $::b1
+btree_close $::b1
+
+} ;# end if( has btree_open command );
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/btree4.test b/usr/src/cmd/svc/configd/sqlite/test/btree4.test
new file mode 100644
index 0000000000..b18790388a
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/btree4.test
@@ -0,0 +1,101 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2002 December 03
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for the SQLite library. The
+# focus of this script is the btree database backend.
+#
+# This file focuses on testing the sqliteBtreeNext() and
+# sqliteBtreePrevious() procedures and making sure they are able
+# to step through an entire table from either direction.
+#
+# $Id: btree4.test,v 1.1 2002/12/04 13:40:27 drh Exp $
+
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+if {[info commands btree_open]!=""} {
+
+# Open a test database.
+#
+file delete -force test1.bt
+file delete -force test1.bt-journal
+set b1 [btree_open test1.bt]
+btree_begin_transaction $::b1
+
+set data {abcdefghijklmnopqrstuvwxyz0123456789}
+append data $data
+append data $data
+append data $data
+append data $data
+
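+# Hedged sketch of the scan idiom these tests pick apart step by step; the
+# return-code convention (0 = moved to an entry, 1 = ran off the end) is
+# inferred from the expected values below, and the loop assumes a non-empty
+# table.
+#
+#   btree_first $::c1
+#   while {1} {
+#     puts "[btree_key $::c1] -> [btree_data $::c1]"
+#     if {[btree_next $::c1]} break
+#   }
+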
+foreach N {10 100 1000} {
+ btree_clear_table $::b1 2
+ set ::c1 [btree_cursor $::b1 2 1]
+ do_test btree4-$N.1 {
+ for {set i 1} {$i<=$N} {incr i} {
+ btree_insert $::c1 [format k-%05d $i] $::data-$i
+ }
+ btree_first $::c1
+ btree_key $::c1
+ } {k-00001}
+ do_test btree4-$N.2 {
+ btree_data $::c1
+ } $::data-1
+ for {set i 2} {$i<=$N} {incr i} {
+ do_test btree-$N.3.$i.1 {
+ btree_next $::c1
+ } 0
+ do_test btree-$N.3.$i.2 {
+ btree_key $::c1
+ } [format k-%05d $i]
+ do_test btree-$N.3.$i.3 {
+ btree_data $::c1
+ } $::data-$i
+ }
+ do_test btree4-$N.4 {
+ btree_next $::c1
+ } 1
+ do_test btree4-$N.5 {
+ btree_last $::c1
+ } 0
+ do_test btree4-$N.6 {
+ btree_key $::c1
+ } [format k-%05d $N]
+ do_test btree4-$N.7 {
+ btree_data $::c1
+ } $::data-$N
+ for {set i [expr {$N-1}]} {$i>=1} {incr i -1} {
+ do_test btree4-$N.8.$i.1 {
+ btree_prev $::c1
+ } 0
+ do_test btree4-$N.8.$i.2 {
+ btree_key $::c1
+ } [format k-%05d $i]
+ do_test btree4-$N.8.$i.3 {
+ btree_data $::c1
+ } $::data-$i
+ }
+ do_test btree4-$N.9 {
+ btree_prev $::c1
+ } 1
+ btree_close_cursor $::c1
+}
+
+btree_rollback $::b1
+btree_pager_ref_dump $::b1
+btree_close $::b1
+
+} ;# end if( has btree_open command );
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/btree4rb.test b/usr/src/cmd/svc/configd/sqlite/test/btree4rb.test
new file mode 100644
index 0000000000..3be7f1edbc
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/btree4rb.test
@@ -0,0 +1,98 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2002 December 03
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for the SQLite library. The
+# focus of this script is the btree database backend.
+#
+# This file focuses on testing the sqliteBtreeNext() and
+# sqliteBtreePrevious() procedures and making sure they are able
+# to step through an entire table from either direction.
+#
+# $Id: btree4rb.test,v 1.1 2003/04/20 23:45:23 drh Exp $
+
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+if {[info commands btree_open]!=""} {
+
+# Open a test database.
+#
+set b1 [btree_open :memory:]
+btree_begin_transaction $::b1
+
+set data {abcdefghijklmnopqrstuvwxyz0123456789}
+append data $data
+append data $data
+append data $data
+append data $data
+
+foreach N {10 100 1000} {
+ btree_clear_table $::b1 2
+ set ::c1 [btree_cursor $::b1 2 1]
+ do_test btree4rb-$N.1 {
+ for {set i 1} {$i<=$N} {incr i} {
+ btree_insert $::c1 [format k-%05d $i] $::data-$i
+ }
+ btree_first $::c1
+ btree_key $::c1
+ } {k-00001}
+ do_test btree4rb-$N.2 {
+ btree_data $::c1
+ } $::data-1
+ for {set i 2} {$i<=$N} {incr i} {
+ do_test btree-$N.3.$i.1 {
+ btree_next $::c1
+ } 0
+ do_test btree-$N.3.$i.2 {
+ btree_key $::c1
+ } [format k-%05d $i]
+ do_test btree-$N.3.$i.3 {
+ btree_data $::c1
+ } $::data-$i
+ }
+ do_test btree4rb-$N.4 {
+ btree_next $::c1
+ } 1
+ do_test btree4rb-$N.5 {
+ btree_last $::c1
+ } 0
+ do_test btree4rb-$N.6 {
+ btree_key $::c1
+ } [format k-%05d $N]
+ do_test btree4rb-$N.7 {
+ btree_data $::c1
+ } $::data-$N
+ for {set i [expr {$N-1}]} {$i>=1} {incr i -1} {
+ do_test btree4rb-$N.8.$i.1 {
+ btree_prev $::c1
+ } 0
+ do_test btree4rb-$N.8.$i.2 {
+ btree_key $::c1
+ } [format k-%05d $i]
+ do_test btree4rb-$N.8.$i.3 {
+ btree_data $::c1
+ } $::data-$i
+ }
+ do_test btree4rb-$N.9 {
+ btree_prev $::c1
+ } 1
+ btree_close_cursor $::c1
+}
+
+btree_rollback $::b1
+btree_close $::b1
+
+} ;# end if( has btree_open command );
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/capi2.test b/usr/src/cmd/svc/configd/sqlite/test/capi2.test
new file mode 100644
index 0000000000..1bb32628a9
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/capi2.test
@@ -0,0 +1,478 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2003 January 29
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for the SQLite library. The
+# focus of this script is testing the callback-free C/C++ API.
+#
+# $Id: capi2.test,v 1.10 2003/08/05 13:13:38 drh Exp $
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Check basic functionality
+#
+do_test capi2-1.1 {
+ db close
+ set DB [sqlite db test.db]
+ execsql {CREATE TABLE t1(a,b,c)}
+ set VM [sqlite_compile $DB {SELECT name, rowid FROM sqlite_master} TAIL]
+ set TAIL
+} {}
+do_test capi2-1.2 {
+ sqlite_step $VM N VALUES COLNAMES
+} {SQLITE_ROW}
+do_test capi2-1.3 {
+ set N
+} {2}
+do_test capi2-1.4 {
+ set VALUES
+} {t1 1}
+do_test capi2-1.5 {
+ set COLNAMES
+} {name rowid text INTEGER}
+do_test capi2-1.6 {
+ set N x
+ set VALUES y
+ set COLNAMES z
+ sqlite_step $VM N VALUES COLNAMES
+} {SQLITE_DONE}
+do_test capi2-1.7 {
+ list $N $VALUES $COLNAMES
+} {2 {} {name rowid text INTEGER}}
+do_test capi2-1.8 {
+ set N x
+ set VALUES y
+ set COLNAMES z
+ sqlite_step $VM N VALUES COLNAMES
+} {SQLITE_MISUSE}
+do_test capi2-1.9 {
+ list $N $VALUES $COLNAMES
+} {0 {} {}}
+do_test capi2-1.10 {
+ sqlite_finalize $VM
+} {}
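+
+# Hedged summary sketch of the pattern capi2-1.* just walked through one call
+# at a time, using only the Tcl bindings exercised in this file:
+#
+#   set vm [sqlite_compile $DB {SELECT name, rowid FROM sqlite_master} TAIL]
+#   while {[sqlite_step $vm N VALUES COLNAMES]=="SQLITE_ROW"} {
+#     puts "$N columns: $VALUES"
+#   }
+#   sqlite_finalize $vm   ;# returns {} on success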
+
+# Check to make sure that the "tail" of a multi-statement SQL script
+# is returned by sqlite_compile.
+#
+do_test capi2-2.1 {
+ set SQL {
+ SELECT name, rowid FROM sqlite_master;
+ SELECT name, rowid FROM sqlite_temp_master;
+ -- A comment at the end
+ }
+ set VM [sqlite_compile $DB $SQL SQL]
+ set SQL
+} {
+ SELECT name, rowid FROM sqlite_temp_master;
+ -- A comment at the end
+ }
+do_test capi2-2.2 {
+ set r [sqlite_step $VM n val colname]
+ lappend r $n $val $colname
+} {SQLITE_ROW 2 {t1 1} {name rowid text INTEGER}}
+do_test capi2-2.3 {
+ set r [sqlite_step $VM n val colname]
+ lappend r $n $val $colname
+} {SQLITE_DONE 2 {} {name rowid text INTEGER}}
+do_test capi2-2.4 {
+ sqlite_finalize $VM
+} {}
+do_test capi2-2.5 {
+ set VM [sqlite_compile $DB $SQL SQL]
+ set SQL
+} {
+ -- A comment at the end
+ }
+do_test capi2-2.6 {
+ set r [sqlite_step $VM n val colname]
+ lappend r $n $val $colname
+} {SQLITE_DONE 2 {} {name rowid text INTEGER}}
+do_test capi2-2.7 {
+ sqlite_finalize $VM
+} {}
+do_test capi2-2.8 {
+ set VM [sqlite_compile $DB $SQL SQL]
+ list $SQL $VM
+} {{} {}}
+
+# Check the error handling.
+#
+do_test capi2-3.1 {
+ set rc [catch {
+ sqlite_compile $DB {select bogus from sqlite_master} TAIL
+ } msg]
+ lappend rc $msg $TAIL
+} {1 {(1) no such column: bogus} {}}
+do_test capi2-3.2 {
+ set rc [catch {
+ sqlite_compile $DB {select bogus from } TAIL
+ } msg]
+ lappend rc $msg $TAIL
+} {1 {(1) near " ": syntax error} {}}
+do_test capi2-3.3 {
+ set rc [catch {
+ sqlite_compile $DB {;;;;select bogus from sqlite_master} TAIL
+ } msg]
+ lappend rc $msg $TAIL
+} {1 {(1) no such column: bogus} {}}
+do_test capi2-3.4 {
+ set rc [catch {
+ sqlite_compile $DB {select bogus from sqlite_master;x;} TAIL
+ } msg]
+ lappend rc $msg $TAIL
+} {1 {(1) no such column: bogus} {x;}}
+do_test capi2-3.5 {
+ set rc [catch {
+ sqlite_compile $DB {select bogus from sqlite_master;;;x;} TAIL
+ } msg]
+ lappend rc $msg $TAIL
+} {1 {(1) no such column: bogus} {;;x;}}
+do_test capi2-3.6 {
+ set rc [catch {
+ sqlite_compile $DB {select 5/0} TAIL
+ } VM]
+ lappend rc $TAIL
+} {0 {}}
+do_test capi2-3.7 {
+ set N {}
+ set VALUE {}
+ set COLNAME {}
+ list [sqlite_step $VM N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ROW 1 {{}} {5/0 NUMERIC}}
+do_test capi2-3.8 {
+ sqlite_finalize $VM
+} {}
+do_test capi2-3.9 {
+ execsql {CREATE UNIQUE INDEX i1 ON t1(a)}
+ set VM [sqlite_compile $DB {INSERT INTO t1 VALUES(1,2,3)} TAIL]
+ set TAIL
+} {}
+do_test capi2-3.9b {db changes} {0}
+do_test capi2-3.10 {
+ set N {}
+ set VALUE {}
+ set COLNAME {}
+ list [sqlite_step $VM N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_DONE 0 {} {}}
+do_test capi2-3.10b {db changes} {1}
+do_test capi2-3.11 {
+ sqlite_finalize $VM
+} {}
+do_test capi2-3.11b {db changes} {1}
+do_test capi2-3.12 {
+ list [catch {sqlite_finalize $VM} msg] [set msg]
+} {1 {(21) library routine called out of sequence}}
+do_test capi2-3.13 {
+ set VM [sqlite_compile $DB {INSERT INTO t1 VALUES(1,3,4)} TAIL]
+ list [sqlite_step $VM N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ERROR 0 {} {}}
+do_test capi2-3.13b {db changes} {0}
+do_test capi2-3.14 {
+ list [catch {sqlite_finalize $VM} msg] [set msg]
+} {1 {(19) column a is not unique}}
+do_test capi2-3.15 {
+ set VM [sqlite_compile $DB {CREATE TABLE t2(a NOT NULL, b)} TAIL]
+ set TAIL
+} {}
+do_test capi2-3.16 {
+ list [sqlite_step $VM N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_DONE 0 {} {}}
+do_test capi2-3.17 {
+ list [catch {sqlite_finalize $VM} msg] [set msg]
+} {0 {}}
+do_test capi2-3.18 {
+ set VM [sqlite_compile $DB {INSERT INTO t2 VALUES(NULL,2)} TAIL]
+ list [sqlite_step $VM N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ERROR 0 {} {}}
+do_test capi2-3.19 {
+ list [catch {sqlite_finalize $VM} msg] [set msg]
+} {1 {(19) t2.a may not be NULL}}
+
+# Two or more virtual machines can exist at the same time.
+#
+do_test capi2-4.1 {
+ set VM1 [sqlite_compile $DB {INSERT INTO t2 VALUES(1,2)} TAIL]
+ set TAIL
+} {}
+do_test capi2-4.2 {
+ set VM2 [sqlite_compile $DB {INSERT INTO t2 VALUES(2,3)} TAIL]
+ set TAIL
+} {}
+do_test capi2-4.3 {
+ set VM3 [sqlite_compile $DB {INSERT INTO t2 VALUES(3,4)} TAIL]
+ set TAIL
+} {}
+do_test capi2-4.4 {
+ list [sqlite_step $VM2 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_DONE 0 {} {}}
+do_test capi2-4.5 {
+ execsql {SELECT * FROM t2 ORDER BY a}
+} {2 3}
+do_test capi2-4.6 {
+ list [catch {sqlite_finalize $VM2} msg] [set msg]
+} {0 {}}
+do_test capi2-4.7 {
+ list [sqlite_step $VM3 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_DONE 0 {} {}}
+do_test capi2-4.8 {
+ execsql {SELECT * FROM t2 ORDER BY a}
+} {2 3 3 4}
+do_test capi2-4.9 {
+ list [catch {sqlite_finalize $VM3} msg] [set msg]
+} {0 {}}
+do_test capi2-4.10 {
+ list [sqlite_step $VM1 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_DONE 0 {} {}}
+do_test capi2-4.11 {
+ execsql {SELECT * FROM t2 ORDER BY a}
+} {1 2 2 3 3 4}
+do_test capi2-4.12 {
+ list [catch {sqlite_finalize $VM1} msg] [set msg]
+} {0 {}}
+
+# Interleaved SELECTs
+#
+do_test capi2-5.1 {
+ set VM1 [sqlite_compile $DB {SELECT * FROM t2} TAIL]
+ set VM2 [sqlite_compile $DB {SELECT * FROM t2} TAIL]
+ set VM3 [sqlite_compile $DB {SELECT * FROM t2} TAIL]
+ list [sqlite_step $VM1 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ROW 2 {2 3} {a b {} {}}}
+do_test capi2-5.2 {
+ list [sqlite_step $VM2 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ROW 2 {2 3} {a b {} {}}}
+do_test capi2-5.3 {
+ list [sqlite_step $VM1 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ROW 2 {3 4} {a b {} {}}}
+do_test capi2-5.4 {
+ list [sqlite_step $VM3 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ROW 2 {2 3} {a b {} {}}}
+do_test capi2-5.5 {
+ list [sqlite_step $VM3 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ROW 2 {3 4} {a b {} {}}}
+do_test capi2-5.6 {
+ list [sqlite_step $VM3 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ROW 2 {1 2} {a b {} {}}}
+do_test capi2-5.7 {
+ list [sqlite_step $VM3 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_DONE 2 {} {a b {} {}}}
+do_test capi2-5.8 {
+ list [catch {sqlite_finalize $VM3} msg] [set msg]
+} {0 {}}
+do_test capi2-5.9 {
+ list [sqlite_step $VM1 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ROW 2 {1 2} {a b {} {}}}
+do_test capi2-5.10 {
+ list [catch {sqlite_finalize $VM1} msg] [set msg]
+} {0 {}}
+do_test capi2-5.11 {
+ list [sqlite_step $VM2 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ROW 2 {3 4} {a b {} {}}}
+do_test capi2-5.12 {
+ list [sqlite_step $VM2 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ROW 2 {1 2} {a b {} {}}}
+do_test capi2-5.11 {
+ list [catch {sqlite_finalize $VM2} msg] [set msg]
+} {0 {}}
+
+# Check for proper SQLITE_BUSY returns.
+#
+do_test capi2-6.1 {
+ execsql {
+ BEGIN;
+ CREATE TABLE t3(x counter);
+ INSERT INTO t3 VALUES(1);
+ INSERT INTO t3 VALUES(2);
+ INSERT INTO t3 SELECT x+2 FROM t3;
+ INSERT INTO t3 SELECT x+4 FROM t3;
+ INSERT INTO t3 SELECT x+8 FROM t3;
+ COMMIT;
+ }
+ set VM1 [sqlite_compile $DB {SELECT * FROM t3} TAIL]
+ sqlite db2 test.db
+ execsql {BEGIN} db2
+} {}
+do_test capi2-6.2 {
+ list [sqlite_step $VM1 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_BUSY 0 {} {}}
+do_test capi2-6.3 {
+ execsql {COMMIT} db2
+} {}
+do_test capi2-6.4 {
+ list [sqlite_step $VM1 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ROW 1 1 {x counter}}
+do_test capi2-6.5 {
+ catchsql {BEGIN} db2
+} {1 {database is locked}}
+do_test capi2-6.6 {
+ list [sqlite_step $VM1 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ROW 1 2 {x counter}}
+do_test capi2-6.7 {
+ execsql {SELECT * FROM t2} db2
+} {2 3 3 4 1 2}
+do_test capi2-6.8 {
+ list [sqlite_step $VM1 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ROW 1 3 {x counter}}
+do_test capi2-6.9 {
+ execsql {SELECT * FROM t2}
+} {2 3 3 4 1 2}
+do_test capi2-6.10 {
+ list [sqlite_step $VM1 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ROW 1 4 {x counter}}
+do_test capi2-6.11 {
+ execsql {BEGIN}
+} {}
+do_test capi2-6.12 {
+ list [sqlite_step $VM1 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ROW 1 5 {x counter}}
+# execsql {pragma vdbe_trace=on}
+do_test capi2-6.13 {
+ catchsql {UPDATE t3 SET x=x+1}
+} {1 {database table is locked}}
+do_test capi2-6.14 {
+ list [sqlite_step $VM1 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ROW 1 6 {x counter}}
+# puts [list [catch {sqlite_finalize $VM1} msg] [set msg]]; exit
+do_test capi2-6.15 {
+ execsql {SELECT * FROM t1}
+} {1 2 3}
+do_test capi2-6.16 {
+ list [sqlite_step $VM1 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ROW 1 7 {x counter}}
+do_test capi2-6.17 {
+ catchsql {UPDATE t1 SET b=b+1}
+} {0 {}}
+do_test capi2-6.18 {
+ list [sqlite_step $VM1 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ROW 1 8 {x counter}}
+do_test capi2-6.19 {
+ execsql {SELECT * FROM t1}
+} {1 3 3}
+do_test capi2-6.20 {
+ list [sqlite_step $VM1 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ROW 1 9 {x counter}}
+do_test capi2-6.21 {
+ execsql {ROLLBACK; SELECT * FROM t1}
+} {1 2 3}
+do_test capi2-6.22 {
+ list [sqlite_step $VM1 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ROW 1 10 {x counter}}
+do_test capi2-6.23 {
+ execsql {BEGIN TRANSACTION ON CONFLICT ROLLBACK;}
+} {}
+do_test capi2-6.24 {
+ list [sqlite_step $VM1 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ROW 1 11 {x counter}}
+do_test capi2-6.25 {
+ execsql {
+ INSERT INTO t1 VALUES(2,3,4);
+ SELECT * FROM t1;
+ }
+} {1 2 3 2 3 4}
+do_test capi2-6.26 {
+ list [sqlite_step $VM1 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ROW 1 12 {x counter}}
+do_test capi2-6.27 {
+ catchsql {
+ INSERT INTO t1 VALUES(2,4,5);
+ SELECT * FROM t1;
+ }
+} {1 {column a is not unique}}
+do_test capi2-6.28 {
+ list [sqlite_step $VM1 N VALUE COLNAME] [set N] [set VALUE] [set COLNAME]
+} {SQLITE_ROW 1 13 {x counter}}
+do_test capi2-6.99 {
+ list [catch {sqlite_finalize $VM1} msg] [set msg]
+} {0 {}}
+catchsql {ROLLBACK}
+
+do_test capi2-7.1 {
+ stepsql $DB {
+ SELECT * FROM t1
+ }
+} {0 1 2 3}
+do_test capi2-7.2 {
+ stepsql $DB {
+ PRAGMA count_changes=on
+ }
+} {0}
+do_test capi2-7.3 {
+ stepsql $DB {
+ UPDATE t1 SET a=a+10;
+ }
+} {0 1}
+do_test capi2-7.4 {
+ stepsql $DB {
+ INSERT INTO t1 SELECT a+1,b+1,c+1 FROM t1;
+ }
+} {0 1}
+do_test capi2-7.4b {db changes} {1}
+do_test capi2-7.5 {
+ stepsql $DB {
+ UPDATE t1 SET a=a+10;
+ }
+} {0 2}
+do_test capi2-7.5b {db changes} {2}
+do_test capi2-7.6 {
+ stepsql $DB {
+ SELECT * FROM t1;
+ }
+} {0 21 2 3 22 3 4}
+do_test capi2-7.7 {
+ stepsql $DB {
+ INSERT INTO t1 SELECT a+2,b+2,c+2 FROM t1;
+ }
+} {0 2}
+do_test capi2-7.8 {
+ db changes
+} {2}
+do_test capi2-7.9 {
+ stepsql $DB {
+ SELECT * FROM t1;
+ }
+} {0 21 2 3 22 3 4 23 4 5 24 5 6}
+do_test capi2-7.10 {
+ stepsql $DB {
+ UPDATE t1 SET a=a-20;
+ SELECT * FROM t1;
+ }
+} {0 4 1 2 3 2 3 4 3 4 5 4 5 6}
+do_test capi2-7.11 {
+ db changes
+} {0}
+do_test capi2-7.12 {
+ set x [stepsql $DB {EXPLAIN SELECT * FROM t1}]
+ lindex $x 0
+} {0}
+
+# Ticket #261 - make sure we can finalize before the end of a query.
+#
+do_test capi2-8.1 {
+ set VM1 [sqlite_compile $DB {SELECT * FROM t2} TAIL]
+ sqlite_finalize $VM1
+} {}
+
+# Tickets #384 and #385 - make sure the TAIL argument to sqlite_compile
+# and all of the return pointers in sqlite_step can be null.
+#
+do_test capi2-9.1 {
+ set VM1 [sqlite_compile $DB {SELECT * FROM t2}]
+ sqlite_step $VM1
+ sqlite_finalize $VM1
+} {}
+
+db2 close
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/conflict.test b/usr/src/cmd/svc/configd/sqlite/test/conflict.test
new file mode 100644
index 0000000000..dfb1c88f42
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/conflict.test
@@ -0,0 +1,697 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2002 January 29
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library.
+#
+# This file implements tests for the conflict resolution extension
+# to SQLite.
+#
+# $Id: conflict.test,v 1.19 2003/08/05 13:13:39 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Create tables for the first group of tests.
+#
+do_test conflict-1.0 {
+ execsql {
+ CREATE TABLE t1(a, b, c, UNIQUE(a,b));
+ CREATE TABLE t2(x);
+ SELECT c FROM t1 ORDER BY c;
+ }
+} {}
+
+# Six columns of configuration data as follows:
+#
+# i The reference number of the test
+# conf The conflict resolution algorithm on the BEGIN statement
+# cmd An INSERT or REPLACE command to execute against table t1
+# t0 True if there is an error from $cmd
+# t1 Content of "c" column of t1 assuming no error in $cmd
+# t2 Content of "x" column of t2
+#
+foreach {i conf cmd t0 t1 t2} {
+ 1 {} INSERT 1 {} 1
+ 2 {} {INSERT OR IGNORE} 0 3 1
+ 3 {} {INSERT OR REPLACE} 0 4 1
+ 4 {} REPLACE 0 4 1
+ 5 {} {INSERT OR FAIL} 1 {} 1
+ 6 {} {INSERT OR ABORT} 1 {} 1
+ 7 {} {INSERT OR ROLLBACK} 1 {} {}
+ 8 IGNORE INSERT 0 3 1
+ 9 IGNORE {INSERT OR IGNORE} 0 3 1
+ 10 IGNORE {INSERT OR REPLACE} 0 4 1
+ 11 IGNORE REPLACE 0 4 1
+ 12 IGNORE {INSERT OR FAIL} 1 {} 1
+ 13 IGNORE {INSERT OR ABORT} 1 {} 1
+ 14 IGNORE {INSERT OR ROLLBACK} 1 {} {}
+ 15 REPLACE INSERT 0 4 1
+ 16 FAIL INSERT 1 {} 1
+ 17 ABORT INSERT 1 {} 1
+ 18 ROLLBACK INSERT 1 {} {}
+} {
+ do_test conflict-1.$i {
+ if {$conf!=""} {set conf "ON CONFLICT $conf"}
+ set r0 [catch {execsql [subst {
+ DELETE FROM t1;
+ DELETE FROM t2;
+ INSERT INTO t1 VALUES(1,2,3);
+ BEGIN $conf;
+ INSERT INTO t2 VALUES(1);
+ $cmd INTO t1 VALUES(1,2,4);
+ }]} r1]
+ catch {execsql {COMMIT}}
+ if {$r0} {set r1 {}} {set r1 [execsql {SELECT c FROM t1}]}
+ set r2 [execsql {SELECT x FROM t2}]
+ list $r0 $r1 $r2
+ } [list $t0 $t1 $t2]
+}
+
+# Create tables for the second group of tests.
+#
+do_test conflict-2.0 {
+ execsql {
+ DROP TABLE t1;
+ DROP TABLE t2;
+ CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c, UNIQUE(a,b));
+ CREATE TABLE t2(x);
+ SELECT c FROM t1 ORDER BY c;
+ }
+} {}
+
+# Six columns of configuration data as follows:
+#
+# i The reference number of the test
+# conf The conflict resolution algorithm on the BEGIN statement
+# cmd An INSERT or REPLACE command to execute against table t1
+# t0 True if there is an error from $cmd
+# t1 Content of "c" column of t1 assuming no error in $cmd
+# t2 Content of "x" column of t2
+#
+foreach {i conf cmd t0 t1 t2} {
+ 1 {} INSERT 1 {} 1
+ 2 {} {INSERT OR IGNORE} 0 3 1
+ 3 {} {INSERT OR REPLACE} 0 4 1
+ 4 {} REPLACE 0 4 1
+ 5 {} {INSERT OR FAIL} 1 {} 1
+ 6 {} {INSERT OR ABORT} 1 {} 1
+ 7 {} {INSERT OR ROLLBACK} 1 {} {}
+ 8 IGNORE INSERT 0 3 1
+ 9 IGNORE {INSERT OR IGNORE} 0 3 1
+ 10 IGNORE {INSERT OR REPLACE} 0 4 1
+ 11 IGNORE REPLACE 0 4 1
+ 12 IGNORE {INSERT OR FAIL} 1 {} 1
+ 13 IGNORE {INSERT OR ABORT} 1 {} 1
+ 14 IGNORE {INSERT OR ROLLBACK} 1 {} {}
+ 15 REPLACE INSERT 0 4 1
+ 16 FAIL INSERT 1 {} 1
+ 17 ABORT INSERT 1 {} 1
+ 18 ROLLBACK INSERT 1 {} {}
+} {
+ do_test conflict-2.$i {
+ if {$conf!=""} {set conf "ON CONFLICT $conf"}
+ set r0 [catch {execsql [subst {
+ DELETE FROM t1;
+ DELETE FROM t2;
+ INSERT INTO t1 VALUES(1,2,3);
+ BEGIN $conf;
+ INSERT INTO t2 VALUES(1);
+ $cmd INTO t1 VALUES(1,2,4);
+ }]} r1]
+ catch {execsql {COMMIT}}
+ if {$r0} {set r1 {}} {set r1 [execsql {SELECT c FROM t1}]}
+ set r2 [execsql {SELECT x FROM t2}]
+ list $r0 $r1 $r2
+ } [list $t0 $t1 $t2]
+}
+
+# Create tables for the third group of tests.
+#
+do_test conflict-3.0 {
+ execsql {
+ DROP TABLE t1;
+ DROP TABLE t2;
+ CREATE TABLE t1(a, b, c INTEGER, PRIMARY KEY(c), UNIQUE(a,b));
+ CREATE TABLE t2(x);
+ SELECT c FROM t1 ORDER BY c;
+ }
+} {}
+
+# Six columns of configuration data as follows:
+#
+# i The reference number of the test
+# conf The conflict resolution algorithm on the BEGIN statement
+# cmd An INSERT or REPLACE command to execute against table t1
+# t0 True if there is an error from $cmd
+# t1 Content of "c" column of t1 assuming no error in $cmd
+# t2 Content of "x" column of t2
+#
+foreach {i conf cmd t0 t1 t2} {
+ 1 {} INSERT 1 {} 1
+ 2 {} {INSERT OR IGNORE} 0 3 1
+ 3 {} {INSERT OR REPLACE} 0 4 1
+ 4 {} REPLACE 0 4 1
+ 5 {} {INSERT OR FAIL} 1 {} 1
+ 6 {} {INSERT OR ABORT} 1 {} 1
+ 7 {} {INSERT OR ROLLBACK} 1 {} {}
+ 8 IGNORE INSERT 0 3 1
+ 9 IGNORE {INSERT OR IGNORE} 0 3 1
+ 10 IGNORE {INSERT OR REPLACE} 0 4 1
+ 11 IGNORE REPLACE 0 4 1
+ 12 IGNORE {INSERT OR FAIL} 1 {} 1
+ 13 IGNORE {INSERT OR ABORT} 1 {} 1
+ 14 IGNORE {INSERT OR ROLLBACK} 1 {} {}
+ 15 REPLACE INSERT 0 4 1
+ 16 FAIL INSERT 1 {} 1
+ 17 ABORT INSERT 1 {} 1
+ 18 ROLLBACK INSERT 1 {} {}
+} {
+ do_test conflict-3.$i {
+ if {$conf!=""} {set conf "ON CONFLICT $conf"}
+ set r0 [catch {execsql [subst {
+ DELETE FROM t1;
+ DELETE FROM t2;
+ INSERT INTO t1 VALUES(1,2,3);
+ BEGIN $conf;
+ INSERT INTO t2 VALUES(1);
+ $cmd INTO t1 VALUES(1,2,4);
+ }]} r1]
+ catch {execsql {COMMIT}}
+ if {$r0} {set r1 {}} {set r1 [execsql {SELECT c FROM t1}]}
+ set r2 [execsql {SELECT x FROM t2}]
+ list $r0 $r1 $r2
+ } [list $t0 $t1 $t2]
+}
+
+do_test conflict-4.0 {
+ execsql {
+ DROP TABLE t2;
+ CREATE TABLE t2(x);
+ SELECT x FROM t2;
+ }
+} {}
+
+# Six columns of configuration data as follows:
+#
+# i The reference number of the test
+# conf1 The conflict resolution algorithm on the UNIQUE constraint
+# conf2 The conflict resolution algorithm on the BEGIN statement
+# cmd An INSERT or REPLACE command to execute against table t1
+# t0 True if there is an error from $cmd
+# t1 Content of "c" column of t1 assuming no error in $cmd
+# t2 Content of "x" column of t2
+#
+foreach {i conf1 conf2 cmd t0 t1 t2} {
+ 1 {} {} INSERT 1 {} 1
+ 2 REPLACE {} INSERT 0 4 1
+ 3 IGNORE {} INSERT 0 3 1
+ 4 FAIL {} INSERT 1 {} 1
+ 5 ABORT {} INSERT 1 {} 1
+ 6 ROLLBACK {} INSERT 1 {} {}
+ 7 REPLACE {} {INSERT OR IGNORE} 0 3 1
+ 8 IGNORE {} {INSERT OR REPLACE} 0 4 1
+ 9 FAIL {} {INSERT OR IGNORE} 0 3 1
+ 10 ABORT {} {INSERT OR REPLACE} 0 4 1
+ 11 ROLLBACK {} {INSERT OR IGNORE } 0 3 1
+ 12 REPLACE IGNORE INSERT 0 3 1
+ 13 IGNORE REPLACE INSERT 0 4 1
+ 14 FAIL IGNORE INSERT 0 3 1
+ 15 ABORT REPLACE INSERT 0 4 1
+ 16 ROLLBACK IGNORE INSERT 0 3 1
+ 12 IGNORE REPLACE INSERT 0 4 1
+ 13 IGNORE FAIL INSERT 1 {} 1
+ 14 IGNORE ABORT INSERT 1 {} 1
+ 15 IGNORE ROLLBACK INSERT 1 {} {}
+} {
+ do_test conflict-4.$i {
+ if {$conf1!=""} {set conf1 "ON CONFLICT $conf1"}
+ if {$conf2!=""} {set conf2 "ON CONFLICT $conf2"}
+ set r0 [catch {execsql [subst {
+ DROP TABLE t1;
+ CREATE TABLE t1(a,b,c,UNIQUE(a,b) $conf1);
+ DELETE FROM t2;
+ INSERT INTO t1 VALUES(1,2,3);
+ BEGIN $conf2;
+ INSERT INTO t2 VALUES(1);
+ $cmd INTO t1 VALUES(1,2,4);
+ }]} r1]
+ catch {execsql {COMMIT}}
+ if {$r0} {set r1 {}} {set r1 [execsql {SELECT c FROM t1}]}
+ set r2 [execsql {SELECT x FROM t2}]
+ list $r0 $r1 $r2
+ } [list $t0 $t1 $t2]
+}
+
+do_test conflict-5.0 {
+ execsql {
+ DROP TABLE t2;
+ CREATE TABLE t2(x);
+ SELECT x FROM t2;
+ }
+} {}
+
+# Six columns of configuration data as follows:
+#
+# i The reference number of the test
+# conf1 The conflict resolution algorithm on the NOT NULL constraint
+# conf2 The conflict resolution algorithm on the BEGIN statement
+# cmd An INSERT or REPLACE command to execute against table t1
+# t0 True if there is an error from $cmd
+# t1 Content of "c" column of t1 assuming no error in $cmd
+# t2 Content of "x" column of t2
+#
+foreach {i conf1 conf2 cmd t0 t1 t2} {
+ 1 {} {} INSERT 1 {} 1
+ 2 REPLACE {} INSERT 0 5 1
+ 3 IGNORE {} INSERT 0 {} 1
+ 4 FAIL {} INSERT 1 {} 1
+ 5 ABORT {} INSERT 1 {} 1
+ 6 ROLLBACK {} INSERT 1 {} {}
+ 7 REPLACE {} {INSERT OR IGNORE} 0 {} 1
+ 8 IGNORE {} {INSERT OR REPLACE} 0 5 1
+ 9 FAIL {} {INSERT OR IGNORE} 0 {} 1
+ 10 ABORT {} {INSERT OR REPLACE} 0 5 1
+ 11 ROLLBACK {} {INSERT OR IGNORE} 0 {} 1
+ 12 {} {} {INSERT OR IGNORE} 0 {} 1
+ 13 {} {} {INSERT OR REPLACE} 0 5 1
+ 14 {} {} {INSERT OR FAIL} 1 {} 1
+ 15 {} {} {INSERT OR ABORT} 1 {} 1
+ 16 {} {} {INSERT OR ROLLBACK} 1 {} {}
+ 17 {} IGNORE INSERT 0 {} 1
+ 18 {} REPLACE INSERT 0 5 1
+ 19 {} FAIL INSERT 1 {} 1
+ 20 {} ABORT INSERT 1 {} 1
+ 21 {} ROLLBACK INSERT 1 {} {}
+ 22 REPLACE FAIL INSERT 1 {} 1
+ 23 IGNORE ROLLBACK INSERT 1 {} {}
+} {
+ if {$t0} {set t1 {t1.c may not be NULL}}
+ do_test conflict-5.$i {
+ if {$conf1!=""} {set conf1 "ON CONFLICT $conf1"}
+ if {$conf2!=""} {set conf2 "ON CONFLICT $conf2"}
+ set r0 [catch {execsql [subst {
+ DROP TABLE t1;
+ CREATE TABLE t1(a,b,c NOT NULL $conf1 DEFAULT 5);
+ DELETE FROM t2;
+ BEGIN $conf2;
+ INSERT INTO t2 VALUES(1);
+ $cmd INTO t1 VALUES(1,2,NULL);
+ }]} r1]
+ catch {execsql {COMMIT}}
+ if {!$r0} {set r1 [execsql {SELECT c FROM t1}]}
+ set r2 [execsql {SELECT x FROM t2}]
+ list $r0 $r1 $r2
+ } [list $t0 $t1 $t2]
+}
+
+do_test conflict-6.0 {
+ execsql {
+ DROP TABLE t2;
+ CREATE TABLE t2(a,b,c);
+ INSERT INTO t2 VALUES(1,2,1);
+ INSERT INTO t2 VALUES(2,3,2);
+ INSERT INTO t2 VALUES(3,4,1);
+ INSERT INTO t2 VALUES(4,5,4);
+ SELECT c FROM t2 ORDER BY b;
+ CREATE TABLE t3(x);
+ INSERT INTO t3 VALUES(1);
+ }
+} {1 2 1 4}
+
+# Six columns of configuration data as follows:
+#
+# i The reference number of the test
+# conf1 The conflict resolution algorithm on the UNIQUE constraint
+# conf2 The conflict resolution algorithm on the BEGIN statement
+# cmd An UPDATE command to execute against table t1
+# t0 True if there is an error from $cmd
+# t1 Content of "b" column of t1 assuming no error in $cmd
+# t2 Content of "x" column of t3
+#
+foreach {i conf1 conf2 cmd t0 t1 t2} {
+ 1 {} {} UPDATE 1 {6 7 8 9} 1
+ 2 REPLACE {} UPDATE 0 {7 6 9} 1
+ 3 IGNORE {} UPDATE 0 {6 7 3 9} 1
+ 4 FAIL {} UPDATE 1 {6 7 3 4} 1
+ 5 ABORT {} UPDATE 1 {1 2 3 4} 1
+ 6 ROLLBACK {} UPDATE 1 {1 2 3 4} 0
+ 7 REPLACE {} {UPDATE OR IGNORE} 0 {6 7 3 9} 1
+ 8 IGNORE {} {UPDATE OR REPLACE} 0 {7 6 9} 1
+ 9 FAIL {} {UPDATE OR IGNORE} 0 {6 7 3 9} 1
+ 10 ABORT {} {UPDATE OR REPLACE} 0 {7 6 9} 1
+ 11 ROLLBACK {} {UPDATE OR IGNORE} 0 {6 7 3 9} 1
+ 12 {} {} {UPDATE OR IGNORE} 0 {6 7 3 9} 1
+ 13 {} {} {UPDATE OR REPLACE} 0 {7 6 9} 1
+ 14 {} {} {UPDATE OR FAIL} 1 {6 7 3 4} 1
+ 15 {} {} {UPDATE OR ABORT} 1 {1 2 3 4} 1
+ 16 {} {} {UPDATE OR ROLLBACK} 1 {1 2 3 4} 0
+ 17 {} IGNORE UPDATE 0 {6 7 3 9} 1
+ 18 {} REPLACE UPDATE 0 {7 6 9} 1
+ 19 {} FAIL UPDATE 1 {6 7 3 4} 1
+ 20 {} ABORT UPDATE 1 {1 2 3 4} 1
+ 21 {} ROLLBACK UPDATE 1 {1 2 3 4} 0
+ 22 REPLACE IGNORE UPDATE 0 {6 7 3 9} 1
+ 23 IGNORE REPLACE UPDATE 0 {7 6 9} 1
+ 24 REPLACE FAIL UPDATE 1 {6 7 3 4} 1
+ 25 IGNORE ABORT UPDATE 1 {1 2 3 4} 1
+ 26 REPLACE ROLLBACK UPDATE 1 {1 2 3 4} 0
+} {
+ if {$t0} {set t1 {column a is not unique}}
+ do_test conflict-6.$i {
+ if {$conf1!=""} {set conf1 "ON CONFLICT $conf1"}
+ if {$conf2!=""} {set conf2 "ON CONFLICT $conf2"}
+ set r0 [catch {execsql [subst {
+ DROP TABLE t1;
+ CREATE TABLE t1(a,b,c, UNIQUE(a) $conf1);
+ INSERT INTO t1 SELECT * FROM t2;
+ UPDATE t3 SET x=0;
+ BEGIN $conf2;
+ $cmd t3 SET x=1;
+ $cmd t1 SET b=b*2;
+ $cmd t1 SET a=c+5;
+ }]} r1]
+ catch {execsql {COMMIT}}
+ if {!$r0} {set r1 [execsql {SELECT a FROM t1 ORDER BY b}]}
+ set r2 [execsql {SELECT x FROM t3}]
+ list $r0 $r1 $r2
+ } [list $t0 $t1 $t2]
+}
+
+# Test to make sure a lot of IGNOREs don't cause a stack overflow
+#
+do_test conflict-7.1 {
+ execsql {
+ DROP TABLE t1;
+ DROP TABLE t2;
+ DROP TABLE t3;
+ CREATE TABLE t1(a unique, b);
+ }
+ for {set i 1} {$i<=50} {incr i} {
+ execsql "INSERT into t1 values($i,[expr {$i+1}]);"
+ }
+ execsql {
+ SELECT count(*), min(a), max(b) FROM t1;
+ }
+} {50 1 51}
+do_test conflict-7.2 {
+ execsql {
+ PRAGMA count_changes=on;
+ UPDATE OR IGNORE t1 SET a=1000;
+ }
+} {1}
+do_test conflict-7.2.1 {
+ db changes
+} {1}
+do_test conflict-7.3 {
+ execsql {
+ SELECT b FROM t1 WHERE a=1000;
+ }
+} {2}
+do_test conflict-7.4 {
+ execsql {
+ SELECT count(*) FROM t1;
+ }
+} {50}
+do_test conflict-7.5 {
+ execsql {
+ PRAGMA count_changes=on;
+ UPDATE OR REPLACE t1 SET a=1001;
+ }
+} {50}
+do_test conflict-7.5.1 {
+ db changes
+} {50}
+do_test conflict-7.6 {
+ execsql {
+ SELECT b FROM t1 WHERE a=1001;
+ }
+} {51}
+do_test conflict-7.7 {
+ execsql {
+ SELECT count(*) FROM t1;
+ }
+} {1}
+do_test conflict-7.7.1 {
+ db changes
+} {0}
+
+# Make sure the row count is right for rows that are ignored on
+# an insert.
+#
+do_test conflict-8.1 {
+ execsql {
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES(1,2);
+ }
+ execsql {
+ INSERT OR IGNORE INTO t1 VALUES(2,3);
+ }
+} {1}
+do_test conflict-8.1.1 {
+ db changes
+} {1}
+do_test conflict-8.2 {
+ execsql {
+ INSERT OR IGNORE INTO t1 VALUES(2,4);
+ }
+} {0}
+do_test conflict-8.2.1 {
+ db changes
+} {0}
+do_test conflict-8.3 {
+ execsql {
+ INSERT OR REPLACE INTO t1 VALUES(2,4);
+ }
+} {1}
+do_test conflict-8.3.1 {
+ db changes
+} {1}
+do_test conflict-8.4 {
+ execsql {
+ INSERT OR IGNORE INTO t1 SELECT * FROM t1;
+ }
+} {0}
+do_test conflict-8.4.1 {
+ db changes
+} {0}
+do_test conflict-8.5 {
+ execsql {
+ INSERT OR IGNORE INTO t1 SELECT a+2,b+2 FROM t1;
+ }
+} {2}
+do_test conflict-8.5.1 {
+ db changes
+} {2}
+do_test conflict-8.6 {
+ execsql {
+ INSERT OR IGNORE INTO t1 SELECT a+3,b+3 FROM t1;
+ }
+} {3}
+do_test conflict-8.6.1 {
+ db changes
+} {3}
+
+integrity_check conflict-8.99
+
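+# Conflict resolution clauses attached directly to column constraints:
+# each column of t2 carries a different ON CONFLICT algorithm, and t3.x
+# records whether the surrounding transaction survived.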
+do_test conflict-9.1 {
+ execsql {
+ PRAGMA count_changes=0;
+ CREATE TABLE t2(
+ a INTEGER UNIQUE ON CONFLICT IGNORE,
+ b INTEGER UNIQUE ON CONFLICT FAIL,
+ c INTEGER UNIQUE ON CONFLICT REPLACE,
+ d INTEGER UNIQUE ON CONFLICT ABORT,
+ e INTEGER UNIQUE ON CONFLICT ROLLBACK
+ );
+ CREATE TABLE t3(x);
+ INSERT INTO t3 VALUES(1);
+ SELECT * FROM t3;
+ }
+} {1}
+do_test conflict-9.2 {
+ catchsql {
+ INSERT INTO t2 VALUES(1,1,1,1,1);
+ INSERT INTO t2 VALUES(2,2,2,2,2);
+ SELECT * FROM t2;
+ }
+} {0 {1 1 1 1 1 2 2 2 2 2}}
+do_test conflict-9.3 {
+ catchsql {
+ INSERT INTO t2 VALUES(1,3,3,3,3);
+ SELECT * FROM t2;
+ }
+} {0 {1 1 1 1 1 2 2 2 2 2}}
+do_test conflict-9.4 {
+ catchsql {
+ UPDATE t2 SET a=a+1 WHERE a=1;
+ SELECT * FROM t2;
+ }
+} {0 {1 1 1 1 1 2 2 2 2 2}}
+do_test conflict-9.5 {
+ catchsql {
+ INSERT INTO t2 VALUES(3,1,3,3,3);
+ SELECT * FROM t2;
+ }
+} {1 {column b is not unique}}
+do_test conflict-9.6 {
+ catchsql {
+ UPDATE t2 SET b=b+1 WHERE b=1;
+ SELECT * FROM t2;
+ }
+} {1 {column b is not unique}}
+do_test conflict-9.7 {
+ catchsql {
+ BEGIN;
+ UPDATE t3 SET x=x+1;
+ INSERT INTO t2 VALUES(3,1,3,3,3);
+ SELECT * FROM t2;
+ }
+} {1 {column b is not unique}}
+do_test conflict-9.8 {
+ execsql {COMMIT}
+ execsql {SELECT * FROM t3}
+} {2}
+do_test conflict-9.9 {
+ catchsql {
+ BEGIN;
+ UPDATE t3 SET x=x+1;
+ UPDATE t2 SET b=b+1 WHERE b=1;
+ SELECT * FROM t2;
+ }
+} {1 {column b is not unique}}
+do_test conflict-9.10 {
+ execsql {COMMIT}
+ execsql {SELECT * FROM t3}
+} {3}
+do_test conflict-9.11 {
+ catchsql {
+ INSERT INTO t2 VALUES(3,3,3,1,3);
+ SELECT * FROM t2;
+ }
+} {1 {column d is not unique}}
+do_test conflict-9.12 {
+ catchsql {
+ UPDATE t2 SET d=d+1 WHERE d=1;
+ SELECT * FROM t2;
+ }
+} {1 {column d is not unique}}
+do_test conflict-9.13 {
+ catchsql {
+ BEGIN;
+ UPDATE t3 SET x=x+1;
+ INSERT INTO t2 VALUES(3,3,3,1,3);
+ SELECT * FROM t2;
+ }
+} {1 {column d is not unique}}
+do_test conflict-9.14 {
+ execsql {COMMIT}
+ execsql {SELECT * FROM t3}
+} {4}
+do_test conflict-9.15 {
+ catchsql {
+ BEGIN;
+ UPDATE t3 SET x=x+1;
+ UPDATE t2 SET d=d+1 WHERE d=1;
+ SELECT * FROM t2;
+ }
+} {1 {column d is not unique}}
+do_test conflict-9.16 {
+ execsql {COMMIT}
+ execsql {SELECT * FROM t3}
+} {5}
+do_test conflict-9.17 {
+ catchsql {
+ INSERT INTO t2 VALUES(3,3,3,3,1);
+ SELECT * FROM t2;
+ }
+} {1 {column e is not unique}}
+do_test conflict-9.18 {
+ catchsql {
+ UPDATE t2 SET e=e+1 WHERE e=1;
+ SELECT * FROM t2;
+ }
+} {1 {column e is not unique}}
+do_test conflict-9.19 {
+ catchsql {
+ BEGIN;
+ UPDATE t3 SET x=x+1;
+ INSERT INTO t2 VALUES(3,3,3,3,1);
+ SELECT * FROM t2;
+ }
+} {1 {column e is not unique}}
+do_test conflict-9.20 {
+ catch {execsql {COMMIT}}
+ execsql {SELECT * FROM t3}
+} {5}
+do_test conflict-9.21 {
+ catchsql {
+ BEGIN;
+ UPDATE t3 SET x=x+1;
+ UPDATE t2 SET e=e+1 WHERE e=1;
+ SELECT * FROM t2;
+ }
+} {1 {column e is not unique}}
+do_test conflict-9.22 {
+ catch {execsql {COMMIT}}
+ execsql {SELECT * FROM t3}
+} {5}
+do_test conflict-9.23 {
+ catchsql {
+ INSERT INTO t2 VALUES(3,3,1,3,3);
+ SELECT * FROM t2;
+ }
+} {0 {2 2 2 2 2 3 3 1 3 3}}
+do_test conflict-9.24 {
+ catchsql {
+ UPDATE t2 SET c=c-1 WHERE c=2;
+ SELECT * FROM t2;
+ }
+} {0 {2 2 1 2 2}}
+do_test conflict-9.25 {
+ catchsql {
+ BEGIN;
+ UPDATE t3 SET x=x+1;
+ INSERT INTO t2 VALUES(3,3,1,3,3);
+ SELECT * FROM t2;
+ }
+} {0 {3 3 1 3 3}}
+do_test conflict-9.26 {
+ catch {execsql {COMMIT}}
+ execsql {SELECT * FROM t3}
+} {6}
+
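+# BEGIN ON CONFLICT ROLLBACK should undo the entire transaction when a
+# uniqueness failure occurs, whether the constraint comes from the table
+# definition or from a separate UNIQUE index.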
+do_test conflict-10.1 {
+ catchsql {
+ DELETE FROM t1;
+ BEGIN ON CONFLICT ROLLBACK;
+ INSERT INTO t1 VALUES(1,2);
+ INSERT INTO t1 VALUES(1,3);
+ COMMIT;
+ }
+ execsql {SELECT * FROM t1}
+} {}
+do_test conflict-10.2 {
+ catchsql {
+ CREATE TABLE t4(x);
+ CREATE UNIQUE INDEX t4x ON t4(x);
+ BEGIN ON CONFLICT ROLLBACK;
+ INSERT INTO t4 VALUES(1);
+ INSERT INTO t4 VALUES(1);
+ COMMIT;
+ }
+ execsql {SELECT * FROM t4}
+} {}
+
+integrity_check conflict-99.0
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/copy.test b/usr/src/cmd/svc/configd/sqlite/test/copy.test
new file mode 100644
index 0000000000..68fa7f8fd2
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/copy.test
@@ -0,0 +1,268 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing the COPY statement.
+#
+# $Id: copy.test,v 1.17 2004/02/17 18:26:57 dougcurrie Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Create a file of data from which to copy.
+#
+set f [open data1.txt w]
+puts $f "11\t22\t33"
+puts $f "22\t33\t11"
+close $f
+set f [open data2.txt w]
+puts $f "11\t22\t33"
+puts $f "\\."
+puts $f "22\t33\t11"
+close $f
+set f [open data3.txt w]
+puts $f "11\t22\t33\t44"
+puts $f "22\t33\t11"
+close $f
+set f [open data4.txt w]
+puts $f "11 | 22 | 33"
+puts $f "22 | 33 | 11"
+close $f
+set f [open data5.txt w]
+puts $f "11|22|33"
+puts $f "22|33|11"
+close $f
+set f [open dataX.txt w]
+fconfigure $f -translation binary
+puts -nonewline $f "11|22|33\r"
+puts -nonewline $f "22|33|44\r\n"
+puts -nonewline $f "33|44|55\n"
+puts -nonewline $f "44|55|66\r"
+puts -nonewline $f "55|66|77\r\n"
+puts -nonewline $f "66|77|88\n"
+close $f
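+
+# data2.txt includes a "\." terminator line, data3.txt has a row with an
+# extra column, data4.txt and data5.txt use "|" delimiters (with and without
+# surrounding spaces), and dataX.txt mixes \r, \r\n and \n line endings.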
+
+# Try to COPY into a non-existent table.
+#
+do_test copy-1.1 {
+ set v [catch {execsql {COPY test1 FROM 'data1.txt'}} msg]
+ lappend v $msg
+} {1 {no such table: test1}}
+
+# Try to insert into sqlite_master
+#
+do_test copy-1.2 {
+ set v [catch {execsql {COPY sqlite_master FROM 'data2.txt'}} msg]
+ lappend v $msg
+} {1 {table sqlite_master may not be modified}}
+
+# Do some actual inserts
+#
+do_test copy-1.3 {
+ execsql {CREATE TABLE test1(one int, two int, three int)}
+ execsql {COPY test1 FROM 'data1.txt'}
+ execsql {SELECT * FROM test1 ORDER BY one}
+} {11 22 33 22 33 11}
+
+# Make sure input terminates at \.
+#
+do_test copy-1.4 {
+ execsql {DELETE FROM test1}
+ execsql {COPY test1 FROM 'data2.txt'}
+ execsql {SELECT * FROM test1 ORDER BY one}
+} {11 22 33}
+
+# Test out the USING DELIMITERS clause
+#
+do_test copy-1.5 {
+ execsql {DELETE FROM test1}
+ execsql {COPY test1 FROM 'data4.txt' USING DELIMITERS ' | '}
+ execsql {SELECT * FROM test1 ORDER BY one}
+} {11 22 33 22 33 11}
+do_test copy-1.6 {
+ execsql {DELETE FROM test1}
+ execsql {COPY test1 FROM 'data5.txt' USING DELIMITERS '|'}
+ execsql {SELECT * FROM test1 ORDER BY one}
+} {11 22 33 22 33 11}
+do_test copy-1.7 {
+ execsql {DELETE FROM test1}
+ execsql {COPY test1 FROM 'data4.txt' USING DELIMITERS '|'}
+ execsql {SELECT * FROM test1 ORDER BY one}
+} {{11 } { 22 } { 33} {22 } { 33 } { 11}}
+
+# Try copying into a table that has one or more indices.
+#
+do_test copy-1.8 {
+ execsql {DELETE FROM test1}
+ execsql {CREATE INDEX index1 ON test1(one)}
+ execsql {CREATE INDEX index2 ON test1(two)}
+ execsql {CREATE INDEX index3 ON test1(three)}
+ execsql {COPY test1 from 'data1.txt'}
+ execsql {SELECT * FROM test1 WHERE one=11}
+} {11 22 33}
+do_test copy-1.8b {
+ execsql {SELECT * FROM test1 WHERE one=22}
+} {22 33 11}
+do_test copy-1.8c {
+ execsql {SELECT * FROM test1 WHERE two=22}
+} {11 22 33}
+do_test copy-1.8d {
+ execsql {SELECT * FROM test1 WHERE three=11}
+} {22 33 11}
+
+
+# Try inserting really long data
+#
+set x {}
+for {set i 0} {$i<100} {incr i} {
+ append x "($i)-abcdefghijklmnopqrstyvwxyz-ABCDEFGHIJKLMNOPQRSTUVWXYZ-"
+}
+do_test copy-2.1 {
+ execsql {CREATE TABLE test2(a int, x text)}
+ set f [open data21.txt w]
+ puts $f "123\t$x"
+ close $f
+ execsql {COPY test2 FROM 'data21.txt'}
+ execsql {SELECT x from test2}
+} $x
+file delete -force data21.txt
+
+# Test the escape character mechanism
+#
+do_test copy-3.1 {
+ set fd [open data6.txt w]
+ puts $fd "hello\\\tworld\t1"
+ puts $fd "hello\tworld\\\t2"
+ close $fd
+ execsql {
+ CREATE TABLE t1(a text, b text);
+ COPY t1 FROM 'data6.txt';
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {hello {world 2} {hello world} 1}
+do_test copy-3.2 {
+ set fd [open data6.txt w]
+ puts $fd "1\thello\\\nworld"
+ puts $fd "2\thello world"
+ close $fd
+ execsql {
+ DELETE FROM t1;
+ COPY t1 FROM 'data6.txt';
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {1 {hello
+world} 2 {hello world}}
+do_test copy-3.3 {
+ set fd [open data6.txt w]
+ puts $fd "1:hello\\b\\f\\n\\r\\t\\vworld"
+ puts $fd "2:hello world"
+ close $fd
+ execsql {
+ DELETE FROM t1;
+ COPY t1 FROM 'data6.txt' USING DELIMITERS ':';
+ SELECT * FROM t1 ORDER BY a;
+ }
+} [list 1 "hello\b\f\n\r\t\vworld" 2 "hello world"]
+
+# Test the embedded NULL logic.
+#
+do_test copy-4.1 {
+ set fd [open data6.txt w]
+ puts $fd "1\t\\N"
+ puts $fd "\\N\thello world"
+ close $fd
+ execsql {
+ DELETE FROM t1;
+ COPY t1 FROM 'data6.txt';
+ SELECT * FROM t1 WHERE a IS NULL;
+ }
+} {{} {hello world}}
+do_test copy-4.2 {
+ execsql {
+ SELECT * FROM t1 WHERE b IS NULL;
+ }
+} {1 {}}
+
+# Test the conflict resolution logic for COPY
+#
+do_test copy-5.1 {
+ execsql {
+ DROP TABLE t1;
+ CREATE TABLE t1(a INTEGER PRIMARY KEY, b UNIQUE, c);
+ COPY t1 FROM 'data5.txt' USING DELIMITERS '|';
+ SELECT * FROM t1;
+ }
+} {11 22 33 22 33 11}
+do_test copy-5.2 {
+ set fd [open data6.txt w]
+ puts $fd "33|22|44"
+ close $fd
+ catchsql {
+ COPY t1 FROM 'data6.txt' USING DELIMITERS '|';
+ SELECT * FROM t1;
+ }
+} {1 {column b is not unique}}
+do_test copy-5.3 {
+ set fd [open data6.txt w]
+ puts $fd "33|22|44"
+ close $fd
+ catchsql {
+ COPY OR IGNORE t1 FROM 'data6.txt' USING DELIMITERS '|';
+ SELECT * FROM t1;
+ }
+} {0 {11 22 33 22 33 11}}
+do_test copy-5.4 {
+ set fd [open data6.txt w]
+ puts $fd "33|22|44"
+ close $fd
+ catchsql {
+ COPY OR REPLACE t1 FROM 'data6.txt' USING DELIMITERS '|';
+ SELECT * FROM t1;
+ }
+} {0 {22 33 11 33 22 44}}
+
+do_test copy-5.5 {
+ execsql {
+ DELETE FROM t1;
+ PRAGMA count_changes=on;
+ COPY t1 FROM 'data5.txt' USING DELIMITERS '|';
+ }
+} {2}
+do_test copy-5.6 {
+ execsql {
+ COPY OR REPLACE t1 FROM 'data5.txt' USING DELIMITERS '|';
+ }
+} {2}
+do_test copy-5.7 {
+ execsql {
+ COPY OR IGNORE t1 FROM 'data5.txt' USING DELIMITERS '|';
+ }
+} {0}
+
+do_test copy-6.1 {
+ execsql {
+ PRAGMA count_changes=off;
+ CREATE TABLE t2(a,b,c);
+ COPY t2 FROM 'dataX.txt' USING DELIMITERS '|';
+ SELECT * FROM t2;
+ }
+} {11 22 33 22 33 44 33 44 55 44 55 66 55 66 77 66 77 88}
+
+integrity_check copy-7.1
+
+# Cleanup
+#
+#file delete -force data1.txt data2.txt data3.txt data4.txt data5.txt \
+#    data6.txt dataX.txt
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/crashme2.off b/usr/src/cmd/svc/configd/sqlite/test/crashme2.off
new file mode 100644
index 0000000000..a6a4e77353
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/crashme2.off
@@ -0,0 +1,52 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+db close
+set DB [sqlite db test.db]
+
+execsql {
+ CREATE TABLE t1(a);
+ INSERT INTO t1 VALUES(1);
+ INSERT INTO t1 VALUES(2);
+ INSERT INTO t1 VALUES(3);
+ INSERT INTO t1 VALUES(4);
+}
+
+do_test capi3-13.1 {
+ execsql {
+ CREATE TABLE t3(a unique on conflict rollback);
+ INSERT INTO t3 SELECT a FROM t1;
+ BEGIN;
+ INSERT INTO t1 SELECT * FROM t1;
+ }
+} {}
+do_test capi3-13.2 {
+ set STMT [sqlite_compile $DB "SELECT a FROM t1" TAIL]
+ sqlite_step $STMT
+ sqlite_step $STMT
+ sqlite_step $STMT
+ sqlite_step $STMT
+ sqlite_step $STMT
+} {SQLITE_ROW}
+do_test capi3-13.3 {
+# This causes a ROLLBACK, which deletes the table out from underneath the
+# SELECT statement and causes a crash.
+ catchsql {
+ INSERT INTO t3 SELECT a FROM t1;
+ }
+} {1 {column a is not unique}}
+do_test capi3-13.4 {
+ sqlite_step $STMT
+ sqlite_step $STMT
+ sqlite_step $STMT
+ sqlite_step $STMT
+} {SQLITE_DONE}
+do_test capi3-13.5 {
+ sqlite_finalize $STMT
+} {SQLITE_OK}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/crashtest1.c b/usr/src/cmd/svc/configd/sqlite/test/crashtest1.c
new file mode 100644
index 0000000000..ed82867dd8
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/crashtest1.c
@@ -0,0 +1,99 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** This program tests the ability of SQLite database to recover from a crash.
+** This program runs under Unix only, but the results are applicable to all
+** systems.
+**
+** The main process first constructs a test database, then starts creating
+** subprocesses that write to that database. Each subprocess is killed off,
+** without a chance to clean up its database connection, after a random
+** delay. This killing of the subprocesses simulates a crash or power
+** failure. The next subprocess to open the database should rollback
+** whatever operation was in process at the time of the simulated crash.
+**
+** If any problems are encountered, an error is reported and the test stops.
+** If no problems are seen after a large number of tests, we assume that
+** the rollback mechanism is working.
+*/
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sched.h>
+#include "sqlite.h"
+
+static void do_some_sql(int parent){
+ char *zErr;
+ int rc = SQLITE_OK;
+ sqlite *db;
+ int cnt = 0;
+ static char zBig[] =
+ "-abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "-abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
+
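+ /*
+ ** A leftover journal means the previous child was killed in the middle of
+ ** a transaction.  Keep copies of the database and journal files so a
+ ** failed rollback can be examined afterwards.
+ */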
+ if( access("./test.db-journal",0)==0 ){
+ /*printf("pid %d: journal exists. rollback will be required\n",getpid());*/
+ unlink("test.db-saved");
+ system("cp test.db test.db-saved");
+ unlink("test.db-journal-saved");
+ system("cp test.db-journal test.db-journal-saved");
+ }
+ db = sqlite_open("./test.db", 0, &zErr);
+ if( db==0 ){
+ printf("ERROR: %s\n", zErr);
+ if( strcmp(zErr,"database disk image is malformed")==0 ){
+ kill(parent, SIGKILL);
+ }
+ exit(1);
+ }
+ srand(getpid());
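+ /* Insert rows as fast as possible until an error occurs or the parent
+ ** kills this process. */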
+ while( rc==SQLITE_OK ){
+ cnt++;
+ rc = sqlite_exec_printf(db,
+ "INSERT INTO t1 VALUES(%d,'%d%s')", 0, 0, &zErr,
+ rand(), rand(), zBig);
+ }
+ if( rc!=SQLITE_OK ){
+ printf("ERROR #%d: %s\n", rc, zErr);
+ if( rc==SQLITE_CORRUPT ){
+ kill(parent, SIGKILL);
+ }
+ }
+ printf("pid %d: cnt=%d\n", getpid(), cnt);
+}
+
+
+int main(int argc, char **argv){
+ int i;
+ sqlite *db;
+ char *zErr;
+ int status;
+ int parent = getpid();
+
+ unlink("test.db");
+ unlink("test.db-journal");
+ db = sqlite_open("test.db", 0, &zErr);
+ if( db==0 ){
+ printf("Cannot initialize: %s\n", zErr);
+ return 1;
+ }
+ sqlite_exec(db, "CREATE TABLE t1(a,b)", 0, 0, 0);
+ sqlite_close(db);
+ for(i=0; i<10000; i++){
+ int pid = fork();
+ if( pid==0 ){
+ sched_yield();
+ do_some_sql(parent);
+ return 0;
+ }
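+ /* Parent: let the child run for a random 1 to 11 milliseconds, then kill
+ ** it without warning to simulate a crash or power failure. */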
+ printf("test %d, pid=%d\n", i, pid);
+ usleep(rand()%10000 + 1000);
+ kill(pid, SIGKILL);
+ waitpid(pid, &status, 0);
+ }
+ return 0;
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/test/date.test b/usr/src/cmd/svc/configd/sqlite/test/date.test
new file mode 100644
index 0000000000..b145e3cadd
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/date.test
@@ -0,0 +1,260 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2003 October 31
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing date and time functions.
+#
+# $Id: date.test,v 1.7.2.1 2004/07/18 22:25:16 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
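+# datetest is a thin wrapper around do_test: it evaluates the expression
+# with SELECT and maps a NULL result to the literal string 'NULL' so that
+# NULL results can be checked directly against the expected value.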
+proc datetest {tnum expr result} {
+ do_test date-$tnum [subst {
+ execsql "SELECT coalesce($expr,'NULL')"
+ }] [list $result]
+}
+
+datetest 1.1 julianday('2000-01-01') 2451544.5
+datetest 1.2 julianday('1970-01-01') 2440587.5
+datetest 1.3 julianday('1910-04-20') 2418781.5
+datetest 1.4 julianday('1986-02-09') 2446470.5
+datetest 1.5 julianday('12:00:00') 2451545
+datetest 1.6 {julianday('2000-01-01 12:00:00')} 2451545
+datetest 1.7 {julianday('2000-01-01 12:00')} 2451545
+datetest 1.8 julianday('bogus') NULL
+datetest 1.9 julianday('1999-12-31') 2451543.5
+datetest 1.10 julianday('1999-12-32') NULL
+datetest 1.11 julianday('1999-13-01') NULL
+datetest 1.12 julianday('2003-02-31') 2452701.5
+datetest 1.13 julianday('2003-03-03') 2452701.5
+datetest 1.14 julianday('+2000-01-01') NULL
+datetest 1.15 julianday('200-01-01') NULL
+datetest 1.16 julianday('2000-1-01') NULL
+datetest 1.17 julianday('2000-01-1') NULL
+datetest 1.18 {julianday('2000-01-01 12:00:00')} 2451545
+datetest 1.19 {julianday('2000-01-01 12:00:00.1')} 2451545.00000116
+datetest 1.20 {julianday('2000-01-01 12:00:00.01')} 2451545.00000012
+datetest 1.21 {julianday('2000-01-01 12:00:00.001')} 2451545.00000001
+datetest 1.22 {julianday('2000-01-01 12:00:00.')} NULL
+datetest 1.23 julianday(12345.6) 12345.6
+datetest 1.24 {julianday('2001-01-01 12:00:00 bogus')} NULL
+datetest 1.25 {julianday('2001-01-01 bogus')} NULL
+
+datetest 2.1 datetime(0,'unixepoch') {1970-01-01 00:00:00}
+datetest 2.2 datetime(946684800,'unixepoch') {2000-01-01 00:00:00}
+datetest 2.3 {date('2003-10-22','weekday 0')} 2003-10-26
+datetest 2.4 {date('2003-10-22','weekday 1')} 2003-10-27
+datetest 2.5 {date('2003-10-22','weekday 2')} 2003-10-28
+datetest 2.6 {date('2003-10-22','weekday 3')} 2003-10-22
+datetest 2.7 {date('2003-10-22','weekday 4')} 2003-10-23
+datetest 2.8 {date('2003-10-22','weekday 5')} 2003-10-24
+datetest 2.9 {date('2003-10-22','weekday 6')} 2003-10-25
+datetest 2.10 {date('2003-10-22','weekday 7')} NULL
+datetest 2.11 {date('2003-10-22','weekday 5.5')} NULL
+datetest 2.12 {datetime('2003-10-22 12:34','weekday 0')} {2003-10-26 12:34:00}
+datetest 2.13 {datetime('2003-10-22 12:34','start of month')} \
+ {2003-10-01 00:00:00}
+datetest 2.14 {datetime('2003-10-22 12:34','start of year')} \
+ {2003-01-01 00:00:00}
+datetest 2.15 {datetime('2003-10-22 12:34','start of day')} \
+ {2003-10-22 00:00:00}
+datetest 2.16 time('12:34:56.43') 12:34:56
+datetest 2.17 {datetime('2003-10-22 12:34','1 day')} {2003-10-23 12:34:00}
+datetest 2.18 {datetime('2003-10-22 12:34','+1 day')} {2003-10-23 12:34:00}
+datetest 2.19 {datetime('2003-10-22 12:34','+1.25 day')} {2003-10-23 18:34:00}
+datetest 2.20 {datetime('2003-10-22 12:34','-1.0 day')} {2003-10-21 12:34:00}
+datetest 2.21 {datetime('2003-10-22 12:34','1 month')} {2003-11-22 12:34:00}
+datetest 2.22 {datetime('2003-10-22 12:34','11 month')} {2004-09-22 12:34:00}
+datetest 2.23 {datetime('2003-10-22 12:34','-13 month')} {2002-09-22 12:34:00}
+datetest 2.24 {datetime('2003-10-22 12:34','1.5 months')} {2003-12-07 12:34:00}
+datetest 2.25 {datetime('2003-10-22 12:34','-5 years')} {1998-10-22 12:34:00}
+datetest 2.26 {datetime('2003-10-22 12:34','+10.5 minutes')} \
+ {2003-10-22 12:44:30}
+datetest 2.27 {datetime('2003-10-22 12:34','-1.25 hours')} \
+ {2003-10-22 11:19:00}
+datetest 2.28 {datetime('2003-10-22 12:34','11.25 seconds')} \
+ {2003-10-22 12:34:11}
+datetest 2.29 {datetime('2003-10-22 12:24','+5 bogus')} NULL
+
+
+datetest 3.1 {strftime('%d','2003-10-31 12:34:56.432')} 31
+datetest 3.2 {strftime('%f','2003-10-31 12:34:56.432')} 56.432
+datetest 3.3 {strftime('%H','2003-10-31 12:34:56.432')} 12
+datetest 3.4 {strftime('%j','2003-10-31 12:34:56.432')} 304
+datetest 3.5 {strftime('%J','2003-10-31 12:34:56.432')} 2452944.024264259
+datetest 3.6 {strftime('%m','2003-10-31 12:34:56.432')} 10
+datetest 3.7 {strftime('%M','2003-10-31 12:34:56.432')} 34
+datetest 3.8 {strftime('%s','2003-10-31 12:34:56.432')} 1067603696
+datetest 3.9 {strftime('%S','2003-10-31 12:34:56.432')} 56
+datetest 3.10 {strftime('%w','2003-10-31 12:34:56.432')} 5
+datetest 3.11.1 {strftime('%W','2003-10-31 12:34:56.432')} 43
+datetest 3.11.2 {strftime('%W','2004-01-01')} 00
+datetest 3.11.3 {strftime('%W','2004-01-02')} 00
+datetest 3.11.4 {strftime('%W','2004-01-03')} 00
+datetest 3.11.5 {strftime('%W','2004-01-04')} 00
+datetest 3.11.6 {strftime('%W','2004-01-05')} 01
+datetest 3.11.7 {strftime('%W','2004-01-06')} 01
+datetest 3.11.8 {strftime('%W','2004-01-07')} 01
+datetest 3.11.9 {strftime('%W','2004-01-08')} 01
+datetest 3.11.10 {strftime('%W','2004-01-09')} 01
+datetest 3.11.11 {strftime('%W','2004-07-18')} 28
+datetest 3.11.12 {strftime('%W','2004-12-31')} 52
+datetest 3.11.13 {strftime('%W','2007-12-31')} 53
+datetest 3.11.14 {strftime('%W','2007-01-01')} 01
+datetest 3.12 {strftime('%Y','2003-10-31 12:34:56.432')} 2003
+datetest 3.13 {strftime('%%','2003-10-31 12:34:56.432')} %
+datetest 3.14 {strftime('%_','2003-10-31 12:34:56.432')} NULL
+datetest 3.15 {strftime('%Y-%m-%d','2003-10-31')} 2003-10-31
+proc repeat {n txt} {
+ set x {}
+ while {$n>0} {
+ append x $txt
+ incr n -1
+ }
+ return $x
+}
+datetest 3.16 "strftime('[repeat 200 %Y]','2003-10-31')" [repeat 200 2003]
+datetest 3.17 "strftime('[repeat 200 abc%m123]','2003-10-31')" \
+ [repeat 200 abc10123]
+
+set now [clock format [clock seconds] -format "%Y-%m-%d" -gmt 1]
+datetest 4.1 {date('now')} $now
+
+datetest 5.1 {datetime('1994-04-16 14:00:00 -05:00')} {1994-04-16 09:00:00}
+datetest 5.2 {datetime('1994-04-16 14:00:00 +05:15')} {1994-04-16 19:15:00}
+datetest 5.3 {datetime('1994-04-16 05:00:00 -08:30')} {1994-04-15 20:30:00}
+datetest 5.4 {datetime('1994-04-16 14:00:00 +11:55')} {1994-04-17 01:55:00}
+
+# localtime->utc and utc->localtime conversions. These tests only work
+# if the local timezone is US Eastern Time (the time in Charlotte, NC
+# and in New York).
+#
+if {[clock scan [clock format 0 -format {%b %d, %Y %H:%M:%S}] -gmt 1]==-18000} {
+ datetest 6.1 {datetime('2000-10-29 05:59:00','localtime')}\
+ {2000-10-29 01:59:00}
+ datetest 6.2 {datetime('2000-10-29 06:00:00','localtime')}\
+ {2000-10-29 01:00:00}
+ datetest 6.3 {datetime('2000-04-02 06:59:00','localtime')}\
+ {2000-04-02 01:59:00}
+ datetest 6.4 {datetime('2000-04-02 07:00:00','localtime')}\
+ {2000-04-02 03:00:00}
+ datetest 6.5 {datetime('2000-10-29 01:59:00','utc')} {2000-10-29 05:59:00}
+ datetest 6.6 {datetime('2000-10-29 02:00:00','utc')} {2000-10-29 07:00:00}
+ datetest 6.7 {datetime('2000-04-02 01:59:00','utc')} {2000-04-02 06:59:00}
+ datetest 6.8 {datetime('2000-04-02 02:00:00','utc')} {2000-04-02 06:00:00}
+
+ datetest 6.10 {datetime('2000-01-01 12:00:00','localtime')} \
+ {2000-01-01 07:00:00}
+ datetest 6.11 {datetime('1969-01-01 12:00:00','localtime')} \
+ {1969-01-01 07:00:00}
+ datetest 6.12 {datetime('2039-01-01 12:00:00','localtime')} \
+ {2039-01-01 07:00:00}
+ datetest 6.13 {datetime('2000-07-01 12:00:00','localtime')} \
+ {2000-07-01 08:00:00}
+ datetest 6.14 {datetime('1969-07-01 12:00:00','localtime')} \
+ {1969-07-01 07:00:00}
+ datetest 6.15 {datetime('2039-07-01 12:00:00','localtime')} \
+ {2039-07-01 07:00:00}
+ set sqlite_current_time \
+ [db eval {SELECT strftime('%s','2000-07-01 12:34:56')}]
+ datetest 6.16 {datetime('now','localtime')} {2000-07-01 08:34:56}
+ set sqlite_current_time 0
+}
+
+# Date-time functions that contain NULL arguments return a NULL
+# result.
+#
+datetest 7.1 {datetime(null)} NULL
+datetest 7.2 {datetime('now',null)} NULL
+datetest 7.3 {datetime('now','localtime',null)} NULL
+datetest 7.4 {time(null)} NULL
+datetest 7.5 {time('now',null)} NULL
+datetest 7.6 {time('now','localtime',null)} NULL
+datetest 7.7 {date(null)} NULL
+datetest 7.8 {date('now',null)} NULL
+datetest 7.9 {date('now','localtime',null)} NULL
+datetest 7.10 {julianday(null)} NULL
+datetest 7.11 {julianday('now',null)} NULL
+datetest 7.12 {julianday('now','localtime',null)} NULL
+datetest 7.13 {strftime(null,'now')} NULL
+datetest 7.14 {strftime('%s',null)} NULL
+datetest 7.15 {strftime('%s','now',null)} NULL
+datetest 7.16 {strftime('%s','now','localtime',null)} NULL
+
+# Test modifiers when the date begins as a julian day number - to
+# make sure the HH:MM:SS is preserved. Ticket #551.
+#
+set sqlite_current_time [db eval {SELECT strftime('%s','2003-10-22 12:34:00')}]
+datetest 8.1 {datetime('now','weekday 0')} {2003-10-26 12:34:00}
+datetest 8.2 {datetime('now','weekday 1')} {2003-10-27 12:34:00}
+datetest 8.3 {datetime('now','weekday 2')} {2003-10-28 12:34:00}
+datetest 8.4 {datetime('now','weekday 3')} {2003-10-22 12:34:00}
+datetest 8.5 {datetime('now','start of month')} {2003-10-01 00:00:00}
+datetest 8.6 {datetime('now','start of year')} {2003-01-01 00:00:00}
+datetest 8.7 {datetime('now','start of day')} {2003-10-22 00:00:00}
+datetest 8.8 {datetime('now','1 day')} {2003-10-23 12:34:00}
+datetest 8.9 {datetime('now','+1 day')} {2003-10-23 12:34:00}
+datetest 8.10 {datetime('now','+1.25 day')} {2003-10-23 18:34:00}
+datetest 8.11 {datetime('now','-1.0 day')} {2003-10-21 12:34:00}
+datetest 8.12 {datetime('now','1 month')} {2003-11-22 12:34:00}
+datetest 8.13 {datetime('now','11 month')} {2004-09-22 12:34:00}
+datetest 8.14 {datetime('now','-13 month')} {2002-09-22 12:34:00}
+datetest 8.15 {datetime('now','1.5 months')} {2003-12-07 12:34:00}
+datetest 8.16 {datetime('now','-5 years')} {1998-10-22 12:34:00}
+datetest 8.17 {datetime('now','+10.5 minutes')} {2003-10-22 12:44:30}
+datetest 8.18 {datetime('now','-1.25 hours')} {2003-10-22 11:19:00}
+datetest 8.19 {datetime('now','11.25 seconds')} {2003-10-22 12:34:11}
+set sqlite_current_time 0
+
+# Negative years work. Example: '-4713-11-26' is JD 1.5.
+#
+datetest 9.1 {julianday('-4713-11-24 12:00:00')} {0}
+datetest 9.2 {julianday(datetime(5))} {5}
+datetest 9.3 {julianday(datetime(10))} {10}
+datetest 9.4 {julianday(datetime(100))} {100}
+datetest 9.5 {julianday(datetime(1000))} {1000}
+datetest 9.6 {julianday(datetime(10000))} {10000}
+datetest 9.7 {julianday(datetime(100000))} {100000}
+
+# datetime() with just an HH:MM:SS correctly inserts the date 2000-01-01.
+#
+datetest 10.1 {datetime('01:02:03')} {2000-01-01 01:02:03}
+datetest 10.2 {date('01:02:03')} {2000-01-01}
+datetest 10.3 {strftime('%Y-%m-%d %H:%M','01:02:03')} {2000-01-01 01:02}
+
+# Test the new HH:MM:SS modifier
+#
+datetest 11.1 {datetime('2004-02-28 20:00:00', '-01:20:30')} \
+ {2004-02-28 18:39:30}
+datetest 11.2 {datetime('2004-02-28 20:00:00', '+12:30:00')} \
+ {2004-02-29 08:30:00}
+datetest 11.3 {datetime('2004-02-28 20:00:00', '+12:30')} \
+ {2004-02-29 08:30:00}
+datetest 11.4 {datetime('2004-02-28 20:00:00', '12:30')} \
+ {2004-02-29 08:30:00}
+datetest 11.5 {datetime('2004-02-28 20:00:00', '-12:00')} \
+ {2004-02-28 08:00:00}
+datetest 11.6 {datetime('2004-02-28 20:00:00', '-12:01')} \
+ {2004-02-28 07:59:00}
+datetest 11.7 {datetime('2004-02-28 20:00:00', '-11:59')} \
+ {2004-02-28 08:01:00}
+datetest 11.8 {datetime('2004-02-28 20:00:00', '11:59')} \
+ {2004-02-29 07:59:00}
+datetest 11.9 {datetime('2004-02-28 20:00:00', '12:01')} \
+ {2004-02-29 08:01:00}
+
+
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/delete.test b/usr/src/cmd/svc/configd/sqlite/test/delete.test
new file mode 100644
index 0000000000..1676323507
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/delete.test
@@ -0,0 +1,294 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing the DELETE FROM statement.
+#
+# $Id: delete.test,v 1.13 2003/06/15 23:42:25 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Try to delete from a non-existent table.
+#
+do_test delete-1.1 {
+ set v [catch {execsql {DELETE FROM test1}} msg]
+ lappend v $msg
+} {1 {no such table: test1}}
+
+# Try to delete from sqlite_master
+#
+do_test delete-2.1 {
+ set v [catch {execsql {DELETE FROM sqlite_master}} msg]
+ lappend v $msg
+} {1 {table sqlite_master may not be modified}}
+
+# Delete selected entries from a table with and without an index.
+#
+do_test delete-3.1.1 {
+ execsql {CREATE TABLE table1(f1 int, f2 int)}
+ execsql {INSERT INTO table1 VALUES(1,2)}
+ execsql {INSERT INTO table1 VALUES(2,4)}
+ execsql {INSERT INTO table1 VALUES(3,8)}
+ execsql {INSERT INTO table1 VALUES(4,16)}
+ execsql {SELECT * FROM table1 ORDER BY f1}
+} {1 2 2 4 3 8 4 16}
+do_test delete-3.1.2 {
+ execsql {DELETE FROM table1 WHERE f1=3}
+} {}
+do_test delete-3.1.3 {
+ execsql {SELECT * FROM table1 ORDER BY f1}
+} {1 2 2 4 4 16}
+do_test delete-3.1.4 {
+ execsql {CREATE INDEX index1 ON table1(f1)}
+ execsql {PRAGMA count_changes=on}
+ execsql {DELETE FROM 'table1' WHERE f1=3}
+} {0}
+do_test delete-3.1.5 {
+ execsql {SELECT * FROM table1 ORDER BY f1}
+} {1 2 2 4 4 16}
+do_test delete-3.1.6 {
+ execsql {DELETE FROM table1 WHERE f1=2}
+} {1}
+do_test delete-3.1.7 {
+ execsql {SELECT * FROM table1 ORDER BY f1}
+} {1 2 4 16}
+integrity_check delete-3.2
+
+
+# Semantic errors in the WHERE clause
+#
+do_test delete-4.1 {
+ execsql {CREATE TABLE table2(f1 int, f2 int)}
+ set v [catch {execsql {DELETE FROM table2 WHERE f3=5}} msg]
+ lappend v $msg
+} {1 {no such column: f3}}
+
+do_test delete-4.2 {
+ set v [catch {execsql {DELETE FROM table2 WHERE xyzzy(f1+4)}} msg]
+ lappend v $msg
+} {1 {no such function: xyzzy}}
+integrity_check delete-4.3
+
+# Lots of deletes
+#
+do_test delete-5.1.1 {
+ execsql {DELETE FROM table1}
+} {2}
+do_test delete-5.1.2 {
+ execsql {SELECT count(*) FROM table1}
+} {0}
+do_test delete-5.2.1 {
+ execsql {BEGIN TRANSACTION}
+ for {set i 1} {$i<=200} {incr i} {
+ execsql "INSERT INTO table1 VALUES($i,[expr {$i*$i}])"
+ }
+ execsql {COMMIT}
+ execsql {SELECT count(*) FROM table1}
+} {200}
+do_test delete-5.2.2 {
+ execsql {DELETE FROM table1}
+} {200}
+do_test delete-5.2.3 {
+ execsql {BEGIN TRANSACTION}
+ for {set i 1} {$i<=200} {incr i} {
+ execsql "INSERT INTO table1 VALUES($i,[expr {$i*$i}])"
+ }
+ execsql {COMMIT}
+ execsql {SELECT count(*) FROM table1}
+} {200}
+do_test delete-5.2.4 {
+ execsql {PRAGMA count_changes=off}
+ execsql {DELETE FROM table1}
+} {}
+do_test delete-5.2.5 {
+ execsql {SELECT count(*) FROM table1}
+} {0}
+do_test delete-5.2.6 {
+ execsql {BEGIN TRANSACTION}
+ for {set i 1} {$i<=200} {incr i} {
+ execsql "INSERT INTO table1 VALUES($i,[expr {$i*$i}])"
+ }
+ execsql {COMMIT}
+ execsql {SELECT count(*) FROM table1}
+} {200}
+do_test delete-5.3 {
+ for {set i 1} {$i<=200} {incr i 4} {
+ execsql "DELETE FROM table1 WHERE f1==$i"
+ }
+ execsql {SELECT count(*) FROM table1}
+} {150}
+do_test delete-5.4 {
+ execsql "DELETE FROM table1 WHERE f1>50"
+ execsql {SELECT count(*) FROM table1}
+} {37}
+do_test delete-5.5 {
+ for {set i 1} {$i<=70} {incr i 3} {
+ execsql "DELETE FROM table1 WHERE f1==$i"
+ }
+ execsql {SELECT f1 FROM table1 ORDER BY f1}
+} {2 3 6 8 11 12 14 15 18 20 23 24 26 27 30 32 35 36 38 39 42 44 47 48 50}
+do_test delete-5.6 {
+ for {set i 1} {$i<40} {incr i} {
+ execsql "DELETE FROM table1 WHERE f1==$i"
+ }
+ execsql {SELECT f1 FROM table1 ORDER BY f1}
+} {42 44 47 48 50}
+do_test delete-5.7 {
+ execsql "DELETE FROM table1 WHERE f1!=48"
+ execsql {SELECT f1 FROM table1 ORDER BY f1}
+} {48}
+integrity_check delete-5.8
+
+
+# Delete large quantities of data. We want to test the List overflow
+# mechanism in the vdbe.
+#
+do_test delete-6.1 {
+ set fd [open data1.txt w]
+ for {set i 1} {$i<=3000} {incr i} {
+ puts $fd "[expr {$i}]\t[expr {$i*$i}]"
+ }
+ close $fd
+ execsql {DELETE FROM table1}
+ execsql {COPY table1 FROM 'data1.txt'}
+ execsql {DELETE FROM table2}
+ execsql {COPY table2 FROM 'data1.txt'}
+ file delete data1.txt
+ execsql {SELECT count(*) FROM table1}
+} {3000}
+do_test delete-6.2 {
+ execsql {SELECT count(*) FROM table2}
+} {3000}
+do_test delete-6.3 {
+ execsql {SELECT f1 FROM table1 WHERE f1<10 ORDER BY f1}
+} {1 2 3 4 5 6 7 8 9}
+do_test delete-6.4 {
+ execsql {SELECT f1 FROM table2 WHERE f1<10 ORDER BY f1}
+} {1 2 3 4 5 6 7 8 9}
+do_test delete-6.5 {
+ execsql {DELETE FROM table1 WHERE f1>7}
+ execsql {SELECT f1 FROM table1 ORDER BY f1}
+} {1 2 3 4 5 6 7}
+do_test delete-6.6 {
+ execsql {DELETE FROM table2 WHERE f1>7}
+ execsql {SELECT f1 FROM table2 ORDER BY f1}
+} {1 2 3 4 5 6 7}
+do_test delete-6.7 {
+ execsql {DELETE FROM table1}
+ execsql {SELECT f1 FROM table1}
+} {}
+do_test delete-6.8 {
+ execsql {INSERT INTO table1 VALUES(2,3)}
+ execsql {SELECT f1 FROM table1}
+} {2}
+do_test delete-6.9 {
+ execsql {DELETE FROM table2}
+ execsql {SELECT f1 FROM table2}
+} {}
+do_test delete-6.10 {
+ execsql {INSERT INTO table2 VALUES(2,3)}
+ execsql {SELECT f1 FROM table2}
+} {2}
+integrity_check delete-6.11
+
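+# Deletes that fire an AFTER DELETE trigger.  The cnt table counts trigger
+# invocations and count_changes reports the number of rows removed.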
+do_test delete-7.1 {
+ execsql {
+ CREATE TABLE t3(a);
+ INSERT INTO t3 VALUES(1);
+ INSERT INTO t3 SELECT a+1 FROM t3;
+ INSERT INTO t3 SELECT a+2 FROM t3;
+ SELECT * FROM t3;
+ }
+} {1 2 3 4}
+do_test delete-7.2 {
+ execsql {
+ CREATE TABLE cnt(del);
+ INSERT INTO cnt VALUES(0);
+ CREATE TRIGGER r1 AFTER DELETE ON t3 FOR EACH ROW BEGIN
+ UPDATE cnt SET del=del+1;
+ END;
+ DELETE FROM t3 WHERE a<2;
+ SELECT * FROM t3;
+ }
+} {2 3 4}
+do_test delete-7.3 {
+ execsql {
+ SELECT * FROM cnt;
+ }
+} {1}
+do_test delete-7.4 {
+ execsql {
+ DELETE FROM t3;
+ SELECT * FROM t3;
+ }
+} {}
+do_test delete-7.5 {
+ execsql {
+ SELECT * FROM cnt;
+ }
+} {4}
+do_test delete-7.6 {
+ execsql {
+ INSERT INTO t3 VALUES(1);
+ INSERT INTO t3 SELECT a+1 FROM t3;
+ INSERT INTO t3 SELECT a+2 FROM t3;
+ CREATE TABLE t4 AS SELECT * FROM t3;
+ PRAGMA count_changes=ON;
+ DELETE FROM t3;
+ DELETE FROM t4;
+ }
+} {4 4}
+integrity_check delete-7.7
+
+# Make sure error messages are consistent when attempting to delete
+# from a read-only database. Ticket #304.
+#
+do_test delete-8.0 {
+ execsql {
+ PRAGMA count_changes=OFF;
+ INSERT INTO t3 VALUES(123);
+ SELECT * FROM t3;
+ }
+} {123}
+db close
+catch {file attributes test.db -permissions 0444}
+catch {file attributes test.db -readonly 1}
+sqlite db test.db
+do_test delete-8.1 {
+ catchsql {
+ DELETE FROM t3;
+ }
+} {1 {attempt to write a readonly database}}
+do_test delete-8.2 {
+ execsql {SELECT * FROM t3}
+} {123}
+do_test delete-8.3 {
+ catchsql {
+ DELETE FROM t3 WHERE 1;
+ }
+} {1 {attempt to write a readonly database}}
+do_test delete-8.4 {
+ execsql {SELECT * FROM t3}
+} {123}
+do_test delete-8.5 {
+ catchsql {
+ DELETE FROM t3 WHERE a<100;
+ }
+} {0 {}}
+do_test delete-8.6 {
+ execsql {SELECT * FROM t3}
+} {123}
+integrity_check delete-8.7
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/expr.test b/usr/src/cmd/svc/configd/sqlite/test/expr.test
new file mode 100644
index 0000000000..2f4fb26924
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/expr.test
@@ -0,0 +1,522 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing expressions.
+#
+# $Id: expr.test,v 1.31 2004/03/03 01:51:25 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Create a table to work with.
+#
+execsql {CREATE TABLE test1(i1 int, i2 int, r1 real, r2 real, t1 text, t2 text)}
+execsql {INSERT INTO test1 VALUES(1,2,1.1,2.2,'hello','world')}
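+# test_expr updates the single row of test1 to the given column settings,
+# evaluates the expression against that row, and rolls the change back so
+# every test starts from the same state.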
+proc test_expr {name settings expr result} {
+ do_test $name [format {
+ execsql {BEGIN; UPDATE test1 SET %s; SELECT %s FROM test1; ROLLBACK;}
+ } $settings $expr] $result
+}
+
+test_expr expr-1.1 {i1=10, i2=20} {i1+i2} 30
+test_expr expr-1.2 {i1=10, i2=20} {i1-i2} -10
+test_expr expr-1.3 {i1=10, i2=20} {i1*i2} 200
+test_expr expr-1.4 {i1=10, i2=20} {i1/i2} 0.5
+test_expr expr-1.5 {i1=10, i2=20} {i2/i1} 2
+test_expr expr-1.6 {i1=10, i2=20} {i2<i1} 0
+test_expr expr-1.7 {i1=10, i2=20} {i2<=i1} 0
+test_expr expr-1.8 {i1=10, i2=20} {i2>i1} 1
+test_expr expr-1.9 {i1=10, i2=20} {i2>=i1} 1
+test_expr expr-1.10 {i1=10, i2=20} {i2!=i1} 1
+test_expr expr-1.11 {i1=10, i2=20} {i2=i1} 0
+test_expr expr-1.12 {i1=10, i2=20} {i2<>i1} 1
+test_expr expr-1.13 {i1=10, i2=20} {i2==i1} 0
+test_expr expr-1.14 {i1=20, i2=20} {i2<i1} 0
+test_expr expr-1.15 {i1=20, i2=20} {i2<=i1} 1
+test_expr expr-1.16 {i1=20, i2=20} {i2>i1} 0
+test_expr expr-1.17 {i1=20, i2=20} {i2>=i1} 1
+test_expr expr-1.18 {i1=20, i2=20} {i2!=i1} 0
+test_expr expr-1.19 {i1=20, i2=20} {i2=i1} 1
+test_expr expr-1.20 {i1=20, i2=20} {i2<>i1} 0
+test_expr expr-1.21 {i1=20, i2=20} {i2==i1} 1
+test_expr expr-1.22 {i1=1, i2=2, r1=3.0} {i1+i2*r1} {7}
+test_expr expr-1.23 {i1=1, i2=2, r1=3.0} {(i1+i2)*r1} {9}
+test_expr expr-1.24 {i1=1, i2=2} {min(i1,i2,i1+i2,i1-i2)} {-1}
+test_expr expr-1.25 {i1=1, i2=2} {max(i1,i2,i1+i2,i1-i2)} {3}
+test_expr expr-1.26 {i1=1, i2=2} {max(i1,i2,i1+i2,i1-i2)} {3}
+test_expr expr-1.27 {i1=1, i2=2} {i1==1 AND i2=2} {1}
+test_expr expr-1.28 {i1=1, i2=2} {i1=2 AND i2=1} {0}
+test_expr expr-1.29 {i1=1, i2=2} {i1=1 AND i2=1} {0}
+test_expr expr-1.30 {i1=1, i2=2} {i1=2 AND i2=2} {0}
+test_expr expr-1.31 {i1=1, i2=2} {i1==1 OR i2=2} {1}
+test_expr expr-1.32 {i1=1, i2=2} {i1=2 OR i2=1} {0}
+test_expr expr-1.33 {i1=1, i2=2} {i1=1 OR i2=1} {1}
+test_expr expr-1.34 {i1=1, i2=2} {i1=2 OR i2=2} {1}
+test_expr expr-1.35 {i1=1, i2=2} {i1-i2=-1} {1}
+test_expr expr-1.36 {i1=1, i2=0} {not i1} {0}
+test_expr expr-1.37 {i1=1, i2=0} {not i2} {1}
+test_expr expr-1.38 {i1=1} {-i1} {-1}
+test_expr expr-1.39 {i1=1} {+i1} {1}
+test_expr expr-1.40 {i1=1, i2=2} {+(i2+i1)} {3}
+test_expr expr-1.41 {i1=1, i2=2} {-(i2+i1)} {-3}
+test_expr expr-1.42 {i1=1, i2=2} {i1|i2} {3}
+test_expr expr-1.42b {i1=1, i2=2} {4|2} {6}
+test_expr expr-1.43 {i1=1, i2=2} {i1&i2} {0}
+test_expr expr-1.43b {i1=1, i2=2} {4&5} {4}
+test_expr expr-1.44 {i1=1} {~i1} {-2}
+test_expr expr-1.45 {i1=1, i2=3} {i1<<i2} {8}
+test_expr expr-1.46 {i1=32, i2=3} {i1>>i2} {4}
+test_expr expr-1.47 {i1=9999999999, i2=8888888888} {i1<i2} 0
+test_expr expr-1.48 {i1=9999999999, i2=8888888888} {i1=i2} 0
+test_expr expr-1.49 {i1=9999999999, i2=8888888888} {i1>i2} 1
+test_expr expr-1.50 {i1=99999999999, i2=99999999998} {i1<i2} 0
+test_expr expr-1.51 {i1=99999999999, i2=99999999998} {i1=i2} 0
+test_expr expr-1.52 {i1=99999999999, i2=99999999998} {i1>i2} 1
+test_expr expr-1.53 {i1=099999999999, i2=99999999999} {i1<i2} 0
+test_expr expr-1.54 {i1=099999999999, i2=99999999999} {i1=i2} 1
+test_expr expr-1.55 {i1=099999999999, i2=99999999999} {i1>i2} 0
+test_expr expr-1.56 {i1=25, i2=11} {i1%i2} 3
+test_expr expr-1.58 {i1=NULL, i2=1} {coalesce(i1+i2,99)} 99
+test_expr expr-1.59 {i1=1, i2=NULL} {coalesce(i1+i2,99)} 99
+test_expr expr-1.60 {i1=NULL, i2=NULL} {coalesce(i1+i2,99)} 99
+test_expr expr-1.61 {i1=NULL, i2=1} {coalesce(i1-i2,99)} 99
+test_expr expr-1.62 {i1=1, i2=NULL} {coalesce(i1-i2,99)} 99
+test_expr expr-1.63 {i1=NULL, i2=NULL} {coalesce(i1-i2,99)} 99
+test_expr expr-1.64 {i1=NULL, i2=1} {coalesce(i1*i2,99)} 99
+test_expr expr-1.65 {i1=1, i2=NULL} {coalesce(i1*i2,99)} 99
+test_expr expr-1.66 {i1=NULL, i2=NULL} {coalesce(i1*i2,99)} 99
+test_expr expr-1.67 {i1=NULL, i2=1} {coalesce(i1/i2,99)} 99
+test_expr expr-1.68 {i1=1, i2=NULL} {coalesce(i1/i2,99)} 99
+test_expr expr-1.69 {i1=NULL, i2=NULL} {coalesce(i1/i2,99)} 99
+test_expr expr-1.70 {i1=NULL, i2=1} {coalesce(i1<i2,99)} 99
+test_expr expr-1.71 {i1=1, i2=NULL} {coalesce(i1>i2,99)} 99
+test_expr expr-1.72 {i1=NULL, i2=NULL} {coalesce(i1<=i2,99)} 99
+test_expr expr-1.73 {i1=NULL, i2=1} {coalesce(i1>=i2,99)} 99
+test_expr expr-1.74 {i1=1, i2=NULL} {coalesce(i1!=i2,99)} 99
+test_expr expr-1.75 {i1=NULL, i2=NULL} {coalesce(i1==i2,99)} 99
+test_expr expr-1.76 {i1=NULL, i2=NULL} {coalesce(not i1,99)} 99
+test_expr expr-1.77 {i1=NULL, i2=NULL} {coalesce(-i1,99)} 99
+test_expr expr-1.78 {i1=NULL, i2=NULL} {coalesce(i1 IS NULL AND i2=5,99)} 99
+test_expr expr-1.79 {i1=NULL, i2=NULL} {coalesce(i1 IS NULL OR i2=5,99)} 1
+test_expr expr-1.80 {i1=NULL, i2=NULL} {coalesce(i1=5 AND i2 IS NULL,99)} 99
+test_expr expr-1.81 {i1=NULL, i2=NULL} {coalesce(i1=5 OR i2 IS NULL,99)} 1
+test_expr expr-1.82 {i1=NULL, i2=3} {coalesce(min(i1,i2,1),99)} 99
+test_expr expr-1.83 {i1=NULL, i2=3} {coalesce(max(i1,i2,1),99)} 99
+test_expr expr-1.84 {i1=3, i2=NULL} {coalesce(min(i1,i2,1),99)} 99
+test_expr expr-1.85 {i1=3, i2=NULL} {coalesce(max(i1,i2,1),99)} 99
+test_expr expr-1.86 {i1=3, i2=8} {5 between i1 and i2} 1
+test_expr expr-1.87 {i1=3, i2=8} {5 not between i1 and i2} 0
+test_expr expr-1.88 {i1=3, i2=8} {55 between i1 and i2} 0
+test_expr expr-1.89 {i1=3, i2=8} {55 not between i1 and i2} 1
+test_expr expr-1.90 {i1=3, i2=NULL} {5 between i1 and i2} {{}}
+test_expr expr-1.91 {i1=3, i2=NULL} {5 not between i1 and i2} {{}}
+test_expr expr-1.92 {i1=3, i2=NULL} {2 between i1 and i2} 0
+test_expr expr-1.93 {i1=3, i2=NULL} {2 not between i1 and i2} 1
+test_expr expr-1.94 {i1=NULL, i2=8} {2 between i1 and i2} {{}}
+test_expr expr-1.95 {i1=NULL, i2=8} {2 not between i1 and i2} {{}}
+test_expr expr-1.94 {i1=NULL, i2=8} {55 between i1 and i2} 0
+test_expr expr-1.95 {i1=NULL, i2=8} {55 not between i1 and i2} 1
+test_expr expr-1.96 {i1=NULL, i2=3} {coalesce(i1<<i2,99)} 99
+test_expr expr-1.97 {i1=32, i2=NULL} {coalesce(i1>>i2,99)} 99
+test_expr expr-1.98 {i1=NULL, i2=NULL} {coalesce(i1|i2,99)} 99
+test_expr expr-1.99 {i1=32, i2=NULL} {coalesce(i1&i2,99)} 99
+test_expr expr-1.100 {i1=1, i2=''} {i1=i2} 0
+test_expr expr-1.101 {i1=0, i2=''} {i1=i2} 0
+
+test_expr expr-2.1 {r1=1.23, r2=2.34} {r1+r2} 3.57
+test_expr expr-2.2 {r1=1.23, r2=2.34} {r1-r2} -1.11
+test_expr expr-2.3 {r1=1.23, r2=2.34} {r1*r2} 2.8782
+test_expr expr-2.4 {r1=1.23, r2=2.34} {r1/r2} 0.525641025641026
+test_expr expr-2.5 {r1=1.23, r2=2.34} {r2/r1} 1.90243902439024
+test_expr expr-2.6 {r1=1.23, r2=2.34} {r2<r1} 0
+test_expr expr-2.7 {r1=1.23, r2=2.34} {r2<=r1} 0
+test_expr expr-2.8 {r1=1.23, r2=2.34} {r2>r1} 1
+test_expr expr-2.9 {r1=1.23, r2=2.34} {r2>=r1} 1
+test_expr expr-2.10 {r1=1.23, r2=2.34} {r2!=r1} 1
+test_expr expr-2.11 {r1=1.23, r2=2.34} {r2=r1} 0
+test_expr expr-2.12 {r1=1.23, r2=2.34} {r2<>r1} 1
+test_expr expr-2.13 {r1=1.23, r2=2.34} {r2==r1} 0
+test_expr expr-2.14 {r1=2.34, r2=2.34} {r2<r1} 0
+test_expr expr-2.15 {r1=2.34, r2=2.34} {r2<=r1} 1
+test_expr expr-2.16 {r1=2.34, r2=2.34} {r2>r1} 0
+test_expr expr-2.17 {r1=2.34, r2=2.34} {r2>=r1} 1
+test_expr expr-2.18 {r1=2.34, r2=2.34} {r2!=r1} 0
+test_expr expr-2.19 {r1=2.34, r2=2.34} {r2=r1} 1
+test_expr expr-2.20 {r1=2.34, r2=2.34} {r2<>r1} 0
+test_expr expr-2.21 {r1=2.34, r2=2.34} {r2==r1} 1
+test_expr expr-2.22 {r1=1.23, r2=2.34} {min(r1,r2,r1+r2,r1-r2)} {-1.11}
+test_expr expr-2.23 {r1=1.23, r2=2.34} {max(r1,r2,r1+r2,r1-r2)} {3.57}
+test_expr expr-2.24 {r1=25.0, r2=11.0} {r1%r2} 3
+test_expr expr-2.25 {r1=1.23, r2=NULL} {coalesce(r1+r2,99.0)} 99.0
+
+test_expr expr-3.1 {t1='abc', t2='xyz'} {t1<t2} 1
+test_expr expr-3.2 {t1='xyz', t2='abc'} {t1<t2} 0
+test_expr expr-3.3 {t1='abc', t2='abc'} {t1<t2} 0
+test_expr expr-3.4 {t1='abc', t2='xyz'} {t1<=t2} 1
+test_expr expr-3.5 {t1='xyz', t2='abc'} {t1<=t2} 0
+test_expr expr-3.6 {t1='abc', t2='abc'} {t1<=t2} 1
+test_expr expr-3.7 {t1='abc', t2='xyz'} {t1>t2} 0
+test_expr expr-3.8 {t1='xyz', t2='abc'} {t1>t2} 1
+test_expr expr-3.9 {t1='abc', t2='abc'} {t1>t2} 0
+test_expr expr-3.10 {t1='abc', t2='xyz'} {t1>=t2} 0
+test_expr expr-3.11 {t1='xyz', t2='abc'} {t1>=t2} 1
+test_expr expr-3.12 {t1='abc', t2='abc'} {t1>=t2} 1
+test_expr expr-3.13 {t1='abc', t2='xyz'} {t1=t2} 0
+test_expr expr-3.14 {t1='xyz', t2='abc'} {t1=t2} 0
+test_expr expr-3.15 {t1='abc', t2='abc'} {t1=t2} 1
+test_expr expr-3.16 {t1='abc', t2='xyz'} {t1==t2} 0
+test_expr expr-3.17 {t1='xyz', t2='abc'} {t1==t2} 0
+test_expr expr-3.18 {t1='abc', t2='abc'} {t1==t2} 1
+test_expr expr-3.19 {t1='abc', t2='xyz'} {t1<>t2} 1
+test_expr expr-3.20 {t1='xyz', t2='abc'} {t1<>t2} 1
+test_expr expr-3.21 {t1='abc', t2='abc'} {t1<>t2} 0
+test_expr expr-3.22 {t1='abc', t2='xyz'} {t1!=t2} 1
+test_expr expr-3.23 {t1='xyz', t2='abc'} {t1!=t2} 1
+test_expr expr-3.24 {t1='abc', t2='abc'} {t1!=t2} 0
+test_expr expr-3.25 {t1=NULL, t2='hi'} {t1 isnull} 1
+test_expr expr-3.25b {t1=NULL, t2='hi'} {t1 is null} 1
+test_expr expr-3.26 {t1=NULL, t2='hi'} {t2 isnull} 0
+test_expr expr-3.27 {t1=NULL, t2='hi'} {t1 notnull} 0
+test_expr expr-3.28 {t1=NULL, t2='hi'} {t2 notnull} 1
+test_expr expr-3.28b {t1=NULL, t2='hi'} {t2 is not null} 1
+test_expr expr-3.29 {t1='xyz', t2='abc'} {t1||t2} {xyzabc}
+test_expr expr-3.30 {t1=NULL, t2='abc'} {t1||t2} {{}}
+test_expr expr-3.31 {t1='xyz', t2=NULL} {t1||t2} {{}}
+test_expr expr-3.32 {t1='xyz', t2='abc'} {t1||' hi '||t2} {{xyz hi abc}}
+test_expr expr-3.33 {t1='abc', t2=NULL} {coalesce(t1<t2,99)} 99
+test_expr expr-3.34 {t1='abc', t2=NULL} {coalesce(t2<t1,99)} 99
+test_expr expr-3.35 {t1='abc', t2=NULL} {coalesce(t1>t2,99)} 99
+test_expr expr-3.36 {t1='abc', t2=NULL} {coalesce(t2>t1,99)} 99
+test_expr expr-3.37 {t1='abc', t2=NULL} {coalesce(t1<=t2,99)} 99
+test_expr expr-3.38 {t1='abc', t2=NULL} {coalesce(t2<=t1,99)} 99
+test_expr expr-3.39 {t1='abc', t2=NULL} {coalesce(t1>=t2,99)} 99
+test_expr expr-3.40 {t1='abc', t2=NULL} {coalesce(t2>=t1,99)} 99
+test_expr expr-3.41 {t1='abc', t2=NULL} {coalesce(t1==t2,99)} 99
+test_expr expr-3.42 {t1='abc', t2=NULL} {coalesce(t2==t1,99)} 99
+test_expr expr-3.43 {t1='abc', t2=NULL} {coalesce(t1!=t2,99)} 99
+test_expr expr-3.44 {t1='abc', t2=NULL} {coalesce(t2!=t1,99)} 99
+
+
+test_expr expr-4.1 {t1='abc', t2='Abc'} {t1<t2} 0
+test_expr expr-4.2 {t1='abc', t2='Abc'} {t1>t2} 1
+test_expr expr-4.3 {t1='abc', t2='Bbc'} {t1<t2} 0
+test_expr expr-4.4 {t1='abc', t2='Bbc'} {t1>t2} 1
+test_expr expr-4.5 {t1='0', t2='0.0'} {t1==t2} 0
+test_expr expr-4.6 {t1='0.000', t2='0.0'} {t1==t2} 0
+test_expr expr-4.7 {t1=' 0.000', t2=' 0.0'} {t1==t2} 0
+test_expr expr-4.8 {t1='0.0', t2='abc'} {t1<t2} 1
+test_expr expr-4.9 {t1='0.0', t2='abc'} {t1==t2} 0
+test_expr expr-4.10 {r1='0.0', r2='abc'} {r1>r2} 0
+test_expr expr-4.11 {r1='abc', r2='Abc'} {r1<r2} 0
+test_expr expr-4.12 {r1='abc', r2='Abc'} {r1>r2} 1
+test_expr expr-4.13 {r1='abc', r2='Bbc'} {r1<r2} 0
+test_expr expr-4.14 {r1='abc', r2='Bbc'} {r1>r2} 1
+test_expr expr-4.15 {r1='0', r2='0.0'} {r1==r2} 1
+test_expr expr-4.16 {r1='0.000', r2='0.0'} {r1==r2} 1
+test_expr expr-4.17 {r1=' 0.000', r2=' 0.0'} {r1==r2} 0
+test_expr expr-4.18 {r1='0.0', r2='abc'} {r1<r2} 1
+test_expr expr-4.19 {r1='0.0', r2='abc'} {r1==r2} 0
+test_expr expr-4.20 {r1='0.0', r2='abc'} {r1>r2} 0
+
+test_expr expr-5.1 {t1='abc', t2='xyz'} {t1 LIKE t2} 0
+test_expr expr-5.2 {t1='abc', t2='ABC'} {t1 LIKE t2} 1
+test_expr expr-5.3 {t1='abc', t2='A_C'} {t1 LIKE t2} 1
+test_expr expr-5.4 {t1='abc', t2='abc_'} {t1 LIKE t2} 0
+test_expr expr-5.5 {t1='abc', t2='A%C'} {t1 LIKE t2} 1
+test_expr expr-5.5b {t1='ac', t2='A%C'} {t1 LIKE t2} 1
+test_expr expr-5.6 {t1='abxyzzyc', t2='A%C'} {t1 LIKE t2} 1
+test_expr expr-5.7 {t1='abxyzzy', t2='A%C'} {t1 LIKE t2} 0
+test_expr expr-5.8 {t1='abxyzzycx', t2='A%C'} {t1 LIKE t2} 0
+test_expr expr-5.8b {t1='abxyzzycy', t2='A%CX'} {t1 LIKE t2} 0
+test_expr expr-5.9 {t1='abc', t2='A%_C'} {t1 LIKE t2} 1
+test_expr expr-5.9b {t1='ac', t2='A%_C'} {t1 LIKE t2} 0
+test_expr expr-5.10 {t1='abxyzzyc', t2='A%_C'} {t1 LIKE t2} 1
+test_expr expr-5.11 {t1='abc', t2='xyz'} {t1 NOT LIKE t2} 1
+test_expr expr-5.12 {t1='abc', t2='ABC'} {t1 NOT LIKE t2} 0
+
+# The following tests only work on versions of TCL that support
+# Unicode and with SQLite configured for UTF-8 support.
+#
+if {"\u1234"!="u1234" && [sqlite -encoding]=="UTF-8"} {
+ test_expr expr-5.13 "t1='a\u0080c', t2='A_C'" {t1 LIKE t2} 1
+ test_expr expr-5.14 "t1='a\u07FFc', t2='A_C'" {t1 LIKE t2} 1
+ test_expr expr-5.15 "t1='a\u0800c', t2='A_C'" {t1 LIKE t2} 1
+ test_expr expr-5.16 "t1='a\uFFFFc', t2='A_C'" {t1 LIKE t2} 1
+ test_expr expr-5.17 "t1='a\u0080', t2='A__'" {t1 LIKE t2} 0
+ test_expr expr-5.18 "t1='a\u07FF', t2='A__'" {t1 LIKE t2} 0
+ test_expr expr-5.19 "t1='a\u0800', t2='A__'" {t1 LIKE t2} 0
+ test_expr expr-5.20 "t1='a\uFFFF', t2='A__'" {t1 LIKE t2} 0
+ test_expr expr-5.21 "t1='ax\uABCD', t2='A_\uABCD'" {t1 LIKE t2} 1
+ test_expr expr-5.22 "t1='ax\u1234', t2='A%\u1234'" {t1 LIKE t2} 1
+ test_expr expr-5.23 "t1='ax\uFEDC', t2='A_%'" {t1 LIKE t2} 1
+ test_expr expr-5.24 "t1='ax\uFEDCy\uFEDC', t2='A%\uFEDC'" {t1 LIKE t2} 1
+}
+
+# These tests are for when SQLite assumes iso8859 characters.
+#
+if {[sqlite -encoding]=="iso8859"} {
+ set go 1
+ if {[info command encoding]!=""} {
+ if {[catch {encoding system iso8859-1} msg]} {
+ puts "skipping tests of LIKE operator: $msg"
+ set go 0
+ }
+ }
+ if {$go} {
+ test_expr expr-5.50 "t1='a\266c', t2='A_C'" {t1 LIKE t2} 1
+ test_expr expr-5.51 "t1='a\347', t2='A_'" {t1 LIKE t2} 1
+ test_expr expr-5.52 "t1='ax\351', t2='A_\351'" {t1 LIKE t2} 1
+ test_expr expr-5.53 "t1='ax\241', t2='A_%'" {t1 LIKE t2} 1
+ }
+}
+test_expr expr-5.54 {t1='abc', t2=NULL} {t1 LIKE t2} {{}}
+test_expr expr-5.55 {t1='abc', t2=NULL} {t1 NOT LIKE t2} {{}}
+test_expr expr-5.56 {t1='abc', t2=NULL} {t2 LIKE t1} {{}}
+test_expr expr-5.57 {t1='abc', t2=NULL} {t2 NOT LIKE t1} {{}}
+
+
+test_expr expr-6.1 {t1='abc', t2='xyz'} {t1 GLOB t2} 0
+test_expr expr-6.2 {t1='abc', t2='ABC'} {t1 GLOB t2} 0
+test_expr expr-6.3 {t1='abc', t2='A?C'} {t1 GLOB t2} 0
+test_expr expr-6.4 {t1='abc', t2='a?c'} {t1 GLOB t2} 1
+test_expr expr-6.5 {t1='abc', t2='abc?'} {t1 GLOB t2} 0
+test_expr expr-6.6 {t1='abc', t2='A*C'} {t1 GLOB t2} 0
+test_expr expr-6.7 {t1='abc', t2='a*c'} {t1 GLOB t2} 1
+test_expr expr-6.8 {t1='abxyzzyc', t2='a*c'} {t1 GLOB t2} 1
+test_expr expr-6.9 {t1='abxyzzy', t2='a*c'} {t1 GLOB t2} 0
+test_expr expr-6.10 {t1='abxyzzycx', t2='a*c'} {t1 GLOB t2} 0
+test_expr expr-6.11 {t1='abc', t2='xyz'} {t1 NOT GLOB t2} 1
+test_expr expr-6.12 {t1='abc', t2='abc'} {t1 NOT GLOB t2} 0
+test_expr expr-6.13 {t1='abc', t2='a[bx]c'} {t1 GLOB t2} 1
+test_expr expr-6.14 {t1='abc', t2='a[cx]c'} {t1 GLOB t2} 0
+test_expr expr-6.15 {t1='abc', t2='a[a-d]c'} {t1 GLOB t2} 1
+test_expr expr-6.16 {t1='abc', t2='a[^a-d]c'} {t1 GLOB t2} 0
+test_expr expr-6.17 {t1='abc', t2='a[A-Dc]c'} {t1 GLOB t2} 0
+test_expr expr-6.18 {t1='abc', t2='a[^A-Dc]c'} {t1 GLOB t2} 1
+test_expr expr-6.19 {t1='abc', t2='a[]b]c'} {t1 GLOB t2} 1
+test_expr expr-6.20 {t1='abc', t2='a[^]b]c'} {t1 GLOB t2} 0
+test_expr expr-6.21a {t1='abcdefg', t2='a*[de]g'} {t1 GLOB t2} 0
+test_expr expr-6.21b {t1='abcdefg', t2='a*[df]g'} {t1 GLOB t2} 1
+test_expr expr-6.21c {t1='abcdefg', t2='a*[d-h]g'} {t1 GLOB t2} 1
+test_expr expr-6.21d {t1='abcdefg', t2='a*[b-e]g'} {t1 GLOB t2} 0
+test_expr expr-6.22a {t1='abcdefg', t2='a*[^de]g'} {t1 GLOB t2} 1
+test_expr expr-6.22b {t1='abcdefg', t2='a*[^def]g'} {t1 GLOB t2} 0
+test_expr expr-6.23 {t1='abcdefg', t2='a*?g'} {t1 GLOB t2} 1
+test_expr expr-6.24 {t1='ac', t2='a*c'} {t1 GLOB t2} 1
+test_expr expr-6.25 {t1='ac', t2='a*?c'} {t1 GLOB t2} 0
+
+
+# These tests only work on versions of TCL that support Unicode
+#
+if {"\u1234"!="u1234" && [sqlite -encoding]=="UTF-8"} {
+ test_expr expr-6.26 "t1='a\u0080c', t2='a?c'" {t1 GLOB t2} 1
+ test_expr expr-6.27 "t1='a\u07ffc', t2='a?c'" {t1 GLOB t2} 1
+ test_expr expr-6.28 "t1='a\u0800c', t2='a?c'" {t1 GLOB t2} 1
+ test_expr expr-6.29 "t1='a\uffffc', t2='a?c'" {t1 GLOB t2} 1
+ test_expr expr-6.30 "t1='a\u1234', t2='a?'" {t1 GLOB t2} 1
+ test_expr expr-6.31 "t1='a\u1234', t2='a??'" {t1 GLOB t2} 0
+ test_expr expr-6.32 "t1='ax\u1234', t2='a?\u1234'" {t1 GLOB t2} 1
+ test_expr expr-6.33 "t1='ax\u1234', t2='a*\u1234'" {t1 GLOB t2} 1
+ test_expr expr-6.34 "t1='ax\u1234y\u1234', t2='a*\u1234'" {t1 GLOB t2} 1
+ test_expr expr-6.35 "t1='a\u1234b', t2='a\[x\u1234y\]b'" {t1 GLOB t2} 1
+ test_expr expr-6.36 "t1='a\u1234b', t2='a\[\u1233-\u1235\]b'" {t1 GLOB t2} 1
+ test_expr expr-6.37 "t1='a\u1234b', t2='a\[\u1234-\u124f\]b'" {t1 GLOB t2} 1
+ test_expr expr-6.38 "t1='a\u1234b', t2='a\[\u1235-\u124f\]b'" {t1 GLOB t2} 0
+ test_expr expr-6.39 "t1='a\u1234b', t2='a\[a-\u1235\]b'" {t1 GLOB t2} 1
+ test_expr expr-6.40 "t1='a\u1234b', t2='a\[a-\u1234\]b'" {t1 GLOB t2} 1
+ test_expr expr-6.41 "t1='a\u1234b', t2='a\[a-\u1233\]b'" {t1 GLOB t2} 0
+}
+
+test_expr expr-6.51 {t1='ABC', t2='xyz'} {t1 GLOB t2} 0
+test_expr expr-6.52 {t1='ABC', t2='abc'} {t1 GLOB t2} 0
+test_expr expr-6.53 {t1='ABC', t2='a?c'} {t1 GLOB t2} 0
+test_expr expr-6.54 {t1='ABC', t2='A?C'} {t1 GLOB t2} 1
+test_expr expr-6.55 {t1='ABC', t2='abc?'} {t1 GLOB t2} 0
+test_expr expr-6.56 {t1='ABC', t2='a*c'} {t1 GLOB t2} 0
+test_expr expr-6.57 {t1='ABC', t2='A*C'} {t1 GLOB t2} 1
+test_expr expr-6.58 {t1='ABxyzzyC', t2='A*C'} {t1 GLOB t2} 1
+test_expr expr-6.59 {t1='ABxyzzy', t2='A*C'} {t1 GLOB t2} 0
+test_expr expr-6.60 {t1='ABxyzzyCx', t2='A*C'} {t1 GLOB t2} 0
+test_expr expr-6.61 {t1='ABC', t2='xyz'} {t1 NOT GLOB t2} 1
+test_expr expr-6.62 {t1='ABC', t2='ABC'} {t1 NOT GLOB t2} 0
+test_expr expr-6.63 {t1='ABC', t2='A[Bx]C'} {t1 GLOB t2} 1
+test_expr expr-6.64 {t1='ABC', t2='A[Cx]C'} {t1 GLOB t2} 0
+test_expr expr-6.65 {t1='ABC', t2='A[A-D]C'} {t1 GLOB t2} 1
+test_expr expr-6.66 {t1='ABC', t2='A[^A-D]C'} {t1 GLOB t2} 0
+test_expr expr-6.67 {t1='ABC', t2='A[a-dC]C'} {t1 GLOB t2} 0
+test_expr expr-6.68 {t1='ABC', t2='A[^a-dC]C'} {t1 GLOB t2} 1
+test_expr expr-6.69a {t1='ABC', t2='A[]B]C'} {t1 GLOB t2} 1
+test_expr expr-6.69b {t1='A]C', t2='A[]B]C'} {t1 GLOB t2} 1
+test_expr expr-6.70a {t1='ABC', t2='A[^]B]C'} {t1 GLOB t2} 0
+test_expr expr-6.70b {t1='AxC', t2='A[^]B]C'} {t1 GLOB t2} 1
+test_expr expr-6.70c {t1='A]C', t2='A[^]B]C'} {t1 GLOB t2} 0
+test_expr expr-6.71 {t1='ABCDEFG', t2='A*[DE]G'} {t1 GLOB t2} 0
+test_expr expr-6.72 {t1='ABCDEFG', t2='A*[^DE]G'} {t1 GLOB t2} 1
+test_expr expr-6.73 {t1='ABCDEFG', t2='A*?G'} {t1 GLOB t2} 1
+test_expr expr-6.74 {t1='AC', t2='A*C'} {t1 GLOB t2} 1
+test_expr expr-6.75 {t1='AC', t2='A*?C'} {t1 GLOB t2} 0
+
+# These tests are for when SQLite assumes iso8859 characters.
+#
+if {[sqlite -encoding]=="iso8859"} {
+ set go 1
+ if {[info command encoding]!=""} {
+ if {[catch {encoding system iso8859-1} msg]} {
+ puts "skipping tests of GLOB operator: $msg"
+ set go 0
+ }
+ }
+ if {$go} {
+ test_expr expr-6.50 "t1='a\266c', t2='a?c'" {t1 GLOB t2} 1
+ test_expr expr-6.51 "t1='a\266', t2='a?'" {t1 GLOB t2} 1
+ test_expr expr-6.52 "t1='a\266', t2='a??'" {t1 GLOB t2} 0
+ test_expr expr-6.53 "t1='ax\266', t2='a??'" {t1 GLOB t2} 1
+ test_expr expr-6.54 "t1='ax\266', t2='a?\266'" {t1 GLOB t2} 1
+ test_expr expr-6.55 "t1='ax\266y\266', t2='a*\266'" {t1 GLOB t2} 1
+ test_expr expr-6.56 "t1='a\266b', t2='a\[x\266y\]b'" {t1 GLOB t2} 1
+ test_expr expr-6.57 "t1='a\266b', t2='a\[\260-\270\]b'" {t1 GLOB t2} 1
+ test_expr expr-6.58 "t1='a\266b', t2='a\[\266-\270\]b'" {t1 GLOB t2} 1
+ test_expr expr-6.59 "t1='a\266b', t2='a\[\267-\270\]b'" {t1 GLOB t2} 0
+ test_expr expr-6.60 "t1='a\266b', t2='a\[x-\267\]b'" {t1 GLOB t2} 1
+ test_expr expr-6.61 "t1='a\266b', t2='a\[x-\266\]b'" {t1 GLOB t2} 1
+ test_expr expr-6.62 "t1='a\266b', t2='a\[x-\265\]b'" {t1 GLOB t2} 0
+ }
+}
+test_expr expr-6.63 {t1=NULL, t2='a*?c'} {t1 GLOB t2} {{}}
+test_expr expr-6.64 {t1='ac', t2=NULL} {t1 GLOB t2} {{}}
+test_expr expr-6.65 {t1=NULL, t2='a*?c'} {t1 NOT GLOB t2} {{}}
+test_expr expr-6.66 {t1='ac', t2=NULL} {t1 NOT GLOB t2} {{}}
+
+test_expr expr-case.1 {i1=1, i2=2} \
+ {CASE WHEN i1 = i2 THEN 'eq' ELSE 'ne' END} ne
+test_expr expr-case.2 {i1=2, i2=2} \
+ {CASE WHEN i1 = i2 THEN 'eq' ELSE 'ne' END} eq
+test_expr expr-case.3 {i1=NULL, i2=2} \
+ {CASE WHEN i1 = i2 THEN 'eq' ELSE 'ne' END} ne
+test_expr expr-case.4 {i1=2, i2=NULL} \
+ {CASE WHEN i1 = i2 THEN 'eq' ELSE 'ne' END} ne
+test_expr expr-case.5 {i1=2} \
+ {CASE i1 WHEN 1 THEN 'one' WHEN 2 THEN 'two' ELSE 'error' END} two
+test_expr expr-case.6 {i1=1} \
+ {CASE i1 WHEN 1 THEN 'one' WHEN NULL THEN 'two' ELSE 'error' END} one
+test_expr expr-case.7 {i1=2} \
+ {CASE i1 WHEN 1 THEN 'one' WHEN NULL THEN 'two' ELSE 'error' END} error
+test_expr expr-case.8 {i1=3} \
+ {CASE i1 WHEN 1 THEN 'one' WHEN NULL THEN 'two' ELSE 'error' END} error
+test_expr expr-case.9 {i1=3} \
+ {CASE i1 WHEN 1 THEN 'one' WHEN 2 THEN 'two' ELSE 'error' END} error
+test_expr expr-case.10 {i1=3} \
+ {CASE i1 WHEN 1 THEN 'one' WHEN 2 THEN 'two' END} {{}}
+test_expr expr-case.11 {i1=null} \
+ {CASE i1 WHEN 1 THEN 'one' WHEN 2 THEN 'two' ELSE 3 END} 3
+test_expr expr-case.12 {i1=1} \
+ {CASE i1 WHEN 1 THEN null WHEN 2 THEN 'two' ELSE 3 END} {{}}
+test_expr expr-case.13 {i1=7} \
+ { CASE WHEN i1 < 5 THEN 'low'
+ WHEN i1 < 10 THEN 'medium'
+ WHEN i1 < 15 THEN 'high' ELSE 'error' END} medium
+
+
+# The sqliteExprIfFalse and sqliteExprIfTrue routines are only
+# executed as part of a WHERE clause. Create a table suitable
+# for testing these functions.
+#
+execsql {DROP TABLE test1}
+execsql {CREATE TABLE test1(a int, b int);}
+for {set i 1} {$i<=20} {incr i} {
+ execsql "INSERT INTO test1 VALUES($i,[expr {int(pow(2,$i))}])"
+}
+execsql "INSERT INTO test1 VALUES(NULL,0)"
+do_test expr-7.1 {
+ execsql {SELECT * FROM test1 ORDER BY a}
+} {{} 0 1 2 2 4 3 8 4 16 5 32 6 64 7 128 8 256 9 512 10 1024 11 2048 12 4096 13 8192 14 16384 15 32768 16 65536 17 131072 18 262144 19 524288 20 1048576}
+
+proc test_expr2 {name expr result} {
+ do_test $name [format {
+ execsql {SELECT a FROM test1 WHERE %s ORDER BY a}
+ } $expr] $result
+}
+
+test_expr2 expr-7.2 {a<10 AND a>8} {9}
+test_expr2 expr-7.3 {a<=10 AND a>=8} {8 9 10}
+test_expr2 expr-7.4 {a>=8 AND a<=10} {8 9 10}
+test_expr2 expr-7.5 {a>=20 OR a<=1} {1 20}
+test_expr2 expr-7.6 {b!=4 AND a<=3} {1 3}
+test_expr2 expr-7.7 {b==8 OR b==16 OR b==32} {3 4 5}
+test_expr2 expr-7.8 {NOT b<>8 OR b==1024} {3 10}
+test_expr2 expr-7.9 {b LIKE '10%'} {10 20}
+test_expr2 expr-7.10 {b LIKE '_4'} {6}
+test_expr2 expr-7.11 {a GLOB '1?'} {10 11 12 13 14 15 16 17 18 19}
+test_expr2 expr-7.12 {b GLOB '1*4'} {10 14}
+test_expr2 expr-7.13 {b GLOB '*1[456]'} {4}
+test_expr2 expr-7.14 {a ISNULL} {{}}
+test_expr2 expr-7.15 {a NOTNULL AND a<3} {1 2}
+test_expr2 expr-7.16 {a AND a<3} {1 2}
+test_expr2 expr-7.17 {NOT a} {}
+test_expr2 expr-7.18 {a==11 OR (b>1000 AND b<2000)} {10 11}
+test_expr2 expr-7.19 {a<=1 OR a>=20} {1 20}
+test_expr2 expr-7.20 {a<1 OR a>20} {}
+test_expr2 expr-7.21 {a>19 OR a<1} {20}
+test_expr2 expr-7.22 {a!=1 OR a=100} \
+ {2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20}
+test_expr2 expr-7.23 {(a notnull AND a<4) OR a==8} {1 2 3 8}
+test_expr2 expr-7.24 {a LIKE '2_' OR a==8} {8 20}
+test_expr2 expr-7.25 {a GLOB '2?' OR a==8} {8 20}
+test_expr2 expr-7.26 {a isnull OR a=8} {{} 8}
+test_expr2 expr-7.27 {a notnull OR a=8} \
+ {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20}
+test_expr2 expr-7.28 {a<0 OR b=0} {{}}
+test_expr2 expr-7.29 {b=0 OR a<0} {{}}
+test_expr2 expr-7.30 {a<0 AND b=0} {}
+test_expr2 expr-7.31 {b=0 AND a<0} {}
+test_expr2 expr-7.32 {a IS NULL AND (a<0 OR b=0)} {{}}
+test_expr2 expr-7.33 {a IS NULL AND (b=0 OR a<0)} {{}}
+test_expr2 expr-7.34 {a IS NULL AND (a<0 AND b=0)} {}
+test_expr2 expr-7.35 {a IS NULL AND (b=0 AND a<0)} {}
+test_expr2 expr-7.32 {(a<0 OR b=0) AND a IS NULL} {{}}
+test_expr2 expr-7.33 {(b=0 OR a<0) AND a IS NULL} {{}}
+test_expr2 expr-7.34 {(a<0 AND b=0) AND a IS NULL} {}
+test_expr2 expr-7.35 {(b=0 AND a<0) AND a IS NULL} {}
+test_expr2 expr-7.36 {a<2 OR (a<0 OR b=0)} {{} 1}
+test_expr2 expr-7.37 {a<2 OR (b=0 OR a<0)} {{} 1}
+test_expr2 expr-7.38 {a<2 OR (a<0 AND b=0)} {1}
+test_expr2 expr-7.39 {a<2 OR (b=0 AND a<0)} {1}
+test_expr2 expr-7.40 {((a<2 OR a IS NULL) AND b<3) OR b>1e10} {{} 1}
+test_expr2 expr-7.41 {a BETWEEN -1 AND 1} {1}
+test_expr2 expr-7.42 {a NOT BETWEEN 2 AND 100} {1}
+test_expr2 expr-7.43 {(b+1234)||'this is a string that is at least 32 characters long' BETWEEN 1 AND 2} {}
+test_expr2 expr-7.44 {123||'xabcdefghijklmnopqrstuvwyxz01234567890'||a BETWEEN '123a' AND '123b'} {}
+test_expr2 expr-7.45 {((123||'xabcdefghijklmnopqrstuvwyxz01234567890'||a) BETWEEN '123a' AND '123b')<0} {}
+test_expr2 expr-7.46 {((123||'xabcdefghijklmnopqrstuvwyxz01234567890'||a) BETWEEN '123a' AND '123z')>0} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20}
+
+test_expr2 expr-7.50 {((a between 1 and 2 OR 0) AND 1) OR 0} {1 2}
+test_expr2 expr-7.51 {((a not between 3 and 100 OR 0) AND 1) OR 0} {1 2}
+test_expr2 expr-7.52 {((a in (1,2) OR 0) AND 1) OR 0} {1 2}
+test_expr2 expr-7.53 {((a not in (3,4,5,6,7,8,9,10) OR 0) AND a<11) OR 0} {1 2}
+test_expr2 expr-7.54 {((a>0 OR 0) AND a<3) OR 0} {1 2}
+test_expr2 expr-7.55 {((a in (1,2) OR 0) IS NULL AND 1) OR 0} {{}}
+test_expr2 expr-7.56 {((a not in (3,4,5,6,7,8,9,10) IS NULL OR 0) AND 1) OR 0} \
+ {{}}
+test_expr2 expr-7.57 {((a>0 IS NULL OR 0) AND 1) OR 0} {{}}
+
+test_expr2 expr-7.58 {(a||'')<='1'} {1}
+
+test_expr2 expr-7.59 {LIKE('10%',b)} {10 20}
+test_expr2 expr-7.60 {LIKE('_4',b)} {6}
+test_expr2 expr-7.61 {GLOB('1?',a)} {10 11 12 13 14 15 16 17 18 19}
+test_expr2 expr-7.62 {GLOB('1*4',b)} {10 14}
+test_expr2 expr-7.63 {GLOB('*1[456]',b)} {4}
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/fkey1.test b/usr/src/cmd/svc/configd/sqlite/test/fkey1.test
new file mode 100644
index 0000000000..4e0b4d6400
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/fkey1.test
@@ -0,0 +1,56 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library.
+#
+# This file implements tests for foreign keys.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Create a table and some data to work with.
+#
+do_test fkey1-1.0 {
+ execsql {
+ CREATE TABLE t1(
+ a INTEGER PRIMARY KEY,
+ b INTEGER
+ REFERENCES t1 ON DELETE CASCADE
+ REFERENCES t2,
+ c TEXT,
+ FOREIGN KEY (b,c) REFERENCES t2(x,y) ON UPDATE CASCADE
+ );
+ }
+} {}
+do_test fkey1-1.1 {
+ execsql {
+ CREATE TABLE t2(
+ x INTEGER PRIMARY KEY,
+ y TEXT
+ );
+ }
+} {}
+do_test fkey1-1.2 {
+ execsql {
+ CREATE TABLE t3(
+ a INTEGER REFERENCES t2,
+ b INTEGER REFERENCES t1,
+ FOREIGN KEY (a,b) REFERENCES t2(x,y)
+ );
+ }
+} {}
+
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/format3.test b/usr/src/cmd/svc/configd/sqlite/test/format3.test
new file mode 100644
index 0000000000..bcad7d957e
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/format3.test
@@ -0,0 +1,741 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing that the library is able to correctly
+# handle file-format 3 (version 2.6.x) databases.
+#
+# $Id: format3.test,v 1.4 2003/12/23 02:17:35 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Create a bunch of data to sort against
+#
+do_test format3-1.0 {
+ set fd [open data.txt w]
+ puts $fd "1\tone\t0\tI\t3.141592653"
+ puts $fd "2\ttwo\t1\tII\t2.15"
+ puts $fd "3\tthree\t1\tIII\t4221.0"
+ puts $fd "4\tfour\t2\tIV\t-0.0013442"
+ puts $fd "5\tfive\t2\tV\t-11"
+ puts $fd "6\tsix\t2\tVI\t0.123"
+ puts $fd "7\tseven\t2\tVII\t123.0"
+ puts $fd "8\teight\t3\tVIII\t-1.6"
+ close $fd
+ execsql {
+ CREATE TABLE t1(
+ n int,
+ v varchar(10),
+ log int,
+ roman varchar(10),
+ flt real
+ );
+ COPY t1 FROM 'data.txt'
+ }
+ file delete data.txt
+ db close
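+  # The next few commands rewrite one of the btree meta values (assumed
+  # here to be the file-format field) to 3, so that the remaining tests in
+  # this file run against an old-format (version 2.6.x) database.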
+ set ::bt [btree_open test.db]
+ btree_begin_transaction $::bt
+ set m [btree_get_meta $::bt]
+ set m [lreplace $m 2 2 3]
+ eval btree_update_meta $::bt $m
+ btree_commit $::bt
+ btree_close $::bt
+ sqlite db test.db
+ execsql {SELECT count(*) FROM t1}
+} {8}
+
+do_test format3-1.1 {
+ execsql {SELECT n FROM t1 ORDER BY n}
+} {1 2 3 4 5 6 7 8}
+do_test format3-1.1.1 {
+ execsql {SELECT n FROM t1 ORDER BY n ASC}
+} {1 2 3 4 5 6 7 8}
+do_test format3-1.1.1 {
+ execsql {SELECT ALL n FROM t1 ORDER BY n ASC}
+} {1 2 3 4 5 6 7 8}
+do_test format3-1.2 {
+ execsql {SELECT n FROM t1 ORDER BY n DESC}
+} {8 7 6 5 4 3 2 1}
+do_test format3-1.3a {
+ execsql {SELECT v FROM t1 ORDER BY v}
+} {eight five four one seven six three two}
+do_test format3-1.3b {
+ execsql {SELECT n FROM t1 ORDER BY v}
+} {8 5 4 1 7 6 3 2}
+do_test format3-1.4 {
+ execsql {SELECT n FROM t1 ORDER BY v DESC}
+} {2 3 6 7 1 4 5 8}
+do_test format3-1.5 {
+ execsql {SELECT flt FROM t1 ORDER BY flt}
+} {-11 -1.6 -0.0013442 0.123 2.15 3.141592653 123.0 4221.0}
+do_test format3-1.6 {
+ execsql {SELECT flt FROM t1 ORDER BY flt DESC}
+} {4221.0 123.0 3.141592653 2.15 0.123 -0.0013442 -1.6 -11}
+do_test format3-1.7 {
+ execsql {SELECT roman FROM t1 ORDER BY roman}
+} {I II III IV V VI VII VIII}
+do_test format3-1.8 {
+ execsql {SELECT n FROM t1 ORDER BY log, flt}
+} {1 2 3 5 4 6 7 8}
+do_test format3-1.8.1 {
+ execsql {SELECT n FROM t1 ORDER BY log asc, flt}
+} {1 2 3 5 4 6 7 8}
+do_test format3-1.8.2 {
+ execsql {SELECT n FROM t1 ORDER BY log, flt ASC}
+} {1 2 3 5 4 6 7 8}
+do_test format3-1.8.3 {
+ execsql {SELECT n FROM t1 ORDER BY log ASC, flt asc}
+} {1 2 3 5 4 6 7 8}
+do_test format3-1.9 {
+ execsql {SELECT n FROM t1 ORDER BY log, flt DESC}
+} {1 3 2 7 6 4 5 8}
+do_test format3-1.9.1 {
+ execsql {SELECT n FROM t1 ORDER BY log ASC, flt DESC}
+} {1 3 2 7 6 4 5 8}
+do_test format3-1.10 {
+ execsql {SELECT n FROM t1 ORDER BY log DESC, flt}
+} {8 5 4 6 7 2 3 1}
+do_test format3-1.11 {
+ execsql {SELECT n FROM t1 ORDER BY log DESC, flt DESC}
+} {8 7 6 4 5 3 2 1}
+
+# These tests are designed to reach some hard-to-reach places
+# inside the string comparison routines.
+#
+# (Later) The sorting behavior changed in 2.7.0. But we will
+# keep these tests. You can never have too many test cases!
+#
+do_test format3-2.1.1 {
+ execsql {
+ UPDATE t1 SET v='x' || -flt;
+ UPDATE t1 SET v='x-2b' where v=='x-0.123';
+ SELECT v FROM t1 ORDER BY v;
+ }
+} {x-123 x-2.15 x-2b x-3.141592653 x-4221 x0.0013442 x1.6 x11}
+do_test format3-2.1.2 {
+ execsql {
+ SELECT v FROM t1 ORDER BY substr(v,2,999);
+ }
+} {x-4221 x-123 x-3.141592653 x-2.15 x0.0013442 x1.6 x11 x-2b}
+do_test format3-2.1.3 {
+ execsql {
+ SELECT v FROM t1 ORDER BY substr(v,2,999)+0.0;
+ }
+} {x-4221 x-123 x-3.141592653 x-2.15 x-2b x0.0013442 x1.6 x11}
+do_test format3-2.1.4 {
+ execsql {
+ SELECT v FROM t1 ORDER BY substr(v,2,999) DESC;
+ }
+} {x-2b x11 x1.6 x0.0013442 x-2.15 x-3.141592653 x-123 x-4221}
+do_test format3-2.1.5 {
+ execsql {
+ SELECT v FROM t1 ORDER BY substr(v,2,999)+0.0 DESC;
+ }
+} {x11 x1.6 x0.0013442 x-2b x-2.15 x-3.141592653 x-123 x-4221}
+
+# This is a bug fix for 2.2.4.
+# Strings are normally mapped to upper-case for a caseless comparison.
+# But this can cause problems for characters in between 'Z' and 'a'.
+#
+do_test format3-3.1 {
+ execsql {
+ CREATE TABLE t2(a,b);
+ INSERT INTO t2 VALUES('AGLIENTU',1);
+ INSERT INTO t2 VALUES('AGLIE`',2);
+ INSERT INTO t2 VALUES('AGNA',3);
+ SELECT a, b FROM t2 ORDER BY a;
+ }
+} {AGLIENTU 1 AGLIE` 2 AGNA 3}
+do_test format3-3.2 {
+ execsql {
+ SELECT a, b FROM t2 ORDER BY a DESC;
+ }
+} {AGNA 3 AGLIE` 2 AGLIENTU 1}
+do_test format3-3.3 {
+ execsql {
+ DELETE FROM t2;
+ INSERT INTO t2 VALUES('aglientu',1);
+ INSERT INTO t2 VALUES('aglie`',2);
+ INSERT INTO t2 VALUES('agna',3);
+ SELECT a, b FROM t2 ORDER BY a;
+ }
+} {aglie` 2 aglientu 1 agna 3}
+do_test format3-3.4 {
+ execsql {
+ SELECT a, b FROM t2 ORDER BY a DESC;
+ }
+} {agna 3 aglientu 1 aglie` 2}
+
+# Version 2.7.0 testing.
+#
+do_test format3-4.1 {
+ execsql {
+ INSERT INTO t1 VALUES(9,'x2.7',3,'IX',4.0e5);
+ INSERT INTO t1 VALUES(10,'x5.0e10',3,'X',-4.0e5);
+ INSERT INTO t1 VALUES(11,'x-4.0e9',3,'XI',4.1e4);
+ INSERT INTO t1 VALUES(12,'x01234567890123456789',3,'XII',-4.2e3);
+ SELECT n FROM t1 ORDER BY n;
+ }
+} {1 2 3 4 5 6 7 8 9 10 11 12}
+do_test format3-4.2 {
+ execsql {
+ SELECT n||'' FROM t1 ORDER BY 1;
+ }
+} {1 2 3 4 5 6 7 8 9 10 11 12}
+do_test format3-4.3 {
+ execsql {
+ SELECT n+0 FROM t1 ORDER BY 1;
+ }
+} {1 2 3 4 5 6 7 8 9 10 11 12}
+do_test format3-4.4 {
+ execsql {
+ SELECT n||'' FROM t1 ORDER BY 1 DESC;
+ }
+} {12 11 10 9 8 7 6 5 4 3 2 1}
+do_test format3-4.5 {
+ execsql {
+ SELECT n+0 FROM t1 ORDER BY 1 DESC;
+ }
+} {12 11 10 9 8 7 6 5 4 3 2 1}
+do_test format3-4.6 {
+ execsql {
+ SELECT v FROM t1 ORDER BY 1;
+ }
+} {x-123 x-2.15 x-2b x-3.141592653 x-4.0e9 x-4221 x0.0013442 x01234567890123456789 x1.6 x11 x2.7 x5.0e10}
+do_test format3-4.7 {
+ execsql {
+ SELECT v FROM t1 ORDER BY 1 DESC;
+ }
+} {x5.0e10 x2.7 x11 x1.6 x01234567890123456789 x0.0013442 x-4221 x-4.0e9 x-3.141592653 x-2b x-2.15 x-123}
+do_test format3-4.8 {
+ execsql {
+ SELECT substr(v,2,99) FROM t1 ORDER BY 1;
+ }
+} {-4.0e9 -4221 -123 -3.141592653 -2.15 0.0013442 1.6 2.7 11 5.0e10 01234567890123456789 -2b}
+
+# Build some new test data, this time with indices.
+#
+do_test format3-5.0 {
+ execsql {
+ DROP TABLE t1;
+ CREATE TABLE t1(w int, x text, y blob);
+ DROP TABLE t2;
+ CREATE TABLE t2(p varchar(1), q clob, r real, s numeric(8));
+ }
+ for {set i 1} {$i<=100} {incr i} {
+ set w $i
+ set x [expr {int(log($i)/log(2))}]
+ set y [expr {$i*$i + 2*$i + 1}]
+ execsql "INSERT INTO t1 VALUES($w,$x,$y)"
+ }
+ execsql {
+ INSERT INTO t2 SELECT 101-w, x, (SELECT max(y) FROM t1)+1-y, y FROM t1;
+ CREATE INDEX i1w ON t1(w);
+ CREATE INDEX i1xy ON t1(x,y);
+ CREATE INDEX i2p ON t2(p);
+ CREATE INDEX i2r ON t2(r);
+ CREATE INDEX i2qs ON t2(q, s);
+ }
+} {}
+
+# Do an SQL statement. Append the search count to the end of the result.
+#
+proc count sql {
+ set ::sqlite_search_count 0
+ return [concat [execsql $sql] $::sqlite_search_count]
+}
+
+# Verify that queries use an index. We are using the special variable
+# "sqlite_search_count" which tallys the number of executions of MoveTo
+# and Next operators in the VDBE. By verifing that the search count is
+# small we can be assured that indices are being used properly.
+#
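+# As a reading aid (a sketch based on the tests that follow, not on the
+# harness itself): an indexed point lookup such as
+#
+#     count {SELECT x, y FROM t1 WHERE w=10}
+#
+# returns the query result followed by a small trailing search count,
+# e.g. {3 121 3}, whereas a query that must scan the table ends with a
+# count on the order of the number of rows visited.
+#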
+do_test format3-5.1 {
+ db close
+ sqlite db test.db
+ count {SELECT x, y FROM t1 WHERE w=10}
+} {3 121 3}
+do_test format3-5.2 {
+ count {SELECT x, y FROM t1 WHERE w=11}
+} {3 144 3}
+do_test format3-5.3 {
+ count {SELECT x, y FROM t1 WHERE 11=w}
+} {3 144 3}
+do_test format3-5.4 {
+ count {SELECT x, y FROM t1 WHERE 11=w AND x>2}
+} {3 144 3}
+do_test format3-5.5 {
+ count {SELECT x, y FROM t1 WHERE y<200 AND w=11 AND x>2}
+} {3 144 3}
+do_test format3-5.6 {
+ count {SELECT x, y FROM t1 WHERE y<200 AND x>2 AND w=11}
+} {3 144 3}
+do_test format3-5.7 {
+ count {SELECT x, y FROM t1 WHERE w=11 AND y<200 AND x>2}
+} {3 144 3}
+do_test format3-5.8 {
+ count {SELECT x, y FROM t1 WHERE w>10 AND y=144 AND x=3}
+} {3 144 3}
+do_test format3-5.9 {
+ count {SELECT x, y FROM t1 WHERE y=144 AND w>10 AND x=3}
+} {3 144 3}
+do_test format3-5.10 {
+ count {SELECT x, y FROM t1 WHERE x=3 AND w>=10 AND y=121}
+} {3 121 3}
+do_test format3-5.11 {
+ count {SELECT x, y FROM t1 WHERE x=3 AND y=100 AND w<10}
+} {3 100 3}
+
+# New for SQLite version 2.1: Verify that inequality constraints
+# are used correctly.
+#
+do_test format3-5.12 {
+ count {SELECT w FROM t1 WHERE x=3 AND y<100}
+} {8 3}
+do_test format3-5.13 {
+ count {SELECT w FROM t1 WHERE x=3 AND 100>y}
+} {8 3}
+do_test format3-5.14 {
+ count {SELECT w FROM t1 WHERE 3=x AND y<100}
+} {8 3}
+do_test format3-5.15 {
+ count {SELECT w FROM t1 WHERE 3=x AND 100>y}
+} {8 3}
+do_test format3-5.16 {
+ count {SELECT w FROM t1 WHERE x=3 AND y<=100}
+} {8 9 5}
+do_test format3-5.17 {
+ count {SELECT w FROM t1 WHERE x=3 AND 100>=y}
+} {8 9 5}
+do_test format3-5.18 {
+ count {SELECT w FROM t1 WHERE x=3 AND y>225}
+} {15 3}
+do_test format3-5.19 {
+ count {SELECT w FROM t1 WHERE x=3 AND 225<y}
+} {15 3}
+do_test format3-5.20 {
+ count {SELECT w FROM t1 WHERE x=3 AND y>=225}
+} {14 15 5}
+do_test format3-5.21 {
+ count {SELECT w FROM t1 WHERE x=3 AND 225<=y}
+} {14 15 5}
+do_test format3-5.22 {
+ count {SELECT w FROM t1 WHERE x=3 AND y>121 AND y<196}
+} {11 12 5}
+do_test format3-5.23 {
+ count {SELECT w FROM t1 WHERE x=3 AND y>=121 AND y<=196}
+} {10 11 12 13 9}
+do_test format3-5.24 {
+ count {SELECT w FROM t1 WHERE x=3 AND 121<y AND 196>y}
+} {11 12 5}
+do_test format3-5.25 {
+ count {SELECT w FROM t1 WHERE x=3 AND 121<=y AND 196>=y}
+} {10 11 12 13 9}
+
+# Need to work on optimizing the BETWEEN operator.
+#
+# do_test format3-5.26 {
+# count {SELECT w FROM t1 WHERE x=3 AND y BETWEEN 121 AND 196}
+# } {10 11 12 13 9}
+
+do_test format3-5.27 {
+ count {SELECT w FROM t1 WHERE x=3 AND y+1==122}
+} {10 17}
+do_test format3-5.28 {
+ count {SELECT w FROM t1 WHERE x+1=4 AND y+1==122}
+} {10 99}
+do_test format3-5.29 {
+ count {SELECT w FROM t1 WHERE y==121}
+} {10 99}
+
+
+do_test format3-5.30 {
+ count {SELECT w FROM t1 WHERE w>97}
+} {98 99 100 6}
+do_test format3-5.31 {
+ count {SELECT w FROM t1 WHERE w>=97}
+} {97 98 99 100 8}
+do_test format3-5.33 {
+ count {SELECT w FROM t1 WHERE w==97}
+} {97 3}
+do_test format3-5.34 {
+ count {SELECT w FROM t1 WHERE w+1==98}
+} {97 99}
+do_test format3-5.35 {
+ count {SELECT w FROM t1 WHERE w<3}
+} {1 2 4}
+do_test format3-5.36 {
+ count {SELECT w FROM t1 WHERE w<=3}
+} {1 2 3 6}
+do_test format3-5.37 {
+ count {SELECT w FROM t1 WHERE w+1<=4 ORDER BY w}
+} {1 2 3 199}
+
+
+# Do the same kind of thing except use a join as the data source.
+#
+do_test format3-6.1 {
+ db close
+ sqlite db test.db
+ count {
+ SELECT w, p FROM t2, t1
+ WHERE x=q AND y=s AND r=8977
+ }
+} {34 67 6}
+do_test format3-6.2 {
+ count {
+ SELECT w, p FROM t2, t1
+ WHERE x=q AND s=y AND r=8977
+ }
+} {34 67 6}
+do_test format3-6.3 {
+ count {
+ SELECT w, p FROM t2, t1
+ WHERE x=q AND s=y AND r=8977 AND w>10
+ }
+} {34 67 6}
+do_test format3-6.4 {
+ count {
+ SELECT w, p FROM t2, t1
+ WHERE p<80 AND x=q AND s=y AND r=8977 AND w>10
+ }
+} {34 67 6}
+do_test format3-6.5 {
+ count {
+ SELECT w, p FROM t2, t1
+ WHERE p<80 AND x=q AND 8977=r AND s=y AND w>10
+ }
+} {34 67 6}
+do_test format3-6.6 {
+ count {
+ SELECT w, p FROM t2, t1
+ WHERE x=q AND p=77 AND s=y AND w>5
+ }
+} {24 77 6}
+do_test format3-6.7 {
+ count {
+ SELECT w, p FROM t1, t2
+ WHERE x=q AND p>77 AND s=y AND w=5
+ }
+} {5 96 6}
+
+# Let's do a 3-way join.
+#
+do_test format3-7.1 {
+ count {
+ SELECT A.w, B.p, C.w FROM t1 as A, t2 as B, t1 as C
+ WHERE C.w=101-B.p AND B.r=10202-A.y AND A.w=11
+ }
+} {11 90 11 9}
+do_test format3-7.2 {
+ count {
+ SELECT A.w, B.p, C.w FROM t1 as A, t2 as B, t1 as C
+ WHERE C.w=101-B.p AND B.r=10202-A.y AND A.w=12
+ }
+} {12 89 12 9}
+do_test format3-7.3 {
+ count {
+ SELECT A.w, B.p, C.w FROM t1 as A, t2 as B, t1 as C
+ WHERE A.w=15 AND B.p=C.w AND B.r=10202-A.y
+ }
+} {15 86 86 9}
+
+# Test to see that the special case of a constant WHERE clause is
+# handled.
+#
+do_test format3-8.1 {
+ count {
+ SELECT * FROM t1 WHERE 0
+ }
+} {0}
+do_test format3-8.2 {
+ count {
+ SELECT * FROM t1 WHERE 1 LIMIT 1
+ }
+} {1 0 4 1}
+do_test format3-8.3 {
+ execsql {
+ SELECT 99 WHERE 0
+ }
+} {}
+do_test format3-8.4 {
+ execsql {
+ SELECT 99 WHERE 1
+ }
+} {99}
+
+# Verify that IN operators in a WHERE clause are handled correctly.
+#
+do_test format3-9.1 {
+ count {
+ SELECT * FROM t1 WHERE rowid IN (1,2,3,1234) order by 1;
+ }
+} {1 0 4 2 1 9 3 1 16 0}
+do_test format3-9.2 {
+ count {
+ SELECT * FROM t1 WHERE rowid+0 IN (1,2,3,1234) order by 1;
+ }
+} {1 0 4 2 1 9 3 1 16 199}
+do_test format3-9.3 {
+ count {
+ SELECT * FROM t1 WHERE w IN (-1,1,2,3) order by 1;
+ }
+} {1 0 4 2 1 9 3 1 16 10}
+do_test format3-9.4 {
+ count {
+ SELECT * FROM t1 WHERE w+0 IN (-1,1,2,3) order by 1;
+ }
+} {1 0 4 2 1 9 3 1 16 199}
+do_test format3-9.5 {
+ count {
+ SELECT * FROM t1 WHERE rowid IN
+ (select rowid from t1 where rowid IN (-1,2,4))
+ ORDER BY 1;
+ }
+} {2 1 9 4 2 25 1}
+do_test format3-9.6 {
+ count {
+ SELECT * FROM t1 WHERE rowid+0 IN
+ (select rowid from t1 where rowid IN (-1,2,4))
+ ORDER BY 1;
+ }
+} {2 1 9 4 2 25 199}
+do_test format3-9.7 {
+ count {
+ SELECT * FROM t1 WHERE w IN
+ (select rowid from t1 where rowid IN (-1,2,4))
+ ORDER BY 1;
+ }
+} {2 1 9 4 2 25 7}
+do_test format3-9.8 {
+ count {
+ SELECT * FROM t1 WHERE w+0 IN
+ (select rowid from t1 where rowid IN (-1,2,4))
+ ORDER BY 1;
+ }
+} {2 1 9 4 2 25 199}
+do_test format3-9.9 {
+ count {
+ SELECT * FROM t1 WHERE x IN (1,7) ORDER BY 1;
+ }
+} {2 1 9 3 1 16 6}
+do_test format3-9.10 {
+ count {
+ SELECT * FROM t1 WHERE x+0 IN (1,7) ORDER BY 1;
+ }
+} {2 1 9 3 1 16 199}
+do_test format3-9.11 {
+ count {
+ SELECT * FROM t1 WHERE y IN (6400,8100) ORDER BY 1;
+ }
+} {79 6 6400 89 6 8100 199}
+do_test format3-9.12 {
+ count {
+ SELECT * FROM t1 WHERE x=6 AND y IN (6400,8100) ORDER BY 1;
+ }
+} {79 6 6400 89 6 8100 74}
+do_test format3-9.13 {
+ count {
+ SELECT * FROM t1 WHERE x IN (1,7) AND y NOT IN (6400,8100) ORDER BY 1;
+ }
+} {2 1 9 3 1 16 6}
+do_test format3-9.14 {
+ count {
+ SELECT * FROM t1 WHERE x IN (1,7) AND y IN (9,10) ORDER BY 1;
+ }
+} {2 1 9 6}
+
+# This procedure executes the SQL. Then it checks the generated program
+# for the SQL and appends a "sort" to the result if the program contains the
+# SortCallback opcode. If the program does not contain the SortCallback
+# opcode it appends "nosort".
+#
+proc cksort {sql} {
+ set data [execsql $sql]
+ set prog [execsql "EXPLAIN $sql"]
+ if {[regexp SortCallback $prog]} {set x sort} {set x nosort}
+ lappend data $x
+ return $data
+}
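+# As a reading aid (a sketch drawn from the tests below): an ORDER BY that
+# the t3a index can satisfy, e.g.
+#
+#     cksort {SELECT * FROM t3 ORDER BY a LIMIT 3}
+#
+# ends in "nosort", while ordering by an expression such as a+1 forces a
+# separate sorting pass and the result ends in "sort".
+#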
+# Check out the logic that attempts to implement the ORDER BY clause
+# using an index rather than by sorting.
+#
+do_test format3-10.1 {
+ execsql {
+ CREATE TABLE t3(a,b,c);
+ CREATE INDEX t3a ON t3(a);
+ CREATE INDEX t3bc ON t3(b,c);
+ CREATE INDEX t3acb ON t3(a,c,b);
+ INSERT INTO t3 SELECT w, 101-w, y FROM t1;
+ SELECT count(*), sum(a), sum(b), sum(c) FROM t3;
+ }
+} {100 5050 5050 348550}
+do_test format3-10.2 {
+ cksort {
+ SELECT * FROM t3 ORDER BY a LIMIT 3
+ }
+} {1 100 4 2 99 9 3 98 16 nosort}
+do_test format3-10.3 {
+ cksort {
+ SELECT * FROM t3 ORDER BY a+1 LIMIT 3
+ }
+} {1 100 4 2 99 9 3 98 16 sort}
+do_test format3-10.4 {
+ cksort {
+ SELECT * FROM t3 WHERE a<10 ORDER BY a LIMIT 3
+ }
+} {1 100 4 2 99 9 3 98 16 nosort}
+do_test format3-10.5 {
+ cksort {
+ SELECT * FROM t3 WHERE a>0 AND a<10 ORDER BY a LIMIT 3
+ }
+} {1 100 4 2 99 9 3 98 16 nosort}
+do_test format3-10.6 {
+ cksort {
+ SELECT * FROM t3 WHERE a>0 ORDER BY a LIMIT 3
+ }
+} {1 100 4 2 99 9 3 98 16 nosort}
+do_test format3-10.7 {
+ cksort {
+ SELECT * FROM t3 WHERE b>0 ORDER BY a LIMIT 3
+ }
+} {1 100 4 2 99 9 3 98 16 sort}
+do_test format3-10.8 {
+ cksort {
+ SELECT * FROM t3 WHERE a IN (3,5,7,1,9,4,2) ORDER BY a LIMIT 3
+ }
+} {1 100 4 2 99 9 3 98 16 sort}
+do_test format3-10.9 {
+ cksort {
+ SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY a LIMIT 3
+ }
+} {1 100 4 nosort}
+do_test format3-10.10 {
+ cksort {
+ SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY a LIMIT 3
+ }
+} {1 100 4 nosort}
+do_test format3-10.11 {
+ cksort {
+ SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY a,c LIMIT 3
+ }
+} {1 100 4 nosort}
+do_test format3-10.12 {
+ cksort {
+ SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY a,c,b LIMIT 3
+ }
+} {1 100 4 nosort}
+do_test format3-10.13 {
+ cksort {
+ SELECT * FROM t3 WHERE a>0 ORDER BY a DESC LIMIT 3
+ }
+} {100 1 10201 99 2 10000 98 3 9801 nosort}
+do_test format3-10.13.1 {
+ cksort {
+ SELECT * FROM t3 WHERE a>0 ORDER BY a+1 DESC LIMIT 3
+ }
+} {100 1 10201 99 2 10000 98 3 9801 sort}
+do_test format3-10.14 {
+ cksort {
+ SELECT * FROM t3 ORDER BY b LIMIT 3
+ }
+} {100 1 10201 99 2 10000 98 3 9801 nosort}
+do_test format3-10.15 {
+ cksort {
+ SELECT t3.a, t1.x FROM t3, t1 WHERE t3.a=t1.w ORDER BY t3.a LIMIT 3
+ }
+} {1 0 2 1 3 1 nosort}
+do_test format3-10.16 {
+ cksort {
+ SELECT t3.a, t1.x FROM t3, t1 WHERE t3.a=t1.w ORDER BY t1.x, t3.a LIMIT 3
+ }
+} {1 0 2 1 3 1 sort}
+do_test format3-10.17 {
+ cksort {
+ SELECT y FROM t1 ORDER BY w COLLATE text LIMIT 3;
+ }
+} {4 121 10201 sort}
+do_test format3-10.18 {
+ cksort {
+ SELECT y FROM t1 ORDER BY w COLLATE numeric LIMIT 3;
+ }
+} {4 9 16 sort}
+do_test format3-10.19 {
+ cksort {
+ SELECT y FROM t1 ORDER BY w LIMIT 3;
+ }
+} {4 9 16 nosort}
+
+# Check that all comparisons are numeric. Similar tests in misc1.test
+# check the same comparisons on a format4+ database and find that some
+# are numeric and some are text.
+#
+do_test format3-11.1 {
+ execsql {SELECT '0'=='0.0'}
+} {1}
+do_test format3-11.2 {
+ execsql {SELECT '0'==0.0}
+} {1}
+do_test format3-11.3 {
+ execsql {SELECT '123456789012345678901'=='123456789012345678900'}
+} {1}
+do_test format3-11.4 {
+ execsql {
+ CREATE TABLE t6(a INT UNIQUE, b TEXT UNIQUE);
+ INSERT INTO t6 VALUES('0','0.0');
+ SELECT * FROM t6;
+ }
+} {0 0.0}
+do_test format3-11.5 {
+ execsql {
+ INSERT OR IGNORE INTO t6 VALUES(0.0,'x');
+ SELECT * FROM t6;
+ }
+} {0 0.0}
+do_test format3-11.6 {
+ execsql {
+ INSERT OR IGNORE INTO t6 VALUES('y',0);
+ SELECT * FROM t6;
+ }
+} {0 0.0}
+do_test format3-11.7 {
+ execsql {
+ CREATE TABLE t7(x INTEGER, y TEXT, z);
+ INSERT INTO t7 VALUES(0,0,1);
+ INSERT INTO t7 VALUES(0.0,0,2);
+ INSERT INTO t7 VALUES(0,0.0,3);
+ INSERT INTO t7 VALUES(0.0,0.0,4);
+ SELECT DISTINCT x, y FROM t7 ORDER BY z;
+ }
+} {0 0}
+
+# Make sure attempts to attach a format 3 database fail.
+#
+do_test format3-12.1 {
+ file delete -force test2.db
+ sqlite db2 test2.db
+ catchsql {
+ CREATE TABLE t8(x,y);
+ ATTACH DATABASE 'test.db' AS format3;
+ } db2;
+} {1 {incompatible file format in auxiliary database: format3}}
+do_test format3-12.2 {
+ catchsql {
+ ATTACH DATABASE 'test2.db' AS test2;
+ }
+} {1 {cannot attach auxiliary databases to an older format master database}}
+db2 close
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/func.test b/usr/src/cmd/svc/configd/sqlite/test/func.test
new file mode 100644
index 0000000000..521e6af45f
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/func.test
@@ -0,0 +1,348 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing built-in functions.
+#
+# $Id: func.test,v 1.16.2.2 2004/07/18 21:14:05 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Create a table to work with.
+#
+do_test func-0.0 {
+ execsql {CREATE TABLE tbl1(t1 text)}
+ foreach word {this program is free software} {
+ execsql "INSERT INTO tbl1 VALUES('$word')"
+ }
+ execsql {SELECT t1 FROM tbl1 ORDER BY t1}
+} {free is program software this}
+do_test func-0.1 {
+ execsql {
+ CREATE TABLE t2(a);
+ INSERT INTO t2 VALUES(1);
+ INSERT INTO t2 VALUES(NULL);
+ INSERT INTO t2 VALUES(345);
+ INSERT INTO t2 VALUES(NULL);
+ INSERT INTO t2 VALUES(67890);
+ SELECT * FROM t2;
+ }
+} {1 {} 345 {} 67890}
+
+# Check out the length() function
+#
+do_test func-1.0 {
+ execsql {SELECT length(t1) FROM tbl1 ORDER BY t1}
+} {4 2 7 8 4}
+do_test func-1.1 {
+ set r [catch {execsql {SELECT length(*) FROM tbl1 ORDER BY t1}} msg]
+ lappend r $msg
+} {1 {wrong number of arguments to function length()}}
+do_test func-1.2 {
+ set r [catch {execsql {SELECT length(t1,5) FROM tbl1 ORDER BY t1}} msg]
+ lappend r $msg
+} {1 {wrong number of arguments to function length()}}
+do_test func-1.3 {
+ execsql {SELECT length(t1), count(*) FROM tbl1 GROUP BY length(t1)
+ ORDER BY length(t1)}
+} {2 1 4 2 7 1 8 1}
+do_test func-1.4 {
+ execsql {SELECT coalesce(length(a),-1) FROM t2}
+} {1 -1 3 -1 5}
+
+# Check out the substr() function
+#
+do_test func-2.0 {
+ execsql {SELECT substr(t1,1,2) FROM tbl1 ORDER BY t1}
+} {fr is pr so th}
+do_test func-2.1 {
+ execsql {SELECT substr(t1,2,1) FROM tbl1 ORDER BY t1}
+} {r s r o h}
+do_test func-2.2 {
+ execsql {SELECT substr(t1,3,3) FROM tbl1 ORDER BY t1}
+} {ee {} ogr ftw is}
+do_test func-2.3 {
+ execsql {SELECT substr(t1,-1,1) FROM tbl1 ORDER BY t1}
+} {e s m e s}
+do_test func-2.4 {
+ execsql {SELECT substr(t1,-1,2) FROM tbl1 ORDER BY t1}
+} {e s m e s}
+do_test func-2.5 {
+ execsql {SELECT substr(t1,-2,1) FROM tbl1 ORDER BY t1}
+} {e i a r i}
+do_test func-2.6 {
+ execsql {SELECT substr(t1,-2,2) FROM tbl1 ORDER BY t1}
+} {ee is am re is}
+do_test func-2.7 {
+ execsql {SELECT substr(t1,-4,2) FROM tbl1 ORDER BY t1}
+} {fr {} gr wa th}
+do_test func-2.8 {
+ execsql {SELECT t1 FROM tbl1 ORDER BY substr(t1,2,20)}
+} {this software free program is}
+do_test func-2.9 {
+ execsql {SELECT substr(a,1,1) FROM t2}
+} {1 {} 3 {} 6}
+do_test func-2.10 {
+ execsql {SELECT substr(a,2,2) FROM t2}
+} {{} {} 45 {} 78}
+
+# Only do the following tests if TCL has UTF-8 capabilities and
+# the UTF-8 encoding is turned on in the SQLite library.
+#
+if {[sqlite -encoding]=="UTF-8" && "\u1234"!="u1234"} {
+
+# Put some UTF-8 characters in the database
+#
+do_test func-3.0 {
+ execsql {DELETE FROM tbl1}
+ foreach word "contains UTF-8 characters hi\u1234ho" {
+ execsql "INSERT INTO tbl1 VALUES('$word')"
+ }
+ execsql {SELECT t1 FROM tbl1 ORDER BY t1}
+} "UTF-8 characters contains hi\u1234ho"
+do_test func-3.1 {
+ execsql {SELECT length(t1) FROM tbl1 ORDER BY t1}
+} {5 10 8 5}
+do_test func-3.2 {
+ execsql {SELECT substr(t1,1,2) FROM tbl1 ORDER BY t1}
+} {UT ch co hi}
+do_test func-3.3 {
+ execsql {SELECT substr(t1,1,3) FROM tbl1 ORDER BY t1}
+} "UTF cha con hi\u1234"
+do_test func-3.4 {
+ execsql {SELECT substr(t1,2,2) FROM tbl1 ORDER BY t1}
+} "TF ha on i\u1234"
+do_test func-3.5 {
+ execsql {SELECT substr(t1,2,3) FROM tbl1 ORDER BY t1}
+} "TF- har ont i\u1234h"
+do_test func-3.6 {
+ execsql {SELECT substr(t1,3,2) FROM tbl1 ORDER BY t1}
+} "F- ar nt \u1234h"
+do_test func-3.7 {
+ execsql {SELECT substr(t1,4,2) FROM tbl1 ORDER BY t1}
+} "-8 ra ta ho"
+do_test func-3.8 {
+ execsql {SELECT substr(t1,-1,1) FROM tbl1 ORDER BY t1}
+} "8 s s o"
+do_test func-3.9 {
+ execsql {SELECT substr(t1,-3,2) FROM tbl1 ORDER BY t1}
+} "F- er in \u1234h"
+do_test func-3.10 {
+ execsql {SELECT substr(t1,-4,3) FROM tbl1 ORDER BY t1}
+} "TF- ter ain i\u1234h"
+do_test func-3.99 {
+ execsql {DELETE FROM tbl1}
+ foreach word {this program is free software} {
+ execsql "INSERT INTO tbl1 VALUES('$word')"
+ }
+ execsql {SELECT t1 FROM tbl1}
+} {this program is free software}
+
+} ;# End [sqlite -encoding]==UTF-8 and \u1234!=u1234
+
+# Test the abs() and round() functions.
+#
+do_test func-4.1 {
+ execsql {
+ CREATE TABLE t1(a,b,c);
+ INSERT INTO t1 VALUES(1,2,3);
+ INSERT INTO t1 VALUES(2,1.2345678901234,-12345.67890);
+ INSERT INTO t1 VALUES(3,-2,-5);
+ }
+ catchsql {SELECT abs(a,b) FROM t1}
+} {1 {wrong number of arguments to function abs()}}
+do_test func-4.2 {
+ catchsql {SELECT abs() FROM t1}
+} {1 {wrong number of arguments to function abs()}}
+do_test func-4.3 {
+ catchsql {SELECT abs(b) FROM t1 ORDER BY a}
+} {0 {2 1.2345678901234 2}}
+do_test func-4.4 {
+ catchsql {SELECT abs(c) FROM t1 ORDER BY a}
+} {0 {3 12345.67890 5}}
+do_test func-4.4.1 {
+ execsql {SELECT abs(a) FROM t2}
+} {1 {} 345 {} 67890}
+do_test func-4.4.2 {
+ execsql {SELECT abs(t1) FROM tbl1}
+} {this program is free software}
+
+do_test func-4.5 {
+ catchsql {SELECT round(a,b,c) FROM t1}
+} {1 {wrong number of arguments to function round()}}
+do_test func-4.6 {
+ catchsql {SELECT round(b,2) FROM t1 ORDER BY b}
+} {0 {-2.00 1.23 2.00}}
+do_test func-4.7 {
+ catchsql {SELECT round(b,0) FROM t1 ORDER BY a}
+} {0 {2 1 -2}}
+do_test func-4.8 {
+ catchsql {SELECT round(c) FROM t1 ORDER BY a}
+} {0 {3 -12346 -5}}
+do_test func-4.9 {
+ catchsql {SELECT round(c,a) FROM t1 ORDER BY a}
+} {0 {3.0 -12345.68 -5.000}}
+do_test func-4.10 {
+ catchsql {SELECT 'x' || round(c,a) || 'y' FROM t1 ORDER BY a}
+} {0 {x3.0y x-12345.68y x-5.000y}}
+do_test func-4.11 {
+ catchsql {SELECT round() FROM t1 ORDER BY a}
+} {1 {wrong number of arguments to function round()}}
+do_test func-4.12 {
+ execsql {SELECT coalesce(round(a,2),'nil') FROM t2}
+} {1.00 nil 345.00 nil 67890.00}
+do_test func-4.13 {
+ execsql {SELECT round(t1,2) FROM tbl1}
+} {0.00 0.00 0.00 0.00 0.00}
+
+# Test the upper() and lower() functions
+#
+do_test func-5.1 {
+ execsql {SELECT upper(t1) FROM tbl1}
+} {THIS PROGRAM IS FREE SOFTWARE}
+do_test func-5.2 {
+ execsql {SELECT lower(upper(t1)) FROM tbl1}
+} {this program is free software}
+do_test func-5.3 {
+ execsql {SELECT upper(a), lower(a) FROM t2}
+} {1 1 {} {} 345 345 {} {} 67890 67890}
+do_test func-5.4 {
+ catchsql {SELECT upper(a,5) FROM t2}
+} {1 {wrong number of arguments to function upper()}}
+do_test func-5.5 {
+ catchsql {SELECT upper(*) FROM t2}
+} {1 {wrong number of arguments to function upper()}}
+
+# Test the coalesce() and nullif() functions
+#
+do_test func-6.1 {
+ execsql {SELECT coalesce(a,'xyz') FROM t2}
+} {1 xyz 345 xyz 67890}
+do_test func-6.2 {
+ execsql {SELECT coalesce(upper(a),'nil') FROM t2}
+} {1 nil 345 nil 67890}
+do_test func-6.3 {
+ execsql {SELECT coalesce(nullif(1,1),'nil')}
+} {nil}
+do_test func-6.4 {
+ execsql {SELECT coalesce(nullif(1,2),'nil')}
+} {1}
+do_test func-6.5 {
+ execsql {SELECT coalesce(nullif(1,NULL),'nil')}
+} {1}
+
+
+# Test the last_insert_rowid() function
+#
+do_test func-7.1 {
+ execsql {SELECT last_insert_rowid()}
+} [db last_insert_rowid]
+
+# Tests for aggregate functions and how they handle NULLs.
+#
+do_test func-8.1 {
+ execsql {
+ SELECT sum(a), count(a), round(avg(a),2), min(a), max(a), count(*) FROM t2;
+ }
+} {68236 3 22745.33 1 67890 5}
+do_test func-8.2 {
+ execsql {
+ SELECT max('z+'||a||'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP') FROM t2;
+ }
+} {z+67890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP}
+do_test func-8.3 {
+ execsql {
+ CREATE TEMP TABLE t3 AS SELECT a FROM t2 ORDER BY a DESC;
+ SELECT min('z+'||a||'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP') FROM t3;
+ }
+} {z+1abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP}
+do_test func-8.4 {
+ execsql {
+ SELECT max('z+'||a||'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP') FROM t3;
+ }
+} {z+67890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP}
+
+# How do you test the random() function in a meaningful, deterministic way?
+#
+do_test func-9.1 {
+ execsql {
+ SELECT random() is not null;
+ }
+} {1}
+
+# Use the "sqlite_register_test_function" TCL command which is part of
+# the test fixture in order to verify correct operation of some of
+# the user-defined SQL function APIs that are not used by the built-in
+# functions.
+#
+db close
+set ::DB [sqlite db test.db]
+sqlite_register_test_function $::DB testfunc
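+# (A note inferred from the tests below rather than from the C sources:
+# testfunc appears to take alternating type-name/value pairs -- 'string',
+# 'int', 'double' -- and to return the value of the final pair, converted
+# to the named type.)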
+do_test func-10.1 {
+ catchsql {
+ SELECT testfunc(NULL,NULL);
+ }
+} {1 {first argument to test function may not be NULL}}
+do_test func-10.2 {
+ execsql {
+ SELECT testfunc(
+ 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
+ 'int', 1234
+ );
+ }
+} {1234}
+do_test func-10.3 {
+ execsql {
+ SELECT testfunc(
+ 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
+ 'string', NULL
+ );
+ }
+} {{}}
+do_test func-10.4 {
+ execsql {
+ SELECT testfunc(
+ 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
+ 'double', 1.234
+ );
+ }
+} {1.234}
+do_test func-10.5 {
+ execsql {
+ SELECT testfunc(
+ 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
+ 'int', 1234,
+ 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
+ 'string', NULL,
+ 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
+ 'double', 1.234,
+ 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
+ 'int', 1234,
+ 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
+ 'string', NULL,
+ 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
+ 'double', 1.234
+ );
+ }
+} {1.234}
+
+# Test the built-in sqlite_version(*) SQL function.
+#
+do_test func-11.1 {
+ execsql {
+ SELECT sqlite_version(*);
+ }
+} [sqlite -version]
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/hook.test b/usr/src/cmd/svc/configd/sqlite/test/hook.test
new file mode 100644
index 0000000000..dd8ebf3573
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/hook.test
@@ -0,0 +1,86 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2004 Jan 14
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for TCL interface to the
+# SQLite library.
+#
+# The focus of the tests in this file is the following interface:
+#
+# sqlite_commit_hook
+#
+# $Id: hook.test,v 1.3 2004/01/15 02:44:03 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+do_test hook-1.2 {
+ db commit_hook
+} {}
+
+
+do_test hook-3.1 {
+ set commit_cnt 0
+ proc commit_hook {} {
+ incr ::commit_cnt
+ return 0
+ }
+ db commit_hook ::commit_hook
+ db commit_hook
+} {::commit_hook}
+do_test hook-3.2 {
+ set commit_cnt
+} {0}
+do_test hook-3.3 {
+ execsql {
+ CREATE TABLE t2(a,b);
+ }
+ set commit_cnt
+} {1}
+do_test hook-3.4 {
+ execsql {
+ INSERT INTO t2 VALUES(1,2);
+ INSERT INTO t2 SELECT a+1, b+1 FROM t2;
+ INSERT INTO t2 SELECT a+2, b+2 FROM t2;
+ }
+ set commit_cnt
+} {4}
+do_test hook-3.5 {
+ set commit_cnt {}
+ proc commit_hook {} {
+ set ::commit_cnt [execsql {SELECT * FROM t2}]
+ return 0
+ }
+ execsql {
+ INSERT INTO t2 VALUES(5,6);
+ }
+ set commit_cnt
+} {1 2 2 3 3 4 4 5 5 6}
+do_test hook-3.6 {
+ set commit_cnt {}
+ proc commit_hook {} {
+ set ::commit_cnt [execsql {SELECT * FROM t2}]
+ return 1
+ }
+ catchsql {
+ INSERT INTO t2 VALUES(6,7);
+ }
+} {1 {constraint failed}}
+do_test hook-3.7 {
+ set commit_cnt
+} {1 2 2 3 3 4 4 5 5 6 6 7}
+do_test hook-3.8 {
+ execsql {SELECT * FROM t2}
+} {1 2 2 3 3 4 4 5 5 6}
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/in.test b/usr/src/cmd/svc/configd/sqlite/test/in.test
new file mode 100644
index 0000000000..612dc1909c
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/in.test
@@ -0,0 +1,306 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing the IN and BETWEEN operators.
+#
+# $Id: in.test,v 1.11 2004/01/15 03:30:25 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Generate the test data we will need for the first sequences of tests.
+#
+do_test in-1.0 {
+ set fd [open data1.txt w]
+ for {set i 1} {$i<=10} {incr i} {
+ puts $fd "$i\t[expr {int(pow(2,$i))}]"
+ }
+ close $fd
+ execsql {
+ CREATE TABLE t1(a int, b int);
+ COPY t1 FROM 'data1.txt';
+ }
+ file delete -force data1.txt
+ execsql {SELECT count(*) FROM t1}
+} {10}
+
+# Do basic testing of BETWEEN.
+#
+do_test in-1.1 {
+ execsql {SELECT a FROM t1 WHERE b BETWEEN 10 AND 50 ORDER BY a}
+} {4 5}
+do_test in-1.2 {
+ execsql {SELECT a FROM t1 WHERE b NOT BETWEEN 10 AND 50 ORDER BY a}
+} {1 2 3 6 7 8 9 10}
+do_test in-1.3 {
+ execsql {SELECT a FROM t1 WHERE b BETWEEN a AND a*5 ORDER BY a}
+} {1 2 3 4}
+do_test in-1.4 {
+ execsql {SELECT a FROM t1 WHERE b NOT BETWEEN a AND a*5 ORDER BY a}
+} {5 6 7 8 9 10}
+do_test in-1.6 {
+ execsql {SELECT a FROM t1 WHERE b BETWEEN a AND a*5 OR b=512 ORDER BY a}
+} {1 2 3 4 9}
+do_test in-1.7 {
+ execsql {SELECT a+ 100*(a BETWEEN 1 and 3) FROM t1 ORDER BY b}
+} {101 102 103 4 5 6 7 8 9 10}
+
+
+# Testing of the IN operator using static lists on the right-hand side.
+#
+do_test in-2.1 {
+ execsql {SELECT a FROM t1 WHERE b IN (8,12,16,24,32) ORDER BY a}
+} {3 4 5}
+do_test in-2.2 {
+ execsql {SELECT a FROM t1 WHERE b NOT IN (8,12,16,24,32) ORDER BY a}
+} {1 2 6 7 8 9 10}
+do_test in-2.3 {
+ execsql {SELECT a FROM t1 WHERE b IN (8,12,16,24,32) OR b=512 ORDER BY a}
+} {3 4 5 9}
+do_test in-2.4 {
+ execsql {SELECT a FROM t1 WHERE b NOT IN (8,12,16,24,32) OR b=512 ORDER BY a}
+} {1 2 6 7 8 9 10}
+do_test in-2.5 {
+ execsql {SELECT a+100*(b IN (8,16,24)) FROM t1 ORDER BY b}
+} {1 2 103 104 5 6 7 8 9 10}
+
+do_test in-2.6 {
+ set v [catch {execsql {SELECT a FROM t1 WHERE b IN (b+10,20)}} msg]
+ lappend v $msg
+} {1 {right-hand side of IN operator must be constant}}
+do_test in-2.7 {
+ set v [catch {execsql {SELECT a FROM t1 WHERE b IN (max(5,10,b),20)}} msg]
+ lappend v $msg
+} {1 {right-hand side of IN operator must be constant}}
+do_test in-2.8 {
+ execsql {SELECT a FROM t1 WHERE b IN (8*2,64/2) ORDER BY b}
+} {4 5}
+do_test in-2.9 {
+ set v [catch {execsql {SELECT a FROM t1 WHERE b IN (max(5,10),20)}} msg]
+ lappend v $msg
+} {1 {right-hand side of IN operator must be constant}}
+do_test in-2.10 {
+ set v [catch {execsql {SELECT a FROM t1 WHERE min(0,b IN (a,30))}} msg]
+ lappend v $msg
+} {1 {right-hand side of IN operator must be constant}}
+do_test in-2.11 {
+ set v [catch {execsql {SELECT a FROM t1 WHERE c IN (10,20)}} msg]
+ lappend v $msg
+} {1 {no such column: c}}
+
+# Testing the IN operator where the right-hand side is a SELECT
+#
+do_test in-3.1 {
+ execsql {
+ SELECT a FROM t1
+ WHERE b IN (SELECT b FROM t1 WHERE a<5)
+ ORDER BY a
+ }
+} {1 2 3 4}
+do_test in-3.2 {
+ execsql {
+ SELECT a FROM t1
+ WHERE b IN (SELECT b FROM t1 WHERE a<5) OR b==512
+ ORDER BY a
+ }
+} {1 2 3 4 9}
+do_test in-3.3 {
+ execsql {
+ SELECT a + 100*(b IN (SELECT b FROM t1 WHERE a<5)) FROM t1 ORDER BY b
+ }
+} {101 102 103 104 5 6 7 8 9 10}
+
+# Make sure the UPDATE and DELETE commands work with IN-SELECT
+#
+do_test in-4.1 {
+ execsql {
+ UPDATE t1 SET b=b*2
+ WHERE b IN (SELECT b FROM t1 WHERE a>8)
+ }
+ execsql {SELECT b FROM t1 ORDER BY b}
+} {2 4 8 16 32 64 128 256 1024 2048}
+do_test in-4.2 {
+ execsql {
+ DELETE FROM t1 WHERE b IN (SELECT b FROM t1 WHERE a>8)
+ }
+ execsql {SELECT a FROM t1 ORDER BY a}
+} {1 2 3 4 5 6 7 8}
+do_test in-4.3 {
+ execsql {
+ DELETE FROM t1 WHERE b NOT IN (SELECT b FROM t1 WHERE a>4)
+ }
+ execsql {SELECT a FROM t1 ORDER BY a}
+} {5 6 7 8}
+
+# Do an IN with a constant RHS but where the RHS has many, many
+# elements. We need to test that collisions in the hash table
+# are resolved properly.
+#
+do_test in-5.1 {
+ execsql {
+ INSERT INTO t1 VALUES('hello', 'world');
+ SELECT * FROM t1
+ WHERE a IN (
+ 'Do','an','IN','with','a','constant','RHS','but','where','the',
+ 'has','many','elements','We','need','to','test','that',
+ 'collisions','hash','table','are','resolved','properly',
+ 'This','in-set','contains','thirty','one','entries','hello');
+ }
+} {hello world}
+
+# Make sure the IN operator works with INTEGER PRIMARY KEY fields.
+#
+do_test in-6.1 {
+ execsql {
+ CREATE TABLE ta(a INTEGER PRIMARY KEY, b);
+ INSERT INTO ta VALUES(1,1);
+ INSERT INTO ta VALUES(2,2);
+ INSERT INTO ta VALUES(3,3);
+ INSERT INTO ta VALUES(4,4);
+ INSERT INTO ta VALUES(6,6);
+ INSERT INTO ta VALUES(8,8);
+ INSERT INTO ta VALUES(10,
+ 'This is a key that is long enough to require a malloc in the VDBE');
+ SELECT * FROM ta WHERE a<10;
+ }
+} {1 1 2 2 3 3 4 4 6 6 8 8}
+do_test in-6.2 {
+ execsql {
+ CREATE TABLE tb(a INTEGER PRIMARY KEY, b);
+ INSERT INTO tb VALUES(1,1);
+ INSERT INTO tb VALUES(2,2);
+ INSERT INTO tb VALUES(3,3);
+ INSERT INTO tb VALUES(5,5);
+ INSERT INTO tb VALUES(7,7);
+ INSERT INTO tb VALUES(9,9);
+ INSERT INTO tb VALUES(11,
+ 'This is a key that is long enough to require a malloc in the VDBE');
+ SELECT * FROM tb WHERE a<10;
+ }
+} {1 1 2 2 3 3 5 5 7 7 9 9}
+do_test in-6.3 {
+ execsql {
+ SELECT a FROM ta WHERE b IN (SELECT a FROM tb);
+ }
+} {1 2 3}
+do_test in-6.4 {
+ execsql {
+ SELECT a FROM ta WHERE b NOT IN (SELECT a FROM tb);
+ }
+} {4 6 8 10}
+do_test in-6.5 {
+ execsql {
+ SELECT a FROM ta WHERE b IN (SELECT b FROM tb);
+ }
+} {1 2 3 10}
+do_test in-6.6 {
+ execsql {
+ SELECT a FROM ta WHERE b NOT IN (SELECT b FROM tb);
+ }
+} {4 6 8}
+do_test in-6.7 {
+ execsql {
+ SELECT a FROM ta WHERE a IN (SELECT a FROM tb);
+ }
+} {1 2 3}
+do_test in-6.8 {
+ execsql {
+ SELECT a FROM ta WHERE a NOT IN (SELECT a FROM tb);
+ }
+} {4 6 8 10}
+do_test in-6.9 {
+ execsql {
+ SELECT a FROM ta WHERE a IN (SELECT b FROM tb);
+ }
+} {1 2 3}
+do_test in-6.10 {
+ execsql {
+ SELECT a FROM ta WHERE a NOT IN (SELECT b FROM tb);
+ }
+} {4 6 8 10}
+
+# Tests of IN operator against empty sets. (Ticket #185)
+#
+do_test in-7.1 {
+ execsql {
+ SELECT a FROM t1 WHERE a IN ();
+ }
+} {}
+do_test in-7.2 {
+ execsql {
+ SELECT a FROM t1 WHERE a IN (5);
+ }
+} {5}
+do_test in-7.3 {
+ execsql {
+ SELECT a FROM t1 WHERE a NOT IN () ORDER BY a;
+ }
+} {5 6 7 8 hello}
+do_test in-7.4 {
+ execsql {
+ SELECT a FROM t1 WHERE a IN (5) AND b IN ();
+ }
+} {}
+do_test in-7.5 {
+ execsql {
+ SELECT a FROM t1 WHERE a IN (5) AND b NOT IN ();
+ }
+} {5}
+do_test in-7.6 {
+ execsql {
+ SELECT a FROM ta WHERE a IN ();
+ }
+} {}
+do_test in-7.7 {
+ execsql {
+ SELECT a FROM ta WHERE a NOT IN ();
+ }
+} {1 2 3 4 6 8 10}
+
+do_test in-8.1 {
+ execsql {
+ SELECT b FROM t1 WHERE a IN ('hello','there')
+ }
+} {world}
+do_test in-8.2 {
+ execsql {
+ SELECT b FROM t1 WHERE a IN ("hello",'there')
+ }
+} {world}
+
+# Test constructs of the form: expr IN tablename
+#
+do_test in-9.1 {
+ execsql {
+ CREATE TABLE t4 AS SELECT a FROM tb;
+ SELECT * FROM t4;
+ }
+} {1 2 3 5 7 9 11}
+do_test in-9.2 {
+ execsql {
+ SELECT b FROM t1 WHERE a IN t4;
+ }
+} {32 128}
+do_test in-9.3 {
+ execsql {
+ SELECT b FROM t1 WHERE a NOT IN t4;
+ }
+} {64 256 world}
+do_test in-9.4 {
+ catchsql {
+ SELECT b FROM t1 WHERE a NOT IN tb;
+ }
+} {1 {only a single result allowed for a SELECT that is part of an expression}}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/index.test b/usr/src/cmd/svc/configd/sqlite/test/index.test
new file mode 100644
index 0000000000..aabe7f3869
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/index.test
@@ -0,0 +1,536 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing the CREATE INDEX statement.
+#
+# $Id: index.test,v 1.24.2.1 2004/07/20 00:50:30 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Create a basic index and verify it is added to sqlite_master
+#
+do_test index-1.1 {
+ execsql {CREATE TABLE test1(f1 int, f2 int, f3 int)}
+ execsql {CREATE INDEX index1 ON test1(f1)}
+ execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name}
+} {index1 test1}
+do_test index-1.1b {
+ execsql {SELECT name, sql, tbl_name, type FROM sqlite_master
+ WHERE name='index1'}
+} {index1 {CREATE INDEX index1 ON test1(f1)} test1 index}
+do_test index-1.1c {
+ db close
+ sqlite db test.db
+ execsql {SELECT name, sql, tbl_name, type FROM sqlite_master
+ WHERE name='index1'}
+} {index1 {CREATE INDEX index1 ON test1(f1)} test1 index}
+do_test index-1.1d {
+ db close
+ sqlite db test.db
+ execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name}
+} {index1 test1}
+
+# Verify that the index dies with the table
+#
+do_test index-1.2 {
+ execsql {DROP TABLE test1}
+ execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name}
+} {}
+
+# Try adding an index to a table that does not exist
+#
+do_test index-2.1 {
+ set v [catch {execsql {CREATE INDEX index1 ON test1(f1)}} msg]
+ lappend v $msg
+} {1 {no such table: test1}}
+
+# Try adding an index on a column of a table where the table
+# exists but the column does not.
+#
+do_test index-2.1b {
+ execsql {CREATE TABLE test1(f1 int, f2 int, f3 int)}
+ set v [catch {execsql {CREATE INDEX index1 ON test1(f4)}} msg]
+ lappend v $msg
+} {1 {table test1 has no column named f4}}
+
+# Try an index with some columns that match and others that do not.
+#
+do_test index-2.2 {
+ set v [catch {execsql {CREATE INDEX index1 ON test1(f1, f2, f4, f3)}} msg]
+ execsql {DROP TABLE test1}
+ lappend v $msg
+} {1 {table test1 has no column named f4}}
+
+# Try creating a bunch of indices on the same table
+#
+set r {}
+for {set i 1} {$i<100} {incr i} {
+ lappend r [format index%02d $i]
+}
+do_test index-3.1 {
+ execsql {CREATE TABLE test1(f1 int, f2 int, f3 int, f4 int, f5 int)}
+ for {set i 1} {$i<100} {incr i} {
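+    # each pass indexes column f(($i%5)+1), cycling through f1..f5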
+ set sql "CREATE INDEX [format index%02d $i] ON test1(f[expr {($i%5)+1}])"
+ execsql $sql
+ }
+ execsql {SELECT name FROM sqlite_master
+ WHERE type='index' AND tbl_name='test1'
+ ORDER BY name}
+} $r
+
+
+# Verify that all the indices go away when we drop the table.
+#
+do_test index-3.3 {
+ execsql {DROP TABLE test1}
+ execsql {SELECT name FROM sqlite_master
+ WHERE type='index' AND tbl_name='test1'
+ ORDER BY name}
+} {}
+
+# Create a table and insert values into that table. Then create
+# an index on that table. Verify that we can select values
+# from the table correctly using the index.
+#
+# Note that the index names "index9" and "indext" are chosen because
+# they both have the same hash.
+#
+do_test index-4.1 {
+ execsql {CREATE TABLE test1(cnt int, power int)}
+ for {set i 1} {$i<20} {incr i} {
+ execsql "INSERT INTO test1 VALUES($i,[expr {int(pow(2,$i))}])"
+ }
+ execsql {CREATE INDEX index9 ON test1(cnt)}
+ execsql {CREATE INDEX indext ON test1(power)}
+ execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name}
+} {index9 indext test1}
+do_test index-4.2 {
+ execsql {SELECT cnt FROM test1 WHERE power=4}
+} {2}
+do_test index-4.3 {
+ execsql {SELECT cnt FROM test1 WHERE power=1024}
+} {10}
+do_test index-4.4 {
+ execsql {SELECT power FROM test1 WHERE cnt=6}
+} {64}
+do_test index-4.5 {
+ execsql {DROP INDEX indext}
+ execsql {SELECT power FROM test1 WHERE cnt=6}
+} {64}
+do_test index-4.6 {
+ execsql {SELECT cnt FROM test1 WHERE power=1024}
+} {10}
+do_test index-4.7 {
+ execsql {CREATE INDEX indext ON test1(cnt)}
+ execsql {SELECT power FROM test1 WHERE cnt=6}
+} {64}
+do_test index-4.8 {
+ execsql {SELECT cnt FROM test1 WHERE power=1024}
+} {10}
+do_test index-4.9 {
+ execsql {DROP INDEX index9}
+ execsql {SELECT power FROM test1 WHERE cnt=6}
+} {64}
+do_test index-4.10 {
+ execsql {SELECT cnt FROM test1 WHERE power=1024}
+} {10}
+do_test index-4.11 {
+ execsql {DROP INDEX indext}
+ execsql {SELECT power FROM test1 WHERE cnt=6}
+} {64}
+do_test index-4.12 {
+ execsql {SELECT cnt FROM test1 WHERE power=1024}
+} {10}
+do_test index-4.13 {
+ execsql {DROP TABLE test1}
+ execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name}
+} {}
+integrity_check index-4.14
+
+# Do not allow indices to be added to sqlite_master
+#
+do_test index-5.1 {
+ set v [catch {execsql {CREATE INDEX index1 ON sqlite_master(name)}} msg]
+ lappend v $msg
+} {1 {table sqlite_master may not be indexed}}
+do_test index-5.2 {
+ execsql {SELECT name FROM sqlite_master WHERE type!='meta'}
+} {}
+
+# Do not allow indices with duplicate names to be added
+#
+do_test index-6.1 {
+ execsql {CREATE TABLE test1(f1 int, f2 int)}
+ execsql {CREATE TABLE test2(g1 real, g2 real)}
+ execsql {CREATE INDEX index1 ON test1(f1)}
+ set v [catch {execsql {CREATE INDEX index1 ON test2(g1)}} msg]
+ lappend v $msg
+} {1 {index index1 already exists}}
+do_test index-6.1b {
+ execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name}
+} {index1 test1 test2}
+do_test index-6.2 {
+ set v [catch {execsql {CREATE INDEX test1 ON test2(g1)}} msg]
+ lappend v $msg
+} {1 {there is already a table named test1}}
+do_test index-6.2b {
+ execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name}
+} {index1 test1 test2}
+do_test index-6.3 {
+ execsql {DROP TABLE test1}
+ execsql {DROP TABLE test2}
+ execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name}
+} {}
+do_test index-6.4 {
+ execsql {
+ CREATE TABLE test1(a,b);
+ CREATE INDEX index1 ON test1(a);
+ CREATE INDEX index2 ON test1(b);
+ CREATE INDEX index3 ON test1(a,b);
+ DROP TABLE test1;
+ SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name;
+ }
+} {}
+integrity_check index-6.5
+
+
+# Create a primary key
+#
+do_test index-7.1 {
+ execsql {CREATE TABLE test1(f1 int, f2 int primary key)}
+ for {set i 1} {$i<20} {incr i} {
+ execsql "INSERT INTO test1 VALUES($i,[expr {int(pow(2,$i))}])"
+ }
+ execsql {SELECT count(*) FROM test1}
+} {19}
+do_test index-7.2 {
+ execsql {SELECT f1 FROM test1 WHERE f2=65536}
+} {16}
+do_test index-7.3 {
+ execsql {
+ SELECT name FROM sqlite_master
+ WHERE type='index' AND tbl_name='test1'
+ }
+} {{(test1 autoindex 1)}}
+do_test index-7.4 {
+ execsql {DROP table test1}
+ execsql {SELECT name FROM sqlite_master WHERE type!='meta'}
+} {}
+integrity_check index-7.5
+
+# Make sure we cannot drop a non-existent index.
+#
+do_test index-8.1 {
+ set v [catch {execsql {DROP INDEX index1}} msg]
+ lappend v $msg
+} {1 {no such index: index1}}
+
+# Make sure we don't actually create an index when the EXPLAIN keyword
+# is used.
+#
+do_test index-9.1 {
+ execsql {CREATE TABLE tab1(a int)}
+ execsql {EXPLAIN CREATE INDEX idx1 ON tab1(a)}
+ execsql {SELECT name FROM sqlite_master WHERE tbl_name='tab1'}
+} {tab1}
+do_test index-9.2 {
+ execsql {CREATE INDEX idx1 ON tab1(a)}
+ execsql {SELECT name FROM sqlite_master WHERE tbl_name='tab1' ORDER BY name}
+} {idx1 tab1}
+integrity_check index-9.3
+
+# Allow more than one entry with the same key.
+#
+do_test index-10.0 {
+ execsql {
+ CREATE TABLE t1(a int, b int);
+ CREATE INDEX i1 ON t1(a);
+ INSERT INTO t1 VALUES(1,2);
+ INSERT INTO t1 VALUES(2,4);
+ INSERT INTO t1 VALUES(3,8);
+ INSERT INTO t1 VALUES(1,12);
+ SELECT b FROM t1 WHERE a=1 ORDER BY b;
+ }
+} {2 12}
+do_test index-10.1 {
+ execsql {
+ SELECT b FROM t1 WHERE a=2 ORDER BY b;
+ }
+} {4}
+do_test index-10.2 {
+ execsql {
+ DELETE FROM t1 WHERE b=12;
+ SELECT b FROM t1 WHERE a=1 ORDER BY b;
+ }
+} {2}
+do_test index-10.3 {
+ execsql {
+ DELETE FROM t1 WHERE b=2;
+ SELECT b FROM t1 WHERE a=1 ORDER BY b;
+ }
+} {}
+do_test index-10.4 {
+ execsql {
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES (1,1);
+ INSERT INTO t1 VALUES (1,2);
+ INSERT INTO t1 VALUES (1,3);
+ INSERT INTO t1 VALUES (1,4);
+ INSERT INTO t1 VALUES (1,5);
+ INSERT INTO t1 VALUES (1,6);
+ INSERT INTO t1 VALUES (1,7);
+ INSERT INTO t1 VALUES (1,8);
+ INSERT INTO t1 VALUES (1,9);
+ INSERT INTO t1 VALUES (2,0);
+ SELECT b FROM t1 WHERE a=1 ORDER BY b;
+ }
+} {1 2 3 4 5 6 7 8 9}
+do_test index-10.5 {
+ execsql {
+ DELETE FROM t1 WHERE b IN (2, 4, 6, 8);
+ SELECT b FROM t1 WHERE a=1 ORDER BY b;
+ }
+} {1 3 5 7 9}
+do_test index-10.6 {
+ execsql {
+ DELETE FROM t1 WHERE b>2;
+ SELECT b FROM t1 WHERE a=1 ORDER BY b;
+ }
+} {1}
+do_test index-10.7 {
+ execsql {
+ DELETE FROM t1 WHERE b=1;
+ SELECT b FROM t1 WHERE a=1 ORDER BY b;
+ }
+} {}
+do_test index-10.8 {
+ execsql {
+ SELECT b FROM t1 ORDER BY b;
+ }
+} {0}
+integrity_check index-10.9
+
+# Automatically create an index when we specify a primary key.
+#
+do_test index-11.1 {
+ execsql {
+ CREATE TABLE t3(
+ a text,
+ b int,
+ c float,
+ PRIMARY KEY(b)
+ );
+ }
+ for {set i 1} {$i<=50} {incr i} {
+ execsql "INSERT INTO t3 VALUES('x${i}x',$i,0.$i)"
+ }
+ set sqlite_search_count 0
+ concat [execsql {SELECT c FROM t3 WHERE b==10}] $sqlite_search_count
+} {0.10 3}
+integrity_check index-11.2
+
+
+# Numeric strings should compare as if they were numbers: even when two
+# strings are not identical character by character, they should compare
+# equal to one another if they represent the same number. Verify that
+# this also holds in indices.
+#
+do_test index-12.1 {
+ execsql {
+ CREATE TABLE t4(a,b);
+ INSERT INTO t4 VALUES('0.0',1);
+ INSERT INTO t4 VALUES('0.00',2);
+ INSERT INTO t4 VALUES('abc',3);
+ INSERT INTO t4 VALUES('-1.0',4);
+ INSERT INTO t4 VALUES('+1.0',5);
+ INSERT INTO t4 VALUES('0',6);
+ INSERT INTO t4 VALUES('00000',7);
+ SELECT a FROM t4 ORDER BY b;
+ }
+} {0.0 0.00 abc -1.0 +1.0 0 00000}
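+# An extra sketch of the rule stated above (illustrative only; assumes
+# that a numeric-looking string literal on the right of '=' is also
+# compared numerically): '0.00' should match every zero-valued row even
+# though the stored bytes differ.
+do_test index-12.1b-sketch {
+  execsql {
+    SELECT count(*) FROM t4 WHERE a='0.00'
+  }
+} {4}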
+do_test index-12.2 {
+ execsql {
+ SELECT a FROM t4 WHERE a==0 ORDER BY b
+ }
+} {0.0 0.00 0 00000}
+do_test index-12.3 {
+ execsql {
+ SELECT a FROM t4 WHERE a<0.5 ORDER BY b
+ }
+} {0.0 0.00 -1.0 0 00000}
+do_test index-12.4 {
+ execsql {
+ SELECT a FROM t4 WHERE a>-0.5 ORDER BY b
+ }
+} {0.0 0.00 abc +1.0 0 00000}
+do_test index-12.5 {
+ execsql {
+ CREATE INDEX t4i1 ON t4(a);
+ SELECT a FROM t4 WHERE a==0 ORDER BY b
+ }
+} {0.0 0.00 0 00000}
+do_test index-12.6 {
+ execsql {
+ SELECT a FROM t4 WHERE a<0.5 ORDER BY b
+ }
+} {0.0 0.00 -1.0 0 00000}
+do_test index-12.7 {
+ execsql {
+ SELECT a FROM t4 WHERE a>-0.5 ORDER BY b
+ }
+} {0.0 0.00 abc +1.0 0 00000}
+integrity_check index-12.8
+
+# Make sure we cannot drop an automatically created index.
+#
+do_test index-13.1 {
+ execsql {
+ CREATE TABLE t5(
+ a int UNIQUE,
+ b float PRIMARY KEY,
+ c varchar(10),
+ UNIQUE(a,c)
+ );
+ INSERT INTO t5 VALUES(1,2,3);
+ SELECT * FROM t5;
+ }
+} {1 2 3}
+do_test index-13.2 {
+ set ::idxlist [execsql {
+ SELECT name FROM sqlite_master WHERE type="index" AND tbl_name="t5";
+ }]
+ llength $::idxlist
+} {3}
+for {set i 0} {$i<[llength $::idxlist]} {incr i} {
+ do_test index-13.3.$i {
+ catchsql "
+ DROP INDEX '[lindex $::idxlist $i]';
+ "
+ } {1 {index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped}}
+}
+do_test index-13.4 {
+ execsql {
+ INSERT INTO t5 VALUES('a','b','c');
+ SELECT * FROM t5;
+ }
+} {1 2 3 a b c}
+integrity_check index-13.5
+
+# Check the sort order of data in an index.
+#
+do_test index-14.1 {
+ execsql {
+ CREATE TABLE t6(a,b,c);
+ CREATE INDEX t6i1 ON t6(a,b);
+ INSERT INTO t6 VALUES('','',1);
+ INSERT INTO t6 VALUES('',NULL,2);
+ INSERT INTO t6 VALUES(NULL,'',3);
+ INSERT INTO t6 VALUES('abc',123,4);
+ INSERT INTO t6 VALUES(123,'abc',5);
+ SELECT c FROM t6 ORDER BY a,b;
+ }
+} {3 5 2 1 4}
+do_test index-14.2 {
+ execsql {
+ SELECT c FROM t6 WHERE a='';
+ }
+} {2 1}
+do_test index-14.3 {
+ execsql {
+ SELECT c FROM t6 WHERE b='';
+ }
+} {1 3}
+do_test index-14.4 {
+ execsql {
+ SELECT c FROM t6 WHERE a>'';
+ }
+} {4}
+do_test index-14.5 {
+ execsql {
+ SELECT c FROM t6 WHERE a>='';
+ }
+} {2 1 4}
+do_test index-14.6 {
+ execsql {
+ SELECT c FROM t6 WHERE a>123;
+ }
+} {2 1 4}
+do_test index-14.7 {
+ execsql {
+ SELECT c FROM t6 WHERE a>=123;
+ }
+} {5 2 1 4}
+do_test index-14.8 {
+ execsql {
+ SELECT c FROM t6 WHERE a<'abc';
+ }
+} {5 2 1}
+do_test index-14.9 {
+ execsql {
+ SELECT c FROM t6 WHERE a<='abc';
+ }
+} {5 2 1 4}
+do_test index-14.10 {
+ execsql {
+ SELECT c FROM t6 WHERE a<='';
+ }
+} {5 2 1}
+do_test index-14.11 {
+ execsql {
+ SELECT c FROM t6 WHERE a<'';
+ }
+} {5}
+integrity_check index-14.12
+
+do_test index-15.1 {
+ execsql {
+ DELETE FROM t1;
+ SELECT * FROM t1;
+ }
+} {}
+do_test index-15.2 {
+ execsql {
+ INSERT INTO t1 VALUES('1.234e5',1);
+ INSERT INTO t1 VALUES('12.33e04',2);
+ INSERT INTO t1 VALUES('12.35E4',3);
+ INSERT INTO t1 VALUES('12.34e',4);
+ INSERT INTO t1 VALUES('12.32e+4',5);
+ INSERT INTO t1 VALUES('12.36E+04',6);
+ INSERT INTO t1 VALUES('12.36E+',7);
+ INSERT INTO t1 VALUES('+123.10000E+0003',8);
+ INSERT INTO t1 VALUES('+',9);
+ INSERT INTO t1 VALUES('+12347.E+02',10);
+ INSERT INTO t1 VALUES('+12347E+02',11);
+ SELECT b FROM t1 ORDER BY a;
+ }
+} {8 5 2 1 3 6 11 9 10 4 7}
+integrity_check index-15.3
+
+# Drop index with a quoted name. Ticket #695.
+#
+do_test index-16.1 {
+ execsql {
+ CREATE INDEX "t6i2" ON t6(c);
+ DROP INDEX "t6i2";
+ }
+} {}
+do_test index-16.2 {
+ execsql {
+ DROP INDEX "t6i1";
+ }
+} {}
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/insert.test b/usr/src/cmd/svc/configd/sqlite/test/insert.test
new file mode 100644
index 0000000000..b615347712
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/insert.test
@@ -0,0 +1,289 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing the INSERT statement.
+#
+# $Id: insert.test,v 1.15 2003/06/15 23:42:25 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Try to insert into a non-existent table.
+#
+do_test insert-1.1 {
+ set v [catch {execsql {INSERT INTO test1 VALUES(1,2,3)}} msg]
+ lappend v $msg
+} {1 {no such table: test1}}
+
+# Try to insert into sqlite_master
+#
+do_test insert-1.2 {
+ set v [catch {execsql {INSERT INTO sqlite_master VALUES(1,2,3,4)}} msg]
+ lappend v $msg
+} {1 {table sqlite_master may not be modified}}
+
+# Try to insert the wrong number of entries.
+#
+do_test insert-1.3 {
+ execsql {CREATE TABLE test1(one int, two int, three int)}
+ set v [catch {execsql {INSERT INTO test1 VALUES(1,2)}} msg]
+ lappend v $msg
+} {1 {table test1 has 3 columns but 2 values were supplied}}
+do_test insert-1.3b {
+ set v [catch {execsql {INSERT INTO test1 VALUES(1,2,3,4)}} msg]
+ lappend v $msg
+} {1 {table test1 has 3 columns but 4 values were supplied}}
+do_test insert-1.3c {
+ set v [catch {execsql {INSERT INTO test1(one,two) VALUES(1,2,3,4)}} msg]
+ lappend v $msg
+} {1 {4 values for 2 columns}}
+do_test insert-1.3d {
+ set v [catch {execsql {INSERT INTO test1(one,two) VALUES(1)}} msg]
+ lappend v $msg
+} {1 {1 values for 2 columns}}
+
+# Try to insert into a non-existent column of a table.
+#
+do_test insert-1.4 {
+ set v [catch {execsql {INSERT INTO test1(one,four) VALUES(1,2)}} msg]
+ lappend v $msg
+} {1 {table test1 has no column named four}}
+
+# Make sure the inserts actually happen
+#
+do_test insert-1.5 {
+ execsql {INSERT INTO test1 VALUES(1,2,3)}
+ execsql {SELECT * FROM test1}
+} {1 2 3}
+do_test insert-1.5b {
+ execsql {INSERT INTO test1 VALUES(4,5,6)}
+ execsql {SELECT * FROM test1 ORDER BY one}
+} {1 2 3 4 5 6}
+do_test insert-1.5c {
+ execsql {INSERT INTO test1 VALUES(7,8,9)}
+ execsql {SELECT * FROM test1 ORDER BY one}
+} {1 2 3 4 5 6 7 8 9}
+
+do_test insert-1.6 {
+ execsql {DELETE FROM test1}
+ execsql {INSERT INTO test1(one,two) VALUES(1,2)}
+ execsql {SELECT * FROM test1 ORDER BY one}
+} {1 2 {}}
+do_test insert-1.6b {
+ execsql {INSERT INTO test1(two,three) VALUES(5,6)}
+ execsql {SELECT * FROM test1 ORDER BY one}
+} {{} 5 6 1 2 {}}
+do_test insert-1.6c {
+ execsql {INSERT INTO test1(three,one) VALUES(7,8)}
+ execsql {SELECT * FROM test1 ORDER BY one}
+} {{} 5 6 1 2 {} 8 {} 7}
+
+# A table to use for testing default values
+#
+do_test insert-2.1 {
+ execsql {
+ CREATE TABLE test2(
+ f1 int default -111,
+ f2 real default +4.32,
+ f3 int default +222,
+ f4 int default 7.89
+ )
+ }
+ execsql {SELECT * from test2}
+} {}
+do_test insert-2.2 {
+ execsql {INSERT INTO test2(f1,f3) VALUES(+10,-10)}
+ execsql {SELECT * FROM test2}
+} {10 4.32 -10 7.89}
+do_test insert-2.3 {
+ execsql {INSERT INTO test2(f2,f4) VALUES(1.23,-3.45)}
+ execsql {SELECT * FROM test2 WHERE f1==-111}
+} {-111 1.23 222 -3.45}
+do_test insert-2.4 {
+ execsql {INSERT INTO test2(f1,f2,f4) VALUES(77,+1.23,3.45)}
+ execsql {SELECT * FROM test2 WHERE f1==77}
+} {77 1.23 222 3.45}
+do_test insert-2.10 {
+ execsql {
+ DROP TABLE test2;
+ CREATE TABLE test2(
+ f1 int default 111,
+ f2 real default -4.32,
+ f3 text default hi,
+ f4 text default 'abc-123',
+ f5 varchar(10)
+ )
+ }
+ execsql {SELECT * from test2}
+} {}
+do_test insert-2.11 {
+ execsql {INSERT INTO test2(f2,f4) VALUES(-2.22,'hi!')}
+ execsql {SELECT * FROM test2}
+} {111 -2.22 hi hi! {}}
+do_test insert-2.12 {
+ execsql {INSERT INTO test2(f1,f5) VALUES(1,'xyzzy')}
+ execsql {SELECT * FROM test2 ORDER BY f1}
+} {1 -4.32 hi abc-123 xyzzy 111 -2.22 hi hi! {}}
+
+# Do additional inserts with default values, but this time
+# on a table that has indices. In particular we want to verify
+# that the correct default values are inserted into the indices.
+#
+do_test insert-3.1 {
+ execsql {
+ DELETE FROM test2;
+ CREATE INDEX index9 ON test2(f1,f2);
+ CREATE INDEX indext ON test2(f4,f5);
+ SELECT * from test2;
+ }
+} {}
+do_test insert-3.2 {
+ execsql {INSERT INTO test2(f2,f4) VALUES(-3.33,'hum')}
+ execsql {SELECT * FROM test2 WHERE f1=111 AND f2=-3.33}
+} {111 -3.33 hi hum {}}
+do_test insert-3.3 {
+ execsql {INSERT INTO test2(f1,f2,f5) VALUES(22,-4.44,'wham')}
+ execsql {SELECT * FROM test2 WHERE f1=111 AND f2=-3.33}
+} {111 -3.33 hi hum {}}
+do_test insert-3.4 {
+ execsql {SELECT * FROM test2 WHERE f1=22 AND f2=-4.44}
+} {22 -4.44 hi abc-123 wham}
+integrity_check insert-3.5
+
+# Test of expressions in the VALUES clause
+#
+do_test insert-4.1 {
+ execsql {
+ CREATE TABLE t3(a,b,c);
+ INSERT INTO t3 VALUES(1+2+3,4,5);
+ SELECT * FROM t3;
+ }
+} {6 4 5}
+do_test insert-4.2 {
+ execsql {
+ INSERT INTO t3 VALUES((SELECT max(a) FROM t3)+1,5,6);
+ SELECT * FROM t3 ORDER BY a;
+ }
+} {6 4 5 7 5 6}
+do_test insert-4.3 {
+ catchsql {
+ INSERT INTO t3 VALUES((SELECT max(a) FROM t3)+1,t3.a,6);
+ SELECT * FROM t3 ORDER BY a;
+ }
+} {1 {no such column: t3.a}}
+do_test insert-4.4 {
+ execsql {
+ INSERT INTO t3 VALUES((SELECT b FROM t3 WHERE a=0),6,7);
+ SELECT * FROM t3 ORDER BY a;
+ }
+} {{} 6 7 6 4 5 7 5 6}
+do_test insert-4.5 {
+ execsql {
+ SELECT b,c FROM t3 WHERE a IS NULL;
+ }
+} {6 7}
+do_test insert-4.6 {
+ catchsql {
+ INSERT INTO t3 VALUES(notafunc(2,3),2,3);
+ }
+} {1 {no such function: notafunc}}
+do_test insert-4.7 {
+ execsql {
+ INSERT INTO t3 VALUES(min(1,2,3),max(1,2,3),99);
+ SELECT * FROM t3 WHERE c=99;
+ }
+} {1 3 99}
+
+# Test the ability to insert from a temporary table into itself.
+# Ticket #275.
+#
+do_test insert-5.1 {
+ execsql {
+ CREATE TEMP TABLE t4(x);
+ INSERT INTO t4 VALUES(1);
+ SELECT * FROM t4;
+ }
+} {1}
+do_test insert-5.2 {
+ execsql {
+ INSERT INTO t4 SELECT x+1 FROM t4;
+ SELECT * FROM t4;
+ }
+} {1 2}
+do_test insert-5.3 {
+ # verify that a temporary table is used to copy t4 to t4
+ set x [execsql {
+ EXPLAIN INSERT INTO t4 SELECT x+2 FROM t4;
+ }]
+ expr {[lsearch $x OpenTemp]>0}
+} {1}
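+# A hypothetical helper (not part of tester.tcl) that shows how the
+# EXPLAIN-based checks in this section work: it pulls just the opcode
+# column out of an EXPLAIN listing.  Assumes the five-column EXPLAIN
+# output of SQLite 2.x (addr, opcode, p1, p2, p3); e.g. "OpenTemp"
+# appears in [explain_ops {INSERT INTO t4 SELECT x+3 FROM t4}].
+proc explain_ops {sql} {
+  set ops {}
+  foreach {addr op p1 p2 p3} [execsql "EXPLAIN $sql"] {
+    lappend ops $op
+  }
+  return $ops
+}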
+do_test insert-5.4 {
+ # Verify that table "test1" begins on page 3. This should be the same
+ # page number used by "t4" above.
+ execsql {
+ SELECT rootpage FROM sqlite_master WHERE name='test1';
+ }
+} {3}
+do_test insert-5.5 {
+ # Verify that "t4" begins on page 3.
+ execsql {
+ SELECT rootpage FROM sqlite_temp_master WHERE name='t4';
+ }
+} {3}
+do_test insert-5.6 {
+ # This should not use an intermediate temporary table.
+ execsql {
+ INSERT INTO t4 SELECT one FROM test1 WHERE three=7;
+ SELECT * FROM t4
+ }
+} {1 2 8}
+do_test insert-5.7 {
+ # verify that no temporary table is used to copy test1 to t4
+ set x [execsql {
+ EXPLAIN INSERT INTO t4 SELECT one FROM test1;
+ }]
+ expr {[lsearch $x OpenTemp]>0}
+} {0}
+
+# Ticket #334: REPLACE statement corrupting indices.
+#
+do_test insert-6.1 {
+ execsql {
+ CREATE TABLE t1(a INTEGER PRIMARY KEY, b UNIQUE);
+ INSERT INTO t1 VALUES(1,2);
+ INSERT INTO t1 VALUES(2,3);
+ SELECT b FROM t1 WHERE b=2;
+ }
+} {2}
+do_test insert-6.2 {
+ execsql {
+ REPLACE INTO t1 VALUES(1,4);
+ SELECT b FROM t1 WHERE b=2;
+ }
+} {}
+do_test insert-6.3 {
+ execsql {
+ UPDATE OR REPLACE t1 SET a=2 WHERE b=4;
+ SELECT * FROM t1 WHERE b=4;
+ }
+} {2 4}
+do_test insert-6.4 {
+ execsql {
+ SELECT * FROM t1 WHERE b=3;
+ }
+} {}
+
+integrity_check insert-99.0
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/insert2.test b/usr/src/cmd/svc/configd/sqlite/test/insert2.test
new file mode 100644
index 0000000000..1d1d72a11a
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/insert2.test
@@ -0,0 +1,197 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing the INSERT statement that takes its
+# result from a SELECT.
+#
+# $Id: insert2.test,v 1.10 2002/06/25 13:16:04 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Create some tables with data that we can select against
+#
+do_test insert2-1.0 {
+ execsql {CREATE TABLE d1(n int, log int);}
+ for {set i 1} {$i<=20} {incr i} {
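+    # the inner loop leaves j at the smallest value with 2^j >= i,
+    # i.e. ceiling(log2(i)); that becomes the "log" column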
+ for {set j 0} {pow(2,$j)<$i} {incr j} {}
+ execsql "INSERT INTO d1 VALUES($i,$j)"
+ }
+ execsql {SELECT * FROM d1 ORDER BY n}
+} {1 0 2 1 3 2 4 2 5 3 6 3 7 3 8 3 9 4 10 4 11 4 12 4 13 4 14 4 15 4 16 4 17 5 18 5 19 5 20 5}
+
+# Insert into a new table from the old one.
+#
+do_test insert2-1.1.1 {
+ execsql {
+ CREATE TABLE t1(log int, cnt int);
+ PRAGMA count_changes=on;
+ INSERT INTO t1 SELECT log, count(*) FROM d1 GROUP BY log;
+ }
+} {6}
+do_test insert2-1.1.2 {
+ db changes
+} {6}
+do_test insert2-1.1.3 {
+ execsql {SELECT * FROM t1 ORDER BY log}
+} {0 1 1 1 2 2 3 4 4 8 5 4}
+
+do_test insert2-1.2.1 {
+ catch {execsql {DROP TABLE t1}}
+ execsql {
+ CREATE TABLE t1(log int, cnt int);
+ INSERT INTO t1
+ SELECT log, count(*) FROM d1 GROUP BY log
+ EXCEPT SELECT n-1,log FROM d1;
+ }
+} {4}
+do_test insert2-1.2.2 {
+ execsql {
+ SELECT * FROM t1 ORDER BY log;
+ }
+} {0 1 3 4 4 8 5 4}
+do_test insert2-1.3.1 {
+ catch {execsql {DROP TABLE t1}}
+ execsql {
+ CREATE TABLE t1(log int, cnt int);
+ PRAGMA count_changes=off;
+ INSERT INTO t1
+ SELECT log, count(*) FROM d1 GROUP BY log
+ INTERSECT SELECT n-1,log FROM d1;
+ }
+} {}
+do_test insert2-1.3.2 {
+ execsql {
+ SELECT * FROM t1 ORDER BY log;
+ }
+} {1 1 2 2}
+do_test insert2-1.4 {
+ catch {execsql {DROP TABLE t1}}
+ set r [execsql {
+ CREATE TABLE t1(log int, cnt int);
+ CREATE INDEX i1 ON t1(log);
+ CREATE INDEX i2 ON t1(cnt);
+ INSERT INTO t1 SELECT log, count() FROM d1 GROUP BY log;
+ SELECT * FROM t1 ORDER BY log;
+ }]
+ lappend r [execsql {SELECT cnt FROM t1 WHERE log=3}]
+ lappend r [execsql {SELECT log FROM t1 WHERE cnt=4 ORDER BY log}]
+} {0 1 1 1 2 2 3 4 4 8 5 4 4 {3 5}}
+
+do_test insert2-2.0 {
+ execsql {
+ CREATE TABLE t3(a,b,c);
+ CREATE TABLE t4(x,y);
+ INSERT INTO t4 VALUES(1,2);
+ SELECT * FROM t4;
+ }
+} {1 2}
+do_test insert2-2.1 {
+ execsql {
+ INSERT INTO t3(a,c) SELECT * FROM t4;
+ SELECT * FROM t3;
+ }
+} {1 {} 2}
+do_test insert2-2.2 {
+ execsql {
+ DELETE FROM t3;
+ INSERT INTO t3(c,b) SELECT * FROM t4;
+ SELECT * FROM t3;
+ }
+} {{} 2 1}
+do_test insert2-2.3 {
+ execsql {
+ DELETE FROM t3;
+ INSERT INTO t3(c,a,b) SELECT x, 'hi', y FROM t4;
+ SELECT * FROM t3;
+ }
+} {hi 2 1}
+
+integrity_check insert2-3.0
+
+# Fill table t4 with lots of data
+#
+do_test insert2-3.1 {
+ execsql {
+ SELECT * from t4;
+ }
+} {1 2}
+do_test insert2-3.2 {
+ execsql {
+ BEGIN;
+ INSERT INTO t4 VALUES(2,4);
+ INSERT INTO t4 VALUES(3,6);
+ INSERT INTO t4 VALUES(4,8);
+ INSERT INTO t4 VALUES(5,10);
+ INSERT INTO t4 VALUES(6,12);
+ INSERT INTO t4 VALUES(7,14);
+ INSERT INTO t4 VALUES(8,16);
+ INSERT INTO t4 VALUES(9,18);
+ INSERT INTO t4 VALUES(10,20);
+ COMMIT;
+ }
+ db changes
+} {9}
+do_test insert2-3.2.1 {
+ execsql {
+ SELECT count(*) FROM t4;
+ }
+} {10}
+do_test insert2-3.3 {
+ execsql {
+ BEGIN;
+ INSERT INTO t4 SELECT x+(SELECT max(x) FROM t4),y FROM t4;
+ INSERT INTO t4 SELECT x+(SELECT max(x) FROM t4),y FROM t4;
+ INSERT INTO t4 SELECT x+(SELECT max(x) FROM t4),y FROM t4;
+ INSERT INTO t4 SELECT x+(SELECT max(x) FROM t4),y FROM t4;
+ COMMIT;
+ SELECT count(*) FROM t4;
+ }
+} {160}
+do_test insert2-3.4 {
+ execsql {
+ BEGIN;
+ UPDATE t4 SET y='lots of data for the row where x=' || x
+ || ' and y=' || y || ' - even more data to fill space';
+ COMMIT;
+ SELECT count(*) FROM t4;
+ }
+} {160}
+do_test insert2-3.5 {
+ execsql {
+ BEGIN;
+ INSERT INTO t4 SELECT x+(SELECT max(x)+1 FROM t4),y FROM t4;
+ SELECT count(*) from t4;
+ ROLLBACK;
+ }
+} {320}
+do_test insert2-3.6 {
+ execsql {
+ SELECT count(*) FROM t4;
+ }
+} {160}
+do_test insert2-3.7 {
+ execsql {
+ BEGIN;
+ DELETE FROM t4 WHERE x!=123;
+ SELECT count(*) FROM t4;
+ ROLLBACK;
+ }
+} {1}
+do_test insert2-3.8 {
+ db changes
+} {159}
+integrity_check insert2-3.9
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/interrupt.test b/usr/src/cmd/svc/configd/sqlite/test/interrupt.test
new file mode 100644
index 0000000000..d98ff2ae7e
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/interrupt.test
@@ -0,0 +1,170 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2004 Feb 8
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this script is the sqlite_interrupt() API.
+#
+# $Id: interrupt.test,v 1.4.2.1 2004/05/10 20:27:42 drh Exp $
+
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Compute a checksum on the entire database.
+#
+proc cksum {{db db}} {
+ set txt [$db eval {SELECT name, type, sql FROM sqlite_master}]\n
+ foreach tbl [$db eval {SELECT name FROM sqlite_master WHERE type='table'}] {
+ append txt [$db eval "SELECT * FROM $tbl"]\n
+ }
+ foreach prag {default_synchronous default_cache_size} {
+ append txt $prag-[$db eval "PRAGMA $prag"]\n
+ }
+ set cksum [string length $txt]-[md5 $txt]
+ # puts $cksum-[file size test.db]
+ return $cksum
+}
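+
+# A minimal usage sketch of cksum (illustrative only; assumes the open
+# db handle and the md5 command provided by the test fixture): snapshot
+# the checksum, run a read-only statement, and confirm the database
+# content is unchanged.
+do_test interrupt-0.1-sketch {
+  set ::presum [cksum]
+  execsql {SELECT count(*) FROM sqlite_master}
+  string equal [cksum] $::presum
+} {1}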
+
+# This routine attempts to execute the sql in $sql. It triggers an
+# interrupt at progressively later points during the processing
+# and checks to make sure SQLITE_INTERRUPT is returned. Eventually,
+# the routine completes successfully.
+#
+proc interrupt_test {testid sql result {initcnt 0} {maxcnt 1000000}} {
+ set orig_sum [cksum]
+ set i $initcnt
+ global sqlite_interrupt_count
+ while {$i<$maxcnt} {
+ incr i
+ set sqlite_interrupt_count $i
+ do_test $testid.$i.1 [format {
+ set ::r [catchsql %s]
+ set ::code [db errorcode]
+ expr {$::code==0 || $::code==9}
+ } [list $sql]] 1
+ if {$::code==9} {
+ do_test $testid.$i.2 {
+ cksum
+ } $orig_sum
+ } elseif {$sqlite_interrupt_count>0} {
+ do_test $testid.$i.99 {
+ set ::r
+ } [list 0 $result]
+ break
+ }
+ }
+ set sqlite_interrupt_count 0
+}
+
+do_test interrupt-1.1 {
+ execsql {
+ CREATE TABLE t1(a,b);
+ SELECT name FROM sqlite_master;
+ }
+} {t1}
+interrupt_test interrupt-1.2 {DROP TABLE t1} {} 1 14
+do_test interrupt-1.3 {
+ execsql {
+ SELECT name FROM sqlite_master;
+ }
+} {}
+integrity_check interrupt-1.4
+
+do_test interrupt-2.1 {
+ execsql {
+ BEGIN;
+ CREATE TABLE t1(a,b);
+ INSERT INTO t1 VALUES(1,randstr(300,400));
+ INSERT INTO t1 SELECT a+1, randstr(300,400) FROM t1;
+ INSERT INTO t1 SELECT a+2, a || '-' || b FROM t1;
+ INSERT INTO t1 SELECT a+4, a || '-' || b FROM t1;
+ INSERT INTO t1 SELECT a+8, a || '-' || b FROM t1;
+ INSERT INTO t1 SELECT a+16, a || '-' || b FROM t1;
+ INSERT INTO t1 SELECT a+32, a || '-' || b FROM t1;
+ COMMIT;
+ UPDATE t1 SET b=substr(b,-5,5);
+ SELECT count(*) from t1;
+ }
+} 64
+set origsize [file size test.db]
+set cksum [db eval {SELECT md5sum(a || b) FROM t1}]
+interrupt_test interrupt-2.2 {VACUUM} {} 100
+do_test interrupt-2.3 {
+ execsql {
+ SELECT md5sum(a || b) FROM t1;
+ }
+} $cksum
+do_test interrupt-2.4 {
+ expr {$::origsize>[file size test.db]}
+} 1
+integrity_check interrupt-2.5
+
+# Ticket #594. If an interrupt occurs in the middle of a transaction
+# and that transaction is later rolled back, the internal schema tables do
+# not reset.
+#
+for {set i 1} {$i<50} {incr i 5} {
+ do_test interrupt-3.$i.1 {
+ execsql {
+ BEGIN;
+ CREATE TEMP TABLE t2(x,y);
+ SELECT name FROM sqlite_temp_master;
+ }
+ } {t2}
+ do_test interrupt-3.$i.2 {
+ set ::sqlite_interrupt_count $::i
+ catchsql {
+ INSERT INTO t2 SELECT * FROM t1;
+ }
+ } {1 interrupted}
+ do_test interrupt-3.$i.3 {
+ execsql {
+ SELECT name FROM sqlite_temp_master;
+ }
+ } {t2}
+ do_test interrupt-3.$i.4 {
+ catchsql {
+ ROLLBACK
+ }
+ } {0 {}}
+ do_test interrupt-3.$i.5 {
+ catchsql {SELECT name FROM sqlite_temp_master};
+ execsql {
+ SELECT name FROM sqlite_temp_master;
+ }
+ } {}
+}
+
+# There are reports of a memory leak if an interrupt occurs near
+# the beginning of a complex query, before the first callback. We
+# will try to reproduce it here:
+#
+execsql {
+ CREATE TABLE t2(a,b,c);
+ INSERT INTO t2 SELECT round(a/10), randstr(50,80), randstr(50,60) FROM t1;
+}
+set sql {
+ SELECT max(min(b,c)), min(max(b,c)), a FROM t2 GROUP BY a ORDER BY a;
+}
+set sqlite_interrupt_count 1000000
+execsql $sql
+set max_count [expr {1000000-$sqlite_interrupt_count}]
+for {set i 1} {$i<$max_count-5} {incr i 1} {
+ do_test interrupt-4.$i.1 {
+ set ::sqlite_interrupt_count $::i
+ catchsql $sql
+ } {1 interrupted}
+}
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/intpkey.test b/usr/src/cmd/svc/configd/sqlite/test/intpkey.test
new file mode 100644
index 0000000000..546ab8649a
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/intpkey.test
@@ -0,0 +1,490 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library.
+#
+# This file implements tests for the special processing associated
+# with INTEGER PRIMARY KEY columns.
+#
+# $Id: intpkey.test,v 1.14 2003/06/15 23:42:25 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Create a table with a primary key and a datatype other than
+# integer
+#
+do_test intpkey-1.0 {
+ execsql {
+ CREATE TABLE t1(a TEXT PRIMARY KEY, b, c);
+ }
+} {}
+
+# There should be an index associated with the primary key
+#
+do_test intpkey-1.1 {
+ execsql {
+ SELECT name FROM sqlite_master
+ WHERE type='index' AND tbl_name='t1';
+ }
+} {{(t1 autoindex 1)}}
+
+# Now create a table with an integer primary key and verify that
+# there is no associated index.
+#
+do_test intpkey-1.2 {
+ execsql {
+ DROP TABLE t1;
+ CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c);
+ SELECT name FROM sqlite_master
+ WHERE type='index' AND tbl_name='t1';
+ }
+} {}
+
+# Insert some records into the new table. Specify the primary key
+# and verify that the key is used as the record number.
+#
+do_test intpkey-1.3 {
+ execsql {
+ INSERT INTO t1 VALUES(5,'hello','world');
+ }
+ db last_insert_rowid
+} {5}
+do_test intpkey-1.4 {
+ execsql {
+ SELECT * FROM t1;
+ }
+} {5 hello world}
+do_test intpkey-1.5 {
+ execsql {
+ SELECT rowid, * FROM t1;
+ }
+} {5 5 hello world}
+
+# Attempting to insert a duplicate primary key should give a constraint
+# failure.
+#
+do_test intpkey-1.6 {
+ set r [catch {execsql {
+ INSERT INTO t1 VALUES(5,'second','entry');
+ }} msg]
+ lappend r $msg
+} {1 {PRIMARY KEY must be unique}}
+do_test intpkey-1.7 {
+ execsql {
+ SELECT rowid, * FROM t1;
+ }
+} {5 5 hello world}
+do_test intpkey-1.8 {
+ set r [catch {execsql {
+ INSERT INTO t1 VALUES(6,'second','entry');
+ }} msg]
+ lappend r $msg
+} {0 {}}
+do_test intpkey-1.8.1 {
+ db last_insert_rowid
+} {6}
+do_test intpkey-1.9 {
+ execsql {
+ SELECT rowid, * FROM t1;
+ }
+} {5 5 hello world 6 6 second entry}
+
+# A ROWID is automatically generated for new records that do not specify
+# the integer primary key.
+#
+do_test intpkey-1.10 {
+ execsql {
+ INSERT INTO t1(b,c) VALUES('one','two');
+ SELECT b FROM t1 ORDER BY b;
+ }
+} {hello one second}
+
+# Try to change the ROWID for the new entry.
+#
+do_test intpkey-1.11 {
+ execsql {
+ UPDATE t1 SET a=4 WHERE b='one';
+ SELECT * FROM t1;
+ }
+} {4 one two 5 hello world 6 second entry}
+
+# Make sure SELECT statements are able to use the primary key column
+# as an index.
+#
+do_test intpkey-1.12 {
+ execsql {
+ SELECT * FROM t1 WHERE a==4;
+ }
+} {4 one two}
+
+# Try to insert a non-integer value into the primary key field. This
+# should result in a data type mismatch.
+#
+do_test intpkey-1.13.1 {
+ set r [catch {execsql {
+ INSERT INTO t1 VALUES('x','y','z');
+ }} msg]
+ lappend r $msg
+} {1 {datatype mismatch}}
+do_test intpkey-1.13.2 {
+ set r [catch {execsql {
+ INSERT INTO t1 VALUES('','y','z');
+ }} msg]
+ lappend r $msg
+} {1 {datatype mismatch}}
+do_test intpkey-1.14 {
+ set r [catch {execsql {
+ INSERT INTO t1 VALUES(3.4,'y','z');
+ }} msg]
+ lappend r $msg
+} {1 {datatype mismatch}}
+do_test intpkey-1.15 {
+ set r [catch {execsql {
+ INSERT INTO t1 VALUES(-3,'y','z');
+ }} msg]
+ lappend r $msg
+} {0 {}}
+do_test intpkey-1.16 {
+ execsql {SELECT * FROM t1}
+} {-3 y z 4 one two 5 hello world 6 second entry}
+
+#### INDICES
+# Check to make sure indices work correctly with integer primary keys
+#
+do_test intpkey-2.1 {
+ execsql {
+ CREATE INDEX i1 ON t1(b);
+ SELECT * FROM t1 WHERE b=='y'
+ }
+} {-3 y z}
+do_test intpkey-2.1.1 {
+ execsql {
+ SELECT * FROM t1 WHERE b=='y' AND rowid<0
+ }
+} {-3 y z}
+do_test intpkey-2.1.2 {
+ execsql {
+ SELECT * FROM t1 WHERE b=='y' AND rowid<0 AND rowid>=-20
+ }
+} {-3 y z}
+do_test intpkey-2.1.3 {
+ execsql {
+ SELECT * FROM t1 WHERE b>='y'
+ }
+} {-3 y z}
+do_test intpkey-2.1.4 {
+ execsql {
+ SELECT * FROM t1 WHERE b>='y' AND rowid<10
+ }
+} {-3 y z}
+
+do_test intpkey-2.2 {
+ execsql {
+ UPDATE t1 SET a=8 WHERE b=='y';
+ SELECT * FROM t1 WHERE b=='y';
+ }
+} {8 y z}
+do_test intpkey-2.3 {
+ execsql {
+ SELECT rowid, * FROM t1;
+ }
+} {4 4 one two 5 5 hello world 6 6 second entry 8 8 y z}
+do_test intpkey-2.4 {
+ execsql {
+ SELECT rowid, * FROM t1 WHERE b<'second'
+ }
+} {5 5 hello world 4 4 one two}
+do_test intpkey-2.4.1 {
+ execsql {
+ SELECT rowid, * FROM t1 WHERE 'second'>b
+ }
+} {5 5 hello world 4 4 one two}
+do_test intpkey-2.4.2 {
+ execsql {
+ SELECT rowid, * FROM t1 WHERE 8>rowid AND 'second'>b
+ }
+} {4 4 one two 5 5 hello world}
+do_test intpkey-2.4.3 {
+ execsql {
+ SELECT rowid, * FROM t1 WHERE 8>rowid AND 'second'>b AND 0<rowid
+ }
+} {4 4 one two 5 5 hello world}
+do_test intpkey-2.5 {
+ execsql {
+ SELECT rowid, * FROM t1 WHERE b>'a'
+ }
+} {5 5 hello world 4 4 one two 6 6 second entry 8 8 y z}
+do_test intpkey-2.6 {
+ execsql {
+ DELETE FROM t1 WHERE rowid=4;
+ SELECT * FROM t1 WHERE b>'a';
+ }
+} {5 hello world 6 second entry 8 y z}
+do_test intpkey-2.7 {
+ execsql {
+ UPDATE t1 SET a=-4 WHERE rowid=8;
+ SELECT * FROM t1 WHERE b>'a';
+ }
+} {5 hello world 6 second entry -4 y z}
+do_test intpkey-2.8 {
+ execsql {
+ SELECT * FROM t1
+ }
+} {-4 y z 5 hello world 6 second entry}
+
+# Run an SQL statement and append the search count to the end of the result.
+#
+proc count sql {
+ set ::sqlite_search_count 0
+ return [concat [execsql $sql] $::sqlite_search_count]
+}
+
+# Create indices that include the integer primary key as one of their
+# columns.
+#
+do_test intpkey-3.1 {
+ execsql {
+ CREATE INDEX i2 ON t1(a);
+ }
+} {}
+do_test intpkey-3.2 {
+ count {
+ SELECT * FROM t1 WHERE a=5;
+ }
+} {5 hello world 0}
+do_test intpkey-3.3 {
+ count {
+ SELECT * FROM t1 WHERE a>4 AND a<6;
+ }
+} {5 hello world 2}
+do_test intpkey-3.4 {
+ count {
+ SELECT * FROM t1 WHERE b>='hello' AND b<'hello2';
+ }
+} {5 hello world 3}
+do_test intpkey-3.5 {
+ execsql {
+ CREATE INDEX i3 ON t1(c,a);
+ }
+} {}
+do_test intpkey-3.6 {
+ count {
+ SELECT * FROM t1 WHERE c=='world';
+ }
+} {5 hello world 3}
+do_test intpkey-3.7 {
+ execsql {INSERT INTO t1 VALUES(11,'hello','world')}
+ count {
+ SELECT * FROM t1 WHERE c=='world';
+ }
+} {5 hello world 11 hello world 5}
+do_test intpkey-3.8 {
+ count {
+ SELECT * FROM t1 WHERE c=='world' AND a>7;
+ }
+} {11 hello world 5}
+do_test intpkey-3.9 {
+ count {
+ SELECT * FROM t1 WHERE 7<a;
+ }
+} {11 hello world 1}
+
+# Test inequality constraints on integer primary keys and rowids
+#
+do_test intpkey-4.1 {
+ count {
+ SELECT * FROM t1 WHERE 11=rowid
+ }
+} {11 hello world 0}
+do_test intpkey-4.2 {
+ count {
+ SELECT * FROM t1 WHERE 11=rowid AND b=='hello'
+ }
+} {11 hello world 0}
+do_test intpkey-4.3 {
+ count {
+ SELECT * FROM t1 WHERE 11=rowid AND b=='hello' AND c IS NOT NULL;
+ }
+} {11 hello world 0}
+do_test intpkey-4.4 {
+ count {
+ SELECT * FROM t1 WHERE rowid==11
+ }
+} {11 hello world 0}
+do_test intpkey-4.5 {
+ count {
+ SELECT * FROM t1 WHERE oid==11 AND b=='hello'
+ }
+} {11 hello world 0}
+do_test intpkey-4.6 {
+ count {
+ SELECT * FROM t1 WHERE a==11 AND b=='hello' AND c IS NOT NULL;
+ }
+} {11 hello world 0}
+
+do_test intpkey-4.7 {
+ count {
+ SELECT * FROM t1 WHERE 8<rowid;
+ }
+} {11 hello world 1}
+do_test intpkey-4.8 {
+ count {
+ SELECT * FROM t1 WHERE 8<rowid AND 11>=oid;
+ }
+} {11 hello world 1}
+do_test intpkey-4.9 {
+ count {
+ SELECT * FROM t1 WHERE 11<=_rowid_ AND 12>=a;
+ }
+} {11 hello world 1}
+do_test intpkey-4.10 {
+ count {
+ SELECT * FROM t1 WHERE 0>=_rowid_;
+ }
+} {-4 y z 1}
+do_test intpkey-4.11 {
+ count {
+ SELECT * FROM t1 WHERE a<0;
+ }
+} {-4 y z 1}
+do_test intpkey-4.12 {
+ count {
+ SELECT * FROM t1 WHERE a<0 AND a>10;
+ }
+} {1}
+
+# Make sure it is OK to insert a rowid of 0
+#
+do_test intpkey-5.1 {
+ execsql {
+ INSERT INTO t1 VALUES(0,'zero','entry');
+ }
+ count {
+ SELECT * FROM t1 WHERE a=0;
+ }
+} {0 zero entry 0}
+do_test intpkey-5.2 {
+ execsql {
+ SELECT rowid, a FROM t1
+ }
+} {-4 -4 0 0 5 5 6 6 11 11}
+
+# Test the ability of the COPY command to put data into a
+# table that contains an integer primary key.
+#
+do_test intpkey-6.1 {
+ set f [open ./data1.txt w]
+ puts $f "20\tb-20\tc-20"
+ puts $f "21\tb-21\tc-21"
+ puts $f "22\tb-22\tc-22"
+ close $f
+ execsql {
+ COPY t1 FROM 'data1.txt';
+ SELECT * FROM t1 WHERE a>=20;
+ }
+} {20 b-20 c-20 21 b-21 c-21 22 b-22 c-22}
+do_test intpkey-6.2 {
+ execsql {
+ SELECT * FROM t1 WHERE b=='hello'
+ }
+} {5 hello world 11 hello world}
+do_test intpkey-6.3 {
+ execsql {
+ DELETE FROM t1 WHERE b='b-21';
+ SELECT * FROM t1 WHERE b=='b-21';
+ }
+} {}
+do_test intpkey-6.4 {
+ execsql {
+ SELECT * FROM t1 WHERE a>=20
+ }
+} {20 b-20 c-20 22 b-22 c-22}
+
+# Do an insert of values with the columns specified out of order.
+#
+do_test intpkey-7.1 {
+ execsql {
+ INSERT INTO t1(c,b,a) VALUES('row','new',30);
+ SELECT * FROM t1 WHERE rowid>=30;
+ }
+} {30 new row}
+do_test intpkey-7.2 {
+ execsql {
+ SELECT * FROM t1 WHERE rowid>20;
+ }
+} {22 b-22 c-22 30 new row}
+
+# Do an insert from a select statement.
+#
+do_test intpkey-8.1 {
+ execsql {
+ CREATE TABLE t2(x INTEGER PRIMARY KEY, y, z);
+ INSERT INTO t2 SELECT * FROM t1;
+ SELECT rowid FROM t2;
+ }
+} {-4 0 5 6 11 20 22 30}
+do_test intpkey-8.2 {
+ execsql {
+ SELECT x FROM t2;
+ }
+} {-4 0 5 6 11 20 22 30}
+
+do_test intpkey-9.1 {
+ execsql {
+ UPDATE t1 SET c='www' WHERE c='world';
+ SELECT rowid, a, c FROM t1 WHERE c=='www';
+ }
+} {5 5 www 11 11 www}
+
+
+# Check insert of NULL for primary key
+#
+do_test intpkey-10.1 {
+ execsql {
+ DROP TABLE t2;
+ CREATE TABLE t2(x INTEGER PRIMARY KEY, y, z);
+ INSERT INTO t2 VALUES(NULL, 1, 2);
+ SELECT * from t2;
+ }
+} {1 1 2}
+do_test intpkey-10.2 {
+ execsql {
+ INSERT INTO t2 VALUES(NULL, 2, 3);
+ SELECT * from t2 WHERE x=2;
+ }
+} {2 2 3}
+do_test intpkey-10.3 {
+ execsql {
+ INSERT INTO t2 SELECT NULL, z, y FROM t2;
+ SELECT * FROM t2;
+ }
+} {1 1 2 2 2 3 3 2 1 4 3 2}
+
+# This test checks whether a floating point number can be used
+# to reference an integer primary key.
+#
+do_test intpkey-11.1 {
+ execsql {
+ SELECT b FROM t1 WHERE a=2.0+3.0;
+ }
+} {hello}
+do_test intpkey-11.2 {
+ execsql {
+ SELECT b FROM t1 WHERE a=2.0+3.5;
+ }
+} {}
+
+integrity_check intpkey-12.1
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/ioerr.test b/usr/src/cmd/svc/configd/sqlite/test/ioerr.test
new file mode 100644
index 0000000000..d7539bc209
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/ioerr.test
@@ -0,0 +1,123 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 October 12
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing for correct handling of I/O errors
+# such as writes failing because the disk is full.
+#
+# The tests in this file use special facilities that are only
+# available in the SQLite test fixture.
+#
+# $Id: ioerr.test,v 1.3 2003/04/25 15:37:59 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
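+# Each pass below arms the fault injector so that the n-th I/O operation
+# fails, runs a fixed script, and requires that an error is reported
+# whenever the fault actually fires.  Once a pass completes without
+# consuming all n pending operations, the script needs fewer than n I/O
+# calls and the loop stops.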
+set ::go 1
+for {set n 1} {$go} {incr n} {
+ do_test ioerr-1.$n.1 {
+ set ::sqlite_io_error_pending 0
+ db close
+ catch {file delete -force test.db}
+ catch {file delete -force test.db-journal}
+ sqlite db test.db
+ execsql {SELECT * FROM sqlite_master}
+ } {}
+ do_test ioerr-1.$n.2 [subst {
+ set ::sqlite_io_error_pending $n
+ }] $n
+ do_test ioerr-1.$n.3 {
+ set r [catch {db eval {
+ CREATE TABLE t1(a,b,c);
+ SELECT * FROM sqlite_master;
+ BEGIN TRANSACTION;
+ INSERT INTO t1 VALUES(1,2,3);
+ INSERT INTO t1 VALUES(4,5,6);
+ ROLLBACK;
+ SELECT * FROM t1;
+ BEGIN TRANSACTION;
+ INSERT INTO t1 VALUES(1,2,3);
+ INSERT INTO t1 VALUES(4,5,6);
+ COMMIT;
+ SELECT * FROM t1;
+ DELETE FROM t1 WHERE a<100;
+ }} msg]
+ # if {$r} {puts $msg}
+ set ::go [expr {$::sqlite_io_error_pending<=0}]
+ expr {$::sqlite_io_error_pending>0 || $r!=0}
+ } {1}
+}
+set ::sqlite_io_error_pending 0
+
+proc cksum {{db db}} {
+ set txt [$db eval {SELECT name, type, sql FROM sqlite_master}]\n
+ foreach tbl [$db eval {SELECT name FROM sqlite_master WHERE type='table'}] {
+ append txt [$db eval "SELECT * FROM $tbl"]\n
+ }
+ foreach prag {default_synchronous default_cache_size} {
+ append txt $prag-[$db eval "PRAGMA $prag"]\n
+ }
+ set cksum [string length $txt]-[md5 $txt]
+ # puts $cksum-[file size test.db]
+ return $cksum
+}
+
+set ::go 1
+for {set n 1} {$go} {incr n} {
+ do_test ioerr-2.$n.1 {
+ set ::sqlite_io_error_pending 0
+ db close
+ catch {file delete -force test.db}
+ catch {file delete -force test.db-journal}
+ sqlite db test.db
+ execsql {
+ BEGIN;
+ CREATE TABLE t1(a, b, c);
+ INSERT INTO t1 VALUES(1, randstr(5,50), randstr(5,50));
+ INSERT INTO t1 SELECT a+2, b||'-'||rowid, c||'-'||rowid FROM t1;
+ INSERT INTO t1 SELECT a+4, b||'-'||rowid, c||'-'||rowid FROM t1;
+ INSERT INTO t1 SELECT a+8, b||'-'||rowid, c||'-'||rowid FROM t1;
+ INSERT INTO t1 SELECT a+16, b||'-'||rowid, c||'-'||rowid FROM t1;
+ INSERT INTO t1 SELECT a+32, b||'-'||rowid, c||'-'||rowid FROM t1;
+ INSERT INTO t1 SELECT a+64, b||'-'||rowid, c||'-'||rowid FROM t1;
+ INSERT INTO t1 SELECT a+128, b||'-'||rowid, c||'-'||rowid FROM t1;
+ CREATE TABLE t2 AS SELECT * FROM t1;
+ CREATE TABLE t3 AS SELECT * FROM t1;
+ COMMIT;
+ DROP TABLE t2;
+ }
+ set ::cksum [cksum]
+ execsql {
+ SELECT name FROM sqlite_master WHERE type='table'
+ }
+ } {t1 t3}
+ do_test ioerr-2.$n.2 [subst {
+ set ::sqlite_io_error_pending $n
+ }] $n
+ do_test ioerr-2.$n.3 {
+ set r [catch {db eval {
+ VACUUM;
+ }} msg]
+ # puts "error_pending=$::sqlite_io_error_pending"
+ # if {$r} {puts $msg}
+ set ::go [expr {$::sqlite_io_error_pending<=0}]
+ expr {$::sqlite_io_error_pending>0 || $r!=0}
+ set ::sqlite_io_error_pending 0
+ db close
+ sqlite db test.db
+ cksum
+ } $cksum
+}
+set ::sqlite_io_error_pending 0
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/join.test b/usr/src/cmd/svc/configd/sqlite/test/join.test
new file mode 100644
index 0000000000..990a623e54
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/join.test
@@ -0,0 +1,396 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2002 May 24
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library.
+#
+# This file implements tests for joins, including outer joins.
+#
+# $Id: join.test,v 1.11 2003/09/27 13:39:40 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+do_test join-1.1 {
+ execsql {
+ CREATE TABLE t1(a,b,c);
+ INSERT INTO t1 VALUES(1,2,3);
+ INSERT INTO t1 VALUES(2,3,4);
+ INSERT INTO t1 VALUES(3,4,5);
+ SELECT * FROM t1;
+ }
+} {1 2 3 2 3 4 3 4 5}
+do_test join-1.2 {
+ execsql {
+ CREATE TABLE t2(b,c,d);
+ INSERT INTO t2 VALUES(1,2,3);
+ INSERT INTO t2 VALUES(2,3,4);
+ INSERT INTO t2 VALUES(3,4,5);
+ SELECT * FROM t2;
+ }
+} {1 2 3 2 3 4 3 4 5}
+
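+# Note: execsql2 (a tester.tcl helper) returns each row as alternating
+# fully-qualified column names and values, which makes it easy to see
+# exactly which columns a NATURAL JOIN or USING clause keeps.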
+do_test join-1.3 {
+ execsql2 {
+ SELECT * FROM t1 NATURAL JOIN t2;
+ }
+} {t1.a 1 t1.b 2 t1.c 3 t2.d 4 t1.a 2 t1.b 3 t1.c 4 t2.d 5}
+do_test join-1.3.1 {
+ execsql2 {
+ SELECT * FROM t2 NATURAL JOIN t1;
+ }
+} {t2.b 2 t2.c 3 t2.d 4 t1.a 1 t2.b 3 t2.c 4 t2.d 5 t1.a 2}
+do_test join-1.4 {
+ execsql2 {
+ SELECT * FROM t1 INNER JOIN t2 USING(b,c);
+ }
+} {t1.a 1 t1.b 2 t1.c 3 t2.d 4 t1.a 2 t1.b 3 t1.c 4 t2.d 5}
+do_test join-1.5 {
+ execsql2 {
+ SELECT * FROM t1 INNER JOIN t2 USING(b);
+ }
+} {t1.a 1 t1.b 2 t1.c 3 t2.c 3 t2.d 4 t1.a 2 t1.b 3 t1.c 4 t2.c 4 t2.d 5}
+do_test join-1.6 {
+ execsql2 {
+ SELECT * FROM t1 INNER JOIN t2 USING(c);
+ }
+} {t1.a 1 t1.b 2 t1.c 3 t2.b 2 t2.d 4 t1.a 2 t1.b 3 t1.c 4 t2.b 3 t2.d 5}
+do_test join-1.7 {
+ execsql2 {
+ SELECT * FROM t1 INNER JOIN t2 USING(c,b);
+ }
+} {t1.a 1 t1.b 2 t1.c 3 t2.d 4 t1.a 2 t1.b 3 t1.c 4 t2.d 5}
+
+do_test join-1.8 {
+ execsql {
+ SELECT * FROM t1 NATURAL CROSS JOIN t2;
+ }
+} {1 2 3 4 2 3 4 5}
+do_test join-1.9 {
+ execsql {
+ SELECT * FROM t1 CROSS JOIN t2 USING(b,c);
+ }
+} {1 2 3 4 2 3 4 5}
+do_test join-1.10 {
+ execsql {
+ SELECT * FROM t1 NATURAL INNER JOIN t2;
+ }
+} {1 2 3 4 2 3 4 5}
+do_test join-1.11 {
+ execsql {
+ SELECT * FROM t1 INNER JOIN t2 USING(b,c);
+ }
+} {1 2 3 4 2 3 4 5}
+do_test join-1.12 {
+ execsql {
+ SELECT * FROM t1 natural inner join t2;
+ }
+} {1 2 3 4 2 3 4 5}
+do_test join-1.13 {
+ execsql2 {
+ SELECT * FROM t1 NATURAL JOIN
+ (SELECT b as 'c', c as 'd', d as 'e' FROM t2) as t3
+ }
+} {t1.a 1 t1.b 2 t1.c 3 t3.d 4 t3.e 5}
+do_test join-1.14 {
+ execsql2 {
+ SELECT * FROM (SELECT b as 'c', c as 'd', d as 'e' FROM t2) as 'tx'
+ NATURAL JOIN t1
+ }
+} {tx.c 3 tx.d 4 tx.e 5 t1.a 1 t1.b 2}
+
+do_test join-1.15 {
+ execsql {
+ CREATE TABLE t3(c,d,e);
+ INSERT INTO t3 VALUES(2,3,4);
+ INSERT INTO t3 VALUES(3,4,5);
+ INSERT INTO t3 VALUES(4,5,6);
+ SELECT * FROM t3;
+ }
+} {2 3 4 3 4 5 4 5 6}
+do_test join-1.16 {
+ execsql {
+ SELECT * FROM t1 natural join t2 natural join t3;
+ }
+} {1 2 3 4 5 2 3 4 5 6}
+do_test join-1.17 {
+ execsql2 {
+ SELECT * FROM t1 natural join t2 natural join t3;
+ }
+} {t1.a 1 t1.b 2 t1.c 3 t2.d 4 t3.e 5 t1.a 2 t1.b 3 t1.c 4 t2.d 5 t3.e 6}
+do_test join-1.18 {
+ execsql {
+ CREATE TABLE t4(d,e,f);
+ INSERT INTO t4 VALUES(2,3,4);
+ INSERT INTO t4 VALUES(3,4,5);
+ INSERT INTO t4 VALUES(4,5,6);
+ SELECT * FROM t4;
+ }
+} {2 3 4 3 4 5 4 5 6}
+do_test join-1.19 {
+ execsql {
+ SELECT * FROM t1 natural join t2 natural join t4;
+ }
+} {1 2 3 4 5 6}
+do_test join-1.19 {
+ execsql2 {
+ SELECT * FROM t1 natural join t2 natural join t4;
+ }
+} {t1.a 1 t1.b 2 t1.c 3 t2.d 4 t4.e 5 t4.f 6}
+do_test join-1.20 {
+ execsql {
+ SELECT * FROM t1 natural join t2 natural join t3 WHERE t1.a=1
+ }
+} {1 2 3 4 5}
+
+do_test join-2.1 {
+ execsql {
+ SELECT * FROM t1 NATURAL LEFT JOIN t2;
+ }
+} {1 2 3 4 2 3 4 5 3 4 5 {}}
+do_test join-2.2 {
+ execsql {
+ SELECT * FROM t2 NATURAL LEFT OUTER JOIN t1;
+ }
+} {1 2 3 {} 2 3 4 1 3 4 5 2}
+do_test join-2.3 {
+ catchsql {
+ SELECT * FROM t1 NATURAL RIGHT OUTER JOIN t2;
+ }
+} {1 {RIGHT and FULL OUTER JOINs are not currently supported}}
+do_test join-2.4 {
+ execsql {
+ SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.d
+ }
+} {1 2 3 {} {} {} 2 3 4 {} {} {} 3 4 5 1 2 3}
+do_test join-2.5 {
+ execsql {
+ SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.d WHERE t1.a>1
+ }
+} {2 3 4 {} {} {} 3 4 5 1 2 3}
+do_test join-2.6 {
+ execsql {
+ SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.d WHERE t2.b IS NULL OR t2.b>1
+ }
+} {1 2 3 {} {} {} 2 3 4 {} {} {}}
+
+do_test join-3.1 {
+ catchsql {
+ SELECT * FROM t1 NATURAL JOIN t2 ON t1.a=t2.b;
+ }
+} {1 {a NATURAL join may not have an ON or USING clause}}
+do_test join-3.2 {
+ catchsql {
+ SELECT * FROM t1 NATURAL JOIN t2 USING(b);
+ }
+} {1 {a NATURAL join may not have an ON or USING clause}}
+do_test join-3.3 {
+ catchsql {
+ SELECT * FROM t1 JOIN t2 ON t1.a=t2.b USING(b);
+ }
+} {1 {cannot have both ON and USING clauses in the same join}}
+do_test join-3.4 {
+ catchsql {
+ SELECT * FROM t1 JOIN t2 USING(a);
+ }
+} {1 {cannot join using column a - column not present in both tables}}
+do_test join-3.5 {
+ catchsql {
+ SELECT * FROM t1 USING(a);
+ }
+} {0 {1 2 3 2 3 4 3 4 5}}
+do_test join-3.6 {
+ catchsql {
+ SELECT * FROM t1 JOIN t2 ON t3.a=t2.b;
+ }
+} {1 {no such column: t3.a}}
+do_test join-3.7 {
+ catchsql {
+ SELECT * FROM t1 INNER OUTER JOIN t2;
+ }
+} {1 {unknown or unsupported join type: INNER OUTER}}
+do_test join-3.8 {
+ catchsql {
+ SELECT * FROM t1 LEFT BOGUS JOIN t2;
+ }
+} {1 {unknown or unsupported join type: LEFT BOGUS}}
+
+do_test join-4.1 {
+ execsql {
+ BEGIN;
+ CREATE TABLE t5(a INTEGER PRIMARY KEY);
+ CREATE TABLE t6(a INTEGER);
+ INSERT INTO t6 VALUES(NULL);
+ INSERT INTO t6 VALUES(NULL);
+ INSERT INTO t6 SELECT * FROM t6;
+ INSERT INTO t6 SELECT * FROM t6;
+ INSERT INTO t6 SELECT * FROM t6;
+ INSERT INTO t6 SELECT * FROM t6;
+ INSERT INTO t6 SELECT * FROM t6;
+ INSERT INTO t6 SELECT * FROM t6;
+ COMMIT;
+ }
+ execsql {
+ SELECT * FROM t6 NATURAL JOIN t5;
+ }
+} {}
+do_test join-4.2 {
+ execsql {
+ SELECT * FROM t6, t5 WHERE t6.a<t5.a;
+ }
+} {}
+do_test join-4.3 {
+ execsql {
+ SELECT * FROM t6, t5 WHERE t6.a>t5.a;
+ }
+} {}
+do_test join-4.4 {
+ execsql {
+ UPDATE t6 SET a='xyz';
+ SELECT * FROM t6 NATURAL JOIN t5;
+ }
+} {}
+do_test join-4.6 {
+ execsql {
+ SELECT * FROM t6, t5 WHERE t6.a<t5.a;
+ }
+} {}
+do_test join-4.7 {
+ execsql {
+ SELECT * FROM t6, t5 WHERE t6.a>t5.a;
+ }
+} {}
+do_test join-4.8 {
+ execsql {
+ UPDATE t6 SET a=1;
+ SELECT * FROM t6 NATURAL JOIN t5;
+ }
+} {}
+do_test join-4.9 {
+ execsql {
+ SELECT * FROM t6, t5 WHERE t6.a<t5.a;
+ }
+} {}
+do_test join-4.10 {
+ execsql {
+ SELECT * FROM t6, t5 WHERE t6.a>t5.a;
+ }
+} {}
+
+do_test join-5.1 {
+ execsql {
+ BEGIN;
+ create table centros (id integer primary key, centro);
+ INSERT INTO centros VALUES(1,'xxx');
+ create table usuarios (id integer primary key, nombre, apellidos,
+ idcentro integer);
+ INSERT INTO usuarios VALUES(1,'a','aa',1);
+ INSERT INTO usuarios VALUES(2,'b','bb',1);
+ INSERT INTO usuarios VALUES(3,'c','cc',NULL);
+ create index idcentro on usuarios (idcentro);
+ END;
+ select usuarios.id, usuarios.nombre, centros.centro from
+ usuarios left outer join centros on usuarios.idcentro = centros.id;
+ }
+} {1 a xxx 2 b xxx 3 c {}}
+
+# A test for ticket #247.
+#
+do_test join-7.1 {
+ execsql {
+ CREATE TABLE t7 (x, y);
+ INSERT INTO t7 VALUES ("pa1", 1);
+ INSERT INTO t7 VALUES ("pa2", NULL);
+ INSERT INTO t7 VALUES ("pa3", NULL);
+ INSERT INTO t7 VALUES ("pa4", 2);
+ INSERT INTO t7 VALUES ("pa30", 131);
+ INSERT INTO t7 VALUES ("pa31", 130);
+ INSERT INTO t7 VALUES ("pa28", NULL);
+
+ CREATE TABLE t8 (a integer primary key, b);
+ INSERT INTO t8 VALUES (1, "pa1");
+ INSERT INTO t8 VALUES (2, "pa4");
+ INSERT INTO t8 VALUES (3, NULL);
+ INSERT INTO t8 VALUES (4, NULL);
+ INSERT INTO t8 VALUES (130, "pa31");
+ INSERT INTO t8 VALUES (131, "pa30");
+
+ SELECT coalesce(t8.a,999) from t7 LEFT JOIN t8 on y=a;
+ }
+} {1 999 999 2 131 130 999}
+
+# Make sure a LEFT JOIN works correctly when the right-hand table is
+# really a view that is itself a join. Ticket #306.
+#
+do_test join-8.1 {
+ execsql {
+ BEGIN;
+ CREATE TABLE t9(a INTEGER PRIMARY KEY, b);
+ INSERT INTO t9 VALUES(1,11);
+ INSERT INTO t9 VALUES(2,22);
+ CREATE TABLE t10(x INTEGER PRIMARY KEY, y);
+ INSERT INTO t10 VALUES(1,2);
+ INSERT INTO t10 VALUES(3,3);
+ CREATE TABLE t11(p INTEGER PRIMARY KEY, q);
+ INSERT INTO t11 VALUES(2,111);
+ INSERT INTO t11 VALUES(3,333);
+ CREATE VIEW v10_11 AS SELECT x, q FROM t10, t11 WHERE t10.y=t11.p;
+ COMMIT;
+ SELECT * FROM t9 LEFT JOIN v10_11 ON( a=x );
+ }
+} {1 11 1 111 2 22 {} {}}
+do_test join-8.2 {
+ execsql {
+ SELECT * FROM t9 LEFT JOIN (SELECT x, q FROM t10, t11 WHERE t10.y=t11.p)
+ ON( a=x);
+ }
+} {1 11 1 111 2 22 {} {}}
+do_test join-8.3 {
+ execsql {
+ SELECT * FROM v10_11 LEFT JOIN t9 ON( a=x );
+ }
+} {1 111 1 11 3 333 {} {}}
+
+# Ticket #350 describes a scenario where LEFT OUTER JOIN does not
+# function correctly if the right table in the join is really a
+# subquery.
+#
+# To test the problem, we generate the same LEFT OUTER JOIN in two
+# separate SELECTs, one using a subquery and the other referencing the
+# table directly, then connect the two SELECTs using an EXCEPT.
+# Both queries should generate the same results so the answer should
+# be an empty set.
+#
+do_test join-9.1 {
+ execsql {
+ BEGIN;
+ CREATE TABLE t12(a,b);
+ INSERT INTO t12 VALUES(1,11);
+ INSERT INTO t12 VALUES(2,22);
+ CREATE TABLE t13(b,c);
+ INSERT INTO t13 VALUES(22,222);
+ COMMIT;
+ SELECT * FROM t12 NATURAL LEFT JOIN t13
+ EXCEPT
+ SELECT * FROM t12 NATURAL LEFT JOIN (SELECT * FROM t13 WHERE b>0);
+ }
+} {}
+do_test join-9.2 {
+ execsql {
+ CREATE VIEW v13 AS SELECT * FROM t13 WHERE b>0;
+ SELECT * FROM t12 NATURAL LEFT JOIN t13
+ EXCEPT
+ SELECT * FROM t12 NATURAL LEFT JOIN v13;
+ }
+} {}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/join2.test b/usr/src/cmd/svc/configd/sqlite/test/join2.test
new file mode 100644
index 0000000000..493e1a8613
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/join2.test
@@ -0,0 +1,76 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2002 May 24
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library.
+#
+# This file implements tests for joins, including outer joins.
+#
+# $Id: join2.test,v 1.1 2004/01/24 20:18:13 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+do_test join2-1.1 {
+ execsql {
+ CREATE TABLE t1(a,b);
+ INSERT INTO t1 VALUES(1,11);
+ INSERT INTO t1 VALUES(2,22);
+ INSERT INTO t1 VALUES(3,33);
+ SELECT * FROM t1;
+ }
+} {1 11 2 22 3 33}
+do_test join2-1.2 {
+ execsql {
+ CREATE TABLE t2(b,c);
+ INSERT INTO t2 VALUES(11,111);
+ INSERT INTO t2 VALUES(33,333);
+ INSERT INTO t2 VALUES(44,444);
+ SELECT * FROM t2;
+ }
+} {11 111 33 333 44 444};
+do_test join2-1.3 {
+ execsql {
+ CREATE TABLE t3(c,d);
+ INSERT INTO t3 VALUES(111,1111);
+ INSERT INTO t3 VALUES(444,4444);
+ INSERT INTO t3 VALUES(555,5555);
+ SELECT * FROM t3;
+ }
+} {111 1111 444 4444 555 5555}
+
+do_test join2-1.4 {
+ execsql {
+ SELECT * FROM
+ t1 NATURAL JOIN t2 NATURAL JOIN t3
+ }
+} {1 11 111 1111}
+do_test join2-1.5 {
+ execsql {
+ SELECT * FROM
+ t1 NATURAL JOIN t2 NATURAL LEFT OUTER JOIN t3
+ }
+} {1 11 111 1111 3 33 333 {}}
+do_test join2-1.6 {
+ execsql {
+ SELECT * FROM
+ t1 NATURAL LEFT OUTER JOIN t2 NATURAL JOIN t3
+ }
+} {1 11 111 1111}
+do_test join2-1.7 {
+ execsql {
+ SELECT * FROM
+ t1 NATURAL LEFT OUTER JOIN (t2 NATURAL JOIN t3)
+ }
+} {1 11 111 1111 2 22 {} {} 3 33 {} {}}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/join3_28.test b/usr/src/cmd/svc/configd/sqlite/test/join3_28.test
new file mode 100644
index 0000000000..5ddbb8a1d5
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/join3_28.test
@@ -0,0 +1,37 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2002 May 24
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library.
+#
+# This file implements tests for joins, including outer joins, where
+# there are a large number of tables involved in the join.
+#
+# $Id: join3_28.test,v 1.1.2.1 2004/07/22 16:08:39 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+catch {unset result}
+set result {}
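+# Iteration N creates table tN holding the single value N, then selects from
+# the cross join of t1 through tN; the expected output is the list 1 2 ... N.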
+for {set N 1} {$N<=40} {incr N} {
+ lappend result $N
+ do_test join3-1.$N {
+ execsql "CREATE TABLE t${N}(x);"
+ execsql "INSERT INTO t$N VALUES($N)"
+ set sql "SELECT * FROM t1"
+ for {set i 2} {$i<=$N} {incr i} {append sql ", t$i"}
+ execsql $sql
+ } $result
+}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/join4_28.test b/usr/src/cmd/svc/configd/sqlite/test/join4_28.test
new file mode 100644
index 0000000000..189a44af32
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/join4_28.test
@@ -0,0 +1,80 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2002 May 24
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library.
+#
+# This file implements tests for left outer joins containing WHERE
+# clauses that restrict the scope of the left term of the join.
+#
+# $Id: join4_28.test,v 1.1.2.1 2004/07/22 16:08:39 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+do_test join4-1.1 {
+ execsql {
+ create temp table t1(a integer, b varchar(10));
+ insert into t1 values(1,'one');
+ insert into t1 values(2,'two');
+ insert into t1 values(3,'three');
+ insert into t1 values(4,'four');
+
+ create temp table t2(x integer, y varchar(10), z varchar(10));
+ insert into t2 values(2,'niban','ok');
+ insert into t2 values(4,'yonban','err');
+ }
+ execsql {
+ select * from t1 left outer join t2 on t1.a=t2.x where t2.z='ok'
+ }
+} {2 two 2 niban ok}
+do_test join4-1.2 {
+ execsql {
+ select * from t1 left outer join t2 on t1.a=t2.x and t2.z='ok'
+ }
+} {1 one {} {} {} 2 two 2 niban ok 3 three {} {} {} 4 four {} {} {}}
+do_test join4-1.3 {
+ execsql {
+ create index i2 on t2(z);
+ }
+ execsql {
+ select * from t1 left outer join t2 on t1.a=t2.x where t2.z='ok'
+ }
+} {2 two 2 niban ok}
+do_test join4-1.4 {
+ execsql {
+ select * from t1 left outer join t2 on t1.a=t2.x and t2.z='ok'
+ }
+} {1 one {} {} {} 2 two 2 niban ok 3 three {} {} {} 4 four {} {} {}}
+do_test join4-1.5 {
+ execsql {
+ select * from t1 left outer join t2 on t1.a=t2.x where t2.z>='ok'
+ }
+} {2 two 2 niban ok}
+do_test join4-1.4 {
+ execsql {
+ select * from t1 left outer join t2 on t1.a=t2.x and t2.z>='ok'
+ }
+} {1 one {} {} {} 2 two 2 niban ok 3 three {} {} {} 4 four {} {} {}}
+do_test join4-1.6 {
+ execsql {
+ select * from t1 left outer join t2 on t1.a=t2.x where t2.z IN ('ok')
+ }
+} {2 two 2 niban ok}
+do_test join4-1.7 {
+ execsql {
+ select * from t1 left outer join t2 on t1.a=t2.x and t2.z IN ('ok')
+ }
+} {1 one {} {} {} 2 two 2 niban ok 3 three {} {} {} 4 four {} {} {}}
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/lastinsert.test b/usr/src/cmd/svc/configd/sqlite/test/lastinsert.test
new file mode 100644
index 0000000000..8137d1763b
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/lastinsert.test
@@ -0,0 +1,322 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# Tests to make sure that the value returned by last_insert_rowid() (LIRID)
+# is updated properly, especially inside triggers
+#
+# Note 1: insert into table is now the only statement which changes LIRID
+# Note 2: upon entry into before or instead of triggers,
+# LIRID is unchanged (rather than -1)
+# Note 3: LIRID is changed within the context of a trigger,
+# but is restored once the trigger exits
+# Note 4: LIRID is not changed by an insert into a view (since everything
+# is done within instead of trigger context)
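+#
+# For example, in test lastinsert-1.1 below, three rows are inserted into t1
+# (rowids 1, 2 and 3), so last_insert_rowid() reports 3; the UPDATE and
+# DELETE in tests 1.2 and 1.3 leave that value untouched.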
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# ----------------------------------------------------------------------------
+# 1.x - basic tests (no triggers)
+
+# LIRID changed properly after an insert into a table
+do_test lastinsert-1.1 {
+ catchsql {
+ create table t1 (k integer primary key);
+ insert into t1 values (1);
+ insert into t1 values (NULL);
+ insert into t1 values (NULL);
+ select last_insert_rowid();
+ }
+} {0 3}
+
+# LIRID unchanged after an update on a table
+do_test lastinsert-1.2 {
+ catchsql {
+ update t1 set k=4 where k=2;
+ select last_insert_rowid();
+ }
+} {0 3}
+
+# LIRID unchanged after a delete from a table
+do_test lastinsert-1.3 {
+ catchsql {
+ delete from t1 where k=4;
+ select last_insert_rowid();
+ }
+} {0 3}
+
+# LIRID unchanged after create table/view statements
+do_test lastinsert-1.4 {
+ catchsql {
+ create table t2 (k integer primary key, val1, val2, val3);
+ create view v as select * from t1;
+ select last_insert_rowid();
+ }
+} {0 3}
+
+# ----------------------------------------------------------------------------
+# 2.x - tests with after insert trigger
+
+# LIRID changed properly after an insert into table containing an after trigger
+do_test lastinsert-2.1 {
+ catchsql {
+ delete from t2;
+ create trigger r1 after insert on t1 for each row begin
+ insert into t2 values (NEW.k*2, last_insert_rowid(), NULL, NULL);
+ update t2 set k=k+10, val2=100+last_insert_rowid();
+ update t2 set val3=1000+last_insert_rowid();
+ end;
+ insert into t1 values (13);
+ select last_insert_rowid();
+ }
+} {0 13}
+
+# LIRID equals NEW.k upon entry into after insert trigger
+do_test lastinsert-2.2 {
+ catchsql {
+ select val1 from t2;
+ }
+} {0 13}
+
+# LIRID changed properly by insert within context of after insert trigger
+do_test lastinsert-2.3 {
+ catchsql {
+ select val2 from t2;
+ }
+} {0 126}
+
+# LIRID unchanged by update within context of after insert trigger
+do_test lastinsert-2.4 {
+ catchsql {
+ select val3 from t2;
+ }
+} {0 1026}
+
+# ----------------------------------------------------------------------------
+# 3.x - tests with after update trigger
+
+# LIRID not changed after an update onto a table containing an after trigger
+do_test lastinsert-3.1 {
+ catchsql {
+ delete from t2;
+ drop trigger r1;
+ create trigger r1 after update on t1 for each row begin
+ insert into t2 values (NEW.k*2, last_insert_rowid(), NULL, NULL);
+ update t2 set k=k+10, val2=100+last_insert_rowid();
+ update t2 set val3=1000+last_insert_rowid();
+ end;
+ update t1 set k=14 where k=3;
+ select last_insert_rowid();
+ }
+} {0 13}
+
+# LIRID unchanged upon entry into after update trigger
+do_test lastinsert-3.2 {
+ catchsql {
+ select val1 from t2;
+ }
+} {0 13}
+
+# LIRID changed properly by insert within context of after update trigger
+do_test lastinsert-3.3 {
+ catchsql {
+ select val2 from t2;
+ }
+} {0 128}
+
+# LIRID unchanged by update within context of after update trigger
+do_test lastinsert-3.4 {
+ catchsql {
+ select val3 from t2;
+ }
+} {0 1028}
+
+# ----------------------------------------------------------------------------
+# 4.x - tests with instead of insert trigger
+
+# LIRID not changed after an insert into view containing an instead of trigger
+do_test lastinsert-4.1 {
+ catchsql {
+ delete from t2;
+ drop trigger r1;
+ create trigger r1 instead of insert on v for each row begin
+ insert into t2 values (NEW.k*2, last_insert_rowid(), NULL, NULL);
+ update t2 set k=k+10, val2=100+last_insert_rowid();
+ update t2 set val3=1000+last_insert_rowid();
+ end;
+ insert into v values (15);
+ select last_insert_rowid();
+ }
+} {0 13}
+
+# LIRID unchanged upon entry into instead of trigger
+do_test lastinsert-4.2 {
+ catchsql {
+ select val1 from t2;
+ }
+} {0 13}
+
+# LIRID changed properly by insert within context of instead of trigger
+do_test lastinsert-4.3 {
+ catchsql {
+ select val2 from t2;
+ }
+} {0 130}
+
+# LIRID unchanged by update within context of instead of trigger
+do_test lastinsert-4.4 {
+ catchsql {
+ select val3 from t2;
+ }
+} {0 1030}
+
+# ----------------------------------------------------------------------------
+# 5.x - tests with before delete trigger
+
+# LIRID not changed after a delete on a table containing a before trigger
+do_test lastinsert-5.1 {
+ catchsql {
+ delete from t2;
+ drop trigger r1;
+ create trigger r1 before delete on t1 for each row begin
+ insert into t2 values (77, last_insert_rowid(), NULL, NULL);
+ update t2 set k=k+10, val2=100+last_insert_rowid();
+ update t2 set val3=1000+last_insert_rowid();
+ end;
+ delete from t1 where k=1;
+ select last_insert_rowid();
+ }
+} {0 13}
+
+# LIRID unchanged upon entry into delete trigger
+do_test lastinsert-5.2 {
+ catchsql {
+ select val1 from t2;
+ }
+} {0 13}
+
+# LIRID changed properly by insert within context of delete trigger
+do_test lastinsert-5.3 {
+ catchsql {
+ select val2 from t2;
+ }
+} {0 177}
+
+# LIRID unchanged by update within context of delete trigger
+do_test lastinsert-5.4 {
+ catchsql {
+ select val3 from t2;
+ }
+} {0 1077}
+
+# ----------------------------------------------------------------------------
+# 6.x - tests with instead of update trigger
+
+# LIRID not changed after an update on a view containing an instead of trigger
+do_test lastinsert-6.1 {
+ catchsql {
+ delete from t2;
+ drop trigger r1;
+ create trigger r1 instead of update on v for each row begin
+ insert into t2 values (NEW.k*2, last_insert_rowid(), NULL, NULL);
+ update t2 set k=k+10, val2=100+last_insert_rowid();
+ update t2 set val3=1000+last_insert_rowid();
+ end;
+ update v set k=16 where k=14;
+ select last_insert_rowid();
+ }
+} {0 13}
+
+# LIRID unchanged upon entry into instead of trigger
+do_test lastinsert-6.2 {
+ catchsql {
+ select val1 from t2;
+ }
+} {0 13}
+
+# LIRID changed properly by insert within context of instead of trigger
+do_test lastinsert-6.3 {
+ catchsql {
+ select val2 from t2;
+ }
+} {0 132}
+
+# LIRID unchanged by update within context of instead of trigger
+do_test lastinsert-6.4 {
+ catchsql {
+ select val3 from t2;
+ }
+} {0 1032}
+
+# ----------------------------------------------------------------------------
+# 7.x - complex tests with temporary tables and nested instead of triggers
+
+do_test lastinsert-7.1 {
+ catchsql {
+ drop table t1; drop table t2; drop trigger r1;
+ create temp table t1 (k integer primary key);
+ create temp table t2 (k integer primary key);
+ create temp view v1 as select * from t1;
+ create temp view v2 as select * from t2;
+ create temp table rid (k integer primary key, rin, rout);
+ insert into rid values (1, NULL, NULL);
+ insert into rid values (2, NULL, NULL);
+ create temp trigger r1 instead of insert on v1 for each row begin
+ update rid set rin=last_insert_rowid() where k=1;
+ insert into t1 values (100+NEW.k);
+ insert into v2 values (100+last_insert_rowid());
+ update rid set rout=last_insert_rowid() where k=1;
+ end;
+ create temp trigger r2 instead of insert on v2 for each row begin
+ update rid set rin=last_insert_rowid() where k=2;
+ insert into t2 values (1000+NEW.k);
+ update rid set rout=last_insert_rowid() where k=2;
+ end;
+ insert into t1 values (77);
+ select last_insert_rowid();
+ }
+} {0 77}
+
+do_test lastinsert-7.2 {
+ catchsql {
+ insert into v1 values (5);
+ select last_insert_rowid();
+ }
+} {0 77}
+
+do_test lastinsert-7.3 {
+ catchsql {
+ select rin from rid where k=1;
+ }
+} {0 77}
+
+do_test lastinsert-7.4 {
+ catchsql {
+ select rout from rid where k=1;
+ }
+} {0 105}
+
+do_test lastinsert-7.5 {
+ catchsql {
+ select rin from rid where k=2;
+ }
+} {0 105}
+
+do_test lastinsert-7.6 {
+ catchsql {
+ select rout from rid where k=2;
+ }
+} {0 1205}
+
+finish_test
+
diff --git a/usr/src/cmd/svc/configd/sqlite/test/laststmtchanges.test b/usr/src/cmd/svc/configd/sqlite/test/laststmtchanges.test
new file mode 100644
index 0000000000..b35ed12731
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/laststmtchanges.test
@@ -0,0 +1,247 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# Tests to make sure that the value returned by last_statement_change_count()
+# (LSCC) is updated properly, especially inside triggers
+#
+# Note 1: LSCC remains constant within a statement and only updates once
+# the statement is finished (triggers count as part of statement)
+# Note 2: LSCC is changed within the context of a trigger
+# much like last_insert_rowid() (see lastinsert.test),
+# but is restored once the trigger exits
+# Note 3: LSCC is not changed by a change to a view (since everything
+# is done within instead of trigger context)
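+#
+# For example, in test laststmtchanges-1.2 below the UPDATE changes five rows
+# of t0, so last_statement_change_count() reports 5 once that statement has
+# finished.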
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# ----------------------------------------------------------------------------
+# 1.x - basic tests (no triggers)
+
+# LSCC set properly after insert
+do_test laststmtchanges-1.1 {
+ catchsql {
+ create table t0 (x);
+ insert into t0 values (1);
+ insert into t0 values (1);
+ insert into t0 values (2);
+ insert into t0 values (2);
+ insert into t0 values (1);
+ insert into t0 values (1);
+ insert into t0 values (1);
+ insert into t0 values (2);
+ select last_statement_change_count();
+ }
+} {0 1}
+
+# LSCC set properly after update
+do_test laststmtchanges-1.2 {
+ catchsql {
+ update t0 set x=3 where x=1;
+ select last_statement_change_count();
+ }
+} {0 5}
+
+# LSCC unchanged within an update statement
+do_test laststmtchanges-1.3 {
+ catchsql {
+ update t0 set x=x+last_statement_change_count() where x=3;
+ select count() from t0 where x=8;
+ }
+} {0 5}
+
+# LSCC set properly after update on table where no rows changed
+do_test laststmtchanges-1.4 {
+ catchsql {
+ update t0 set x=77 where x=88;
+ select last_statement_change_count();
+ }
+} {0 0}
+
+# LSCC set properly after delete from table
+do_test laststmtchanges-1.5 {
+ catchsql {
+ delete from t0 where x=2;
+ select last_statement_change_count();
+ }
+} {0 3}
+
+# ----------------------------------------------------------------------------
+# 2.x - tests with after insert trigger
+
+# LSCC changed properly after insert into table containing after trigger
+do_test laststmtchanges-2.1 {
+ catchsql {
+ create table t1 (k integer primary key);
+ create table t2 (k integer primary key, v1, v2);
+ create trigger r1 after insert on t1 for each row begin
+ insert into t2 values (NULL, last_statement_change_count(), NULL);
+ update t0 set x=x;
+ update t2 set v2=last_statement_change_count();
+ end;
+ insert into t1 values (77);
+ select last_statement_change_count();
+ }
+} {0 1}
+
+# LSCC unchanged upon entry into after insert trigger
+do_test laststmtchanges-2.2 {
+ catchsql {
+ select v1 from t2;
+ }
+} {0 3}
+
+# LSCC changed properly by update within context of after insert trigger
+do_test laststmtchanges-2.3 {
+ catchsql {
+ select v2 from t2;
+ }
+} {0 5}
+
+# ----------------------------------------------------------------------------
+# 3.x - tests with after update trigger
+
+# LSCC changed properly after update into table containing after trigger
+do_test laststmtchanges-3.1 {
+ catchsql {
+ drop trigger r1;
+ delete from t2; delete from t2;
+ create trigger r1 after update on t1 for each row begin
+ insert into t2 values (NULL, last_statement_change_count(), NULL);
+ delete from t0 where oid=1 or oid=2;
+ update t2 set v2=last_statement_change_count();
+ end;
+ update t1 set k=k;
+ select last_statement_change_count();
+ }
+} {0 1}
+
+# LSCC unchanged upon entry into after update trigger
+do_test laststmtchanges-3.2 {
+ catchsql {
+ select v1 from t2;
+ }
+} {0 0}
+
+# LSCC changed properly by delete within context of after update trigger
+do_test laststmtchanges-3.3 {
+ catchsql {
+ select v2 from t2;
+ }
+} {0 2}
+
+# ----------------------------------------------------------------------------
+# 4.x - tests with before delete trigger
+
+# LSCC changed properly on delete from table containing before trigger
+do_test laststmtchanges-4.1 {
+ catchsql {
+ drop trigger r1;
+ delete from t2; delete from t2;
+ create trigger r1 before delete on t1 for each row begin
+ insert into t2 values (NULL, last_statement_change_count(), NULL);
+ insert into t0 values (5);
+ update t2 set v2=last_statement_change_count();
+ end;
+ delete from t1;
+ select last_statement_change_count();
+ }
+} {0 1}
+
+# LSCC unchanged upon entry into before delete trigger
+do_test laststmtchanges-4.2 {
+ catchsql {
+ select v1 from t2;
+ }
+} {0 0}
+
+# LSCC changed properly by insert within context of before delete trigger
+do_test laststmtchanges-4.3 {
+ catchsql {
+ select v2 from t2;
+ }
+} {0 1}
+
+# ----------------------------------------------------------------------------
+# 5.x - complex tests with temporary tables and nested instead of triggers
+
+do_test laststmtchanges-5.1 {
+ catchsql {
+ drop table t0; drop table t1; drop table t2;
+ create temp table t0(x);
+ create temp table t1 (k integer primary key);
+ create temp table t2 (k integer primary key);
+ create temp view v1 as select * from t1;
+ create temp view v2 as select * from t2;
+ create temp table n1 (k integer primary key, n);
+ create temp table n2 (k integer primary key, n);
+ insert into t0 values (1);
+ insert into t0 values (2);
+ insert into t0 values (1);
+ insert into t0 values (1);
+ insert into t0 values (1);
+ insert into t0 values (2);
+ insert into t0 values (2);
+ insert into t0 values (1);
+ create temp trigger r1 instead of insert on v1 for each row begin
+ insert into n1 values (NULL, last_statement_change_count());
+ update t0 set x=x*10 where x=1;
+ insert into n1 values (NULL, last_statement_change_count());
+ insert into t1 values (NEW.k);
+ insert into n1 values (NULL, last_statement_change_count());
+ update t0 set x=x*10 where x=0;
+ insert into v2 values (100+NEW.k);
+ insert into n1 values (NULL, last_statement_change_count());
+ end;
+ create temp trigger r2 instead of insert on v2 for each row begin
+ insert into n2 values (NULL, last_statement_change_count());
+ insert into t2 values (1000+NEW.k);
+ insert into n2 values (NULL, last_statement_change_count());
+ update t0 set x=x*100 where x=0;
+ insert into n2 values (NULL, last_statement_change_count());
+ delete from t0 where x=2;
+ insert into n2 values (NULL, last_statement_change_count());
+ end;
+ insert into t1 values (77);
+ select last_statement_change_count();
+ }
+} {0 1}
+
+do_test laststmtchanges-5.2 {
+ catchsql {
+ delete from t1 where k=88;
+ select last_statement_change_count();
+ }
+} {0 0}
+
+do_test laststmtchanges-5.3 {
+ catchsql {
+ insert into v1 values (5);
+ select last_statement_change_count();
+ }
+} {0 0}
+
+do_test laststmtchanges-5.4 {
+ catchsql {
+ select n from n1;
+ }
+} {0 {0 5 1 0}}
+
+do_test laststmtchanges-5.5 {
+ catchsql {
+ select n from n2;
+ }
+} {0 {0 1 0 3}}
+
+finish_test
+
diff --git a/usr/src/cmd/svc/configd/sqlite/test/limit.test b/usr/src/cmd/svc/configd/sqlite/test/limit.test
new file mode 100644
index 0000000000..04a56fe798
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/limit.test
@@ -0,0 +1,320 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 November 6
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing the LIMIT ... OFFSET ... clause
+# of SELECT statements.
+#
+# $Id: limit.test,v 1.11.2.1 2004/07/19 23:33:04 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Build some test data
+#
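+# Each of the 32 rows gets x = 32-i (so x runs from 31 down to 0) and
+# y = 10-j, where j is the number of doublings of 1 needed to reach i,
+# giving a small set of distinct y values to sort and group on.
+#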
+set fd [open data1.txt w]
+for {set i 1} {$i<=32} {incr i} {
+ for {set j 0} {pow(2,$j)<$i} {incr j} {}
+ puts $fd "[expr {32-$i}]\t[expr {10-$j}]"
+}
+close $fd
+execsql {
+ CREATE TABLE t1(x int, y int);
+ COPY t1 FROM 'data1.txt'
+}
+file delete data1.txt
+
+do_test limit-1.0 {
+ execsql {SELECT count(*) FROM t1}
+} {32}
+do_test limit-1.1 {
+ execsql {SELECT count(*) FROM t1 LIMIT 5}
+} {32}
+do_test limit-1.2.1 {
+ execsql {SELECT x FROM t1 ORDER BY x LIMIT 5}
+} {0 1 2 3 4}
+do_test limit-1.2.2 {
+ execsql {SELECT x FROM t1 ORDER BY x LIMIT 5 OFFSET 2}
+} {2 3 4 5 6}
+do_test limit-1.2.3 {
+ execsql {SELECT x FROM t1 ORDER BY x LIMIT 2, 5}
+} {2 3 4 5 6}
+do_test limit-1.3 {
+ execsql {SELECT x FROM t1 ORDER BY x LIMIT 5 OFFSET 5}
+} {5 6 7 8 9}
+do_test limit-1.4.1 {
+ execsql {SELECT x FROM t1 ORDER BY x LIMIT 50 OFFSET 30}
+} {30 31}
+do_test limit-1.4.2 {
+ execsql {SELECT x FROM t1 ORDER BY x LIMIT 30, 50}
+} {30 31}
+do_test limit-1.5 {
+ execsql {SELECT x FROM t1 ORDER BY x LIMIT 50 OFFSET 50}
+} {}
+do_test limit-1.6 {
+ execsql {SELECT * FROM t1 AS a, t1 AS b ORDER BY a.x, b.x LIMIT 5}
+} {0 5 0 5 0 5 1 5 0 5 2 5 0 5 3 5 0 5 4 5}
+do_test limit-1.7 {
+ execsql {SELECT * FROM t1 AS a, t1 AS b ORDER BY a.x, b.x LIMIT 5 OFFSET 32}
+} {1 5 0 5 1 5 1 5 1 5 2 5 1 5 3 5 1 5 4 5}
+
+do_test limit-2.1 {
+ execsql {
+ CREATE VIEW v1 AS SELECT * FROM t1 LIMIT 2;
+ SELECT count(*) FROM (SELECT * FROM v1);
+ }
+} 2
+do_test limit-2.2 {
+ execsql {
+ CREATE TABLE t2 AS SELECT * FROM t1 LIMIT 2;
+ SELECT count(*) FROM t2;
+ }
+} 2
+do_test limit-2.3 {
+ execsql {
+ SELECT count(*) FROM t1 WHERE rowid IN (SELECT rowid FROM t1 LIMIT 2);
+ }
+} 2
+
+do_test limit-3.1 {
+ execsql {
+ SELECT z FROM (SELECT y*10+x AS z FROM t1 ORDER BY x LIMIT 10)
+ ORDER BY z LIMIT 5;
+ }
+} {50 51 52 53 54}
+
+do_test limit-4.1 {
+ execsql {
+ BEGIN;
+ CREATE TABLE t3(x);
+ INSERT INTO t3 SELECT x FROM t1 ORDER BY x LIMIT 10 OFFSET 1;
+ INSERT INTO t3 SELECT x+(SELECT max(x) FROM t3) FROM t3;
+ INSERT INTO t3 SELECT x+(SELECT max(x) FROM t3) FROM t3;
+ INSERT INTO t3 SELECT x+(SELECT max(x) FROM t3) FROM t3;
+ INSERT INTO t3 SELECT x+(SELECT max(x) FROM t3) FROM t3;
+ INSERT INTO t3 SELECT x+(SELECT max(x) FROM t3) FROM t3;
+ INSERT INTO t3 SELECT x+(SELECT max(x) FROM t3) FROM t3;
+ INSERT INTO t3 SELECT x+(SELECT max(x) FROM t3) FROM t3;
+ INSERT INTO t3 SELECT x+(SELECT max(x) FROM t3) FROM t3;
+ INSERT INTO t3 SELECT x+(SELECT max(x) FROM t3) FROM t3;
+ INSERT INTO t3 SELECT x+(SELECT max(x) FROM t3) FROM t3;
+ END;
+ SELECT count(*) FROM t3;
+ }
+} {10240}
+do_test limit-4.2 {
+ execsql {
+ SELECT x FROM t3 LIMIT 2 OFFSET 10000
+ }
+} {10001 10002}
+do_test limit-4.3 {
+ execsql {
+ CREATE TABLE t4 AS SELECT x,
+ 'abcdefghijklmnopqrstuvwyxz ABCDEFGHIJKLMNOPQRSTUVWYXZ' || x ||
+ 'abcdefghijklmnopqrstuvwyxz ABCDEFGHIJKLMNOPQRSTUVWYXZ' || x ||
+ 'abcdefghijklmnopqrstuvwyxz ABCDEFGHIJKLMNOPQRSTUVWYXZ' || x ||
+ 'abcdefghijklmnopqrstuvwyxz ABCDEFGHIJKLMNOPQRSTUVWYXZ' || x ||
+ 'abcdefghijklmnopqrstuvwyxz ABCDEFGHIJKLMNOPQRSTUVWYXZ' || x AS y
+ FROM t3 LIMIT 1000;
+ SELECT x FROM t4 ORDER BY y DESC LIMIT 1 OFFSET 999;
+ }
+} {1000}
+
+do_test limit-5.1 {
+ execsql {
+ CREATE TABLE t5(x,y);
+ INSERT INTO t5 SELECT x-y, x+y FROM t1 WHERE x BETWEEN 10 AND 15
+ ORDER BY x LIMIT 2;
+ SELECT * FROM t5 ORDER BY x;
+ }
+} {5 15 6 16}
+do_test limit-5.2 {
+ execsql {
+ DELETE FROM t5;
+ INSERT INTO t5 SELECT x-y, x+y FROM t1 WHERE x BETWEEN 10 AND 15
+ ORDER BY x DESC LIMIT 2;
+ SELECT * FROM t5 ORDER BY x;
+ }
+} {9 19 10 20}
+do_test limit-5.3 {
+ execsql {
+ DELETE FROM t5;
+ INSERT INTO t5 SELECT x-y, x+y FROM t1 WHERE x ORDER BY x DESC LIMIT 31;
+ SELECT * FROM t5 ORDER BY x LIMIT 2;
+ }
+} {-4 6 -3 7}
+do_test limit-5.4 {
+ execsql {
+ SELECT * FROM t5 ORDER BY x DESC, y DESC LIMIT 2;
+ }
+} {21 41 21 39}
+do_test limit-5.5 {
+ execsql {
+ DELETE FROM t5;
+ INSERT INTO t5 SELECT a.x*100+b.x, a.y*100+b.y FROM t1 AS a, t1 AS b
+ ORDER BY 1, 2 LIMIT 1000;
+ SELECT count(*), sum(x), sum(y), min(x), max(x), min(y), max(y) FROM t5;
+ }
+} {1000 1528204 593161 0 3107 505 1005}
+
+# There is some controversy about whether LIMIT 0 should be the same as
+# no limit at all or whether it should result in zero output rows.
+#
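+# In this build a negative LIMIT or OFFSET is treated as unconstrained, while
+# LIMIT 0 returns no rows; the tests below pin down that behavior.
+#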
+do_test limit-6.1 {
+ execsql {
+ BEGIN;
+ CREATE TABLE t6(a);
+ INSERT INTO t6 VALUES(1);
+ INSERT INTO t6 VALUES(2);
+ INSERT INTO t6 SELECT a+2 FROM t6;
+ COMMIT;
+ SELECT * FROM t6;
+ }
+} {1 2 3 4}
+do_test limit-6.2 {
+ execsql {
+ SELECT * FROM t6 LIMIT -1 OFFSET -1;
+ }
+} {1 2 3 4}
+do_test limit-6.3 {
+ execsql {
+ SELECT * FROM t6 LIMIT 2 OFFSET -123;
+ }
+} {1 2}
+do_test limit-6.4 {
+ execsql {
+ SELECT * FROM t6 LIMIT -432 OFFSET 2;
+ }
+} {3 4}
+do_test limit-6.5 {
+ execsql {
+ SELECT * FROM t6 LIMIT -1
+ }
+} {1 2 3 4}
+do_test limit-6.6 {
+ execsql {
+ SELECT * FROM t6 LIMIT -1 OFFSET 1
+ }
+} {2 3 4}
+do_test limit-6.7 {
+ execsql {
+ SELECT * FROM t6 LIMIT 0
+ }
+} {}
+do_test limit-6.8 {
+ execsql {
+ SELECT * FROM t6 LIMIT 0 OFFSET 1
+ }
+} {}
+
+# Make sure LIMIT works well with compound SELECT statements.
+# Ticket #393
+#
+do_test limit-7.1.1 {
+ catchsql {
+ SELECT x FROM t2 LIMIT 5 UNION ALL SELECT a FROM t6;
+ }
+} {1 {LIMIT clause should come after UNION ALL not before}}
+do_test limit-7.1.2 {
+ catchsql {
+ SELECT x FROM t2 LIMIT 5 UNION SELECT a FROM t6;
+ }
+} {1 {LIMIT clause should come after UNION not before}}
+do_test limit-7.1.3 {
+ catchsql {
+ SELECT x FROM t2 LIMIT 5 EXCEPT SELECT a FROM t6 LIMIT 3;
+ }
+} {1 {LIMIT clause should come after EXCEPT not before}}
+do_test limit-7.1.4 {
+ catchsql {
+ SELECT x FROM t2 LIMIT 0,5 INTERSECT SELECT a FROM t6;
+ }
+} {1 {LIMIT clause should come after INTERSECT not before}}
+do_test limit-7.2 {
+ execsql {
+ SELECT x FROM t2 UNION ALL SELECT a FROM t6 LIMIT 5;
+ }
+} {31 30 1 2 3}
+do_test limit-7.3 {
+ execsql {
+ SELECT x FROM t2 UNION ALL SELECT a FROM t6 LIMIT 3 OFFSET 1;
+ }
+} {30 1 2}
+do_test limit-7.4 {
+ execsql {
+ SELECT x FROM t2 UNION ALL SELECT a FROM t6 ORDER BY 1 LIMIT 3 OFFSET 1;
+ }
+} {2 3 4}
+do_test limit-7.5 {
+ execsql {
+ SELECT x FROM t2 UNION SELECT x+2 FROM t2 LIMIT 2 OFFSET 1;
+ }
+} {31 32}
+do_test limit-7.6 {
+ execsql {
+ SELECT x FROM t2 UNION SELECT x+2 FROM t2 ORDER BY 1 DESC LIMIT 2 OFFSET 1;
+ }
+} {32 31}
+do_test limit-7.7 {
+ execsql {
+ SELECT a+9 FROM t6 EXCEPT SELECT y FROM t2 LIMIT 2;
+ }
+} {11 12}
+do_test limit-7.8 {
+ execsql {
+ SELECT a+9 FROM t6 EXCEPT SELECT y FROM t2 ORDER BY 1 DESC LIMIT 2;
+ }
+} {13 12}
+do_test limit-7.9 {
+ execsql {
+ SELECT a+26 FROM t6 INTERSECT SELECT x FROM t2 LIMIT 1;
+ }
+} {30}
+do_test limit-7.10 {
+ execsql {
+ SELECT a+27 FROM t6 INTERSECT SELECT x FROM t2 LIMIT 1;
+ }
+} {30}
+do_test limit-7.11 {
+ execsql {
+ SELECT a+27 FROM t6 INTERSECT SELECT x FROM t2 LIMIT 1 OFFSET 1;
+ }
+} {31}
+do_test limit-7.12 {
+ execsql {
+ SELECT a+27 FROM t6 INTERSECT SELECT x FROM t2
+ ORDER BY 1 DESC LIMIT 1 OFFSET 1;
+ }
+} {30}
+
+# Tests for limit in conjunction with distinct. The distinct should
+# occur before both the limit and the offset. Ticket #749.
+#
+do_test limit-8.1 {
+ execsql {
+ SELECT DISTINCT round(x/100) FROM t3 LIMIT 5;
+ }
+} {0 1 2 3 4}
+do_test limit-8.2 {
+ execsql {
+ SELECT DISTINCT round(x/100) FROM t3 LIMIT 5 OFFSET 5;
+ }
+} {5 6 7 8 9}
+do_test limit-8.3 {
+ execsql {
+ SELECT DISTINCT round(x/100) FROM t3 LIMIT 5 OFFSET 25;
+ }
+} {25 26 27 28 29}
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/lock.test b/usr/src/cmd/svc/configd/sqlite/test/lock.test
new file mode 100644
index 0000000000..ca4310c4b2
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/lock.test
@@ -0,0 +1,352 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this script is database locks.
+#
+# $Id: lock.test,v 1.20 2004/02/14 16:31:04 drh Exp $
+
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Create an alternative connection to the database
+#
+do_test lock-1.0 {
+ sqlite db2 ./test.db
+ set dummy {}
+} {}
+do_test lock-1.1 {
+ execsql {SELECT name FROM sqlite_master WHERE type='table' ORDER BY name}
+} {}
+do_test lock-1.2 {
+ execsql {SELECT name FROM sqlite_master WHERE type='table' ORDER BY name} db2
+} {}
+do_test lock-1.3 {
+ execsql {CREATE TABLE t1(a int, b int)}
+ execsql {SELECT name FROM sqlite_master WHERE type='table' ORDER BY name}
+} {t1}
+#do_test lock-1.4 {
+# catchsql {
+# SELECT name FROM sqlite_master WHERE type='table' ORDER BY name
+# } db2
+#} {1 {database schema has changed}}
+do_test lock-1.5 {
+ catchsql {
+ SELECT name FROM sqlite_master WHERE type='table' ORDER BY name
+ } db2
+} {0 t1}
+
+do_test lock-1.6 {
+ execsql {INSERT INTO t1 VALUES(1,2)}
+ execsql {SELECT * FROM t1}
+} {1 2}
+do_test lock-1.7 {
+ execsql {SELECT * FROM t1} db2
+} {1 2}
+do_test lock-1.8 {
+ execsql {UPDATE t1 SET a=b, b=a} db2
+ execsql {SELECT * FROM t1} db2
+} {2 1}
+do_test lock-1.9 {
+ execsql {SELECT * FROM t1}
+} {2 1}
+do_test lock-1.10 {
+ execsql {BEGIN TRANSACTION}
+ execsql {SELECT * FROM t1}
+} {2 1}
+do_test lock-1.11 {
+ catchsql {SELECT * FROM t1} db2
+} {1 {database is locked}}
+do_test lock-1.12 {
+ execsql {ROLLBACK}
+ catchsql {SELECT * FROM t1}
+} {0 {2 1}}
+
+do_test lock-1.13 {
+ execsql {CREATE TABLE t2(x int, y int)}
+ execsql {INSERT INTO t2 VALUES(8,9)}
+ execsql {SELECT * FROM t2}
+} {8 9}
+do_test lock-1.14.1 {
+ catchsql {SELECT * FROM t2} db2
+} {1 {no such table: t2}}
+do_test lock-1.14.2 {
+ catchsql {SELECT * FROM t1} db2
+} {0 {2 1}}
+do_test lock-1.15 {
+ catchsql {SELECT * FROM t2} db2
+} {0 {8 9}}
+
+do_test lock-1.16 {
+ db eval {SELECT * FROM t1} qv {
+ set x [db eval {SELECT * FROM t1}]
+ }
+ set x
+} {2 1}
+do_test lock-1.17 {
+ db eval {SELECT * FROM t1} qv {
+ set x [db eval {SELECT * FROM t2}]
+ }
+ set x
+} {8 9}
+
+# You cannot UPDATE a table from within the callback of a SELECT
+# on that same table because the SELECT has the table locked.
+#
+do_test lock-1.18 {
+ db eval {SELECT * FROM t1} qv {
+ set r [catch {db eval {UPDATE t1 SET a=b, b=a}} msg]
+ lappend r $msg
+ }
+ set r
+} {1 {database table is locked}}
+
+# But you can UPDATE a different table from the one that is used in
+# the SELECT.
+#
+do_test lock-1.19 {
+ db eval {SELECT * FROM t1} qv {
+ set r [catch {db eval {UPDATE t2 SET x=y, y=x}} msg]
+ lappend r $msg
+ }
+ set r
+} {0 {}}
+do_test lock-1.20 {
+ execsql {SELECT * FROM t2}
+} {9 8}
+
+# It is possible to do a SELECT of the same table within the
+# callback of another SELECT on that same table because two
+# or more read-only cursors can be open at once.
+#
+do_test lock-1.21 {
+ db eval {SELECT * FROM t1} qv {
+ set r [catch {db eval {SELECT a FROM t1}} msg]
+ lappend r $msg
+ }
+ set r
+} {0 2}
+
+# Under UNIX you can do two SELECTs at once with different database
+# connections, because UNIX supports reader/writer locks. Under Windows,
+# this is not possible.
+#
+if {$::tcl_platform(platform)=="unix"} {
+ do_test lock-1.22 {
+ db eval {SELECT * FROM t1} qv {
+ set r [catch {db2 eval {SELECT a FROM t1}} msg]
+ lappend r $msg
+ }
+ set r
+ } {0 2}
+}
+integrity_check lock-1.23
+
+# If one thread has a transaction another thread cannot start
+# a transaction.
+#
+do_test lock-2.1 {
+ execsql {BEGIN TRANSACTION}
+ set r [catch {execsql {BEGIN TRANSACTION} db2} msg]
+ lappend r $msg
+} {1 {database is locked}}
+
+# Nor can the other thread do a query.
+#
+do_test lock-2.2 {
+ set r [catch {execsql {SELECT * FROM t2} db2} msg]
+ lappend r $msg
+} {1 {database is locked}}
+
+# If the other thread (the one that does not hold the transaction)
+# tries to start a transaction, we get a busy callback.
+#
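+# The handler receives the name of the locked object (empty here) and the
+# number of attempts so far; breaking out of it stops the retries, so the
+# statement still fails with "database is locked".
+#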
+do_test lock-2.3 {
+ proc callback {args} {
+ set ::callback_value $args
+ break
+ }
+ set ::callback_value {}
+ db2 busy callback
+ set r [catch {execsql {BEGIN TRANSACTION} db2} msg]
+ lappend r $msg
+ lappend r $::callback_value
+} {1 {database is locked} {{} 1}}
+do_test lock-2.4 {
+ proc callback {file count} {
+ lappend ::callback_value $count
+ if {$count>4} break
+ }
+ set ::callback_value {}
+ db2 busy callback
+ set r [catch {execsql {BEGIN TRANSACTION} db2} msg]
+ lappend r $msg
+ lappend r $::callback_value
+} {1 {database is locked} {1 2 3 4 5}}
+do_test lock-2.5 {
+ proc callback {file count} {
+ lappend ::callback_value $count
+ if {$count>4} break
+ }
+ set ::callback_value {}
+ db2 busy callback
+ set r [catch {execsql {SELECT * FROM t1} db2} msg]
+ lappend r $msg
+ lappend r $::callback_value
+} {1 {database is locked} {1 2 3 4 5}}
+
+# In this test, the 3rd invocation of the busy callback causes
+# the first thread to release its transaction. That allows the
+# second thread to continue.
+#
+do_test lock-2.6 {
+ proc callback {file count} {
+ lappend ::callback_value $count
+ if {$count>2} {
+ execsql {ROLLBACK}
+ }
+ }
+ set ::callback_value {}
+ db2 busy callback
+ set r [catch {execsql {SELECT * FROM t2} db2} msg]
+ lappend r $msg
+ lappend r $::callback_value
+} {0 {9 8} {1 2 3}}
+do_test lock-2.7 {
+ execsql {BEGIN TRANSACTION}
+ proc callback {file count} {
+ lappend ::callback_value $count
+ if {$count>2} {
+ execsql {ROLLBACK}
+ }
+ }
+ set ::callback_value {}
+ db2 busy callback
+ set r [catch {execsql {BEGIN TRANSACTION} db2} msg]
+ execsql {ROLLBACK} db2
+ lappend r $msg
+ lappend r $::callback_value
+} {0 {} {1 2 3}}
+
+# Test the built-in busy timeout handler
+#
+do_test lock-2.8 {
+ db2 timeout 400
+ execsql BEGIN
+ catchsql BEGIN db2
+} {1 {database is locked}}
+do_test lock-2.9 {
+ db2 timeout 0
+ execsql COMMIT
+} {}
+integrity_check lock-2.10
+
+# Try to start two transactions in a row
+#
+do_test lock-3.1 {
+ execsql {BEGIN TRANSACTION}
+ set r [catch {execsql {BEGIN TRANSACTION}} msg]
+ execsql {ROLLBACK}
+ lappend r $msg
+} {1 {cannot start a transaction within a transaction}}
+integrity_check lock-3.2
+
+# Make sure the busy handler and error messages work when
+# opening a new pointer to the database while another pointer
+# has the database locked.
+#
+do_test lock-4.1 {
+ db2 close
+ catch {db eval ROLLBACK}
+ db eval BEGIN
+ sqlite db2 ./test.db
+ set rc [catch {db2 eval {SELECT * FROM t1}} msg]
+ lappend rc $msg
+} {1 {database is locked}}
+do_test lock-4.2 {
+ set ::callback_value {}
+ set rc [catch {db2 eval {SELECT * FROM t1}} msg]
+ lappend rc $msg $::callback_value
+} {1 {database is locked} {}}
+do_test lock-4.3 {
+ proc callback {file count} {
+ lappend ::callback_value $count
+ if {$count>4} break
+ }
+ db2 busy callback
+ set rc [catch {db2 eval {SELECT * FROM t1}} msg]
+ lappend rc $msg $::callback_value
+} {1 {database is locked} {1 2 3 4 5}}
+execsql {ROLLBACK}
+
+# When one thread is writing, other threads cannot read. The exception is
+# that when the writing thread is writing only to its temporary tables, the
+# other threads can still read.
+#
+proc tx_exec {sql} {
+ db2 eval $sql
+}
+do_test lock-5.1 {
+ execsql {
+ SELECT * FROM t1
+ }
+} {2 1}
+do_test lock-5.2 {
+ db function tx_exec tx_exec
+ catchsql {
+ INSERT INTO t1(a,b) SELECT 3, tx_exec('SELECT y FROM t2 LIMIT 1');
+ }
+} {1 {database is locked}}
+do_test lock-5.3 {
+ execsql {
+ CREATE TEMP TABLE t3(x);
+ SELECT * FROM t3;
+ }
+} {}
+do_test lock-5.4 {
+ catchsql {
+ INSERT INTO t3 SELECT tx_exec('SELECT y FROM t2 LIMIT 1');
+ }
+} {0 {}}
+do_test lock-5.5 {
+ execsql {
+ SELECT * FROM t3;
+ }
+} {8}
+do_test lock-5.6 {
+ catchsql {
+ UPDATE t1 SET a=tx_exec('SELECT x FROM t2');
+ }
+} {1 {database is locked}}
+do_test lock-5.7 {
+ execsql {
+ SELECT * FROM t1;
+ }
+} {2 1}
+do_test lock-5.8 {
+ catchsql {
+ UPDATE t3 SET x=tx_exec('SELECT x FROM t2');
+ }
+} {0 {}}
+do_test lock-5.9 {
+ execsql {
+ SELECT * FROM t3;
+ }
+} {9}
+
+do_test lock-999.1 {
+ rename db2 {}
+} {}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/main.test b/usr/src/cmd/svc/configd/sqlite/test/main.test
new file mode 100644
index 0000000000..529e40ed61
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/main.test
@@ -0,0 +1,300 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is exercising the code in main.c.
+#
+# $Id: main.test,v 1.14 2003/05/04 17:58:27 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Tests of the sqlite_complete() function.
+#
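+# The [db complete SQL] call returns 1 when the text does not require further
+# input to form complete SQL -- that is, any statements present end with a
+# semicolon outside of strings, comments, and CREATE TRIGGER bodies -- and 0
+# otherwise.
+#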
+do_test main-1.1 {
+ db complete {This is a test}
+} {0}
+do_test main-1.2 {
+ db complete {
+ }
+} {1}
+do_test main-1.3 {
+ db complete {
+ -- a comment ;
+ }
+} {1}
+do_test main-1.4 {
+ db complete {
+ -- a comment ;
+ ;
+ }
+} {1}
+do_test main-1.5 {
+ db complete {DROP TABLE 'xyz;}
+} {0}
+do_test main-1.6 {
+ db complete {DROP TABLE 'xyz';}
+} {1}
+do_test main-1.7 {
+ db complete {DROP TABLE "xyz;}
+} {0}
+do_test main-1.8 {
+ db complete {DROP TABLE "xyz';}
+} {0}
+do_test main-1.9 {
+ db complete {DROP TABLE "xyz";}
+} {1}
+do_test main-1.10 {
+ db complete {DROP TABLE xyz; hi}
+} {0}
+do_test main-1.11 {
+ db complete {DROP TABLE xyz; }
+} {1}
+do_test main-1.12 {
+ db complete {DROP TABLE xyz; -- hi }
+} {1}
+do_test main-1.13 {
+ db complete {DROP TABLE xyz; -- hi
+ }
+} {1}
+do_test main-1.14 {
+ db complete {SELECT a-b FROM t1; }
+} {1}
+do_test main-1.15 {
+ db complete {SELECT a-b FROM t1 }
+} {0}
+do_test main-1.16 {
+ db complete {
+ CREATE TABLE abc(x,y);
+ }
+} {1}
+do_test main-1.17 {
+ db complete {
+ CREATE TRIGGER xyz AFTER DELETE abc BEGIN UPDATE pqr;
+ }
+} {0}
+do_test main-1.18 {
+ db complete {
+ CREATE TRIGGER xyz AFTER DELETE abc BEGIN UPDATE pqr; END;
+ }
+} {1}
+do_test main-1.19 {
+ db complete {
+ CREATE TRIGGER xyz AFTER DELETE abc BEGIN
+ UPDATE pqr;
+ unknown command;
+ }
+} {0}
+do_test main-1.20 {
+ db complete {
+ CREATE TRIGGER xyz AFTER DELETE backend BEGIN
+ UPDATE pqr;
+ }
+} {0}
+do_test main-1.21 {
+ db complete {
+ CREATE TRIGGER xyz AFTER DELETE end BEGIN
+ SELECT a, b FROM end;
+ }
+} {0}
+do_test main-1.22 {
+ db complete {
+ CREATE TRIGGER xyz AFTER DELETE end BEGIN
+ SELECT a, b FROM end;
+ END;
+ }
+} {1}
+do_test main-1.23 {
+ db complete {
+ CREATE TRIGGER xyz AFTER DELETE end BEGIN
+ SELECT a, b FROM end;
+ END;
+ SELECT a, b FROM end;
+ }
+} {1}
+do_test main-1.24 {
+ db complete {
+ CREATE TRIGGER xyz AFTER DELETE [;end;] BEGIN
+ UPDATE pqr;
+ }
+} {0}
+do_test main-1.25 {
+ db complete {
+ CREATE TRIGGER xyz AFTER DELETE backend BEGIN
+ UPDATE pqr SET a=[;end;];;;
+ }
+} {0}
+do_test main-1.26 {
+ db complete {
+ CREATE -- a comment
+ TRIGGER xyz AFTER DELETE backend BEGIN
+ UPDATE pqr SET a=5;
+ }
+} {0}
+do_test main-1.27.1 {
+ db complete {
+ CREATE -- a comment
+ TRIGGERX xyz AFTER DELETE backend BEGIN
+ UPDATE pqr SET a=5;
+ }
+} {1}
+do_test main-1.27.2 {
+ db complete {
+ CREATE/**/TRIGGER xyz AFTER DELETE backend BEGIN
+ UPDATE pqr SET a=5;
+ }
+} {0}
+do_test main-1.27.3 {
+ db complete {
+ /* */ EXPLAIN -- A comment
+ CREATE/**/TRIGGER xyz AFTER DELETE backend BEGIN
+ UPDATE pqr SET a=5;
+ }
+} {0}
+do_test main-1.27.4 {
+ db complete {
+ BOGUS token
+ CREATE TRIGGER xyz AFTER DELETE backend BEGIN
+ UPDATE pqr SET a=5;
+ }
+} {1}
+do_test main-1.27.5 {
+ db complete {
+ EXPLAIN
+ CREATE TEMP TRIGGER xyz AFTER DELETE backend BEGIN
+ UPDATE pqr SET a=5;
+ }
+} {0}
+do_test main-1.28 {
+ db complete {
+ CREATE TEMP TRIGGER xyz AFTER DELETE backend BEGIN
+ UPDATE pqr SET a=5;
+ }
+} {0}
+do_test main-1.29 {
+ db complete {
+ CREATE TRIGGER xyz AFTER DELETE backend BEGIN
+ UPDATE pqr SET a=5;
+ EXPLAIN select * from xyz;
+ }
+} {0}
+do_test main-1.30 {
+ db complete {
+ CREATE TABLE /* In comment ; */
+ }
+} {0}
+do_test main-1.31 {
+ db complete {
+ CREATE TABLE /* In comment ; */ hi;
+ }
+} {1}
+do_test main-1.31 {
+ db complete {
+ CREATE TABLE /* In comment ; */;
+ }
+} {1}
+do_test main-1.32 {
+ db complete {
+ stuff;
+ /*
+ CREATE TABLE
+ multiple lines
+ of text
+ */
+ }
+} {1}
+do_test main-1.33 {
+ db complete {
+ /*
+ CREATE TABLE
+ multiple lines
+ of text;
+ }
+} {0}
+do_test main-1.34 {
+ db complete {
+ /*
+ CREATE TABLE
+ multiple lines "*/
+ of text;
+ }
+} {1}
+do_test main-1.35 {
+ db complete {hi /**/ there;}
+} {1}
+do_test main-1.36 {
+ db complete {hi there/***/;}
+} {1}
+
+
+# Try to open a database with a corrupt database file.
+#
+do_test main-2.0 {
+ catch {db close}
+ file delete -force test.db
+ set fd [open test.db w]
+ puts $fd hi!
+ close $fd
+ set v [catch {sqlite db test.db} msg]
+ if {$v} {lappend v $msg} {lappend v {}}
+} {0 {}}
+
+# Here are some tests for tokenize.c.
+#
+do_test main-3.1 {
+ catch {db close}
+ foreach f [glob -nocomplain testdb/*] {file delete -force $f}
+ file delete -force testdb
+ sqlite db testdb
+ set v [catch {execsql {SELECT * from T1 where x!!5}} msg]
+ lappend v $msg
+} {1 {unrecognized token: "!!"}}
+do_test main-3.2 {
+ catch {db close}
+ foreach f [glob -nocomplain testdb/*] {file delete -force $f}
+ file delete -force testdb
+ sqlite db testdb
+ set v [catch {execsql {SELECT * from T1 where @x}} msg]
+ lappend v $msg
+} {1 {unrecognized token: "@"}}
+
+do_test main-3.3 {
+ catch {db close}
+ foreach f [glob -nocomplain testdb/*] {file delete -force $f}
+ file delete -force testdb
+ sqlite db testdb
+ execsql {
+ create table T1(X REAL);
+ insert into T1 values(0.5);
+ insert into T1 values(0.5e2);
+ insert into T1 values(0.5e-002);
+ insert into T1 values(5e-002);
+ insert into T1 values(-5.0e-2);
+ insert into T1 values(-5.1e-2);
+ insert into T1 values(0.5e2);
+ insert into T1 values(0.5E+02);
+ insert into T1 values(5E+02);
+ insert into T1 values(5.0E+03);
+ select x*10 from T1 order by x*5;
+ }
+} {-0.51 -0.5 0.05 0.5 5 500 500 500 5000 50000}
+do_test main-3.4 {
+ set v [catch {execsql {create bogus}} msg]
+ lappend v $msg
+} {1 {near "bogus": syntax error}}
+do_test main-3.5 {
+ set v [catch {execsql {create}} msg]
+ lappend v $msg
+} {1 {near "create": syntax error}}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/malloc.test b/usr/src/cmd/svc/configd/sqlite/test/malloc.test
new file mode 100644
index 0000000000..613435d28a
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/malloc.test
@@ -0,0 +1,228 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file attempts to check the library in an out-of-memory situation.
+# When compiled with -DMEMORY_DEBUG=1, the SQLite library accepts a special
+# command (sqlite_malloc_fail N) which causes the N-th malloc to fail. This
+# special feature is used to see what happens in the library if a malloc
+# were to really fail due to an out-of-memory situation.
+#
+# $Id: malloc.test,v 1.6 2004/02/14 01:39:50 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Only run these tests if memory debugging is turned on.
+#
+if {[info command sqlite_malloc_stat]==""} {
+ puts "Skipping malloc tests: not compiled with -DMEMORY_DEBUG..."
+ finish_test
+ return
+}
+
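+# Each loop below arranges for the i-th malloc() to fail, then opens the
+# database and runs a batch of SQL.  A failed open or an "out of memory"
+# error counts as correct behavior; a loop ends once an iteration finishes
+# without ever consuming its scheduled failure.
+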
+for {set go 1; set i 1} {$go} {incr i} {
+ do_test malloc-1.$i {
+ sqlite_malloc_fail 0
+ catch {db close}
+ catch {file delete -force test.db}
+ catch {file delete -force test.db-journal}
+ sqlite_malloc_fail $i
+ set v [catch {sqlite db test.db} msg]
+ if {$v} {
+ set msg ""
+ } else {
+ set v [catch {execsql {
+ CREATE TABLE t1(
+ a int, b float, c double, d text, e varchar(20),
+ primary key(a,b,c)
+ );
+ CREATE INDEX i1 ON t1(a,b);
+ INSERT INTO t1 VALUES(1,2.3,4.5,'hi','there');
+ INSERT INTO t1 VALUES(6,7.0,0.8,'hello','out yonder');
+ SELECT * FROM t1;
+ SELECT avg(b) FROM t1 GROUP BY a HAVING b>20.0;
+ DELETE FROM t1 WHERE a IN (SELECT min(a) FROM t1);
+ SELECT count(*) FROM t1;
+ }} msg]
+ }
+ set leftover [lindex [sqlite_malloc_stat] 2]
+ if {$leftover>0} {
+ if {$leftover>1} {puts "\nLeftover: $leftover\nReturn=$v Message=$msg"}
+ set ::go 0
+ set v {1 1}
+ } else {
+ set v2 [expr {$msg=="" || $msg=="out of memory"}]
+ if {!$v2} {puts "\nError message returned: $msg"}
+ lappend v $v2
+ }
+ } {1 1}
+}
+
+set fd [open ./data.tmp w]
+for {set i 1} {$i<=20} {incr i} {
+ puts $fd "$i\t[expr {$i*$i}]\t[expr {100-$i}] abcdefghijklmnopqrstuvwxyz"
+}
+close $fd
+
+for {set go 1; set i 1} {$go} {incr i} {
+ do_test malloc-2.$i {
+ sqlite_malloc_fail 0
+ catch {db close}
+ catch {file delete -force test.db}
+ catch {file delete -force test.db-journal}
+ sqlite_malloc_fail $i
+ set v [catch {sqlite db test.db} msg]
+ if {$v} {
+ set msg ""
+ } else {
+ set v [catch {execsql {
+ CREATE TABLE t1(a int, b int, c int);
+ CREATE INDEX i1 ON t1(a,b);
+ COPY t1 FROM 'data.tmp';
+ SELECT 'stuff', count(*) as 'other stuff', max(a+10) FROM t1;
+ UPDATE t1 SET b=b||b||b||b;
+ UPDATE t1 SET b=a WHERE a in (10,12,22);
+ INSERT INTO t1(c,b,a) VALUES(20,10,5);
+ INSERT INTO t1 SELECT * FROM t1
+ WHERE a IN (SELECT a FROM t1 WHERE a<10);
+ DELETE FROM t1 WHERE a>=10;
+ DROP INDEX i1;
+ DELETE FROM t1;
+ }} msg]
+ }
+ set leftover [lindex [sqlite_malloc_stat] 2]
+ if {$leftover>0} {
+ if {$leftover>1} {puts "\nLeftover: $leftover\nReturn=$v Message=$msg"}
+ set ::go 0
+ set v {1 1}
+ } else {
+ set v2 [expr {$msg=="" || $msg=="out of memory"}]
+ if {!$v2} {puts "\nError message returned: $msg"}
+ lappend v $v2
+ }
+ } {1 1}
+}
+
+set fd [open ./data.tmp w]
+for {set i 1} {$i<=10} {incr i} {
+ puts $fd "$i\t[expr {$i*$i}]\t[expr {100-$i}]"
+}
+close $fd
+
+for {set go 1; set i 1} {$go} {incr i} {
+ do_test malloc-3.$i {
+ sqlite_malloc_fail 0
+ catch {db close}
+ catch {file delete -force test.db}
+ catch {file delete -force test.db-journal}
+ sqlite_malloc_fail $i
+ set v [catch {sqlite db test.db} msg]
+ if {$v} {
+ set msg ""
+ } else {
+ set v [catch {execsql {
+ BEGIN TRANSACTION;
+ CREATE TABLE t1(a int, b int, c int);
+ CREATE INDEX i1 ON t1(a,b);
+ COPY t1 FROM 'data.tmp';
+ INSERT INTO t1(c,b,a) VALUES(20,10,5);
+ DELETE FROM t1 WHERE a>=10;
+ DROP INDEX i1;
+ DELETE FROM t1;
+ ROLLBACK;
+ }} msg]
+ }
+ set leftover [lindex [sqlite_malloc_stat] 2]
+ if {$leftover>0} {
+ if {$leftover>1} {puts "\nLeftover: $leftover\nReturn=$v Message=$msg"}
+ set ::go 0
+ set v {1 1}
+ } else {
+ set v2 [expr {$msg=="" || $msg=="out of memory"}]
+ if {!$v2} {puts "\nError message returned: $msg"}
+ lappend v $v2
+ }
+ } {1 1}
+}
+for {set go 1; set i 1} {$go} {incr i} {
+ do_test malloc-4.$i {
+ sqlite_malloc_fail 0
+ catch {db close}
+ catch {file delete -force test.db}
+ catch {file delete -force test.db-journal}
+ sqlite_malloc_fail $i
+ set v [catch {sqlite db test.db} msg]
+ if {$v} {
+ set msg ""
+ } else {
+ set v [catch {execsql {
+ BEGIN TRANSACTION;
+ CREATE TABLE t1(a int, b int, c int);
+ CREATE INDEX i1 ON t1(a,b);
+ COPY t1 FROM 'data.tmp';
+ UPDATE t1 SET b=a WHERE a in (10,12,22);
+ INSERT INTO t1 SELECT * FROM t1
+ WHERE a IN (SELECT a FROM t1 WHERE a<10);
+ DROP INDEX i1;
+ DELETE FROM t1;
+ COMMIT;
+ }} msg]
+ }
+ set leftover [lindex [sqlite_malloc_stat] 2]
+ if {$leftover>0} {
+ if {$leftover>1} {puts "\nLeftover: $leftover\nReturn=$v Message=$msg"}
+ set ::go 0
+ set v {1 1}
+ } else {
+ set v2 [expr {$msg=="" || $msg=="out of memory"}]
+ if {!$v2} {puts "\nError message returned: $msg"}
+ lappend v $v2
+ }
+ } {1 1}
+}
+for {set go 1; set i 1} {$go} {incr i} {
+ do_test malloc-5.$i {
+ sqlite_malloc_fail 0
+ catch {db close}
+ catch {file delete -force test.db}
+ catch {file delete -force test.db-journal}
+ sqlite_malloc_fail $i
+ set v [catch {sqlite db test.db} msg]
+ if {$v} {
+ set msg ""
+ } else {
+ set v [catch {execsql {
+ BEGIN TRANSACTION;
+ CREATE TABLE t1(a,b);
+ CREATE TABLE t2(x,y);
+ CREATE TRIGGER r1 AFTER INSERT ON t1 BEGIN
+ INSERT INTO t2(x,y) VALUES(new.rowid,1);
+ END;
+ INSERT INTO t1(a,b) VALUES(2,3);
+ COMMIT;
+ }} msg]
+ }
+ set leftover [lindex [sqlite_malloc_stat] 2]
+ if {$leftover>0} {
+ if {$leftover>1} {puts "\nLeftover: $leftover\nReturn=$v Message=$msg"}
+ set ::go 0
+ set v {1 1}
+ } else {
+ set v2 [expr {$msg=="" || $msg=="out of memory"}]
+ if {!$v2} {puts "\nError message returned: $msg"}
+ lappend v $v2
+ }
+ } {1 1}
+}
+sqlite_malloc_fail 0
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/memdb.test b/usr/src/cmd/svc/configd/sqlite/test/memdb.test
new file mode 100644
index 0000000000..c70d60c4d3
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/memdb.test
@@ -0,0 +1,399 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this script is in-memory database backend.
+#
+# $Id: memdb.test,v 1.6 2003/08/05 13:13:39 drh Exp $
+
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# In the following sequence of tests, compute the MD5 sum of the content
+# of a table, make lots of modifications to that table, then do a rollback.
+# Verify that after the rollback, the MD5 checksum is unchanged.
+#
+# These tests were borrowed from trans.tcl.
+#
+do_test memdb-1.1 {
+ db close
+ sqlite db :memory:
+ # sqlite db test.db
+ execsql {
+ BEGIN;
+ CREATE TABLE t3(x TEXT);
+ INSERT INTO t3 VALUES(randstr(10,400));
+ INSERT INTO t3 VALUES(randstr(10,400));
+ INSERT INTO t3 SELECT randstr(10,400) FROM t3;
+ INSERT INTO t3 SELECT randstr(10,400) FROM t3;
+ INSERT INTO t3 SELECT randstr(10,400) FROM t3;
+ INSERT INTO t3 SELECT randstr(10,400) FROM t3;
+ INSERT INTO t3 SELECT randstr(10,400) FROM t3;
+ INSERT INTO t3 SELECT randstr(10,400) FROM t3;
+ INSERT INTO t3 SELECT randstr(10,400) FROM t3;
+ INSERT INTO t3 SELECT randstr(10,400) FROM t3;
+ INSERT INTO t3 SELECT randstr(10,400) FROM t3;
+ COMMIT;
+ SELECT count(*) FROM t3;
+ }
+} {1024}
+
+# The following procedure computes a "signature" for table "t3". If
+# T3 changes in any way, the signature should change.
+#
+# This is used to test ROLLBACK. We gather a signature for t3, then
+# make lots of changes to t3, then rollback and take another signature.
+# The two signatures should be the same.
+#
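+# With the md5 checks commented out below, the signature is simply the length
+# of the concatenated x values together with the values themselves.
+#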
+proc signature {{fn {}}} {
+ set rx [db eval {SELECT x FROM t3}]
+ # set r1 [md5 $rx\n]
+ if {$fn!=""} {
+ # set fd [open $fn w]
+ # puts $fd $rx
+ # close $fd
+ }
+ # set r [db eval {SELECT count(*), md5sum(x) FROM t3}]
+ # puts "SIG($fn)=$r1"
+ return [list [string length $rx] $rx]
+}
+
+# Do rollbacks. Make sure the signature does not change.
+#
+set limit 10
+for {set i 2} {$i<=$limit} {incr i} {
+ set ::sig [signature one]
+ # puts "sig=$sig"
+ set cnt [lindex $::sig 0]
+ set ::journal_format [expr {($i%3)+1}]
+ if {$i%2==0} {
+ execsql {PRAGMA synchronous=FULL}
+ } else {
+ execsql {PRAGMA synchronous=NORMAL}
+ }
+ do_test memdb-1.$i.1-$cnt {
+ execsql {
+ BEGIN;
+ DELETE FROM t3 WHERE random()%10!=0;
+ INSERT INTO t3 SELECT randstr(10,10)||x FROM t3;
+ INSERT INTO t3 SELECT randstr(10,10)||x FROM t3;
+ ROLLBACK;
+ }
+ set sig2 [signature two]
+ } $sig
+ # puts "sig2=$sig2"
+ # if {$sig2!=$sig} exit
+ do_test memdb-1.$i.2-$cnt {
+ execsql {
+ BEGIN;
+ DELETE FROM t3 WHERE random()%10!=0;
+ INSERT INTO t3 SELECT randstr(10,10)||x FROM t3;
+ DELETE FROM t3 WHERE random()%10!=0;
+ INSERT INTO t3 SELECT randstr(10,10)||x FROM t3;
+ ROLLBACK;
+ }
+ signature
+ } $sig
+ if {$i<$limit} {
+ do_test memdb-1.$i.9-$cnt {
+ execsql {
+ INSERT INTO t3 SELECT randstr(10,400) FROM t3 WHERE random()%10==0;
+ }
+ } {}
+ }
+ set ::pager_old_format 0
+}
+
+do_test memdb-2.1 {
+ execsql {
+ PRAGMA integrity_check
+ }
+} {ok}
+
+do_test memdb-3.1 {
+ execsql {
+ CREATE TABLE t4(a,b,c,d);
+ BEGIN;
+ INSERT INTO t4 VALUES(1,2,3,4);
+ SELECT * FROM t4;
+ }
+} {1 2 3 4}
+do_test memdb-3.2 {
+ execsql {
+ SELECT name FROM sqlite_master WHERE type='table';
+ }
+} {t3 t4}
+do_test memdb-3.3 {
+ execsql {
+ DROP TABLE t4;
+ SELECT name FROM sqlite_master WHERE type='table';
+ }
+} {t3}
+do_test memdb-3.4 {
+ execsql {
+ ROLLBACK;
+ SELECT name FROM sqlite_master WHERE type='table';
+ }
+} {t3 t4}
+
+# Create tables for the first group of tests.
+#
+do_test memdb-4.0 {
+ execsql {
+ CREATE TABLE t1(a, b, c, UNIQUE(a,b));
+ CREATE TABLE t2(x);
+ SELECT c FROM t1 ORDER BY c;
+ }
+} {}
+
+# Six columns of configuration data as follows:
+#
+# i The reference number of the test
+# conf The conflict resolution algorithm on the BEGIN statement
+# cmd An INSERT or REPLACE command to execute against table t1
+# t0 True if there is an error from $cmd
+# t1 Content of "c" column of t1 assuming no error in $cmd
+# t2 Content of "x" column of t2
+#
+foreach {i conf cmd t0 t1 t2} {
+ 1 {} INSERT 1 {} 1
+ 2 {} {INSERT OR IGNORE} 0 3 1
+ 3 {} {INSERT OR REPLACE} 0 4 1
+ 4 {} REPLACE 0 4 1
+ 5 {} {INSERT OR FAIL} 1 {} 1
+ 6 {} {INSERT OR ABORT} 1 {} 1
+ 7 {} {INSERT OR ROLLBACK} 1 {} {}
+ 8 IGNORE INSERT 0 3 1
+ 9 IGNORE {INSERT OR IGNORE} 0 3 1
+ 10 IGNORE {INSERT OR REPLACE} 0 4 1
+ 11 IGNORE REPLACE 0 4 1
+ 12 IGNORE {INSERT OR FAIL} 1 {} 1
+ 13 IGNORE {INSERT OR ABORT} 1 {} 1
+ 14 IGNORE {INSERT OR ROLLBACK} 1 {} {}
+ 15 REPLACE INSERT 0 4 1
+ 16 FAIL INSERT 1 {} 1
+ 17 ABORT INSERT 1 {} 1
+ 18 ROLLBACK INSERT 1 {} {}
+} {
+ do_test memdb-4.$i {
+ if {$conf!=""} {set conf "ON CONFLICT $conf"}
+ set r0 [catch {execsql [subst {
+ DELETE FROM t1;
+ DELETE FROM t2;
+ INSERT INTO t1 VALUES(1,2,3);
+ BEGIN $conf;
+ INSERT INTO t2 VALUES(1);
+ $cmd INTO t1 VALUES(1,2,4);
+ }]} r1]
+ catch {execsql {COMMIT}}
+ if {$r0} {set r1 {}} {set r1 [execsql {SELECT c FROM t1}]}
+ set r2 [execsql {SELECT x FROM t2}]
+ list $r0 $r1 $r2
+ } [list $t0 $t1 $t2]
+}
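+
+# Worked example (illustration only, not an additional test): for row 2 of
+# the table above (conf={}, cmd={INSERT OR IGNORE}) the [subst] produces:
+#
+#   DELETE FROM t1;
+#   DELETE FROM t2;
+#   INSERT INTO t1 VALUES(1,2,3);
+#   BEGIN ;
+#   INSERT INTO t2 VALUES(1);
+#   INSERT OR IGNORE INTO t1 VALUES(1,2,4);
+#
+# The last INSERT conflicts with (1,2,3) on the UNIQUE(a,b) constraint and
+# is silently dropped, so there is no error, c remains 3, and t2 keeps its
+# row, giving the expected result 0 3 1.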
+
+do_test memdb-5.0 {
+ execsql {
+ DROP TABLE t2;
+ DROP TABLE t3;
+ CREATE TABLE t2(a,b,c);
+ INSERT INTO t2 VALUES(1,2,1);
+ INSERT INTO t2 VALUES(2,3,2);
+ INSERT INTO t2 VALUES(3,4,1);
+ INSERT INTO t2 VALUES(4,5,4);
+ SELECT c FROM t2 ORDER BY b;
+ CREATE TABLE t3(x);
+ INSERT INTO t3 VALUES(1);
+ }
+} {1 2 1 4}
+
+# Six columns of configuration data as follows:
+#
+# i The reference number of the test
+# conf1 The conflict resolution algorithm on the UNIQUE constraint
+# conf2 The conflict resolution algorithm on the BEGIN statement
+# cmd An UPDATE command to execute against table t1
+# t0 True if there is an error from $cmd
+# t1 Content of "b" column of t1 assuming no error in $cmd
+# t2 Content of "x" column of t3
+#
+foreach {i conf1 conf2 cmd t0 t1 t2} {
+ 1 {} {} UPDATE 1 {6 7 8 9} 1
+ 2 REPLACE {} UPDATE 0 {7 6 9} 1
+ 3 IGNORE {} UPDATE 0 {6 7 3 9} 1
+ 4 FAIL {} UPDATE 1 {6 7 3 4} 1
+ 5 ABORT {} UPDATE 1 {1 2 3 4} 1
+ 6 ROLLBACK {} UPDATE 1 {1 2 3 4} 0
+ 7 REPLACE {} {UPDATE OR IGNORE} 0 {6 7 3 9} 1
+ 8 IGNORE {} {UPDATE OR REPLACE} 0 {7 6 9} 1
+ 9 FAIL {} {UPDATE OR IGNORE} 0 {6 7 3 9} 1
+ 10 ABORT {} {UPDATE OR REPLACE} 0 {7 6 9} 1
+ 11 ROLLBACK {} {UPDATE OR IGNORE} 0 {6 7 3 9} 1
+ 12 {} {} {UPDATE OR IGNORE} 0 {6 7 3 9} 1
+ 13 {} {} {UPDATE OR REPLACE} 0 {7 6 9} 1
+ 14 {} {} {UPDATE OR FAIL} 1 {6 7 3 4} 1
+ 15 {} {} {UPDATE OR ABORT} 1 {1 2 3 4} 1
+ 16 {} {} {UPDATE OR ROLLBACK} 1 {1 2 3 4} 0
+ 17 {} IGNORE UPDATE 0 {6 7 3 9} 1
+ 18 {} REPLACE UPDATE 0 {7 6 9} 1
+ 19 {} FAIL UPDATE 1 {6 7 3 4} 1
+ 20 {} ABORT UPDATE 1 {1 2 3 4} 1
+ 21 {} ROLLBACK UPDATE 1 {1 2 3 4} 0
+ 22 REPLACE IGNORE UPDATE 0 {6 7 3 9} 1
+ 23 IGNORE REPLACE UPDATE 0 {7 6 9} 1
+ 24 REPLACE FAIL UPDATE 1 {6 7 3 4} 1
+ 25 IGNORE ABORT UPDATE 1 {1 2 3 4} 1
+ 26 REPLACE ROLLBACK UPDATE 1 {1 2 3 4} 0
+} {
+ if {$t0} {set t1 {column a is not unique}}
+ do_test memdb-5.$i {
+ if {$conf1!=""} {set conf1 "ON CONFLICT $conf1"}
+ if {$conf2!=""} {set conf2 "ON CONFLICT $conf2"}
+ set r0 [catch {execsql [subst {
+ DROP TABLE t1;
+ CREATE TABLE t1(a,b,c, UNIQUE(a) $conf1);
+ INSERT INTO t1 SELECT * FROM t2;
+ UPDATE t3 SET x=0;
+ BEGIN $conf2;
+ $cmd t3 SET x=1;
+ $cmd t1 SET b=b*2;
+ $cmd t1 SET a=c+5;
+ }]} r1]
+ catch {execsql {COMMIT}}
+ if {!$r0} {set r1 [execsql {SELECT a FROM t1 ORDER BY b}]}
+ set r2 [execsql {SELECT x FROM t3}]
+ list $r0 $r1 $r2
+ } [list $t0 $t1 $t2]
+}
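+
+# Worked example (illustration only): for row 2 of the table above
+# (conf1=REPLACE, conf2={}, cmd=UPDATE), t1 is created with
+# UNIQUE(a) ON CONFLICT REPLACE and filled from t2.  UPDATE t1 SET b=b*2
+# doubles every b, and UPDATE t1 SET a=c+5 then maps the two rows with c=1
+# to a=6; the REPLACE resolution deletes the first of them, leaving rows
+# with (a,b) = (7,6), (6,8) and (9,10), so SELECT a ... ORDER BY b gives
+# the expected 7 6 9 with no error and t3.x left at 1.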
+
+do_test memdb-6.1 {
+ execsql {
+ SELECT * FROM t2;
+ }
+} {1 2 1 2 3 2 3 4 1 4 5 4}
+do_test memdb-6.2 {
+ execsql {
+ BEGIN;
+ DROP TABLE t2;
+ SELECT name FROM sqlite_master WHERE type='table' ORDER BY 1;
+ }
+} {t1 t3 t4}
+do_test memdb-6.3 {
+ execsql {
+ ROLLBACK;
+ SELECT name FROM sqlite_master WHERE type='table' ORDER BY 1;
+ }
+} {t1 t2 t3 t4}
+do_test memdb-6.4 {
+ execsql {
+ SELECT * FROM t2;
+ }
+} {1 2 1 2 3 2 3 4 1 4 5 4}
+do_test memdb-6.5 {
+ execsql {
+ SELECT a FROM t2 UNION SELECT b FROM t2 ORDER BY 1;
+ }
+} {1 2 3 4 5}
+do_test memdb-6.6 {
+ execsql {
+ CREATE INDEX i2 ON t2(c);
+ SELECT a FROM t2 ORDER BY c;
+ }
+} {1 3 2 4}
+do_test memdb-6.6 {
+ execsql {
+ SELECT a FROM t2 ORDER BY c DESC;
+ }
+} {4 2 3 1}
+do_test memdb-6.7 {
+ execsql {
+ BEGIN;
+ CREATE TABLE t5(x,y);
+ INSERT INTO t5 VALUES(1,2);
+ SELECT * FROM t5;
+ }
+} {1 2}
+do_test memdb-6.8 {
+ execsql {
+ SELECT name FROM sqlite_master WHERE type='table' ORDER BY 1;
+ }
+} {t1 t2 t3 t4 t5}
+do_test memdb-6.9 {
+ execsql {
+ ROLLBACK;
+ SELECT name FROM sqlite_master WHERE type='table' ORDER BY 1;
+ }
+} {t1 t2 t3 t4}
+do_test memdb-6.10 {
+ execsql {
+ CREATE TABLE t5(x PRIMARY KEY, y UNIQUE);
+ SELECT * FROM t5;
+ }
+} {}
+do_test memdb-6.11 {
+ execsql {
+ SELECT * FROM t5 ORDER BY y DESC;
+ }
+} {}
+do_test memdb-6.12 {
+ execsql {
+ INSERT INTO t5 VALUES(1,2);
+ INSERT INTO t5 VALUES(3,4);
+ REPLACE INTO t5 VALUES(1,4);
+ SELECT rowid,* FROM t5;
+ }
+} {3 1 4}
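+# The REPLACE in the test above conflicts with both existing rows: (1,2) on
+# the x PRIMARY KEY and (3,4) on the y UNIQUE constraint.  Both rows are
+# deleted and the new row is inserted with a fresh rowid, which is why a
+# single row with rowid 3 remains.
+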
+do_test memdb-6.13 {
+ execsql {
+ DELETE FROM t5 WHERE x>5;
+ SELECT * FROM t5;
+ }
+} {1 4}
+do_test memdb-6.14 {
+ execsql {
+ DELETE FROM t5 WHERE y<3;
+ SELECT * FROM t5;
+ }
+} {1 4}
+do_test memdb-6.15 {
+ execsql {
+ DELETE FROM t5 WHERE x>0;
+ SELECT * FROM t5;
+ }
+} {}
+
+do_test memdb-7.1 {
+ execsql {
+ CREATE TABLE t6(x);
+ INSERT INTO t6 VALUES(1);
+ INSERT INTO t6 SELECT x+1 FROM t6;
+ INSERT INTO t6 SELECT x+2 FROM t6;
+ INSERT INTO t6 SELECT x+4 FROM t6;
+ INSERT INTO t6 SELECT x+8 FROM t6;
+ INSERT INTO t6 SELECT x+16 FROM t6;
+ INSERT INTO t6 SELECT x+32 FROM t6;
+ INSERT INTO t6 SELECT x+64 FROM t6;
+ INSERT INTO t6 SELECT x+128 FROM t6;
+ SELECT count(*) FROM (SELECT DISTINCT x FROM t6);
+ }
+} {256}
+for {set i 1} {$i<=256} {incr i} {
+ do_test memdb-7.2.$i {
+ execsql "DELETE FROM t6 WHERE x=\
+ (SELECT x FROM t6 ORDER BY random() LIMIT 1)"
+ execsql {SELECT count(*) FROM t6}
+ } [expr {256-$i}]
+}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/memleak.test b/usr/src/cmd/svc/configd/sqlite/test/memleak.test
new file mode 100644
index 0000000000..96c7783481
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/memleak.test
@@ -0,0 +1,94 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file runs all tests repeatedly and checks for memory leaks.
+#
+# $Id: memleak.test,v 1.3 2004/02/12 18:46:39 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+rename finish_test really_finish_test
+proc finish_test {} {
+ catch {db close}
+ memleak_check
+}
+
+if {[file exists ./sqlite_test_count]} {
+ set COUNT [exec cat ./sqlite_test_count]
+} else {
+ set COUNT 3
+}
+
+# LeakList will hold a list of the number of unfreed mallocs after
+# each round of the test. This number should be constant. If it
+# grows, it may mean there is a memory leak in the library.
+#
+set LeakList {}
+
+set EXCLUDE {
+ all.test
+ quick.test
+ malloc.test
+ misuse.test
+ memleak.test
+ btree2.test
+ trans.test
+}
+if {[sqlite -has-codec]} {
+ lappend EXCLUDE \
+ attach.test \
+ attach2.test \
+ auth.test \
+ format3.test \
+ version.test
+}
+if {[llength $argv]>0} {
+ set FILELIST $argv
+ set argv {}
+} else {
+ set FILELIST [lsort -dictionary [glob $testdir/*.test]]
+}
+
+foreach testfile $FILELIST {
+ set tail [file tail $testfile]
+ if {[lsearch -exact $EXCLUDE $tail]>=0} continue
+ set LeakList {}
+ for {set COUNTER 0} {$COUNTER<$COUNT} {incr COUNTER} {
+ source $testfile
+ if {[info exists Leak]} {
+ lappend LeakList $Leak
+ }
+ }
+ if {$LeakList!=""} {
+ puts -nonewline memory-leak-test-$tail...
+ incr ::nTest
+ foreach x $LeakList {
+ if {$x!=[lindex $LeakList 0]} {
+ puts " failed! ($LeakList)"
+ incr ::nErr
+ lappend ::failList memory-leak-test-$tail
+ break
+ }
+ }
+ puts " Ok"
+ }
+}
+really_finish_test
+
+# Run the malloc tests and the misuse test after memory leak detection.
+# Both tests leak memory.
+#
+#catch {source $testdir/misuse.test}
+#catch {source $testdir/malloc.test}
+
+really_finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/minmax.test b/usr/src/cmd/svc/configd/sqlite/test/minmax.test
new file mode 100644
index 0000000000..8235983f0d
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/minmax.test
@@ -0,0 +1,362 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for the SQLite library.  The
+# focus of this file is testing SELECT statements that contain
+# aggregate min() and max() functions, which are handled as
+# a special case.
+#
+# $Id: minmax.test,v 1.9.2.2 2004/07/18 21:14:05 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+do_test minmax-1.0 {
+ execsql {
+ BEGIN;
+ CREATE TABLE t1(x, y);
+ INSERT INTO t1 VALUES(1,1);
+ INSERT INTO t1 VALUES(2,2);
+ INSERT INTO t1 VALUES(3,2);
+ INSERT INTO t1 VALUES(4,3);
+ INSERT INTO t1 VALUES(5,3);
+ INSERT INTO t1 VALUES(6,3);
+ INSERT INTO t1 VALUES(7,3);
+ INSERT INTO t1 VALUES(8,4);
+ INSERT INTO t1 VALUES(9,4);
+ INSERT INTO t1 VALUES(10,4);
+ INSERT INTO t1 VALUES(11,4);
+ INSERT INTO t1 VALUES(12,4);
+ INSERT INTO t1 VALUES(13,4);
+ INSERT INTO t1 VALUES(14,4);
+ INSERT INTO t1 VALUES(15,4);
+ INSERT INTO t1 VALUES(16,5);
+ INSERT INTO t1 VALUES(17,5);
+ INSERT INTO t1 VALUES(18,5);
+ INSERT INTO t1 VALUES(19,5);
+ INSERT INTO t1 VALUES(20,5);
+ COMMIT;
+ SELECT DISTINCT y FROM t1 ORDER BY y;
+ }
+} {1 2 3 4 5}
+
+do_test minmax-1.1 {
+ set sqlite_search_count 0
+ execsql {SELECT min(x) FROM t1}
+} {1}
+do_test minmax-1.2 {
+ set sqlite_search_count
+} {19}
+do_test minmax-1.3 {
+ set sqlite_search_count 0
+ execsql {SELECT max(x) FROM t1}
+} {20}
+do_test minmax-1.4 {
+ set sqlite_search_count
+} {19}
+do_test minmax-1.5 {
+ execsql {CREATE INDEX t1i1 ON t1(x)}
+ set sqlite_search_count 0
+ execsql {SELECT min(x) FROM t1}
+} {1}
+do_test minmax-1.6 {
+ set sqlite_search_count
+} {2}
+do_test minmax-1.7 {
+ set sqlite_search_count 0
+ execsql {SELECT max(x) FROM t1}
+} {20}
+do_test minmax-1.8 {
+ set sqlite_search_count
+} {1}
+do_test minmax-1.9 {
+ set sqlite_search_count 0
+ execsql {SELECT max(y) FROM t1}
+} {5}
+do_test minmax-1.10 {
+ set sqlite_search_count
+} {19}
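+
+# The sqlite_search_count values above show the special-case handling at
+# work: without an index, min(x) and max(x) each cost a full scan of the
+# 20-row table (19 cursor steps), while with the t1i1 index on x they only
+# read one end of the index (the counts drop to 2 and 1).  max(y) still
+# costs a full scan because the index does not cover y.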
+
+do_test minmax-2.0 {
+ execsql {
+ CREATE TABLE t2(a INTEGER PRIMARY KEY, b);
+ INSERT INTO t2 SELECT * FROM t1;
+ }
+ set sqlite_search_count 0
+ execsql {SELECT min(a) FROM t2}
+} {1}
+do_test minmax-2.1 {
+ set sqlite_search_count
+} {0}
+do_test minmax-2.2 {
+ set sqlite_search_count 0
+ execsql {SELECT max(a) FROM t2}
+} {20}
+do_test minmax-2.3 {
+ set sqlite_search_count
+} {0}
+
+do_test minmax-3.0 {
+ execsql {INSERT INTO t2 VALUES((SELECT max(a) FROM t2)+1,999)}
+ set sqlite_search_count 0
+ execsql {SELECT max(a) FROM t2}
+} {21}
+do_test minmax-3.1 {
+ set sqlite_search_count
+} {0}
+do_test minmax-3.2 {
+ execsql {INSERT INTO t2 VALUES((SELECT max(a) FROM t2)+1,999)}
+ set sqlite_search_count 0
+ execsql {
+ SELECT b FROM t2 WHERE a=(SELECT max(a) FROM t2)
+ }
+} {999}
+do_test minmax-3.3 {
+ set sqlite_search_count
+} {0}
+
+do_test minmax-4.1 {
+ execsql {
+ SELECT coalesce(min(x+0),-1), coalesce(max(x+0),-1) FROM
+ (SELECT * FROM t1 UNION SELECT NULL as 'x', NULL as 'y')
+ }
+} {1 20}
+do_test minmax-4.2 {
+ execsql {
+ SELECT y, sum(x) FROM
+ (SELECT null, y+1 FROM t1 UNION SELECT * FROM t1)
+ GROUP BY y ORDER BY y;
+ }
+} {1 1 2 5 3 22 4 92 5 90 6 0}
+do_test minmax-4.3 {
+ execsql {
+ SELECT y, count(x), count(*) FROM
+ (SELECT null, y+1 FROM t1 UNION SELECT * FROM t1)
+ GROUP BY y ORDER BY y;
+ }
+} {1 1 1 2 2 3 3 4 5 4 8 9 5 5 6 6 0 1}
+
+# Make sure the min(x) and max(x) optimizations work on empty tables
+# including empty tables with indices. Ticket #296.
+#
+do_test minmax-5.1 {
+ execsql {
+ CREATE TABLE t3(x INTEGER UNIQUE NOT NULL);
+ SELECT coalesce(min(x),999) FROM t3;
+ }
+} {999}
+do_test minmax-5.2 {
+ execsql {
+ SELECT coalesce(min(rowid),999) FROM t3;
+ }
+} {999}
+do_test minmax-5.3 {
+ execsql {
+ SELECT coalesce(max(x),999) FROM t3;
+ }
+} {999}
+do_test minmax-5.4 {
+ execsql {
+ SELECT coalesce(max(rowid),999) FROM t3;
+ }
+} {999}
+do_test minmax-5.5 {
+ execsql {
+ SELECT coalesce(max(rowid),999) FROM t3 WHERE rowid<25;
+ }
+} {999}
+
+# Make sure the min(x) and max(x) optimizations work when there
+# is a LIMIT clause. Ticket #396.
+#
+do_test minmax-6.1 {
+ execsql {
+ SELECT min(a) FROM t2 LIMIT 1
+ }
+} {1}
+do_test minmax-6.2 {
+ execsql {
+ SELECT max(a) FROM t2 LIMIT 3
+ }
+} {22}
+do_test minmax-6.3 {
+ execsql {
+ SELECT min(a) FROM t2 LIMIT 0,100
+ }
+} {1}
+do_test minmax-6.4 {
+ execsql {
+ SELECT max(a) FROM t2 LIMIT 1,100
+ }
+} {}
+do_test minmax-6.5 {
+ execsql {
+ SELECT min(x) FROM t3 LIMIT 1
+ }
+} {{}}
+do_test minmax-6.6 {
+ execsql {
+ SELECT max(x) FROM t3 LIMIT 0
+ }
+} {}
+do_test minmax-6.7 {
+ execsql {
+ SELECT max(a) FROM t2 LIMIT 0
+ }
+} {}
+
+# Make sure the max(x) and min(x) optimizations work for nested
+# queries. Ticket #587.
+#
+do_test minmax-7.1 {
+ execsql {
+ SELECT max(x) FROM t1;
+ }
+} 20
+do_test minmax-7.2 {
+ execsql {
+ SELECT * FROM (SELECT max(x) FROM t1);
+ }
+} 20
+do_test minmax-7.3 {
+ execsql {
+ SELECT min(x) FROM t1;
+ }
+} 1
+do_test minmax-7.4 {
+ execsql {
+ SELECT * FROM (SELECT min(x) FROM t1);
+ }
+} 1
+
+# Make sure min(x) and max(x) work correctly when the datatype is
+# TEXT instead of NUMERIC. Ticket #623.
+#
+do_test minmax-8.1 {
+ execsql {
+ CREATE TABLE t4(a TEXT);
+ INSERT INTO t4 VALUES('1234');
+ INSERT INTO t4 VALUES('234');
+ INSERT INTO t4 VALUES('34');
+ SELECT min(a), max(a) FROM t4;
+ }
+} {1234 34}
+do_test minmax-8.2 {
+ execsql {
+ CREATE TABLE t5(a INTEGER);
+ INSERT INTO t5 VALUES('1234');
+ INSERT INTO t5 VALUES('234');
+ INSERT INTO t5 VALUES('34');
+ SELECT min(a), max(a) FROM t5;
+ }
+} {34 1234}
+
+# Ticket #658: Test the min()/max() optimization when the FROM clause
+# is a subquery.
+#
+do_test minmax-9.1 {
+ execsql {
+ SELECT max(rowid) FROM (
+ SELECT max(rowid) FROM t4 UNION SELECT max(rowid) FROM t5
+ )
+ }
+} {1}
+do_test minmax-9.2 {
+ execsql {
+ SELECT max(rowid) FROM (
+ SELECT max(rowid) FROM t4 EXCEPT SELECT max(rowid) FROM t5
+ )
+ }
+} {{}}
+
+# If there is a NULL in an aggregate max() or min(), ignore it. An
+# aggregate min() or max() will only return NULL if all values are NULL.
+#
+do_test minmax-10.1 {
+ execsql {
+ CREATE TABLE t6(x);
+ INSERT INTO t6 VALUES(1);
+ INSERT INTO t6 VALUES(2);
+ INSERT INTO t6 VALUES(NULL);
+ SELECT coalesce(min(x),-1) FROM t6;
+ }
+} {1}
+do_test minmax-10.2 {
+ execsql {
+ SELECT max(x) FROM t6;
+ }
+} {2}
+do_test minmax-10.3 {
+ execsql {
+ CREATE INDEX i6 ON t6(x);
+ SELECT coalesce(min(x),-1) FROM t6;
+ }
+} {1}
+do_test minmax-10.4 {
+ execsql {
+ SELECT max(x) FROM t6;
+ }
+} {2}
+do_test minmax-10.5 {
+ execsql {
+ DELETE FROM t6 WHERE x NOT NULL;
+ SELECT count(*) FROM t6;
+ }
+} 1
+do_test minmax-10.6 {
+ execsql {
+ SELECT count(x) FROM t6;
+ }
+} 0
+do_test minmax-10.7 {
+ execsql {
+ SELECT (SELECT min(x) FROM t6), (SELECT max(x) FROM t6);
+ }
+} {{} {}}
+do_test minmax-10.8 {
+ execsql {
+ SELECT min(x), max(x) FROM t6;
+ }
+} {{} {}}
+do_test minmax-10.9 {
+ execsql {
+ INSERT INTO t6 SELECT * FROM t6;
+ INSERT INTO t6 SELECT * FROM t6;
+ INSERT INTO t6 SELECT * FROM t6;
+ INSERT INTO t6 SELECT * FROM t6;
+ INSERT INTO t6 SELECT * FROM t6;
+ INSERT INTO t6 SELECT * FROM t6;
+ INSERT INTO t6 SELECT * FROM t6;
+ INSERT INTO t6 SELECT * FROM t6;
+ INSERT INTO t6 SELECT * FROM t6;
+ INSERT INTO t6 SELECT * FROM t6;
+ SELECT count(*) FROM t6;
+ }
+} 1024
+do_test minmax-10.10 {
+ execsql {
+ SELECT count(x) FROM t6;
+ }
+} 0
+do_test minmax-10.11 {
+ execsql {
+ SELECT (SELECT min(x) FROM t6), (SELECT max(x) FROM t6);
+ }
+} {{} {}}
+do_test minmax-10.12 {
+ execsql {
+ SELECT min(x), max(x) FROM t6;
+ }
+} {{} {}}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/misc1.test b/usr/src/cmd/svc/configd/sqlite/test/misc1.test
new file mode 100644
index 0000000000..9e75a087b2
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/misc1.test
@@ -0,0 +1,543 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for the SQLite library.
+#
+# This file implements tests for miscellaneous features that were
+# left out of other test files.
+#
+# $Id: misc1.test,v 1.23 2003/08/05 13:13:39 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Test the creation and use of tables that have a large number
+# of columns.
+#
+do_test misc1-1.1 {
+ set cmd "CREATE TABLE manycol(x0 text"
+ for {set i 1} {$i<=99} {incr i} {
+ append cmd ",x$i text"
+ }
+ append cmd ")";
+ execsql $cmd
+ set cmd "INSERT INTO manycol VALUES(0"
+ for {set i 1} {$i<=99} {incr i} {
+ append cmd ",$i"
+ }
+ append cmd ")";
+ execsql $cmd
+ execsql "SELECT x99 FROM manycol"
+} 99
+do_test misc1-1.2 {
+ execsql {SELECT x0, x10, x25, x50, x75 FROM manycol}
+} {0 10 25 50 75}
+do_test misc1-1.3.1 {
+ for {set j 100} {$j<=1000} {incr j 100} {
+ set cmd "INSERT INTO manycol VALUES($j"
+ for {set i 1} {$i<=99} {incr i} {
+ append cmd ",[expr {$i+$j}]"
+ }
+ append cmd ")"
+ execsql $cmd
+ }
+ execsql {SELECT x50 FROM manycol ORDER BY x80+0}
+} {50 150 250 350 450 550 650 750 850 950 1050}
+do_test misc1-1.3.2 {
+ execsql {SELECT x50 FROM manycol ORDER BY x80}
+} {1050 150 250 350 450 550 650 750 50 850 950}
+do_test misc1-1.4 {
+ execsql {SELECT x75 FROM manycol WHERE x50=350}
+} 375
+do_test misc1-1.5 {
+ execsql {SELECT x50 FROM manycol WHERE x99=599}
+} 550
+do_test misc1-1.6 {
+ execsql {CREATE INDEX manycol_idx1 ON manycol(x99)}
+ execsql {SELECT x50 FROM manycol WHERE x99=899}
+} 850
+do_test misc1-1.7 {
+ execsql {SELECT count(*) FROM manycol}
+} 11
+do_test misc1-1.8 {
+ execsql {DELETE FROM manycol WHERE x98=1234}
+ execsql {SELECT count(*) FROM manycol}
+} 11
+do_test misc1-1.9 {
+ execsql {DELETE FROM manycol WHERE x98=998}
+ execsql {SELECT count(*) FROM manycol}
+} 10
+do_test misc1-1.10 {
+ execsql {DELETE FROM manycol WHERE x99=500}
+ execsql {SELECT count(*) FROM manycol}
+} 10
+do_test misc1-1.11 {
+ execsql {DELETE FROM manycol WHERE x99=599}
+ execsql {SELECT count(*) FROM manycol}
+} 9
+
+# Check GROUP BY expressions that name two or more columns.
+#
+do_test misc1-2.1 {
+ execsql {
+ BEGIN TRANSACTION;
+ CREATE TABLE agger(one text, two text, three text, four text);
+ INSERT INTO agger VALUES(1, 'one', 'hello', 'yes');
+ INSERT INTO agger VALUES(2, 'two', 'howdy', 'no');
+ INSERT INTO agger VALUES(3, 'thr', 'howareya', 'yes');
+ INSERT INTO agger VALUES(4, 'two', 'lothere', 'yes');
+ INSERT INTO agger VALUES(5, 'one', 'atcha', 'yes');
+ INSERT INTO agger VALUES(6, 'two', 'hello', 'no');
+ COMMIT
+ }
+ execsql {SELECT count(*) FROM agger}
+} 6
+do_test misc1-2.2 {
+ execsql {SELECT sum(one), two, four FROM agger
+ GROUP BY two, four ORDER BY sum(one) desc}
+} {8 two no 6 one yes 4 two yes 3 thr yes}
+do_test misc1-2.3 {
+ execsql {SELECT sum((one)), (two), (four) FROM agger
+ GROUP BY (two), (four) ORDER BY sum(one) desc}
+} {8 two no 6 one yes 4 two yes 3 thr yes}
+
+# Here's a test for a bug found by Joel Lucsy. The code below
+# was causing an assertion failure.
+#
+do_test misc1-3.1 {
+ set r [execsql {
+ CREATE TABLE t1(a);
+ INSERT INTO t1 VALUES('hi');
+ PRAGMA full_column_names=on;
+ SELECT rowid, * FROM t1;
+ }]
+ lindex $r 1
+} {hi}
+
+# Here's a test for yet another bug found by Joel Lucsy. The code
+# below was causing an assertion failure.
+#
+do_test misc1-4.1 {
+ execsql {
+ BEGIN;
+ CREATE TABLE t2(a);
+ INSERT INTO t2 VALUES('This is a long string to use up a lot of disk -');
+ UPDATE t2 SET a=a||a||a||a;
+ INSERT INTO t2 SELECT '1 - ' || a FROM t2;
+ INSERT INTO t2 SELECT '2 - ' || a FROM t2;
+ INSERT INTO t2 SELECT '3 - ' || a FROM t2;
+ INSERT INTO t2 SELECT '4 - ' || a FROM t2;
+ INSERT INTO t2 SELECT '5 - ' || a FROM t2;
+ INSERT INTO t2 SELECT '6 - ' || a FROM t2;
+ COMMIT;
+ SELECT count(*) FROM t2;
+ }
+} {64}
+
+# Make sure we actually see a semicolon or end-of-file in the SQL input
+# before executing a command. Thus if "WHERE" is misspelled on an UPDATE,
+# the user won't accidentally update every record.
+#
+do_test misc1-5.1 {
+ catchsql {
+ CREATE TABLE t3(a,b);
+ INSERT INTO t3 VALUES(1,2);
+ INSERT INTO t3 VALUES(3,4);
+ UPDATE t3 SET a=0 WHEREwww b=2;
+ }
+} {1 {near "WHEREwww": syntax error}}
+do_test misc1-5.2 {
+ execsql {
+ SELECT * FROM t3 ORDER BY a;
+ }
+} {1 2 3 4}
+
+# Certain keywords (especially non-standard keywords like "REPLACE") can
+# also be used as identifiers. The way this works in the parser is that
+# the parser first detects a syntax error, the error handling routine
+# sees that the special keyword caused the error, then replaces the keyword
+# with "ID" and tries again.
+#
+# Check the operation of this logic.
+#
+do_test misc1-6.1 {
+ catchsql {
+ CREATE TABLE t4(
+ abort, asc, begin, cluster, conflict, copy, delimiters, desc, end,
+ explain, fail, ignore, key, offset, pragma, replace, temp,
+ vacuum, view
+ );
+ }
+} {0 {}}
+do_test misc1-6.2 {
+ catchsql {
+ INSERT INTO t4
+ VALUES(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19);
+ }
+} {0 {}}
+do_test misc1-6.3 {
+ execsql {
+ SELECT * FROM t4
+ }
+} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19}
+do_test misc1-6.4 {
+ execsql {
+ SELECT abort+asc,max(key,pragma,temp) FROM t4
+ }
+} {3 17}
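+
+# In the query above the keywords are treated as ordinary column references:
+# abort+asc is 1+2=3 and max(key,pragma,temp) is max(13,15,17)=17.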
+
+# Test for multi-column primary keys, and for multiple primary keys.
+#
+do_test misc1-7.1 {
+ catchsql {
+ CREATE TABLE error1(
+ a TYPE PRIMARY KEY,
+ b TYPE PRIMARY KEY
+ );
+ }
+} {1 {table "error1" has more than one primary key}}
+do_test misc1-7.2 {
+ catchsql {
+ CREATE TABLE error1(
+ a INTEGER PRIMARY KEY,
+ b TYPE PRIMARY KEY
+ );
+ }
+} {1 {table "error1" has more than one primary key}}
+do_test misc1-7.3 {
+ execsql {
+ CREATE TABLE t5(a,b,c,PRIMARY KEY(a,b));
+ INSERT INTO t5 VALUES(1,2,3);
+ SELECT * FROM t5 ORDER BY a;
+ }
+} {1 2 3}
+do_test misc1-7.4 {
+ catchsql {
+ INSERT INTO t5 VALUES(1,2,4);
+ }
+} {1 {columns a, b are not unique}}
+do_test misc1-7.5 {
+ catchsql {
+ INSERT INTO t5 VALUES(0,2,4);
+ }
+} {0 {}}
+do_test misc1-7.6 {
+ execsql {
+ SELECT * FROM t5 ORDER BY a;
+ }
+} {0 2 4 1 2 3}
+
+do_test misc1-8.1 {
+ catchsql {
+ SELECT *;
+ }
+} {1 {no tables specified}}
+do_test misc1-8.2 {
+ catchsql {
+ SELECT t1.*;
+ }
+} {1 {no such table: t1}}
+
+execsql {
+ DROP TABLE t1;
+ DROP TABLE t2;
+ DROP TABLE t3;
+ DROP TABLE t4;
+}
+
+# If an integer is too big to be represented as a 32-bit machine integer,
+# then treat it as a string.
+#
+do_test misc1-9.1 {
+ catchsql {
+ CREATE TABLE t1(a unique not null, b unique not null);
+ INSERT INTO t1 VALUES('a',12345678901234567890);
+ INSERT INTO t1 VALUES('b',12345678911234567890);
+ INSERT INTO t1 VALUES('c',12345678921234567890);
+ SELECT * FROM t1;
+ }
+} {0 {a 12345678901234567890 b 12345678911234567890 c 12345678921234567890}}
+
+# A WHERE clause is not allowed to contain more than 100 terms.  Check to
+# make sure this limit is enforced.
+#
+do_test misc1-10.0 {
+ execsql {SELECT count(*) FROM manycol}
+} {9}
+do_test misc1-10.1 {
+ set ::where {WHERE x0>=0}
+ for {set i 1} {$i<=99} {incr i} {
+ append ::where " AND x$i<>0"
+ }
+ catchsql "SELECT count(*) FROM manycol $::where"
+} {0 9}
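+# The clause built above has exactly 100 terms (x0>=0 plus 99 x$i<>0 terms),
+# which is the maximum allowed; the extra "AND rowid>0" term in the next
+# test pushes it to 101 and triggers the error.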
+do_test misc1-10.2 {
+ catchsql "SELECT count(*) FROM manycol $::where AND rowid>0"
+} {1 {WHERE clause too complex - no more than 100 terms allowed}}
+do_test misc1-10.3 {
+ regsub "x0>=0" $::where "x0=0" ::where
+ catchsql "DELETE FROM manycol $::where"
+} {0 {}}
+do_test misc1-10.4 {
+ execsql {SELECT count(*) FROM manycol}
+} {8}
+do_test misc1-10.5 {
+ catchsql "DELETE FROM manycol $::where AND rowid>0"
+} {1 {WHERE clause too complex - no more than 100 terms allowed}}
+do_test misc1-10.6 {
+ execsql {SELECT x1 FROM manycol WHERE x0=100}
+} {101}
+do_test misc1-10.7 {
+ regsub "x0=0" $::where "x0=100" ::where
+ catchsql "UPDATE manycol SET x1=x1+1 $::where"
+} {0 {}}
+do_test misc1-10.8 {
+ execsql {SELECT x1 FROM manycol WHERE x0=100}
+} {102}
+do_test misc1-10.9 {
+ catchsql "UPDATE manycol SET x1=x1+1 $::where AND rowid>0"
+} {1 {WHERE clause too complex - no more than 100 terms allowed}}
+do_test misc1-10.10 {
+ execsql {SELECT x1 FROM manycol WHERE x0=100}
+} {102}
+
+# Make sure the initialization works even if a database is opened while
+# another process has the database locked.
+#
+do_test misc1-11.1 {
+ execsql {BEGIN}
+ sqlite db2 test.db
+ set rc [catch {db2 eval {SELECT count(*) FROM t1}} msg]
+ lappend rc $msg
+} {1 {database is locked}}
+do_test misc1-11.2 {
+ execsql {COMMIT}
+ set rc [catch {db2 eval {SELECT count(*) FROM t1}} msg]
+ db2 close
+ lappend rc $msg
+} {0 3}
+
+# Make sure string comparisons really do compare strings in format4+.
+# Similar tests in the format3.test file show that for format3 and earlier
+# all comparisons were numeric if either operand looked like a number.
+#
+do_test misc1-12.1 {
+ execsql {SELECT '0'=='0.0'}
+} {0}
+do_test misc1-12.2 {
+ execsql {SELECT '0'==0.0}
+} {1}
+do_test misc1-12.3 {
+ execsql {SELECT '12345678901234567890'=='12345678901234567891'}
+} {0}
+do_test misc1-12.4 {
+ execsql {
+ CREATE TABLE t6(a INT UNIQUE, b TEXT UNIQUE);
+ INSERT INTO t6 VALUES('0','0.0');
+ SELECT * FROM t6;
+ }
+} {0 0.0}
+do_test misc1-12.5 {
+ execsql {
+ INSERT OR IGNORE INTO t6 VALUES(0.0,'x');
+ SELECT * FROM t6;
+ }
+} {0 0.0}
+do_test misc1-12.6 {
+ execsql {
+ INSERT OR IGNORE INTO t6 VALUES('y',0);
+ SELECT * FROM t6;
+ }
+} {0 0.0 y 0}
+do_test misc1-12.7 {
+ execsql {
+ CREATE TABLE t7(x INTEGER, y TEXT, z);
+ INSERT INTO t7 VALUES(0,0,1);
+ INSERT INTO t7 VALUES(0.0,0,2);
+ INSERT INTO t7 VALUES(0,0.0,3);
+ INSERT INTO t7 VALUES(0.0,0.0,4);
+ SELECT DISTINCT x, y FROM t7 ORDER BY z;
+ }
+} {0 0 0 0.0}
+do_test misc1-12.8 {
+ execsql {
+ SELECT min(z), max(z), count(z) FROM t7 GROUP BY x ORDER BY 1;
+ }
+} {1 4 4}
+do_test misc1-12.9 {
+ execsql {
+ SELECT min(z), max(z), count(z) FROM t7 GROUP BY y ORDER BY 1;
+ }
+} {1 2 2 3 4 2}
+
+# This used to be an error. But we changed the code so that arbitrary
+# identifiers can be used as a collating sequence. Collation is by text
+# if the identifier contains "text", "blob", or "clob" and is numeric
+# otherwise.
+do_test misc1-12.10 {
+ catchsql {
+ SELECT * FROM t6 ORDER BY a COLLATE unknown;
+ }
+} {0 {0 0.0 y 0}}
+do_test misc1-12.11 {
+ execsql {
+ CREATE TABLE t8(x TEXT COLLATE numeric, y INTEGER COLLATE text, z);
+ INSERT INTO t8 VALUES(0,0,1);
+ INSERT INTO t8 VALUES(0.0,0,2);
+ INSERT INTO t8 VALUES(0,0.0,3);
+ INSERT INTO t8 VALUES(0.0,0.0,4);
+ SELECT DISTINCT x, y FROM t8 ORDER BY z;
+ }
+} {0 0 0 0.0}
+do_test misc1-12.12 {
+ execsql {
+ SELECT min(z), max(z), count(z) FROM t8 GROUP BY x ORDER BY 1;
+ }
+} {1 4 4}
+do_test misc1-12.13 {
+ execsql {
+ SELECT min(z), max(z), count(z) FROM t8 GROUP BY y ORDER BY 1;
+ }
+} {1 2 2 3 4 2}
+
+# There was a problem with realloc() in the OP_MemStore operation of
+# the VDBE. A buffer was being reallocated but some pointers into
+# the old copy of the buffer were not being moved over to the new copy.
+# The following code tests for the problem.
+#
+do_test misc1-13.1 {
+ execsql {
+ CREATE TABLE t9(x,y);
+ INSERT INTO t9 VALUES('one',1);
+ INSERT INTO t9 VALUES('two',2);
+ INSERT INTO t9 VALUES('three',3);
+ INSERT INTO t9 VALUES('four',4);
+ INSERT INTO t9 VALUES('five',5);
+ INSERT INTO t9 VALUES('six',6);
+ INSERT INTO t9 VALUES('seven',7);
+ INSERT INTO t9 VALUES('eight',8);
+ INSERT INTO t9 VALUES('nine',9);
+ INSERT INTO t9 VALUES('ten',10);
+ INSERT INTO t9 VALUES('eleven',11);
+ SELECT y FROM t9
+ WHERE x=(SELECT x FROM t9 WHERE y=1)
+ OR x=(SELECT x FROM t9 WHERE y=2)
+ OR x=(SELECT x FROM t9 WHERE y=3)
+ OR x=(SELECT x FROM t9 WHERE y=4)
+ OR x=(SELECT x FROM t9 WHERE y=5)
+ OR x=(SELECT x FROM t9 WHERE y=6)
+ OR x=(SELECT x FROM t9 WHERE y=7)
+ OR x=(SELECT x FROM t9 WHERE y=8)
+ OR x=(SELECT x FROM t9 WHERE y=9)
+ OR x=(SELECT x FROM t9 WHERE y=10)
+ OR x=(SELECT x FROM t9 WHERE y=11)
+ OR x=(SELECT x FROM t9 WHERE y=12)
+ OR x=(SELECT x FROM t9 WHERE y=13)
+ OR x=(SELECT x FROM t9 WHERE y=14)
+ ;
+ }
+} {1 2 3 4 5 6 7 8 9 10 11}
+
+# Make sure a database connection still works after changing the
+# working directory.
+#
+do_test misc1-14.1 {
+ file mkdir tempdir
+ cd tempdir
+ execsql {BEGIN}
+ file exists ./test.db-journal
+} {0}
+do_test misc1-14.2 {
+ file exists ../test.db-journal
+} {1}
+do_test misc1-14.3 {
+ cd ..
+ file delete tempdir
+ execsql {COMMIT}
+ file exists ./test.db-journal
+} {0}
+
+# A failed create table should not leave the table in the internal
+# data structures. Ticket #238.
+#
+do_test misc1-15.1 {
+ catchsql {
+ CREATE TABLE t10 AS SELECT c1;
+ }
+} {1 {no such column: c1}}
+do_test misc1-15.2 {
+ catchsql {
+ CREATE TABLE t10 AS SELECT 1;
+ }
+ # The bug in ticket #238 causes the statement above to fail with
+ # the error "table t10 alread exists"
+} {0 {}}
+
+# Test for memory leaks when a CREATE TABLE containing a primary key
+# fails. Ticket #249.
+#
+do_test misc1-16.1 {
+ catchsql {SELECT name FROM sqlite_master LIMIT 1}
+ catchsql {
+ CREATE TABLE test(a integer, primary key(a));
+ }
+} {0 {}}
+do_test misc1-16.2 {
+ catchsql {
+ CREATE TABLE test(a integer, primary key(a));
+ }
+} {1 {table test already exists}}
+do_test misc1-16.3 {
+ catchsql {
+ CREATE TABLE test2(a text primary key, b text, primary key(a,b));
+ }
+} {1 {table "test2" has more than one primary key}}
+do_test misc1-16.4 {
+ execsql {
+ INSERT INTO test VALUES(1);
+ SELECT rowid, a FROM test;
+ }
+} {1 1}
+do_test misc1-16.5 {
+ execsql {
+ INSERT INTO test VALUES(5);
+ SELECT rowid, a FROM test;
+ }
+} {1 1 5 5}
+do_test misc1-16.6 {
+ execsql {
+ INSERT INTO test VALUES(NULL);
+ SELECT rowid, a FROM test;
+ }
+} {1 1 5 5 6 6}
+
+# Ticket #333: Temp triggers that modify persistent tables.
+#
+do_test misc1-17.1 {
+ execsql {
+ BEGIN;
+ CREATE TABLE RealTable(TestID INTEGER PRIMARY KEY, TestString TEXT);
+ CREATE TEMP TABLE TempTable(TestID INTEGER PRIMARY KEY, TestString TEXT);
+ CREATE TEMP TRIGGER trigTest_1 AFTER UPDATE ON TempTable BEGIN
+ INSERT INTO RealTable(TestString)
+ SELECT new.TestString FROM TempTable LIMIT 1;
+ END;
+ INSERT INTO TempTable(TestString) VALUES ('1');
+ INSERT INTO TempTable(TestString) VALUES ('2');
+ UPDATE TempTable SET TestString = TestString + 1 WHERE TestID IN (1, 2);
+ COMMIT;
+ SELECT TestString FROM RealTable ORDER BY 1;
+ }
+} {2 3}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/misc2.test b/usr/src/cmd/svc/configd/sqlite/test/misc2.test
new file mode 100644
index 0000000000..23ddc3dfef
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/misc2.test
@@ -0,0 +1,238 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2003 June 21
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for the SQLite library.
+#
+# This file implements tests for miscellaneous features that were
+# left out of other test files.
+#
+# $Id: misc2.test,v 1.11 2003/12/17 23:57:36 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Test for ticket #360
+#
+do_test misc2-1.1 {
+ catchsql {
+ CREATE TABLE FOO(bar integer);
+ CREATE TRIGGER foo_insert BEFORE INSERT ON foo BEGIN
+ SELECT CASE WHEN (NOT new.bar BETWEEN 0 AND 20)
+ THEN raise(rollback, 'aiieee') END;
+ END;
+ INSERT INTO foo(bar) VALUES (1);
+ }
+} {0 {}}
+do_test misc2-1.2 {
+ catchsql {
+ INSERT INTO foo(bar) VALUES (111);
+ }
+} {1 aiieee}
+
+# Make sure ROWID works on a view and a subquery. Ticket #364
+#
+do_test misc2-2.1 {
+ execsql {
+ CREATE TABLE t1(a,b,c);
+ INSERT INTO t1 VALUES(1,2,3);
+ CREATE TABLE t2(a,b,c);
+ INSERT INTO t2 VALUES(7,8,9);
+ SELECT rowid, * FROM (SELECT * FROM t1, t2);
+ }
+} {{} 1 2 3 7 8 9}
+do_test misc2-2.2 {
+ execsql {
+ CREATE VIEW v1 AS SELECT * FROM t1, t2;
+ SELECT rowid, * FROM v1;
+ }
+} {{} 1 2 3 7 8 9}
+
+# Check name binding precedence. Ticket #387
+#
+do_test misc2-3.1 {
+ catchsql {
+ SELECT t1.b+t2.b AS a, t1.a, t2.a FROM t1, t2 WHERE a==10
+ }
+} {1 {ambiguous column name: a}}
+
+# Make sure 32-bit integer overflow is handled properly in queries.
+# ticket #408
+#
+do_test misc2-4.1 {
+ execsql {
+ INSERT INTO t1 VALUES(4000000000,'a','b');
+ SELECT a FROM t1 WHERE a>1;
+ }
+} {4000000000}
+do_test misc2-4.2 {
+ execsql {
+ INSERT INTO t1 VALUES(2147483648,'b2','c2');
+ INSERT INTO t1 VALUES(2147483647,'b3','c3');
+ SELECT a FROM t1 WHERE a>2147483647;
+ }
+} {4000000000 2147483648}
+do_test misc2-4.3 {
+ execsql {
+ SELECT a FROM t1 WHERE a<2147483648;
+ }
+} {1 2147483647}
+do_test misc2-4.4 {
+ execsql {
+ SELECT a FROM t1 WHERE a<=2147483648;
+ }
+} {1 2147483648 2147483647}
+do_test misc2-4.5 {
+ execsql {
+ SELECT a FROM t1 WHERE a<10000000000;
+ }
+} {1 4000000000 2147483648 2147483647}
+do_test misc2-4.6 {
+ execsql {
+ SELECT a FROM t1 WHERE a<1000000000000 ORDER BY 1;
+ }
+} {1 2147483647 2147483648 4000000000}
+
+# There were some issues with expanding a SrcList object using a call
+# to sqliteSrcListAppend() if the SrcList had previously been duplicated
+# using a call to sqliteSrcListDup(). Ticket #416. The following test
+# makes sure the problem has been fixed.
+#
+do_test misc2-5.1 {
+ execsql {
+ CREATE TABLE x(a,b);
+ CREATE VIEW y AS
+ SELECT x1.b AS p, x2.b AS q FROM x AS x1, x AS x2 WHERE x1.a=x2.a;
+ CREATE VIEW z AS
+ SELECT y1.p, y2.p FROM y AS y1, y AS y2 WHERE y1.q=y2.q;
+ SELECT * from z;
+ }
+} {}
+
+# Make sure we can open a database with an empty filename. What this
+# does is store the database in a temporary file that is deleted when
+# the database is closed. Ticket #432.
+#
+do_test misc2-6.1 {
+ db close
+ sqlite db {}
+ execsql {
+ CREATE TABLE t1(a,b);
+ INSERT INTO t1 VALUES(1,2);
+ SELECT * FROM t1;
+ }
+} {1 2}
+
+# Make sure we get an error message (not a segfault) on an attempt to
+# update a table from within the callback of a select on that same
+# table.
+#
+do_test misc2-7.1 {
+ db close
+ file delete -force test.db
+ sqlite db test.db
+ execsql {
+ CREATE TABLE t1(x);
+ INSERT INTO t1 VALUES(1);
+ }
+ set rc [catch {
+ db eval {SELECT rowid FROM t1} {} {
+ db eval "DELETE FROM t1 WHERE rowid=$rowid"
+ }
+ } msg]
+ lappend rc $msg
+} {1 {database table is locked}}
+do_test misc2-7.2 {
+ set rc [catch {
+ db eval {SELECT rowid FROM t1} {} {
+ db eval "INSERT INTO t1 VALUES(3)"
+ }
+ } msg]
+ lappend rc $msg
+} {1 {database table is locked}}
+do_test misc2-7.3 {
+ db close
+ file delete -force test.db
+ sqlite db :memory:
+ execsql {
+ CREATE TABLE t1(x);
+ INSERT INTO t1 VALUES(1);
+ }
+ set rc [catch {
+ db eval {SELECT rowid FROM t1} {} {
+ db eval "DELETE FROM t1 WHERE rowid=$rowid"
+ }
+ } msg]
+ lappend rc $msg
+} {1 {database table is locked}}
+do_test misc2-7.4 {
+ set rc [catch {
+ db eval {SELECT rowid FROM t1} {} {
+ db eval "INSERT INTO t1 VALUES(3)"
+ }
+ } msg]
+ lappend rc $msg
+} {1 {database table is locked}}
+
+# Ticket #453. If the SQL ended with "-", the tokenizer was calling that
+# an incomplete token, which caused problems.  The solution was to just call
+# it a minus sign.
+#
+do_test misc2-8.1 {
+ catchsql {-}
+} {1 {near "-": syntax error}}
+
+# Ticket #513. Make sure the VDBE stack does not grow on a 3-way join.
+#
+do_test misc2-9.1 {
+ execsql {
+ BEGIN;
+ CREATE TABLE counts(n INTEGER PRIMARY KEY);
+ INSERT INTO counts VALUES(0);
+ INSERT INTO counts VALUES(1);
+ INSERT INTO counts SELECT n+2 FROM counts;
+ INSERT INTO counts SELECT n+4 FROM counts;
+ INSERT INTO counts SELECT n+8 FROM counts;
+ COMMIT;
+
+ CREATE TEMP TABLE x AS
+ SELECT dim1.n, dim2.n, dim3.n
+ FROM counts AS dim1, counts AS dim2, counts AS dim3
+ WHERE dim1.n<10 AND dim2.n<10 AND dim3.n<10;
+
+ SELECT count(*) FROM x;
+ }
+} {1000}
+do_test misc2-9.2 {
+ execsql {
+ DROP TABLE x;
+ CREATE TEMP TABLE x AS
+ SELECT dim1.n, dim2.n, dim3.n
+ FROM counts AS dim1, counts AS dim2, counts AS dim3
+ WHERE dim1.n>=6 AND dim2.n>=6 AND dim3.n>=6;
+
+ SELECT count(*) FROM x;
+ }
+} {1000}
+do_test misc2-9.3 {
+ execsql {
+ DROP TABLE x;
+ CREATE TEMP TABLE x AS
+ SELECT dim1.n, dim2.n, dim3.n, dim4.n
+ FROM counts AS dim1, counts AS dim2, counts AS dim3, counts AS dim4
+ WHERE dim1.n<5 AND dim2.n<5 AND dim3.n<5 AND dim4.n<5;
+
+ SELECT count(*) FROM x;
+ }
+} [expr 5*5*5*5]
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/misc3.test b/usr/src/cmd/svc/configd/sqlite/test/misc3.test
new file mode 100644
index 0000000000..a91da5feb9
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/misc3.test
@@ -0,0 +1,307 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2003 December 17
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for the SQLite library.
+#
+# This file implements tests for miscellaneous features that were
+# left out of other test files.
+#
+# $Id: misc3.test,v 1.10 2004/03/17 23:32:08 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Ticket #529. Make sure an ABORT does not damage the in-memory cache
+# that will be used by subsequent statements in the same transaction.
+#
+do_test misc3-1.1 {
+ execsql {
+ CREATE TABLE t1(a UNIQUE,b);
+ INSERT INTO t1
+ VALUES(1,'a23456789_b23456789_c23456789_d23456789_e23456789_');
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ INSERT INTO t1 VALUES(2,'x');
+ UPDATE t1 SET b=substr(b,1,500);
+ BEGIN;
+ }
+ catchsql {UPDATE t1 SET a=CASE a WHEN 2 THEN 1 ELSE a END, b='y';}
+ execsql {
+ CREATE TABLE t2(x,y);
+ COMMIT;
+ PRAGMA integrity_check;
+ }
+} ok
+do_test misc3-1.2 {
+ execsql {
+ DROP TABLE t1;
+ DROP TABLE t2;
+ VACUUM;
+ CREATE TABLE t1(a UNIQUE,b);
+ INSERT INTO t1
+ VALUES(1,'a23456789_b23456789_c23456789_d23456789_e23456789_');
+ INSERT INTO t1 SELECT a+1, b||b FROM t1;
+ INSERT INTO t1 SELECT a+2, b||b FROM t1;
+ INSERT INTO t1 SELECT a+4, b FROM t1;
+ INSERT INTO t1 SELECT a+8, b FROM t1;
+ INSERT INTO t1 SELECT a+16, b FROM t1;
+ INSERT INTO t1 SELECT a+32, b FROM t1;
+ INSERT INTO t1 SELECT a+64, b FROM t1;
+
+ BEGIN;
+ }
+ catchsql {UPDATE t1 SET a=CASE a WHEN 128 THEN 127 ELSE a END, b='';}
+ execsql {
+ INSERT INTO t1 VALUES(200,'hello out there');
+ COMMIT;
+ PRAGMA integrity_check;
+ }
+} ok
+
+# Tests of the sqliteAtoF() function in util.c
+#
+do_test misc3-2.1 {
+ execsql {SELECT 2e-25*0.5e25}
+} 1
+do_test misc3-2.2 {
+ execsql {SELECT 2.0e-25*000000.500000000000000000000000000000e+00025}
+} 1
+do_test misc3-2.3 {
+ execsql {SELECT 000000000002e-0000000025*0.5e25}
+} 1
+do_test misc3-2.4 {
+ execsql {SELECT 2e-25*0.5e250}
+} 1e+225
+do_test misc3-2.5 {
+ execsql {SELECT 2.0e-250*0.5e25}
+} 1e-225
+do_test misc3-2.6 {
+ execsql {SELECT '-2.0e-127' * '-0.5e27'}
+} 1e-100
+do_test misc3-2.7 {
+ execsql {SELECT '+2.0e-127' * '-0.5e27'}
+} -1e-100
+do_test misc3-2.8 {
+ execsql {SELECT 2.0e-27 * '+0.5e+127'}
+} 1e+100
+do_test misc3-2.9 {
+ execsql {SELECT 2.0e-27 * '+0.000005e+132'}
+} 1e+100
+
+# Ticket #522. Make sure integer overflow is handled properly in
+# indices.
+#
+do_test misc3-3.1 {
+ execsql {PRAGMA integrity_check}
+} ok
+do_test misc3-3.2 {
+ execsql {
+ CREATE TABLE t2(a INT UNIQUE);
+ PRAGMA integrity_check;
+ }
+} ok
+do_test misc3-3.3 {
+ execsql {
+ INSERT INTO t2 VALUES(2147483648);
+ PRAGMA integrity_check;
+ }
+} ok
+do_test misc3-3.4 {
+ execsql {
+ INSERT INTO t2 VALUES(-2147483649);
+ PRAGMA integrity_check;
+ }
+} ok
+do_test misc3-3.5 {
+ execsql {
+ INSERT INTO t2 VALUES(+2147483649);
+ PRAGMA integrity_check;
+ }
+} ok
+do_test misc3-3.6 {
+ execsql {
+ INSERT INTO t2 VALUES(+2147483647);
+ INSERT INTO t2 VALUES(-2147483648);
+ INSERT INTO t2 VALUES(-2147483647);
+ INSERT INTO t2 VALUES(2147483646);
+ SELECT * FROM t2 ORDER BY a;
+ }
+} {-2147483649 -2147483648 -2147483647 2147483646 2147483647 2147483648 2147483649}
+do_test misc3-3.7 {
+ execsql {
+ SELECT * FROM t2 WHERE a>=-2147483648 ORDER BY a;
+ }
+} {-2147483648 -2147483647 2147483646 2147483647 2147483648 2147483649}
+do_test misc3-3.8 {
+ execsql {
+ SELECT * FROM t2 WHERE a>-2147483648 ORDER BY a;
+ }
+} {-2147483647 2147483646 2147483647 2147483648 2147483649}
+do_test misc3-3.9 {
+ execsql {
+ SELECT * FROM t2 WHERE a>-2147483649 ORDER BY a;
+ }
+} {-2147483648 -2147483647 2147483646 2147483647 2147483648 2147483649}
+do_test misc3-3.10 {
+ execsql {
+ SELECT * FROM t2 WHERE a>=0 AND a<2147483649 ORDER BY a DESC;
+ }
+} {2147483648 2147483647 2147483646}
+do_test misc3-3.11 {
+ execsql {
+ SELECT * FROM t2 WHERE a>=0 AND a<=2147483648 ORDER BY a DESC;
+ }
+} {2147483648 2147483647 2147483646}
+do_test misc3-3.12 {
+ execsql {
+ SELECT * FROM t2 WHERE a>=0 AND a<2147483648 ORDER BY a DESC;
+ }
+} {2147483647 2147483646}
+do_test misc3-3.13 {
+ execsql {
+ SELECT * FROM t2 WHERE a>=0 AND a<=2147483647 ORDER BY a DESC;
+ }
+} {2147483647 2147483646}
+do_test misc3-3.14 {
+ execsql {
+ SELECT * FROM t2 WHERE a>=0 AND a<2147483647 ORDER BY a DESC;
+ }
+} {2147483646}
+
+# Ticket #565. A stack overflow is occurring when the subquery to the
+# right of an IN operator contains many NULLs
+#
+do_test misc3-4.1 {
+ execsql {
+ CREATE TABLE t3(a INTEGER PRIMARY KEY, b);
+ INSERT INTO t3(b) VALUES('abc');
+ INSERT INTO t3(b) VALUES('xyz');
+ INSERT INTO t3(b) VALUES(NULL);
+ INSERT INTO t3(b) VALUES(NULL);
+ INSERT INTO t3(b) SELECT b||'d' FROM t3;
+ INSERT INTO t3(b) SELECT b||'e' FROM t3;
+ INSERT INTO t3(b) SELECT b||'f' FROM t3;
+ INSERT INTO t3(b) SELECT b||'g' FROM t3;
+ INSERT INTO t3(b) SELECT b||'h' FROM t3;
+ SELECT count(a), count(b) FROM t3;
+ }
+} {128 64}
+do_test misc3-4.2 {
+ execsql {
+ SELECT count(a) FROM t3 WHERE b IN (SELECT b FROM t3);
+ }
+} {64}
+do_test misc3-4.3 {
+ execsql {
+ SELECT count(a) FROM t3 WHERE b IN (SELECT b FROM t3 ORDER BY a+1);
+ }
+} {64}
+
+# Ticket #601: Putting a left join inside "SELECT * FROM (<join-here>)"
+# gives different results than if the outer "SELECT * FROM ..." is omitted.
+#
+do_test misc3-5.1 {
+ execsql {
+ CREATE TABLE x1 (b, c);
+ INSERT INTO x1 VALUES('dog',3);
+ INSERT INTO x1 VALUES('cat',1);
+ INSERT INTO x1 VALUES('dog',4);
+ CREATE TABLE x2 (c, e);
+ INSERT INTO x2 VALUES(1,'one');
+ INSERT INTO x2 VALUES(2,'two');
+ INSERT INTO x2 VALUES(3,'three');
+ INSERT INTO x2 VALUES(4,'four');
+ SELECT x2.c AS c, e, b FROM x2 LEFT JOIN
+ (SELECT b, max(c) AS c FROM x1 GROUP BY b)
+ USING(c);
+ }
+} {1 one cat 2 two {} 3 three {} 4 four dog}
+do_test misc3-5.2 {
+ execsql {
+ SELECT * FROM (
+ SELECT x2.c AS c, e, b FROM x2 LEFT JOIN
+ (SELECT b, max(c) AS c FROM x1 GROUP BY b)
+ USING(c)
+ );
+ }
+} {1 one cat 2 two {} 3 three {} 4 four dog}
+
+# Ticket #626: make sure EXPLAIN prevents BEGIN and COMMIT from working.
+#
+do_test misc3-6.1 {
+ execsql {EXPLAIN BEGIN}
+ catchsql {BEGIN}
+} {0 {}}
+do_test misc3-6.2 {
+ execsql {EXPLAIN COMMIT}
+ catchsql {COMMIT}
+} {0 {}}
+do_test misc3-6.3 {
+ execsql {BEGIN; EXPLAIN ROLLBACK}
+ catchsql {ROLLBACK}
+} {0 {}}
+
+# Ticket #640: vdbe stack overflow with a LIMIT clause on a SELECT inside
+# of a trigger.
+#
+do_test misc3-7.1 {
+ execsql {
+ BEGIN;
+ CREATE TABLE y1(a);
+ CREATE TABLE y2(b);
+ CREATE TABLE y3(c);
+ CREATE TRIGGER r1 AFTER DELETE ON y1 FOR EACH ROW BEGIN
+ INSERT INTO y3(c) SELECT b FROM y2 ORDER BY b LIMIT 1;
+ END;
+ INSERT INTO y1 VALUES(1);
+ INSERT INTO y1 VALUES(2);
+ INSERT INTO y1 SELECT a+2 FROM y1;
+ INSERT INTO y1 SELECT a+4 FROM y1;
+ INSERT INTO y1 SELECT a+8 FROM y1;
+ INSERT INTO y1 SELECT a+16 FROM y1;
+ INSERT INTO y2 SELECT a FROM y1;
+ COMMIT;
+ SELECT count(*) FROM y1;
+ }
+} 32
+do_test misc3-7.2 {
+ execsql {
+ DELETE FROM y1;
+ SELECT count(*) FROM y1;
+ }
+} 0
+do_test misc3-7.3 {
+ execsql {
+ SELECT count(*) FROM y3;
+ }
+} 32
+
+# Ticket #668: VDBE stack overflow occurs when the left-hand side
+# of an IN expression is NULL and the result is used as an integer, not
+# as a jump.
+#
+do_test misc3-8.1 {
+ execsql {
+ SELECT count(CASE WHEN b IN ('abc','xyz') THEN 'x' END) FROM t3
+ }
+} {2}
+do_test misc3-8.2 {
+ execsql {
+ SELECT count(*) FROM t3 WHERE 1+(b IN ('abc','xyz'))==2
+ }
+} {2}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/misuse.test b/usr/src/cmd/svc/configd/sqlite/test/misuse.test
new file mode 100644
index 0000000000..f4d15be304
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/misuse.test
@@ -0,0 +1,169 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2002 May 10
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for the SQLite library.
+#
+# This file implements tests for the SQLITE_MISUSE detection logic.
+# This test file leaks memory and file descriptors.
+#
+# $Id: misuse.test,v 1.4 2004/01/07 19:24:48 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Make sure the test logic works
+#
+do_test misuse-1.1 {
+ db close
+ catch {file delete -force test2.db}
+ set ::DB [sqlite db test2.db]
+ execsql {
+ CREATE TABLE t1(a,b);
+ INSERT INTO t1 VALUES(1,2);
+ }
+ sqlite_exec_printf $::DB {SELECT * FROM t1} {}
+} {0 {a b 1 2}}
+do_test misuse-1.2 {
+ sqlite_exec_printf $::DB {SELECT x_coalesce(NULL,a) AS 'xyz' FROM t1} {}
+} {1 {no such function: x_coalesce}}
+do_test misuse-1.3 {
+ sqlite_create_function $::DB
+ sqlite_exec_printf $::DB {SELECT x_coalesce(NULL,a) AS 'xyz' FROM t1} {}
+} {0 {xyz 1}}
+
+# Use the x_sqlite_exec() SQL function to simulate the effect of two
+# threads trying to use the same database at the same time.
+#
+# It used to be prohibited to invoke sqlite_exec() from within a function,
+# but that has changed. The following tests used to cause errors but now
+# they do not.
+#
+do_test misuse-1.4 {
+ sqlite_exec_printf $::DB {
+ SELECT x_sqlite_exec('SELECT * FROM t1') AS xyz;
+ } {}
+} {0 {xyz {1 2}}}
+do_test misuse-1.5 {
+ sqlite_exec_printf $::DB {SELECT * FROM t1} {}
+} {0 {a b 1 2}}
+do_test misuse-1.6 {
+ catchsql {
+ SELECT * FROM t1
+ }
+} {0 {1 2}}
+
+# Attempt to register a new SQL function while an sqlite_exec() is active.
+#
+do_test misuse-2.1 {
+ db close
+ set ::DB [sqlite db test2.db]
+ execsql {
+ SELECT * FROM t1
+ }
+} {1 2}
+do_test misuse-2.2 {
+ sqlite_exec_printf $::DB {SELECT * FROM t1} {}
+} {0 {a b 1 2}}
+do_test misuse-2.3 {
+ set v [catch {
+ db eval {SELECT * FROM t1} {} {
+ sqlite_create_function $::DB
+ }
+ } msg]
+ lappend v $msg
+} {1 {library routine called out of sequence}}
+do_test misuse-2.4 {
+ sqlite_exec_printf $::DB {SELECT * FROM t1} {}
+} {21 {library routine called out of sequence}}
+do_test misuse-2.5 {
+ catchsql {
+ SELECT * FROM t1
+ }
+} {1 {library routine called out of sequence}}
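+
+# The numeric code 21 returned above is SQLITE_MISUSE.  Once misuse has
+# been detected, the handle keeps returning this error until it is closed
+# and reopened, which each of the following groups does first.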
+
+# Attempt to register a new SQL aggregate while an sqlite_exec() is active.
+#
+do_test misuse-3.1 {
+ db close
+ set ::DB [sqlite db test2.db]
+ execsql {
+ SELECT * FROM t1
+ }
+} {1 2}
+do_test misuse-3.2 {
+ sqlite_exec_printf $::DB {SELECT * FROM t1} {}
+} {0 {a b 1 2}}
+do_test misuse-3.3 {
+ set v [catch {
+ db eval {SELECT * FROM t1} {} {
+ sqlite_create_aggregate $::DB
+ }
+ } msg]
+ lappend v $msg
+} {1 {library routine called out of sequence}}
+do_test misuse-3.4 {
+ sqlite_exec_printf $::DB {SELECT * FROM t1} {}
+} {21 {library routine called out of sequence}}
+do_test misuse-3.5 {
+ catchsql {
+ SELECT * FROM t1
+ }
+} {1 {library routine called out of sequence}}
+
+# Attempt to close the database from an sqlite_exec callback.
+#
+do_test misuse-4.1 {
+ db close
+ set ::DB [sqlite db test2.db]
+ execsql {
+ SELECT * FROM t1
+ }
+} {1 2}
+do_test misuse-4.2 {
+ sqlite_exec_printf $::DB {SELECT * FROM t1} {}
+} {0 {a b 1 2}}
+do_test misuse-4.3 {
+ set v [catch {
+ db eval {SELECT * FROM t1} {} {
+ sqlite_close $::DB
+ }
+ } msg]
+ lappend v $msg
+} {1 {library routine called out of sequence}}
+do_test misuse-4.4 {
+ sqlite_exec_printf $::DB {SELECT * FROM t1} {}
+} {21 {library routine called out of sequence}}
+do_test misuse-4.5 {
+ catchsql {
+ SELECT * FROM t1
+ }
+} {1 {library routine called out of sequence}}
+
+# Attempt to use a database after it has been closed.
+#
+do_test misuse-5.1 {
+ db close
+ set ::DB [sqlite db test2.db]
+ execsql {
+ SELECT * FROM t1
+ }
+} {1 2}
+do_test misuse-5.2 {
+ sqlite_exec_printf $::DB {SELECT * FROM t1} {}
+} {0 {a b 1 2}}
+do_test misuse-5.3 {
+ db close
+ sqlite_exec_printf $::DB {SELECT * FROM t1} {}
+} {21 {library routine called out of sequence}}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/notnull.test b/usr/src/cmd/svc/configd/sqlite/test/notnull.test
new file mode 100644
index 0000000000..cd2691a084
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/notnull.test
@@ -0,0 +1,503 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2002 January 29
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for the SQLite library.
+#
+# This file implements tests for the NOT NULL constraint.
+#
+# $Id: notnull.test,v 1.3 2003/01/29 18:46:54 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+do_test notnull-1.0 {
+ execsql {
+ CREATE TABLE t1 (
+ a NOT NULL,
+ b NOT NULL DEFAULT 5,
+ c NOT NULL ON CONFLICT REPLACE DEFAULT 6,
+ d NOT NULL ON CONFLICT IGNORE DEFAULT 7,
+ e NOT NULL ON CONFLICT ABORT DEFAULT 8
+ );
+ SELECT * FROM t1;
+ }
+} {}
+do_test notnull-1.1 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1(a,b,c,d,e) VALUES(1,2,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {1 2 3 4 5}}
+do_test notnull-1.2 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1(b,c,d,e) VALUES(2,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {1 {t1.a may not be NULL}}
+do_test notnull-1.3 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR IGNORE INTO t1(b,c,d,e) VALUES(2,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {}}
+do_test notnull-1.4 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR REPLACE INTO t1(b,c,d,e) VALUES(2,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {1 {t1.a may not be NULL}}
+do_test notnull-1.5 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR ABORT INTO t1(b,c,d,e) VALUES(2,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {1 {t1.a may not be NULL}}
+do_test notnull-1.6 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1(a,c,d,e) VALUES(1,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {1 5 3 4 5}}
+do_test notnull-1.7 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR IGNORE INTO t1(a,c,d,e) VALUES(1,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {1 5 3 4 5}}
+do_test notnull-1.8 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR REPLACE INTO t1(a,c,d,e) VALUES(1,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {1 5 3 4 5}}
+do_test notnull-1.9 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR ABORT INTO t1(a,c,d,e) VALUES(1,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {1 5 3 4 5}}
+do_test notnull-1.10 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1(a,b,c,d,e) VALUES(1,null,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {1 {t1.b may not be NULL}}
+do_test notnull-1.11 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR IGNORE INTO t1(a,b,c,d,e) VALUES(1,null,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {}}
+do_test notnull-1.12 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR REPLACE INTO t1(a,b,c,d,e) VALUES(1,null,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {1 5 3 4 5}}
+do_test notnull-1.13 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1(a,b,c,d,e) VALUES(1,2,null,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {1 2 6 4 5}}
+do_test notnull-1.14 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR IGNORE INTO t1(a,b,c,d,e) VALUES(1,2,null,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {}}
+do_test notnull-1.15 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR REPLACE INTO t1(a,b,c,d,e) VALUES(1,2,null,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {1 2 6 4 5}}
+do_test notnull-1.16 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR ABORT INTO t1(a,b,c,d,e) VALUES(1,2,null,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {1 {t1.c may not be NULL}}
+do_test notnull-1.17 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR ABORT INTO t1(a,b,c,d,e) VALUES(1,2,3,null,5);
+ SELECT * FROM t1 order by a;
+ }
+} {1 {t1.d may not be NULL}}
+do_test notnull-1.18 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR ABORT INTO t1(a,b,c,e) VALUES(1,2,3,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {1 2 3 7 5}}
+do_test notnull-1.19 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1(a,b,c,d) VALUES(1,2,3,4);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {1 2 3 4 8}}
+do_test notnull-1.20 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1(a,b,c,d,e) VALUES(1,2,3,4,null);
+ SELECT * FROM t1 order by a;
+ }
+} {1 {t1.e may not be NULL}}
+do_test notnull-1.21 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR REPLACE INTO t1(e,d,c,b,a) VALUES(1,2,3,null,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {5 5 3 2 1}}
+
+do_test notnull-2.1 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES(1,2,3,4,5);
+ UPDATE t1 SET a=null;
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {1 {t1.a may not be NULL}}
+do_test notnull-2.2 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES(1,2,3,4,5);
+ UPDATE OR REPLACE t1 SET a=null;
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {1 {t1.a may not be NULL}}
+do_test notnull-2.3 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES(1,2,3,4,5);
+ UPDATE OR IGNORE t1 SET a=null;
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {0 {1 2 3 4 5}}
+do_test notnull-2.4 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES(1,2,3,4,5);
+ UPDATE OR ABORT t1 SET a=null;
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {1 {t1.a may not be NULL}}
+do_test notnull-2.5 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES(1,2,3,4,5);
+ UPDATE t1 SET b=null;
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {1 {t1.b may not be NULL}}
+do_test notnull-2.6 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES(1,2,3,4,5);
+ UPDATE OR REPLACE t1 SET b=null, d=e, e=d;
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {0 {1 5 3 5 4}}
+do_test notnull-2.7 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES(1,2,3,4,5);
+ UPDATE OR IGNORE t1 SET b=null, d=e, e=d;
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {0 {1 2 3 4 5}}
+do_test notnull-2.8 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES(1,2,3,4,5);
+ UPDATE t1 SET c=null, d=e, e=d;
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {0 {1 2 6 5 4}}
+do_test notnull-2.9 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES(1,2,3,4,5);
+ UPDATE t1 SET d=null, a=b, b=a;
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {0 {1 2 3 4 5}}
+do_test notnull-2.10 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES(1,2,3,4,5);
+ UPDATE t1 SET e=null, a=b, b=a;
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {1 {t1.e may not be NULL}}
+
+do_test notnull-3.0 {
+ execsql {
+ CREATE INDEX t1a ON t1(a);
+ CREATE INDEX t1b ON t1(b);
+ CREATE INDEX t1c ON t1(c);
+ CREATE INDEX t1d ON t1(d);
+ CREATE INDEX t1e ON t1(e);
+ CREATE INDEX t1abc ON t1(a,b,c);
+ }
+} {}
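+
+# The notnull-3.* and notnull-4.* tests below repeat the INSERT and UPDATE
+# conflict-resolution cases from sections 1 and 2, now that every column
+# (and the composite a,b,c) is indexed, to check that the same NOT NULL
+# handling also keeps the indexes consistent.
+#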
+do_test notnull-3.1 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1(a,b,c,d,e) VALUES(1,2,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {1 2 3 4 5}}
+do_test notnull-3.2 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1(b,c,d,e) VALUES(2,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {1 {t1.a may not be NULL}}
+do_test notnull-3.3 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR IGNORE INTO t1(b,c,d,e) VALUES(2,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {}}
+do_test notnull-3.4 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR REPLACE INTO t1(b,c,d,e) VALUES(2,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {1 {t1.a may not be NULL}}
+do_test notnull-3.5 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR ABORT INTO t1(b,c,d,e) VALUES(2,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {1 {t1.a may not be NULL}}
+do_test notnull-3.6 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1(a,c,d,e) VALUES(1,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {1 5 3 4 5}}
+do_test notnull-3.7 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR IGNORE INTO t1(a,c,d,e) VALUES(1,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {1 5 3 4 5}}
+do_test notnull-3.8 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR REPLACE INTO t1(a,c,d,e) VALUES(1,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {1 5 3 4 5}}
+do_test notnull-3.9 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR ABORT INTO t1(a,c,d,e) VALUES(1,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {1 5 3 4 5}}
+do_test notnull-3.10 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1(a,b,c,d,e) VALUES(1,null,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {1 {t1.b may not be NULL}}
+do_test notnull-3.11 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR IGNORE INTO t1(a,b,c,d,e) VALUES(1,null,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {}}
+do_test notnull-3.12 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR REPLACE INTO t1(a,b,c,d,e) VALUES(1,null,3,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {1 5 3 4 5}}
+do_test notnull-3.13 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1(a,b,c,d,e) VALUES(1,2,null,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {1 2 6 4 5}}
+do_test notnull-3.14 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR IGNORE INTO t1(a,b,c,d,e) VALUES(1,2,null,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {}}
+do_test notnull-3.15 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR REPLACE INTO t1(a,b,c,d,e) VALUES(1,2,null,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {1 2 6 4 5}}
+do_test notnull-3.16 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR ABORT INTO t1(a,b,c,d,e) VALUES(1,2,null,4,5);
+ SELECT * FROM t1 order by a;
+ }
+} {1 {t1.c may not be NULL}}
+do_test notnull-3.17 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR ABORT INTO t1(a,b,c,d,e) VALUES(1,2,3,null,5);
+ SELECT * FROM t1 order by a;
+ }
+} {1 {t1.d may not be NULL}}
+do_test notnull-3.18 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR ABORT INTO t1(a,b,c,e) VALUES(1,2,3,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {1 2 3 7 5}}
+do_test notnull-3.19 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1(a,b,c,d) VALUES(1,2,3,4);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {1 2 3 4 8}}
+do_test notnull-3.20 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1(a,b,c,d,e) VALUES(1,2,3,4,null);
+ SELECT * FROM t1 order by a;
+ }
+} {1 {t1.e may not be NULL}}
+do_test notnull-3.21 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT OR REPLACE INTO t1(e,d,c,b,a) VALUES(1,2,3,null,5);
+ SELECT * FROM t1 order by a;
+ }
+} {0 {5 5 3 2 1}}
+
+do_test notnull-4.1 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES(1,2,3,4,5);
+ UPDATE t1 SET a=null;
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {1 {t1.a may not be NULL}}
+do_test notnull-4.2 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES(1,2,3,4,5);
+ UPDATE OR REPLACE t1 SET a=null;
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {1 {t1.a may not be NULL}}
+do_test notnull-4.3 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES(1,2,3,4,5);
+ UPDATE OR IGNORE t1 SET a=null;
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {0 {1 2 3 4 5}}
+do_test notnull-4.4 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES(1,2,3,4,5);
+ UPDATE OR ABORT t1 SET a=null;
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {1 {t1.a may not be NULL}}
+do_test notnull-4.5 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES(1,2,3,4,5);
+ UPDATE t1 SET b=null;
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {1 {t1.b may not be NULL}}
+do_test notnull-4.6 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES(1,2,3,4,5);
+ UPDATE OR REPLACE t1 SET b=null, d=e, e=d;
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {0 {1 5 3 5 4}}
+do_test notnull-4.7 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES(1,2,3,4,5);
+ UPDATE OR IGNORE t1 SET b=null, d=e, e=d;
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {0 {1 2 3 4 5}}
+do_test notnull-4.8 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES(1,2,3,4,5);
+ UPDATE t1 SET c=null, d=e, e=d;
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {0 {1 2 6 5 4}}
+do_test notnull-4.9 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES(1,2,3,4,5);
+ UPDATE t1 SET d=null, a=b, b=a;
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {0 {1 2 3 4 5}}
+do_test notnull-4.10 {
+ catchsql {
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES(1,2,3,4,5);
+ UPDATE t1 SET e=null, a=b, b=a;
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {1 {t1.e may not be NULL}}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/null.test b/usr/src/cmd/svc/configd/sqlite/test/null.test
new file mode 100644
index 0000000000..6c816d8584
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/null.test
@@ -0,0 +1,240 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library.
+#
+# This file implements tests for proper treatment of the special
+# value NULL.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Create a table and some data to work with.
+#
+do_test null-1.0 {
+ execsql {
+ begin;
+ create table t1(a,b,c);
+ insert into t1 values(1,0,0);
+ insert into t1 values(2,0,1);
+ insert into t1 values(3,1,0);
+ insert into t1 values(4,1,1);
+ insert into t1 values(5,null,0);
+ insert into t1 values(6,null,1);
+ insert into t1 values(7,null,null);
+ commit;
+ select * from t1;
+ }
+} {1 0 0 2 0 1 3 1 0 4 1 1 5 {} 0 6 {} 1 7 {} {}}
+
+# Check how arithmetic expressions handle NULL values.
+#
+do_test null-1.1 {
+ execsql {
+ select ifnull(a+b,99) from t1;
+ }
+} {1 2 4 5 99 99 99}
+do_test null-1.2 {
+ execsql {
+ select ifnull(b*c,99) from t1;
+ }
+} {0 0 0 1 99 99 99}
+
+# Check to see how the CASE expression handles NULL values. The
+# first WHEN for which the test expression is TRUE is selected.
+# FALSE and UNKNOWN test expressions are skipped.
+#
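+# As an illustrative sketch (editor-added, not part of the original test
+# numbering): row a=7 has b NULL, so the test expression b<>0 is UNKNOWN,
+# the WHEN arm is skipped, and the ELSE branch supplies the value.
+do_test null-2.0-sketch {
+  execsql {
+    select case when b<>0 then 1 else 0 end from t1 where a=7;
+  }
+} {0}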
+do_test null-2.1 {
+ execsql {
+ select ifnull(case when b<>0 then 1 else 0 end, 99) from t1;
+ }
+} {0 0 1 1 0 0 0}
+do_test null-2.2 {
+ execsql {
+ select ifnull(case when not b<>0 then 1 else 0 end, 99) from t1;
+ }
+} {1 1 0 0 0 0 0}
+do_test null-2.3 {
+ execsql {
+ select ifnull(case when b<>0 and c<>0 then 1 else 0 end, 99) from t1;
+ }
+} {0 0 0 1 0 0 0}
+do_test null-2.4 {
+ execsql {
+ select ifnull(case when not (b<>0 and c<>0) then 1 else 0 end, 99) from t1;
+ }
+} {1 1 1 0 1 0 0}
+do_test null-2.5 {
+ execsql {
+ select ifnull(case when b<>0 or c<>0 then 1 else 0 end, 99) from t1;
+ }
+} {0 1 1 1 0 1 0}
+do_test null-2.6 {
+ execsql {
+ select ifnull(case when not (b<>0 or c<>0) then 1 else 0 end, 99) from t1;
+ }
+} {1 0 0 0 0 0 0}
+do_test null-2.7 {
+ execsql {
+ select ifnull(case b when c then 1 else 0 end, 99) from t1;
+ }
+} {1 0 0 1 0 0 0}
+do_test null-2.8 {
+ execsql {
+ select ifnull(case c when b then 1 else 0 end, 99) from t1;
+ }
+} {1 0 0 1 0 0 0}
+
+# Check to see that NULL values are ignored in aggregate functions.
+# (count(*) is the exception: it counts rows even when columns are NULL.)
+#
+do_test null-3.1 {
+ execsql {
+ select count(*), count(b), count(c), sum(b), sum(c),
+ avg(b), avg(c), min(b), max(b) from t1;
+ }
+} {7 4 6 2 3 0.5 0.5 0 1}
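+
+# In the result above, count(*) is 7 because it counts every row, while
+# count(b) is 4 and count(c) is 6 because only non-NULL values are counted.
+# avg(b) is sum(b)/count(b) = 2/4 = 0.5, i.e. the NULLs are skipped rather
+# than treated as zero.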
+
+# Check to see how WHERE clauses handle NULL values. A NULL value
+# is the same as UNKNOWN. The WHERE clause should only select those
+# rows that are TRUE. FALSE and UNKNOWN rows are rejected.
+#
+do_test null-4.1 {
+ execsql {
+ select a from t1 where b<10
+ }
+} {1 2 3 4}
+do_test null-4.2 {
+ execsql {
+ select a from t1 where not b>10
+ }
+} {1 2 3 4}
+do_test null-4.3 {
+ execsql {
+ select a from t1 where b<10 or c=1;
+ }
+} {1 2 3 4 6}
+do_test null-4.4 {
+ execsql {
+ select a from t1 where b<10 and c=1;
+ }
+} {2 4}
+do_test null-4.5 {
+ execsql {
+ select a from t1 where not (b<10 and c=1);
+ }
+} {1 3 5}
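+
+# A further editor-added sketch of the same three-valued logic: because
+# NULL=0 is UNKNOWN, NOT UNKNOWN is still UNKNOWN, and UNKNOWN OR UNKNOWN
+# is UNKNOWN, the expression "b=0 OR NOT b=0" is not a tautology; rows
+# with a NULL b are rejected.  (The test name is hypothetical, not part of
+# the original suite.)
+do_test null-4.6-sketch {
+  execsql {
+    select a from t1 where b=0 or not b=0;
+  }
+} {1 2 3 4}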
+
+# The DISTINCT keyword on a SELECT statement should treat NULL as a
+# distinct value; all NULLs compare equal to one another here, so a
+# single NULL row appears in the result
+#
+do_test null-5.1 {
+ execsql {
+ select distinct b from t1 order by b;
+ }
+} {{} 0 1}
+
+# A UNION of two queries should treat NULL values the same way: the NULLs
+# from both queries collapse into a single NULL row in the result
+#
+do_test null-6.1 {
+ execsql {
+ select b from t1 union select c from t1 order by c;
+ }
+} {{} 0 1}
+
+# The UNIQUE constraint only applies to non-null values
+#
+do_test null-7.1 {
+ execsql {
+ create table t2(a, b unique on conflict ignore);
+ insert into t2 values(1,1);
+ insert into t2 values(2,null);
+ insert into t2 values(3,null);
+ insert into t2 values(4,1);
+ select a from t2;
+ }
+} {1 2 3}
+do_test null-7.2 {
+ execsql {
+ create table t3(a, b, c, unique(b,c) on conflict ignore);
+ insert into t3 values(1,1,1);
+ insert into t3 values(2,null,1);
+ insert into t3 values(3,null,1);
+ insert into t3 values(4,1,1);
+ select a from t3;
+ }
+} {1 2 3}
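+
+# In both cases above the rows with NULL keys (a=2 and a=3) are kept while
+# the duplicate non-NULL key (a=4) is dropped by ON CONFLICT IGNORE: NULLs
+# never count as duplicates of one another for UNIQUE enforcement.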
+
+# Ticket #461 - Make sure nulls are handled correctly when doing a
+# lookup using an index.
+#
+do_test null-8.1 {
+ execsql {
+ CREATE TABLE t4(x,y);
+ INSERT INTO t4 VALUES(1,11);
+ INSERT INTO t4 VALUES(2,NULL);
+ SELECT x FROM t4 WHERE y=NULL;
+ }
+} {}
+do_test null-8.2 {
+ execsql {
+ SELECT x FROM t4 WHERE y IN (33,NULL);
+ }
+} {}
+do_test null-8.3 {
+ execsql {
+ SELECT x FROM t4 WHERE y<33 ORDER BY x;
+ }
+} {1}
+do_test null-8.4 {
+ execsql {
+ SELECT x FROM t4 WHERE y>6 ORDER BY x;
+ }
+} {1}
+do_test null-8.5 {
+ execsql {
+ SELECT x FROM t4 WHERE y!=33 ORDER BY x;
+ }
+} {1}
+do_test null-8.11 {
+ execsql {
+ CREATE INDEX t4i1 ON t4(y);
+ SELECT x FROM t4 WHERE y=NULL;
+ }
+} {}
+do_test null-8.12 {
+ execsql {
+ SELECT x FROM t4 WHERE y IN (33,NULL);
+ }
+} {}
+do_test null-8.13 {
+ execsql {
+ SELECT x FROM t4 WHERE y<33 ORDER BY x;
+ }
+} {1}
+do_test null-8.14 {
+ execsql {
+ SELECT x FROM t4 WHERE y>6 ORDER BY x;
+ }
+} {1}
+do_test null-8.15 {
+ execsql {
+ SELECT x FROM t4 WHERE y!=33 ORDER BY x;
+ }
+} {1}
+
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/pager.test b/usr/src/cmd/svc/configd/sqlite/test/pager.test
new file mode 100644
index 0000000000..3f68719e34
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/pager.test
@@ -0,0 +1,426 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this script is the page cache subsystem.
+#
+# $Id: pager.test,v 1.14 2004/02/25 02:20:42 drh Exp $
+
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+if {[info commands pager_open]!=""} {
+db close
+
+# Basic sanity check. Open and close a pager.
+#
+do_test pager-1.0 {
+ catch {file delete -force ptf1.db}
+ catch {file delete -force ptf1.db-journal}
+ set v [catch {
+ set ::p1 [pager_open ptf1.db 10]
+ } msg]
+} {0}
+do_test pager-1.1 {
+ pager_stats $::p1
+} {ref 0 page 0 max 10 size -1 state 0 err 0 hit 0 miss 0 ovfl 0}
+do_test pager-1.2 {
+ pager_pagecount $::p1
+} {0}
+do_test pager-1.3 {
+ pager_stats $::p1
+} {ref 0 page 0 max 10 size -1 state 0 err 0 hit 0 miss 0 ovfl 0}
+do_test pager-1.4 {
+ pager_close $::p1
+} {}
+
+# Try to write a few pages.
+#
+do_test pager-2.1 {
+ set v [catch {
+ set ::p1 [pager_open ptf1.db 10]
+ } msg]
+} {0}
+#do_test pager-2.2 {
+# set v [catch {
+# set ::g1 [page_get $::p1 0]
+# } msg]
+# lappend v $msg
+#} {1 SQLITE_ERROR}
+do_test pager-2.3.1 {
+ set ::gx [page_lookup $::p1 1]
+} {}
+do_test pager-2.3.2 {
+ pager_stats $::p1
+} {ref 0 page 0 max 10 size -1 state 0 err 0 hit 0 miss 0 ovfl 0}
+do_test pager-2.3.3 {
+ set v [catch {
+ set ::g1 [page_get $::p1 1]
+ } msg]
+ if {$v} {lappend v $msg}
+ set v
+} {0}
+do_test pager-2.3.3 {
+ pager_stats $::p1
+} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 0 miss 1 ovfl 0}
+do_test pager-2.3.4 {
+ set ::gx [page_lookup $::p1 1]
+ expr {$::gx!=""}
+} {1}
+do_test pager-2.3.5 {
+ pager_stats $::p1
+} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 0 miss 1 ovfl 0}
+do_test pager-2.3.6 {
+ expr $::g1==$::gx
+} {1}
+do_test pager-2.3.7 {
+ page_unref $::gx
+ pager_stats $::p1
+} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 0 miss 1 ovfl 0}
+do_test pager-2.4 {
+ pager_stats $::p1
+} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 0 miss 1 ovfl 0}
+do_test pager-2.5 {
+ pager_pagecount $::p1
+} {0}
+do_test pager-2.6 {
+ pager_stats $::p1
+} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 0 miss 1 ovfl 0}
+do_test pager-2.7 {
+ page_number $::g1
+} {1}
+do_test pager-2.8 {
+ page_read $::g1
+} {}
+do_test pager-2.9 {
+ page_unref $::g1
+} {}
+do_test pager-2.10 {
+ pager_stats $::p1
+} {ref 0 page 0 max 10 size -1 state 0 err 0 hit 0 miss 1 ovfl 0}
+do_test pager-2.11 {
+ set ::g1 [page_get $::p1 1]
+ expr {$::g1!=0}
+} {1}
+do_test pager-2.12 {
+ page_number $::g1
+} {1}
+do_test pager-2.13 {
+ pager_stats $::p1
+} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 0 miss 2 ovfl 0}
+do_test pager-2.14 {
+ set v [catch {
+ page_write $::g1 "Page-One"
+ } msg]
+ lappend v $msg
+} {0 {}}
+do_test pager-2.15 {
+ pager_stats $::p1
+} {ref 1 page 1 max 10 size 1 state 2 err 0 hit 0 miss 2 ovfl 0}
+do_test pager-2.16 {
+ page_read $::g1
+} {Page-One}
+do_test pager-2.17 {
+ set v [catch {
+ pager_commit $::p1
+ } msg]
+ lappend v $msg
+} {0 {}}
+do_test pager-2.20 {
+ pager_stats $::p1
+} {ref 1 page 1 max 10 size -1 state 1 err 0 hit 0 miss 2 ovfl 0}
+do_test pager-2.19 {
+ pager_pagecount $::p1
+} {1}
+do_test pager-2.21 {
+ pager_stats $::p1
+} {ref 1 page 1 max 10 size 1 state 1 err 0 hit 0 miss 2 ovfl 0}
+do_test pager-2.22 {
+ page_unref $::g1
+} {}
+do_test pager-2.23 {
+ pager_stats $::p1
+} {ref 0 page 0 max 10 size -1 state 0 err 0 hit 0 miss 2 ovfl 0}
+do_test pager-2.24 {
+ set v [catch {
+ page_get $::p1 1
+ } ::g1]
+ if {$v} {lappend v $::g1}
+ set v
+} {0}
+do_test pager-2.25 {
+ page_read $::g1
+} {Page-One}
+do_test pager-2.26 {
+ set v [catch {
+ page_write $::g1 {page-one}
+ } msg]
+ lappend v $msg
+} {0 {}}
+do_test pager-2.27 {
+ page_read $::g1
+} {page-one}
+do_test pager-2.28 {
+ set v [catch {
+ pager_rollback $::p1
+ } msg]
+ lappend v $msg
+} {0 {}}
+do_test pager-2.29 {
+ page_unref $::g1
+ set ::g1 [page_get $::p1 1]
+ page_read $::g1
+} {Page-One}
+do_test pager-2.99 {
+ pager_close $::p1
+} {}
+
+do_test pager-3.1 {
+ set v [catch {
+ set ::p1 [pager_open ptf1.db 15]
+ } msg]
+ if {$v} {lappend v $msg}
+ set v
+} {0}
+do_test pager-3.2 {
+ pager_pagecount $::p1
+} {1}
+do_test pager-3.3 {
+ set v [catch {
+ set ::g(1) [page_get $::p1 1]
+ } msg]
+ if {$v} {lappend v $msg}
+ set v
+} {0}
+do_test pager-3.4 {
+ page_read $::g(1)
+} {Page-One}
+do_test pager-3.5 {
+ for {set i 2} {$i<=20} {incr i} {
+ set gx [page_get $::p1 $i]
+ page_write $gx "Page-$i"
+ page_unref $gx
+ }
+ pager_commit $::p1
+} {}
+for {set i 2} {$i<=20} {incr i} {
+ do_test pager-3.6.[expr {$i-1}] [subst {
+ set gx \[page_get $::p1 $i\]
+ set v \[page_read \$gx\]
+ page_unref \$gx
+ set v
+ }] "Page-$i"
+}
+for {set i 1} {$i<=20} {incr i} {
+ regsub -all CNT {
+ set ::g1 [page_get $::p1 CNT]
+ set ::g2 [page_get $::p1 CNT]
+ set ::vx [page_read $::g2]
+ expr {$::g1==$::g2}
+ } $i body;
+ do_test pager-3.7.$i.1 $body {1}
+ regsub -all CNT {
+ page_unref $::g2
+ set vy [page_read $::g1]
+ expr {$vy==$::vx}
+ } $i body;
+ do_test pager-3.7.$i.2 $body {1}
+ regsub -all CNT {
+ page_unref $::g1
+ set gx [page_get $::p1 CNT]
+ set vy [page_read $gx]
+ page_unref $gx
+ expr {$vy==$::vx}
+ } $i body;
+ do_test pager-3.7.$i.3 $body {1}
+}
+do_test pager-3.99 {
+ pager_close $::p1
+} {}
+
+# Tests of the checkpoint mechanism and API.
+#
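+# (Editor's note, hedged: in this version of the pager a "checkpoint" is a
+# nested sub-transaction used for statement-level rollback.  As the tests
+# below exercise it, pager_ckpt_begin opens the checkpoint,
+# pager_ckpt_rollback undoes only the writes made since that point, and
+# pager_ckpt_commit folds those writes into the enclosing transaction.)
+#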
+do_test pager-4.0 {
+ set v [catch {
+ file delete -force ptf1.db
+ set ::p1 [pager_open ptf1.db 15]
+ } msg]
+ if {$v} {lappend v $msg}
+ set v
+} {0}
+do_test pager-4.1 {
+ set g1 [page_get $::p1 1]
+ page_write $g1 "Page-1 v0"
+ for {set i 2} {$i<=20} {incr i} {
+ set gx [page_get $::p1 $i]
+ page_write $gx "Page-$i v0"
+ page_unref $gx
+ }
+ pager_commit $::p1
+} {}
+for {set i 1} {$i<=20} {incr i} {
+ do_test pager-4.2.$i {
+ set gx [page_get $p1 $i]
+ set v [page_read $gx]
+ page_unref $gx
+ set v
+ } "Page-$i v0"
+}
+do_test pager-4.3 {
+ lrange [pager_stats $::p1] 0 1
+} {ref 1}
+do_test pager-4.4 {
+ lrange [pager_stats $::p1] 8 9
+} {state 1}
+
+for {set i 1} {$i<20} {incr i} {
+ do_test pager-4.5.$i.0 {
+ set res {}
+ for {set j 2} {$j<=20} {incr j} {
+ set gx [page_get $p1 $j]
+ set value [page_read $gx]
+ page_unref $gx
+ set shouldbe "Page-$j v[expr {$i-1}]"
+ if {$value!=$shouldbe} {
+ lappend res $value $shouldbe
+ }
+ }
+ set res
+ } {}
+ do_test pager-4.5.$i.1 {
+ page_write $g1 "Page-1 v$i"
+ lrange [pager_stats $p1] 8 9
+ } {state 2}
+ do_test pager-4.5.$i.2 {
+ for {set j 2} {$j<=20} {incr j} {
+ set gx [page_get $p1 $j]
+ page_write $gx "Page-$j v$i"
+ page_unref $gx
+ if {$j==$i} {
+ pager_ckpt_begin $p1
+ }
+ }
+ } {}
+ do_test pager-4.5.$i.3 {
+ set res {}
+ for {set j 2} {$j<=20} {incr j} {
+ set gx [page_get $p1 $j]
+ set value [page_read $gx]
+ page_unref $gx
+ set shouldbe "Page-$j v$i"
+ if {$value!=$shouldbe} {
+ lappend res $value $shouldbe
+ }
+ }
+ set res
+ } {}
+ do_test pager-4.5.$i.4 {
+ pager_rollback $p1
+ set res {}
+ for {set j 2} {$j<=20} {incr j} {
+ set gx [page_get $p1 $j]
+ set value [page_read $gx]
+ page_unref $gx
+ set shouldbe "Page-$j v[expr {$i-1}]"
+ if {$value!=$shouldbe} {
+ lappend res $value $shouldbe
+ }
+ }
+ set res
+ } {}
+ do_test pager-4.5.$i.5 {
+ page_write $g1 "Page-1 v$i"
+ lrange [pager_stats $p1] 8 9
+ } {state 2}
+ do_test pager-4.5.$i.6 {
+ for {set j 2} {$j<=20} {incr j} {
+ set gx [page_get $p1 $j]
+ page_write $gx "Page-$j v$i"
+ page_unref $gx
+ if {$j==$i} {
+ pager_ckpt_begin $p1
+ }
+ }
+ } {}
+ do_test pager-4.5.$i.7 {
+ pager_ckpt_rollback $p1
+ for {set j 2} {$j<=20} {incr j} {
+ set gx [page_get $p1 $j]
+ set value [page_read $gx]
+ page_unref $gx
+ if {$j<=$i || $i==1} {
+ set shouldbe "Page-$j v$i"
+ } else {
+ set shouldbe "Page-$j v[expr {$i-1}]"
+ }
+ if {$value!=$shouldbe} {
+ lappend res $value $shouldbe
+ }
+ }
+ set res
+ } {}
+ do_test pager-4.5.$i.8 {
+ for {set j 2} {$j<=20} {incr j} {
+ set gx [page_get $p1 $j]
+ page_write $gx "Page-$j v$i"
+ page_unref $gx
+ if {$j==$i} {
+ pager_ckpt_begin $p1
+ }
+ }
+ } {}
+ do_test pager-4.5.$i.9 {
+ pager_ckpt_commit $p1
+ for {set j 2} {$j<=20} {incr j} {
+ set gx [page_get $p1 $j]
+ set value [page_read $gx]
+ page_unref $gx
+ set shouldbe "Page-$j v$i"
+ if {$value!=$shouldbe} {
+ lappend res $value $shouldbe
+ }
+ }
+ set res
+ } {}
+ do_test pager-4.5.$i.10 {
+ pager_commit $p1
+ lrange [pager_stats $p1] 8 9
+ } {state 1}
+}
+
+do_test pager-4.99 {
+ pager_close $::p1
+} {}
+
+
+
+ file delete -force ptf1.db
+
+} ;# end if( not mem: and has pager_open command );
+
+# Ticket #615: an assertion fault inside the pager. It is a benign
+# fault, but we might as well test for it.
+#
+do_test pager-5.1 {
+ sqlite db test.db
+ execsql {
+ BEGIN;
+ CREATE TABLE t1(x);
+ PRAGMA synchronous=off;
+ COMMIT;
+ }
+} {}
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/pragma.test b/usr/src/cmd/svc/configd/sqlite/test/pragma.test
new file mode 100644
index 0000000000..f7cf3fa6a3
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/pragma.test
@@ -0,0 +1,420 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2002 March 6
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library.
+#
+# This file implements tests for the PRAGMA command.
+#
+# $Id: pragma.test,v 1.9 2004/04/23 17:04:45 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Delete the preexisting database to avoid the special setup
+# that the "all.test" script does.
+#
+db close
+file delete test.db
+set DB [sqlite db test.db]
+
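+# The pragma-1.* tests below check that cache_size and synchronous apply
+# only to the current connection and revert when the database is reopened,
+# while the default_cache_size and default_synchronous settings persist in
+# the database file across close/reopen.
+#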
+do_test pragma-1.1 {
+ execsql {
+ PRAGMA cache_size;
+ PRAGMA default_cache_size;
+ PRAGMA synchronous;
+ PRAGMA default_synchronous;
+ }
+} {2000 2000 1 1}
+do_test pragma-1.2 {
+ execsql {
+ PRAGMA cache_size=1234;
+ PRAGMA cache_size;
+ PRAGMA default_cache_size;
+ PRAGMA synchronous;
+ PRAGMA default_synchronous;
+ }
+} {1234 2000 1 1}
+do_test pragma-1.3 {
+ db close
+ sqlite db test.db
+ execsql {
+ PRAGMA cache_size;
+ PRAGMA default_cache_size;
+ PRAGMA synchronous;
+ PRAGMA default_synchronous;
+ }
+} {2000 2000 1 1}
+do_test pragma-1.4 {
+ execsql {
+ PRAGMA synchronous=OFF;
+ PRAGMA cache_size;
+ PRAGMA default_cache_size;
+ PRAGMA synchronous;
+ PRAGMA default_synchronous;
+ }
+} {2000 2000 0 1}
+do_test pragma-1.5 {
+ execsql {
+ PRAGMA cache_size=4321;
+ PRAGMA cache_size;
+ PRAGMA default_cache_size;
+ PRAGMA synchronous;
+ PRAGMA default_synchronous;
+ }
+} {4321 2000 0 1}
+do_test pragma-1.6 {
+ execsql {
+ PRAGMA synchronous=ON;
+ PRAGMA cache_size;
+ PRAGMA default_cache_size;
+ PRAGMA synchronous;
+ PRAGMA default_synchronous;
+ }
+} {4321 2000 1 1}
+do_test pragma-1.7 {
+ db close
+ sqlite db test.db
+ execsql {
+ PRAGMA cache_size;
+ PRAGMA default_cache_size;
+ PRAGMA synchronous;
+ PRAGMA default_synchronous;
+ }
+} {2000 2000 1 1}
+do_test pragma-1.8 {
+ execsql {
+ PRAGMA default_synchronous=OFF;
+ PRAGMA cache_size;
+ PRAGMA default_cache_size;
+ PRAGMA synchronous;
+ PRAGMA default_synchronous;
+ }
+} {2000 2000 0 0}
+do_test pragma-1.9 {
+ execsql {
+ PRAGMA default_cache_size=123;
+ PRAGMA cache_size;
+ PRAGMA default_cache_size;
+ PRAGMA synchronous;
+ PRAGMA default_synchronous;
+ }
+} {123 123 0 0}
+do_test pragma-1.10 {
+ db close
+ set ::DB [sqlite db test.db]
+ execsql {
+ PRAGMA cache_size;
+ PRAGMA default_cache_size;
+ PRAGMA synchronous;
+ PRAGMA default_synchronous;
+ }
+} {123 123 0 0}
+do_test pragma-1.11 {
+ execsql {
+ PRAGMA synchronous=NORMAL;
+ PRAGMA cache_size;
+ PRAGMA default_cache_size;
+ PRAGMA synchronous;
+ PRAGMA default_synchronous;
+ }
+} {123 123 1 0}
+do_test pragma-1.12 {
+ execsql {
+ PRAGMA synchronous=FULL;
+ PRAGMA cache_size;
+ PRAGMA default_cache_size;
+ PRAGMA synchronous;
+ PRAGMA default_synchronous;
+ }
+} {123 123 2 0}
+do_test pragma-1.13 {
+ db close
+ set ::DB [sqlite db test.db]
+ execsql {
+ PRAGMA cache_size;
+ PRAGMA default_cache_size;
+ PRAGMA synchronous;
+ PRAGMA default_synchronous;
+ }
+} {123 123 0 0}
+do_test pragma-1.14 {
+ execsql {
+ PRAGMA default_synchronous=FULL;
+ PRAGMA cache_size;
+ PRAGMA default_cache_size;
+ PRAGMA synchronous;
+ PRAGMA default_synchronous;
+ }
+} {123 123 2 2}
+do_test pragma-1.15 {
+ db close
+ set ::DB [sqlite db test.db]
+ execsql {
+ PRAGMA cache_size;
+ PRAGMA default_cache_size;
+ PRAGMA synchronous;
+ PRAGMA default_synchronous;
+ }
+} {123 123 2 2}
+
+do_test pragma-2.1 {
+ execsql {
+ PRAGMA show_datatypes=on;
+ PRAGMA empty_result_callbacks=off;
+ }
+ sqlite_datatypes $::DB {SELECT * FROM sqlite_master}
+} {}
+do_test pragma-2.2 {
+ execsql {
+ PRAGMA empty_result_callbacks=on;
+ }
+ sqlite_datatypes $::DB {SELECT * FROM sqlite_master}
+} {text text text integer text}
+
+# Make sure we can read the schema when empty_result_callbacks are
+# turned on. Ticket #406
+do_test pragma-2.2.1 {
+ execsql {
+ BEGIN;
+ CREATE TABLE tabx(a,b,c,d);
+ ROLLBACK;
+ SELECT count(*) FROM sqlite_master;
+ }
+} {0}
+
+do_test pragma-2.3 {
+ execsql {
+ CREATE TABLE t1(
+ a INTEGER,
+ b TEXT,
+ c WHATEVER,
+ d CLOB,
+ e BLOB,
+ f VARCHAR(123),
+ g nVaRcHaR(432)
+ );
+ }
+ sqlite_datatypes $::DB {SELECT * FROM t1}
+} {INTEGER TEXT WHATEVER CLOB BLOB VARCHAR(123) nVaRcHaR(432)}
+do_test pragma-2.4 {
+ sqlite_datatypes $::DB {
+ SELECT 1, 'hello', NULL
+ }
+} {NUMERIC TEXT TEXT}
+do_test pragma-2.5 {
+ sqlite_datatypes $::DB {
+ SELECT 1+2 AS X, 'hello' || 5 AS Y, NULL AS Z
+ }
+} {NUMERIC TEXT TEXT}
+do_test pragma-2.6 {
+ execsql {
+ CREATE VIEW v1 AS SELECT a+b, b||c, * FROM t1;
+ }
+ sqlite_datatypes $::DB {SELECT * FROM v1}
+} {NUMERIC TEXT INTEGER TEXT WHATEVER CLOB BLOB VARCHAR(123) nVaRcHaR(432)}
+do_test pragma-2.7 {
+ sqlite_datatypes $::DB {
+ SELECT d,e FROM t1 UNION SELECT a,c FROM t1
+ }
+} {INTEGER WHATEVER}
+do_test pragma-2.8 {
+ sqlite_datatypes $::DB {
+ SELECT d,e FROM t1 EXCEPT SELECT c,e FROM t1
+ }
+} {WHATEVER BLOB}
+do_test pragma-2.9 {
+ sqlite_datatypes $::DB {
+ SELECT d,e FROM t1 INTERSECT SELECT c,e FROM t1
+ }
+} {WHATEVER BLOB}
+do_test pragma-2.10 {
+ sqlite_datatypes $::DB {
+ SELECT d,e FROM t1 INTERSECT SELECT c,e FROM v1
+ }
+} {WHATEVER BLOB}
+
+# Construct a corrupted index and make sure the integrity_check
+# pragma finds it.
+#
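+# (The btree_* test commands below open the database file directly, delete
+# a single entry from index i2's b-tree without touching table t2, and
+# commit; integrity_check should then report both the missing rowid and
+# the wrong entry count.)
+#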
+if {![sqlite -has-codec]} {
+do_test pragma-3.1 {
+ execsql {
+ BEGIN;
+ CREATE TABLE t2(a,b,c);
+ CREATE INDEX i2 ON t2(a);
+ INSERT INTO t2 VALUES(11,2,3);
+ INSERT INTO t2 VALUES(22,3,4);
+ COMMIT;
+ SELECT rowid, * from t2;
+ }
+} {1 11 2 3 2 22 3 4}
+do_test pragma-3.2 {
+ set rootpage [execsql {SELECT rootpage FROM sqlite_master WHERE name='i2'}]
+ set db [btree_open test.db]
+ btree_begin_transaction $db
+ set c [btree_cursor $db $rootpage 1]
+ btree_first $c
+ btree_delete $c
+ btree_commit $db
+ btree_close $db
+ execsql {PRAGMA integrity_check}
+} {{rowid 1 missing from index i2} {wrong # of entries in index i2}}
+}; # endif has-codec
+
+# Test the temp_store and default_temp_store pragmas
+#
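+# (As exercised below: temp_store applies only to the current connection,
+# and whenever a new connection is opened it starts out at the value of
+# default_temp_store, which persists in the database.)
+#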
+do_test pragma-4.2 {
+ execsql {
+ PRAGMA temp_store='default';
+ PRAGMA temp_store;
+ }
+} {0}
+do_test pragma-4.3 {
+ execsql {
+ PRAGMA temp_store='file';
+ PRAGMA temp_store;
+ }
+} {1}
+do_test pragma-4.4 {
+ execsql {
+ PRAGMA temp_store='memory';
+ PRAGMA temp_store;
+ }
+} {2}
+do_test pragma-4.5 {
+ execsql {
+ PRAGMA default_temp_store='default';
+ PRAGMA default_temp_store;
+ }
+} {0}
+do_test pragma-4.6 {
+ execsql {
+ PRAGMA temp_store;
+ }
+} {2}
+do_test pragma-4.7 {
+ db close
+ sqlite db test.db
+ execsql {
+ PRAGMA temp_store;
+ }
+} {0}
+do_test pragma-4.8 {
+ execsql {
+ PRAGMA default_temp_store;
+ }
+} {0}
+do_test pragma-4.9 {
+ execsql {
+ PRAGMA default_temp_store='file';
+ PRAGMA default_temp_store;
+ }
+} {1}
+do_test pragma-4.10 {
+ execsql {
+ PRAGMA temp_store;
+ }
+} {0}
+do_test pragma-4.11 {
+ db close
+ sqlite db test.db
+ execsql {
+ PRAGMA temp_store;
+ }
+} {1}
+do_test pragma-4.12 {
+ execsql {
+ PRAGMA default_temp_store;
+ }
+} {1}
+do_test pragma-4.13 {
+ execsql {
+ PRAGMA default_temp_store='memory';
+ PRAGMA default_temp_store;
+ }
+} {2}
+do_test pragma-4.14 {
+ execsql {
+ PRAGMA temp_store;
+ }
+} {1}
+do_test pragma-4.15 {
+ db close
+ sqlite db test.db
+ execsql {
+ PRAGMA temp_store;
+ }
+} {2}
+do_test pragma-4.16 {
+ execsql {
+ PRAGMA default_temp_store;
+ }
+} {2}
+do_test pragma-4.17 {
+ execsql {
+ PRAGMA temp_store='file';
+ PRAGMA temp_store
+ }
+} {1}
+do_test pragma-4.18 {
+ execsql {
+ PRAGMA default_temp_store
+ }
+} {2}
+do_test pragma-4.19 {
+ db close
+ sqlite db test.db
+ execsql {
+ PRAGMA temp_store
+ }
+} {2}
+
+# Changing the TEMP_STORE deletes any existing temporary tables
+#
+do_test pragma-4.20 {
+ execsql {SELECT name FROM sqlite_temp_master}
+} {}
+do_test pragma-4.21 {
+ execsql {
+ CREATE TEMP TABLE test1(a,b,c);
+ SELECT name FROM sqlite_temp_master;
+ }
+} {test1}
+do_test pragma-4.22 {
+ execsql {
+ PRAGMA temp_store='file';
+ SELECT name FROM sqlite_temp_master;
+ }
+} {}
+do_test pragma-4.23 {
+ execsql {
+ CREATE TEMP TABLE test1(a,b,c);
+ SELECT name FROM sqlite_temp_master;
+ }
+} {test1}
+do_test pragma-4.24 {
+ execsql {
+ PRAGMA temp_store='memory';
+ SELECT name FROM sqlite_temp_master;
+ }
+} {}
+do_test pragma-4.25 {
+ catchsql {
+ BEGIN;
+ PRAGMA temp_store='default';
+ COMMIT;
+ }
+} {1 {temporary storage cannot be changed from within a transaction}}
+catchsql {COMMIT}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/printf.test b/usr/src/cmd/svc/configd/sqlite/test/printf.test
new file mode 100644
index 0000000000..a0adc72435
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/printf.test
@@ -0,0 +1,129 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing the sqlite_*_printf() interface.
+#
+# $Id: printf.test,v 1.8 2004/02/21 19:41:05 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
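+# Each test below formats the same arguments twice, once with SQLite's
+# sqlite_mprintf_int test command and once with Tcl's own [format], and
+# checks that the two strings agree.
+#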
+set n 1
+foreach v {1 2 5 10 99 100 1000000 999999999 0 -1 -2 -5 -10 -99 -100 -9999999} {
+ do_test printf-1.$n.1 [subst {
+ sqlite_mprintf_int {Three integers: %d %x %o} $v $v $v
+ }] [format {Three integers: %d %x %o} $v $v $v]
+ do_test printf-1.$n.2 [subst {
+ sqlite_mprintf_int {Three integers: (%6d) (%6x) (%6o)} $v $v $v
+ }] [format {Three integers: (%6d) (%6x) (%6o)} $v $v $v]
+ do_test printf-1.$n.3 [subst {
+ sqlite_mprintf_int {Three integers: (%-6d) (%-6x) (%-6o)} $v $v $v
+ }] [format {Three integers: (%-6d) (%-6x) (%-6o)} $v $v $v]
+ do_test printf-1.$n.4 [subst {
+ sqlite_mprintf_int {Three integers: (%+6d) (%+6x) (%+6o)} $v $v $v
+ }] [format {Three integers: (%+6d) (%+6x) (%+6o)} $v $v $v]
+ do_test printf-1.$n.5 [subst {
+ sqlite_mprintf_int {Three integers: (%06d) (%06x) (%06o)} $v $v $v
+ }] [format {Three integers: (%06d) (%06x) (%06o)} $v $v $v]
+ do_test printf-1.$n.6 [subst {
+ sqlite_mprintf_int {Three integers: (% 6d) (% 6x) (% 6o)} $v $v $v
+ }] [format {Three integers: (% 6d) (% 6x) (% 6o)} $v $v $v]
+ incr n
+}
+
+
+if {$::tcl_platform(platform)!="windows"} {
+
+set m 1
+foreach {a b} {1 1 5 5 10 10 10 5} {
+ set n 1
+ foreach x {0.001 1.0e-20 1.0 0.0 100.0 9.99999 -0.00543 -1.0 -99.99999} {
+ do_test printf-2.$m.$n.1 [subst {
+ sqlite_mprintf_double {A double: %*.*f} $a $b $x
+ }] [format {A double: %*.*f} $a $b $x]
+ do_test printf-2.$m.$n.2 [subst {
+ sqlite_mprintf_double {A double: %*.*e} $a $b $x
+ }] [format {A double: %*.*e} $a $b $x]
+ do_test printf-2.$m.$n.3 [subst {
+ sqlite_mprintf_double {A double: %*.*g} $a $b $x
+ }] [format {A double: %*.*g} $a $b $x]
+ do_test printf-2.$m.$n.4 [subst {
+ sqlite_mprintf_double {A double: %d %d %g} $a $b $x
+ }] [format {A double: %d %d %g} $a $b $x]
+ do_test printf-2.$m.$n.5 [subst {
+ sqlite_mprintf_double {A double: %d %d %#g} $a $b $x
+ }] [format {A double: %d %d %#g} $a $b $x]
+ incr n
+ }
+ incr m
+}
+
+}
+
+do_test printf-3.1 {
+ sqlite_mprintf_str {A String: (%*.*s)} 10 10 {This is the string}
+} [format {A String: (%*.*s)} 10 10 {This is the string}]
+do_test printf-3.2 {
+ sqlite_mprintf_str {A String: (%*.*s)} 10 5 {This is the string}
+} [format {A String: (%*.*s)} 10 5 {This is the string}]
+do_test printf-3.3 {
+ sqlite_mprintf_str {A String: (%*.*s)} -10 5 {This is the string}
+} [format {A String: (%*.*s)} -10 5 {This is the string}]
+do_test printf-3.4 {
+ sqlite_mprintf_str {%d %d A String: (%s)} 1 2 {This is the string}
+} [format {%d %d A String: (%s)} 1 2 {This is the string}]
+do_test printf-3.5 {
+ sqlite_mprintf_str {%d %d A String: (%30s)} 1 2 {This is the string}
+} [format {%d %d A String: (%30s)} 1 2 {This is the string}]
+do_test printf-3.6 {
+ sqlite_mprintf_str {%d %d A String: (%-30s)} 1 2 {This is the string}
+} [format {%d %d A String: (%-30s)} 1 2 {This is the string}]
+
+do_test printf-4.1 {
+ sqlite_mprintf_str {%d %d A quoted string: '%q'} 1 2 {Hi Y'all}
+} {1 2 A quoted string: 'Hi Y''all'}
+do_test printf-4.2 {
+ sqlite_mprintf_str {%d %d A NULL pointer in %%q: '%q'} 1 2
+} {1 2 A NULL pointer in %q: '(NULL)'}
+do_test printf-4.3 {
+ sqlite_mprintf_str {%d %d A quoted string: %Q} 1 2 {Hi Y'all}
+} {1 2 A quoted string: 'Hi Y''all'}
+do_test printf-4.4 {
+ sqlite_mprintf_str {%d %d A NULL pointer in %%Q: %Q} 1 2
+} {1 2 A NULL pointer in %Q: NULL}
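+
+# The tests above show the difference between the two quoting conversions:
+# %q only doubles embedded single quotes (the caller supplies the outer
+# quotes), while %Q also adds the surrounding quotes and renders a NULL
+# argument as the bare SQL keyword NULL.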
+
+do_test printf-5.1 {
+ set x [sqlite_mprintf_str {%d %d %100000s} 0 0 {Hello}]
+ string length $x
+} {994}
+do_test printf-5.2 {
+ sqlite_mprintf_str {%d %d (%-10.10s) %} -9 -10 {HelloHelloHello}
+} {-9 -10 (HelloHello) %}
+
+do_test printf-6.1 {
+ sqlite_mprintf_z_test , one two three four five six
+} {,one,two,three,four,five,six}
+
+
+do_test printf-7.1 {
+ sqlite_mprintf_scaled {A double: %g} 1.0e307 1.0
+} {A double: 1e+307}
+do_test printf-7.2 {
+ sqlite_mprintf_scaled {A double: %g} 1.0e307 10.0
+} {A double: 1e+308}
+do_test printf-7.3 {
+ sqlite_mprintf_scaled {A double: %g} 1.0e307 100.0
+} {A double: NaN}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/progress.test b/usr/src/cmd/svc/configd/sqlite/test/progress.test
new file mode 100644
index 0000000000..4d94239a04
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/progress.test
@@ -0,0 +1,121 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing the 'progress callback'.
+#
+# $Id: progress.test,v 1.1 2003/10/18 09:37:27 danielk1977 Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Build some test data
+#
+execsql {
+ BEGIN;
+ CREATE TABLE t1(a);
+ INSERT INTO t1 VALUES(1);
+ INSERT INTO t1 VALUES(2);
+ INSERT INTO t1 VALUES(3);
+ INSERT INTO t1 VALUES(4);
+ INSERT INTO t1 VALUES(5);
+ INSERT INTO t1 VALUES(6);
+ INSERT INTO t1 VALUES(7);
+ INSERT INTO t1 VALUES(8);
+ INSERT INTO t1 VALUES(9);
+ INSERT INTO t1 VALUES(10);
+ COMMIT;
+}
+
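+# (As used below: "db progress N SCRIPT" arranges for SCRIPT to be run
+# every N virtual-machine opcodes while a query executes; a non-zero
+# return value from SCRIPT abandons the query, and "db progress 0 {}"
+# removes the handler.)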
+
+# Test that the progress callback is invoked.
+do_test progress-1.0 {
+ set counter 0
+ db progress 1 "[namespace code {incr counter}] ; expr 0"
+ execsql {
+ SELECT * FROM t1
+ }
+ expr $counter > 1
+} 1
+
+# Test that the query is abandoned when the progress callback returns
+# non-zero.
+do_test progress-1.1 {
+ set counter 0
+ db progress 1 "[namespace code {incr counter}] ; expr 1"
+ execsql {
+ SELECT * FROM t1
+ }
+ set counter
+} 1
+
+# Test that the query is rolled back when the progress callback returns
+# non-zero.
+do_test progress-1.2 {
+
+ # This figures out how many opcodes it takes to copy 5 extra rows into t1.
+ db progress 1 "[namespace code {incr five_rows}] ; expr 0"
+ set five_rows 0
+ execsql {
+ INSERT INTO t1 SELECT a+10 FROM t1 WHERE a < 6
+ }
+ db progress 0 ""
+ execsql {
+ DELETE FROM t1 WHERE a > 10
+ }
+
+ # Now set up the progress callback to abandon the query after the number of
+ # opcodes to copy 5 rows. That way, when we try to copy 6 rows, we know
+ # some data will have been inserted into the table by the time the progress
+ # callback abandons the query.
+ db progress $five_rows "expr 1"
+ execsql {
+ INSERT INTO t1 SELECT a+10 FROM t1 WHERE a < 7
+ }
+ execsql {
+ SELECT count(*) FROM t1
+ }
+} 10
+
+# Test that an active transaction remains active and is not rolled back
+# after the progress callback abandons a query.
+do_test progress-1.3 {
+
+ db progress 0 ""
+ execsql BEGIN
+ execsql {
+ INSERT INTO t1 VALUES(11)
+ }
+ db progress 1 "expr 1"
+ execsql {
+ INSERT INTO t1 VALUES(12)
+ }
+ db progress 0 ""
+ execsql COMMIT
+ execsql {
+ SELECT count(*) FROM t1
+ }
+} 11
+
+# Check that a value of 0 for N means no progress callback
+do_test progress-1.4 {
+ set counter 0
+ db progress 0 "[namespace code {incr counter}] ; expr 0"
+ execsql {
+ SELECT * FROM t1;
+ }
+ set counter
+} 0
+
+db progress 0 ""
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/quick.test b/usr/src/cmd/svc/configd/sqlite/test/quick.test
new file mode 100644
index 0000000000..5faf95a466
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/quick.test
@@ -0,0 +1,56 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file runs all tests.
+#
+# $Id: quick.test,v 1.6 2004/02/11 02:18:07 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+rename finish_test really_finish_test
+proc finish_test {} {}
+set ISQUICK 1
+
+set EXCLUDE {
+ all.test
+ quick.test
+ btree2.test
+ malloc.test
+ memleak.test
+ misuse.test
+}
+
+if {[sqlite -has-codec]} {
+ lappend EXCLUDE \
+ attach.test \
+ attach2.test \
+ auth.test \
+ format3.test \
+ version.test
+}
+
+foreach testfile [lsort -dictionary [glob $testdir/*.test]] {
+ set tail [file tail $testfile]
+ if {[lsearch -exact $EXCLUDE $tail]>=0} continue
+ source $testfile
+ catch {db close}
+ if {$sqlite_open_file_count>0} {
+ puts "$tail did not close all files: $sqlite_open_file_count"
+ incr nErr
+ lappend ::failList $tail
+ }
+}
+source $testdir/misuse.test
+
+set sqlite_open_file_count 0
+really_finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/quote.test b/usr/src/cmd/svc/configd/sqlite/test/quote.test
new file mode 100644
index 0000000000..5fb9e85736
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/quote.test
@@ -0,0 +1,91 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is the ability to specify table and column names
+# as quoted strings.
+#
+# $Id: quote.test,v 1.3 2002/05/21 13:43:04 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Create a table with a strange name and with strange column names.
+#
+do_test quote-1.0 {
+ set r [catch {
+ execsql {CREATE TABLE '@abc' ( '#xyz' int, '!pqr' text );}
+ } msg]
+ lappend r $msg
+} {0 {}}
+
+# Insert, update and query the table.
+#
+do_test quote-1.1 {
+ set r [catch {
+ execsql {INSERT INTO '@abc' VALUES(5,'hello')}
+ } msg]
+ lappend r $msg
+} {0 {}}
+do_test quote-1.2 {
+ set r [catch {
+ execsql {SELECT * FROM '@abc'}
+ } msg ]
+ lappend r $msg
+} {0 {5 hello}}
+do_test quote-1.3 {
+ set r [catch {
+ execsql {SELECT '@abc'.'!pqr', '@abc'.'#xyz'+5 FROM '@abc'}
+ } msg ]
+ lappend r $msg
+} {0 {hello 10}}
+do_test quote-1.3.1 {
+ catchsql {
+ SELECT '!pqr', '#xyz'+5 FROM '@abc'
+ }
+} {0 {!pqr 5}}
+do_test quote-1.3.2 {
+ catchsql {
+ SELECT "!pqr", "#xyz"+5 FROM '@abc'
+ }
+} {0 {hello 10}}
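+
+# The two tests above show why the quoting style matters here: with single
+# quotes, '!pqr' and '#xyz' are string literals (so '#xyz'+5 is 0+5), while
+# the double-quoted "!pqr" and "#xyz" resolve to the column names and give
+# the stored values.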
+do_test quote-1.3 {
+ set r [catch {
+ execsql {SELECT '@abc'.'!pqr', '@abc'.'#xyz'+5 FROM '@abc'}
+ } msg ]
+ lappend r $msg
+} {0 {hello 10}}
+do_test quote-1.4 {
+ set r [catch {
+ execsql {UPDATE '@abc' SET '#xyz'=11}
+ } msg ]
+ lappend r $msg
+} {0 {}}
+do_test quote-1.5 {
+ set r [catch {
+ execsql {SELECT '@abc'.'!pqr', '@abc'.'#xyz'+5 FROM '@abc'}
+ } msg ]
+ lappend r $msg
+} {0 {hello 16}}
+
+# Drop the table with the strange name.
+#
+do_test quote-1.6 {
+ set r [catch {
+ execsql {DROP TABLE '@abc'}
+ } msg ]
+ lappend r $msg
+} {0 {}}
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/rowid.test b/usr/src/cmd/svc/configd/sqlite/test/rowid.test
new file mode 100644
index 0000000000..7cb5dc57c8
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/rowid.test
@@ -0,0 +1,636 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing the magic ROWID column that is
+# found on all tables.
+#
+# $Id: rowid.test,v 1.13 2004/01/14 21:59:24 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Basic ROWID functionality tests.
+#
+do_test rowid-1.1 {
+ execsql {
+ CREATE TABLE t1(x int, y int);
+ INSERT INTO t1 VALUES(1,2);
+ INSERT INTO t1 VALUES(3,4);
+ SELECT x FROM t1 ORDER BY y;
+ }
+} {1 3}
+do_test rowid-1.2 {
+ set r [execsql {SELECT rowid FROM t1 ORDER BY x}]
+ global x2rowid rowid2x
+ set x2rowid(1) [lindex $r 0]
+ set x2rowid(3) [lindex $r 1]
+ set rowid2x($x2rowid(1)) 1
+ set rowid2x($x2rowid(3)) 3
+ llength $r
+} {2}
+do_test rowid-1.3 {
+ global x2rowid
+ set sql "SELECT x FROM t1 WHERE rowid==$x2rowid(1)"
+ execsql $sql
+} {1}
+do_test rowid-1.4 {
+ global x2rowid
+ set sql "SELECT x FROM t1 WHERE rowid==$x2rowid(3)"
+ execsql $sql
+} {3}
+do_test rowid-1.5 {
+ global x2rowid
+ set sql "SELECT x FROM t1 WHERE oid==$x2rowid(1)"
+ execsql $sql
+} {1}
+do_test rowid-1.6 {
+ global x2rowid
+ set sql "SELECT x FROM t1 WHERE OID==$x2rowid(3)"
+ execsql $sql
+} {3}
+do_test rowid-1.7 {
+ global x2rowid
+ set sql "SELECT x FROM t1 WHERE _rowid_==$x2rowid(1)"
+ execsql $sql
+} {1}
+do_test rowid-1.7.1 {
+ while 1 {
+ set norow [expr {int(rand()*1000000)}]
+ if {$norow!=$x2rowid(1) && $norow!=$x2rowid(3)} break
+ }
+ execsql "SELECT x FROM t1 WHERE rowid=$norow"
+} {}
+do_test rowid-1.8 {
+ global x2rowid
+ set v [execsql {SELECT x, oid FROM t1 order by x}]
+ set v2 [list 1 $x2rowid(1) 3 $x2rowid(3)]
+ expr {$v==$v2}
+} {1}
+do_test rowid-1.9 {
+ global x2rowid
+ set v [execsql {SELECT x, RowID FROM t1 order by x}]
+ set v2 [list 1 $x2rowid(1) 3 $x2rowid(3)]
+ expr {$v==$v2}
+} {1}
+do_test rowid-1.9 {
+ global x2rowid
+ set v [execsql {SELECT x, _rowid_ FROM t1 order by x}]
+ set v2 [list 1 $x2rowid(1) 3 $x2rowid(3)]
+ expr {$v==$v2}
+} {1}
+
+# We can insert or update the ROWID column.
+#
+do_test rowid-2.1 {
+ catchsql {
+ INSERT INTO t1(rowid,x,y) VALUES(1234,5,6);
+ SELECT rowid, * FROM t1;
+ }
+} {0 {1 1 2 2 3 4 1234 5 6}}
+do_test rowid-2.2 {
+ catchsql {
+ UPDATE t1 SET rowid=12345 WHERE x==1;
+ SELECT rowid, * FROM t1
+ }
+} {0 {2 3 4 1234 5 6 12345 1 2}}
+do_test rowid-2.3 {
+ catchsql {
+ INSERT INTO t1(y,x,oid) VALUES(8,7,1235);
+ SELECT rowid, * FROM t1 WHERE rowid>1000;
+ }
+} {0 {1234 5 6 1235 7 8 12345 1 2}}
+do_test rowid-2.4 {
+ catchsql {
+ UPDATE t1 SET oid=12346 WHERE x==1;
+ SELECT rowid, * FROM t1;
+ }
+} {0 {2 3 4 1234 5 6 1235 7 8 12346 1 2}}
+do_test rowid-2.5 {
+ catchsql {
+ INSERT INTO t1(x,_rowid_,y) VALUES(9,1236,10);
+ SELECT rowid, * FROM t1 WHERE rowid>1000;
+ }
+} {0 {1234 5 6 1235 7 8 1236 9 10 12346 1 2}}
+do_test rowid-2.6 {
+ catchsql {
+ UPDATE t1 SET _rowid_=12347 WHERE x==1;
+ SELECT rowid, * FROM t1 WHERE rowid>1000;
+ }
+} {0 {1234 5 6 1235 7 8 1236 9 10 12347 1 2}}
+
+# But we can use ROWID in the WHERE clause of an UPDATE that does not
+# change the ROWID.
+#
+do_test rowid-2.7 {
+ global x2rowid
+ set sql "UPDATE t1 SET x=2 WHERE OID==$x2rowid(3)"
+ execsql $sql
+ execsql {SELECT x FROM t1 ORDER BY x}
+} {1 2 5 7 9}
+do_test rowid-2.8 {
+ global x2rowid
+ set sql "UPDATE t1 SET x=3 WHERE _rowid_==$x2rowid(3)"
+ execsql $sql
+ execsql {SELECT x FROM t1 ORDER BY x}
+} {1 3 5 7 9}
+
+# We cannot index by ROWID
+#
+do_test rowid-2.9 {
+ set v [catch {execsql {CREATE INDEX idxt1 ON t1(rowid)}} msg]
+ lappend v $msg
+} {1 {table t1 has no column named rowid}}
+do_test rowid-2.10 {
+ set v [catch {execsql {CREATE INDEX idxt1 ON t1(_rowid_)}} msg]
+ lappend v $msg
+} {1 {table t1 has no column named _rowid_}}
+do_test rowid-2.11 {
+ set v [catch {execsql {CREATE INDEX idxt1 ON t1(oid)}} msg]
+ lappend v $msg
+} {1 {table t1 has no column named oid}}
+do_test rowid-2.12 {
+ set v [catch {execsql {CREATE INDEX idxt1 ON t1(x, rowid)}} msg]
+ lappend v $msg
+} {1 {table t1 has no column named rowid}}
+
+# Columns defined in the CREATE statement override the built-in ROWID
+# column names.
+#
+do_test rowid-3.1 {
+ execsql {
+ CREATE TABLE t2(rowid int, x int, y int);
+ INSERT INTO t2 VALUES(0,2,3);
+ INSERT INTO t2 VALUES(4,5,6);
+ INSERT INTO t2 VALUES(7,8,9);
+ SELECT * FROM t2 ORDER BY x;
+ }
+} {0 2 3 4 5 6 7 8 9}
+do_test rowid-3.2 {
+ execsql {SELECT * FROM t2 ORDER BY rowid}
+} {0 2 3 4 5 6 7 8 9}
+do_test rowid-3.3 {
+ execsql {SELECT rowid, x, y FROM t2 ORDER BY rowid}
+} {0 2 3 4 5 6 7 8 9}
+do_test rowid-3.4 {
+ set r1 [execsql {SELECT _rowid_, rowid FROM t2 ORDER BY rowid}]
+ foreach {a b c d e f} $r1 {}
+ set r2 [execsql {SELECT _rowid_, rowid FROM t2 ORDER BY x DESC}]
+ foreach {u v w x y z} $r2 {}
+ expr {$u==$e && $w==$c && $y==$a}
+} {1}
+do_probtest rowid-3.5 {
+ set r1 [execsql {SELECT _rowid_, rowid FROM t2 ORDER BY rowid}]
+ foreach {a b c d e f} $r1 {}
+ expr {$a!=$b && $c!=$d && $e!=$f}
+} {1}
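+
+# In t2, "rowid" is an ordinary user column holding 0, 4 and 7, while
+# _rowid_ still reaches the real row id; rowid-3.4 and rowid-3.5 check
+# that the two stay independent of each other.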
+
+# Let's try some more complex examples, including some joins.
+#
+do_test rowid-4.1 {
+ execsql {
+ DELETE FROM t1;
+ DELETE FROM t2;
+ }
+ for {set i 1} {$i<=50} {incr i} {
+ execsql "INSERT INTO t1(x,y) VALUES($i,[expr {$i*$i}])"
+ }
+ execsql {INSERT INTO t2 SELECT _rowid_, x*y, y*y FROM t1}
+ execsql {SELECT t2.y FROM t1, t2 WHERE t1.x==4 AND t1.rowid==t2.rowid}
+} {256}
+do_test rowid-4.2 {
+ execsql {SELECT t2.y FROM t2, t1 WHERE t1.x==4 AND t1.rowid==t2.rowid}
+} {256}
+do_test rowid-4.2.1 {
+ execsql {SELECT t2.y FROM t2, t1 WHERE t1.x==4 AND t1.oid==t2.rowid}
+} {256}
+do_test rowid-4.2.2 {
+ execsql {SELECT t2.y FROM t2, t1 WHERE t1.x==4 AND t1._rowid_==t2.rowid}
+} {256}
+do_test rowid-4.2.3 {
+ execsql {SELECT t2.y FROM t2, t1 WHERE t1.x==4 AND t2.rowid==t1.rowid}
+} {256}
+do_test rowid-4.2.4 {
+ execsql {SELECT t2.y FROM t2, t1 WHERE t2.rowid==t1.oid AND t1.x==4}
+} {256}
+do_test rowid-4.2.5 {
+ execsql {SELECT t2.y FROM t1, t2 WHERE t1.x==4 AND t1._rowid_==t2.rowid}
+} {256}
+do_test rowid-4.2.6 {
+ execsql {SELECT t2.y FROM t1, t2 WHERE t1.x==4 AND t2.rowid==t1.rowid}
+} {256}
+do_test rowid-4.2.7 {
+ execsql {SELECT t2.y FROM t1, t2 WHERE t2.rowid==t1.oid AND t1.x==4}
+} {256}
+do_test rowid-4.3 {
+ execsql {CREATE INDEX idxt1 ON t1(x)}
+ execsql {SELECT t2.y FROM t1, t2 WHERE t1.x==4 AND t1.rowid==t2.rowid}
+} {256}
+do_test rowid-4.3.1 {
+ execsql {SELECT t2.y FROM t1, t2 WHERE t1.x==4 AND t1._rowid_==t2.rowid}
+} {256}
+do_test rowid-4.3.2 {
+ execsql {SELECT t2.y FROM t1, t2 WHERE t2.rowid==t1.oid AND 4==t1.x}
+} {256}
+do_test rowid-4.4 {
+ execsql {SELECT t2.y FROM t2, t1 WHERE t1.x==4 AND t1.rowid==t2.rowid}
+} {256}
+do_test rowid-4.4.1 {
+ execsql {SELECT t2.y FROM t2, t1 WHERE t1.x==4 AND t1._rowid_==t2.rowid}
+} {256}
+do_test rowid-4.4.2 {
+ execsql {SELECT t2.y FROM t2, t1 WHERE t2.rowid==t1.oid AND 4==t1.x}
+} {256}
+do_test rowid-4.5 {
+ execsql {CREATE INDEX idxt2 ON t2(y)}
+ set sqlite_search_count 0
+ concat [execsql {
+ SELECT t1.x FROM t2, t1
+ WHERE t2.y==256 AND t1.rowid==t2.rowid
+ }] $sqlite_search_count
+} {4 3}
+do_test rowid-4.5.1 {
+ set sqlite_search_count 0
+ concat [execsql {
+ SELECT t1.x FROM t2, t1
+ WHERE t1.OID==t2.rowid AND t2.y==81
+ }] $sqlite_search_count
+} {3 3}
+do_test rowid-4.6 {
+ execsql {
+ SELECT t1.x FROM t1, t2
+ WHERE t2.y==256 AND t1.rowid==t2.rowid
+ }
+} {4}
+
+do_test rowid-5.1 {
+ execsql {DELETE FROM t1 WHERE _rowid_ IN (SELECT oid FROM t1 WHERE x>8)}
+ execsql {SELECT max(x) FROM t1}
+} {8}
+
+# Make sure a "WHERE rowid=X" clause works when there is no ROWID of X.
+#
+do_test rowid-6.1 {
+ execsql {
+ SELECT x FROM t1
+ }
+} {1 2 3 4 5 6 7 8}
+do_test rowid-6.2 {
+ for {set ::norow 1} {1} {incr ::norow} {
+ if {[execsql "SELECT x FROM t1 WHERE rowid=$::norow"]==""} break
+ }
+ execsql [subst {
+ DELETE FROM t1 WHERE rowid=$::norow
+ }]
+} {}
+do_test rowid-6.3 {
+ execsql {
+ SELECT x FROM t1
+ }
+} {1 2 3 4 5 6 7 8}
+
+# Beginning with version 2.3.4, SQLite computes rowids of new rows by
+# finding the maximum current rowid and adding one. It falls back to
+# the old random algorithm if the maximum rowid is the largest integer.
+# The following tests are for this new behavior.
+#
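+# (In the rowid-7.* tests, inserting a row with a = 2147483647, the largest
+# rowid in this version, forces the next automatic rowid onto the random
+# fallback path; that is why rowid-7.6 locates the new row by excluding all
+# of the known keys instead of predicting its rowid.)
+#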
+do_test rowid-7.0 {
+ execsql {
+ DELETE FROM t1;
+ DROP TABLE t2;
+ DROP INDEX idxt1;
+ INSERT INTO t1 VALUES(1,2);
+ SELECT rowid, * FROM t1;
+ }
+} {1 1 2}
+do_test rowid-7.1 {
+ execsql {
+ INSERT INTO t1 VALUES(99,100);
+ SELECT rowid,* FROM t1
+ }
+} {1 1 2 2 99 100}
+do_test rowid-7.2 {
+ execsql {
+ CREATE TABLE t2(a INTEGER PRIMARY KEY, b);
+ INSERT INTO t2(b) VALUES(55);
+ SELECT * FROM t2;
+ }
+} {1 55}
+do_test rowid-7.3 {
+ execsql {
+ INSERT INTO t2(b) VALUES(66);
+ SELECT * FROM t2;
+ }
+} {1 55 2 66}
+do_test rowid-7.4 {
+ execsql {
+ INSERT INTO t2(a,b) VALUES(1000000,77);
+ INSERT INTO t2(b) VALUES(88);
+ SELECT * FROM t2;
+ }
+} {1 55 2 66 1000000 77 1000001 88}
+do_test rowid-7.5 {
+ execsql {
+ INSERT INTO t2(a,b) VALUES(2147483647,99);
+ INSERT INTO t2(b) VALUES(11);
+ SELECT b FROM t2 ORDER BY b;
+ }
+} {11 55 66 77 88 99}
+do_test rowid-7.6 {
+ execsql {
+ SELECT b FROM t2 WHERE a NOT IN(1,2,1000000,1000001,2147483647);
+ }
+} {11}
+do_test rowid-7.7 {
+ execsql {
+ INSERT INTO t2(b) VALUES(22);
+ INSERT INTO t2(b) VALUES(33);
+ INSERT INTO t2(b) VALUES(44);
+ INSERT INTO t2(b) VALUES(55);
+ SELECT b FROM t2 WHERE a NOT IN(1,2,1000000,1000001,2147483647) ORDER BY b;
+ }
+} {11 22 33 44 55}
+do_test rowid-7.8 {
+ execsql {
+ DELETE FROM t2 WHERE a!=2;
+ INSERT INTO t2(b) VALUES(111);
+ SELECT * FROM t2;
+ }
+} {2 66 3 111}
+
+# Make sure AFTER triggers that do INSERTs do not change the last_insert_rowid.
+# Ticket #290
+#
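+# (The trigger r3 below inserts into t4 as a side effect of each insert on
+# t3; the tests check that last_insert_rowid() still reports the rowid of
+# the application's INSERT on t3, e.g. 123, rather than the rowid of the
+# trigger's insert into t4.)
+#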
+do_test rowid-8.1 {
+ execsql {
+ CREATE TABLE t3(a integer primary key);
+ CREATE TABLE t4(x);
+ INSERT INTO t4 VALUES(1);
+ CREATE TRIGGER r3 AFTER INSERT on t3 FOR EACH ROW BEGIN
+ INSERT INTO t4 VALUES(NEW.a+10);
+ END;
+ SELECT * FROM t3;
+ }
+} {}
+do_test rowid-8.2 {
+ execsql {
+ SELECT rowid, * FROM t4;
+ }
+} {1 1}
+do_test rowid-8.3 {
+ execsql {
+ INSERT INTO t3 VALUES(123);
+ SELECT last_insert_rowid();
+ }
+} {123}
+do_test rowid-8.4 {
+ execsql {
+ SELECT * FROM t3;
+ }
+} {123}
+do_test rowid-8.5 {
+ execsql {
+ SELECT rowid, * FROM t4;
+ }
+} {1 1 2 133}
+do_test rowid-8.6 {
+ execsql {
+ INSERT INTO t3 VALUES(NULL);
+ SELECT last_insert_rowid();
+ }
+} {124}
+do_test rowid-8.7 {
+ execsql {
+ SELECT * FROM t3;
+ }
+} {123 124}
+do_test rowid-8.8 {
+ execsql {
+ SELECT rowid, * FROM t4;
+ }
+} {1 1 2 133 3 134}
+
+# Ticket #377: Comparison between an integer primary key and floating point
+# values.
+#
+do_test rowid-9.1 {
+ execsql {
+ SELECT * FROM t3 WHERE a<123.5
+ }
+} {123}
+do_test rowid-9.2 {
+ execsql {
+ SELECT * FROM t3 WHERE a<124.5
+ }
+} {123 124}
+do_test rowid-9.3 {
+ execsql {
+ SELECT * FROM t3 WHERE a>123.5
+ }
+} {124}
+do_test rowid-9.4 {
+ execsql {
+ SELECT * FROM t3 WHERE a>122.5
+ }
+} {123 124}
+do_test rowid-9.5 {
+ execsql {
+ SELECT * FROM t3 WHERE a==123.5
+ }
+} {}
+do_test rowid-9.6 {
+ execsql {
+ SELECT * FROM t3 WHERE a==123.000
+ }
+} {123}
+do_test rowid-9.7 {
+ execsql {
+ SELECT * FROM t3 WHERE a>100.5 AND a<200.5
+ }
+} {123 124}
+do_test rowid-9.8 {
+ execsql {
+ SELECT * FROM t3 WHERE a>'xyz';
+ }
+} {}
+do_test rowid-9.9 {
+ execsql {
+ SELECT * FROM t3 WHERE a<'xyz';
+ }
+} {123 124}
+do_test rowid-9.10 {
+ execsql {
+ SELECT * FROM t3 WHERE a>=122.9 AND a<=123.1
+ }
+} {123}
+
+# Ticket #567. Comparisons of ROWID or an integer primary key against
+# floating point numbers still do not always work.
+#
+do_test rowid-10.1 {
+ execsql {
+ CREATE TABLE t5(a);
+ INSERT INTO t5 VALUES(1);
+ INSERT INTO t5 VALUES(2);
+ INSERT INTO t5 SELECT a+2 FROM t5;
+ INSERT INTO t5 SELECT a+4 FROM t5;
+ SELECT rowid, * FROM t5;
+ }
+} {1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8}
+do_test rowid-10.2 {
+ execsql {SELECT rowid, a FROM t5 WHERE rowid>=5.5}
+} {6 6 7 7 8 8}
+do_test rowid-10.3 {
+ execsql {SELECT rowid, a FROM t5 WHERE rowid>=5.0}
+} {5 5 6 6 7 7 8 8}
+do_test rowid-10.4 {
+ execsql {SELECT rowid, a FROM t5 WHERE rowid>5.5}
+} {6 6 7 7 8 8}
+do_test rowid-10.3.2 {
+ execsql {SELECT rowid, a FROM t5 WHERE rowid>5.0}
+} {6 6 7 7 8 8}
+do_test rowid-10.5 {
+ execsql {SELECT rowid, a FROM t5 WHERE 5.5<=rowid}
+} {6 6 7 7 8 8}
+do_test rowid-10.6 {
+ execsql {SELECT rowid, a FROM t5 WHERE 5.5<rowid}
+} {6 6 7 7 8 8}
+do_test rowid-10.7 {
+ execsql {SELECT rowid, a FROM t5 WHERE rowid<=5.5}
+} {1 1 2 2 3 3 4 4 5 5}
+do_test rowid-10.8 {
+ execsql {SELECT rowid, a FROM t5 WHERE rowid<5.5}
+} {1 1 2 2 3 3 4 4 5 5}
+do_test rowid-10.9 {
+ execsql {SELECT rowid, a FROM t5 WHERE 5.5>=rowid}
+} {1 1 2 2 3 3 4 4 5 5}
+do_test rowid-10.10 {
+ execsql {SELECT rowid, a FROM t5 WHERE 5.5>rowid}
+} {1 1 2 2 3 3 4 4 5 5}
+do_test rowid-10.11 {
+ execsql {SELECT rowid, a FROM t5 WHERE rowid>=5.5 ORDER BY rowid DESC}
+} {8 8 7 7 6 6}
+do_test rowid-10.11.2 {
+ execsql {SELECT rowid, a FROM t5 WHERE rowid>=5.0 ORDER BY rowid DESC}
+} {8 8 7 7 6 6 5 5}
+do_test rowid-10.12 {
+ execsql {SELECT rowid, a FROM t5 WHERE rowid>5.5 ORDER BY rowid DESC}
+} {8 8 7 7 6 6}
+do_test rowid-10.12.2 {
+ execsql {SELECT rowid, a FROM t5 WHERE rowid>5.0 ORDER BY rowid DESC}
+} {8 8 7 7 6 6}
+do_test rowid-10.13 {
+ execsql {SELECT rowid, a FROM t5 WHERE 5.5<=rowid ORDER BY rowid DESC}
+} {8 8 7 7 6 6}
+do_test rowid-10.14 {
+ execsql {SELECT rowid, a FROM t5 WHERE 5.5<rowid ORDER BY rowid DESC}
+} {8 8 7 7 6 6}
+do_test rowid-10.15 {
+ execsql {SELECT rowid, a FROM t5 WHERE rowid<=5.5 ORDER BY rowid DESC}
+} {5 5 4 4 3 3 2 2 1 1}
+do_test rowid-10.16 {
+ execsql {SELECT rowid, a FROM t5 WHERE rowid<5.5 ORDER BY rowid DESC}
+} {5 5 4 4 3 3 2 2 1 1}
+do_test rowid-10.17 {
+ execsql {SELECT rowid, a FROM t5 WHERE 5.5>=rowid ORDER BY rowid DESC}
+} {5 5 4 4 3 3 2 2 1 1}
+do_test rowid-10.18 {
+ execsql {SELECT rowid, a FROM t5 WHERE 5.5>rowid ORDER BY rowid DESC}
+} {5 5 4 4 3 3 2 2 1 1}
+
+do_test rowid-10.30 {
+ execsql {
+ CREATE TABLE t6(a);
+ INSERT INTO t6(rowid,a) SELECT -a,a FROM t5;
+ SELECT rowid, * FROM t6;
+ }
+} {-8 8 -7 7 -6 6 -5 5 -4 4 -3 3 -2 2 -1 1}
+do_test rowid-10.31.1 {
+ execsql {SELECT rowid, a FROM t6 WHERE rowid>=-5.5}
+} {-5 5 -4 4 -3 3 -2 2 -1 1}
+do_test rowid-10.31.2 {
+ execsql {SELECT rowid, a FROM t6 WHERE rowid>=-5.0}
+} {-5 5 -4 4 -3 3 -2 2 -1 1}
+do_test rowid-10.32.1 {
+ execsql {SELECT rowid, a FROM t6 WHERE rowid>=-5.5 ORDER BY rowid DESC}
+} {-1 1 -2 2 -3 3 -4 4 -5 5}
+do_test rowid-10.32.2 {
+ execsql {SELECT rowid, a FROM t6 WHERE rowid>=-5.0 ORDER BY rowid DESC}
+} {-1 1 -2 2 -3 3 -4 4 -5 5}
+do_test rowid-10.33 {
+ execsql {SELECT rowid, a FROM t6 WHERE -5.5<=rowid}
+} {-5 5 -4 4 -3 3 -2 2 -1 1}
+do_test rowid-10.34 {
+ execsql {SELECT rowid, a FROM t6 WHERE -5.5<=rowid ORDER BY rowid DESC}
+} {-1 1 -2 2 -3 3 -4 4 -5 5}
+do_test rowid-10.35.1 {
+ execsql {SELECT rowid, a FROM t6 WHERE rowid>-5.5}
+} {-5 5 -4 4 -3 3 -2 2 -1 1}
+do_test rowid-10.35.2 {
+ execsql {SELECT rowid, a FROM t6 WHERE rowid>-5.0}
+} {-4 4 -3 3 -2 2 -1 1}
+do_test rowid-10.36.1 {
+ execsql {SELECT rowid, a FROM t6 WHERE rowid>-5.5 ORDER BY rowid DESC}
+} {-1 1 -2 2 -3 3 -4 4 -5 5}
+do_test rowid-10.36.2 {
+ execsql {SELECT rowid, a FROM t6 WHERE rowid>-5.0 ORDER BY rowid DESC}
+} {-1 1 -2 2 -3 3 -4 4}
+do_test rowid-10.37 {
+ execsql {SELECT rowid, a FROM t6 WHERE -5.5<rowid}
+} {-5 5 -4 4 -3 3 -2 2 -1 1}
+do_test rowid-10.38 {
+ execsql {SELECT rowid, a FROM t6 WHERE -5.5<rowid ORDER BY rowid DESC}
+} {-1 1 -2 2 -3 3 -4 4 -5 5}
+do_test rowid-10.39 {
+ execsql {SELECT rowid, a FROM t6 WHERE rowid<=-5.5}
+} {-8 8 -7 7 -6 6}
+do_test rowid-10.40 {
+ execsql {SELECT rowid, a FROM t6 WHERE rowid<=-5.5 ORDER BY rowid DESC}
+} {-6 6 -7 7 -8 8}
+do_test rowid-10.41 {
+ execsql {SELECT rowid, a FROM t6 WHERE -5.5>=rowid}
+} {-8 8 -7 7 -6 6}
+do_test rowid-10.42 {
+ execsql {SELECT rowid, a FROM t6 WHERE -5.5>=rowid ORDER BY rowid DESC}
+} {-6 6 -7 7 -8 8}
+do_test rowid-10.43 {
+ execsql {SELECT rowid, a FROM t6 WHERE rowid<-5.5}
+} {-8 8 -7 7 -6 6}
+do_test rowid-10.44 {
+ execsql {SELECT rowid, a FROM t6 WHERE rowid<-5.5 ORDER BY rowid DESC}
+} {-6 6 -7 7 -8 8}
+do_test rowid-10.45 {
+ execsql {SELECT rowid, a FROM t6 WHERE -5.5>rowid}
+} {-8 8 -7 7 -6 6}
+do_test rowid-10.46 {
+ execsql {SELECT rowid, a FROM t6 WHERE -5.5>rowid ORDER BY rowid DESC}
+} {-6 6 -7 7 -8 8}
+
+# Comparison of rowid against string values.
+#
+do_test rowid-11.1 {
+ execsql {SELECT rowid, a FROM t5 WHERE rowid>'abc'}
+} {}
+do_test rowid-11.2 {
+ execsql {SELECT rowid, a FROM t5 WHERE rowid>='abc'}
+} {}
+do_test rowid-11.3 {
+ execsql {SELECT rowid, a FROM t5 WHERE rowid<'abc'}
+} {1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8}
+do_test rowid-11.4 {
+ execsql {SELECT rowid, a FROM t5 WHERE rowid<='abc'}
+} {1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8}
+
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/select1.test b/usr/src/cmd/svc/configd/sqlite/test/select1.test
new file mode 100644
index 0000000000..0d770adec7
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/select1.test
@@ -0,0 +1,744 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing the SELECT statement.
+#
+# $Id: select1.test,v 1.30.2.3 2004/07/20 01:45:49 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Try to select from a non-existent table.
+#
+do_test select1-1.1 {
+ set v [catch {execsql {SELECT * FROM test1}} msg]
+ lappend v $msg
+} {1 {no such table: test1}}
+
+execsql {CREATE TABLE test1(f1 int, f2 int)}
+
+do_test select1-1.2 {
+ set v [catch {execsql {SELECT * FROM test1, test2}} msg]
+ lappend v $msg
+} {1 {no such table: test2}}
+do_test select1-1.3 {
+ set v [catch {execsql {SELECT * FROM test2, test1}} msg]
+ lappend v $msg
+} {1 {no such table: test2}}
+
+execsql {INSERT INTO test1(f1,f2) VALUES(11,22)}
+
+
+# Make sure the columns are extracted correctly.
+#
+do_test select1-1.4 {
+ execsql {SELECT f1 FROM test1}
+} {11}
+do_test select1-1.5 {
+ execsql {SELECT f2 FROM test1}
+} {22}
+do_test select1-1.6 {
+ execsql {SELECT f2, f1 FROM test1}
+} {22 11}
+do_test select1-1.7 {
+ execsql {SELECT f1, f2 FROM test1}
+} {11 22}
+do_test select1-1.8 {
+ execsql {SELECT * FROM test1}
+} {11 22}
+do_test select1-1.8.1 {
+ execsql {SELECT *, * FROM test1}
+} {11 22 11 22}
+do_test select1-1.8.2 {
+ execsql {SELECT *, min(f1,f2), max(f1,f2) FROM test1}
+} {11 22 11 22}
+do_test select1-1.8.3 {
+ execsql {SELECT 'one', *, 'two', * FROM test1}
+} {one 11 22 two 11 22}
+
+execsql {CREATE TABLE test2(r1 real, r2 real)}
+execsql {INSERT INTO test2(r1,r2) VALUES(1.1,2.2)}
+
+do_test select1-1.9 {
+ execsql {SELECT * FROM test1, test2}
+} {11 22 1.1 2.2}
+do_test select1-1.9.1 {
+ execsql {SELECT *, 'hi' FROM test1, test2}
+} {11 22 1.1 2.2 hi}
+do_test select1-1.9.2 {
+ execsql {SELECT 'one', *, 'two', * FROM test1, test2}
+} {one 11 22 1.1 2.2 two 11 22 1.1 2.2}
+do_test select1-1.10 {
+ execsql {SELECT test1.f1, test2.r1 FROM test1, test2}
+} {11 1.1}
+do_test select1-1.11 {
+ execsql {SELECT test1.f1, test2.r1 FROM test2, test1}
+} {11 1.1}
+do_test select1-1.11.1 {
+ execsql {SELECT * FROM test2, test1}
+} {1.1 2.2 11 22}
+do_test select1-1.11.2 {
+ execsql {SELECT * FROM test1 AS a, test1 AS b}
+} {11 22 11 22}
+do_test select1-1.12 {
+ execsql {SELECT max(test1.f1,test2.r1), min(test1.f2,test2.r2)
+ FROM test2, test1}
+} {11 2.2}
+do_test select1-1.13 {
+ execsql {SELECT min(test1.f1,test2.r1), max(test1.f2,test2.r2)
+ FROM test1, test2}
+} {1.1 22}
+
+set long {This is a string that is too big to fit inside a NBFS buffer}
+do_test select1-2.0 {
+ execsql "
+ DROP TABLE test2;
+ DELETE FROM test1;
+ INSERT INTO test1 VALUES(11,22);
+ INSERT INTO test1 VALUES(33,44);
+ CREATE TABLE t3(a,b);
+ INSERT INTO t3 VALUES('abc',NULL);
+ INSERT INTO t3 VALUES(NULL,'xyz');
+ INSERT INTO t3 SELECT * FROM test1;
+ CREATE TABLE t4(a,b);
+ INSERT INTO t4 VALUES(NULL,'$long');
+ SELECT * FROM t3;
+ "
+} {abc {} {} xyz 11 22 33 44}
+
+# Error messages from sqliteExprCheck
+#
+do_test select1-2.1 {
+ set v [catch {execsql {SELECT count(f1,f2) FROM test1}} msg]
+ lappend v $msg
+} {1 {wrong number of arguments to function count()}}
+do_test select1-2.2 {
+ set v [catch {execsql {SELECT count(f1) FROM test1}} msg]
+ lappend v $msg
+} {0 2}
+do_test select1-2.3 {
+ set v [catch {execsql {SELECT Count() FROM test1}} msg]
+ lappend v $msg
+} {0 2}
+do_test select1-2.4 {
+ set v [catch {execsql {SELECT COUNT(*) FROM test1}} msg]
+ lappend v $msg
+} {0 2}
+do_test select1-2.5 {
+ set v [catch {execsql {SELECT COUNT(*)+1 FROM test1}} msg]
+ lappend v $msg
+} {0 3}
+do_test select1-2.5.1 {
+ execsql {SELECT count(*),count(a),count(b) FROM t3}
+} {4 3 3}
+do_test select1-2.5.2 {
+ execsql {SELECT count(*),count(a),count(b) FROM t4}
+} {1 0 1}
+do_test select1-2.5.3 {
+ execsql {SELECT count(*),count(a),count(b) FROM t4 WHERE b=5}
+} {0 0 0}
+do_test select1-2.6 {
+ set v [catch {execsql {SELECT min(*) FROM test1}} msg]
+ lappend v $msg
+} {1 {wrong number of arguments to function min()}}
+do_test select1-2.7 {
+ set v [catch {execsql {SELECT Min(f1) FROM test1}} msg]
+ lappend v $msg
+} {0 11}
+do_test select1-2.8 {
+ set v [catch {execsql {SELECT MIN(f1,f2) FROM test1}} msg]
+ lappend v [lsort $msg]
+} {0 {11 33}}
+do_test select1-2.8.1 {
+ execsql {SELECT coalesce(min(a),'xyzzy') FROM t3}
+} {11}
+do_test select1-2.8.2 {
+ execsql {SELECT min(coalesce(a,'xyzzy')) FROM t3}
+} {11}
+do_test select1-2.8.3 {
+ execsql {SELECT min(b), min(b) FROM t4}
+} [list $long $long]
+do_test select1-2.9 {
+ set v [catch {execsql {SELECT MAX(*) FROM test1}} msg]
+ lappend v $msg
+} {1 {wrong number of arguments to function MAX()}}
+do_test select1-2.10 {
+ set v [catch {execsql {SELECT Max(f1) FROM test1}} msg]
+ lappend v $msg
+} {0 33}
+do_test select1-2.11 {
+ set v [catch {execsql {SELECT max(f1,f2) FROM test1}} msg]
+ lappend v [lsort $msg]
+} {0 {22 44}}
+do_test select1-2.12 {
+ set v [catch {execsql {SELECT MAX(f1,f2)+1 FROM test1}} msg]
+ lappend v [lsort $msg]
+} {0 {23 45}}
+do_test select1-2.13 {
+ set v [catch {execsql {SELECT MAX(f1)+1 FROM test1}} msg]
+ lappend v $msg
+} {0 34}
+do_test select1-2.13.1 {
+ execsql {SELECT coalesce(max(a),'xyzzy') FROM t3}
+} {abc}
+do_test select1-2.13.2 {
+ execsql {SELECT max(coalesce(a,'xyzzy')) FROM t3}
+} {xyzzy}
+do_test select1-2.14 {
+ set v [catch {execsql {SELECT SUM(*) FROM test1}} msg]
+ lappend v $msg
+} {1 {wrong number of arguments to function SUM()}}
+do_test select1-2.15 {
+ set v [catch {execsql {SELECT Sum(f1) FROM test1}} msg]
+ lappend v $msg
+} {0 44}
+do_test select1-2.16 {
+ set v [catch {execsql {SELECT sum(f1,f2) FROM test1}} msg]
+ lappend v $msg
+} {1 {wrong number of arguments to function sum()}}
+do_test select1-2.17 {
+ set v [catch {execsql {SELECT SUM(f1)+1 FROM test1}} msg]
+ lappend v $msg
+} {0 45}
+do_test select1-2.17.1 {
+ execsql {SELECT sum(a) FROM t3}
+} {44}
+do_test select1-2.18 {
+ set v [catch {execsql {SELECT XYZZY(f1) FROM test1}} msg]
+ lappend v $msg
+} {1 {no such function: XYZZY}}
+do_test select1-2.19 {
+ set v [catch {execsql {SELECT SUM(min(f1,f2)) FROM test1}} msg]
+ lappend v $msg
+} {0 44}
+do_test select1-2.20 {
+ set v [catch {execsql {SELECT SUM(min(f1)) FROM test1}} msg]
+ lappend v $msg
+} {1 {misuse of aggregate function min()}}
+
+# WHERE clause expressions
+#
+do_test select1-3.1 {
+ set v [catch {execsql {SELECT f1 FROM test1 WHERE f1<11}} msg]
+ lappend v $msg
+} {0 {}}
+do_test select1-3.2 {
+ set v [catch {execsql {SELECT f1 FROM test1 WHERE f1<=11}} msg]
+ lappend v $msg
+} {0 11}
+do_test select1-3.3 {
+ set v [catch {execsql {SELECT f1 FROM test1 WHERE f1=11}} msg]
+ lappend v $msg
+} {0 11}
+do_test select1-3.4 {
+ set v [catch {execsql {SELECT f1 FROM test1 WHERE f1>=11}} msg]
+ lappend v [lsort $msg]
+} {0 {11 33}}
+do_test select1-3.5 {
+ set v [catch {execsql {SELECT f1 FROM test1 WHERE f1>11}} msg]
+ lappend v [lsort $msg]
+} {0 33}
+do_test select1-3.6 {
+ set v [catch {execsql {SELECT f1 FROM test1 WHERE f1!=11}} msg]
+ lappend v [lsort $msg]
+} {0 33}
+do_test select1-3.7 {
+ set v [catch {execsql {SELECT f1 FROM test1 WHERE min(f1,f2)!=11}} msg]
+ lappend v [lsort $msg]
+} {0 33}
+do_test select1-3.8 {
+ set v [catch {execsql {SELECT f1 FROM test1 WHERE max(f1,f2)!=11}} msg]
+ lappend v [lsort $msg]
+} {0 {11 33}}
+do_test select1-3.9 {
+ set v [catch {execsql {SELECT f1 FROM test1 WHERE count(f1,f2)!=11}} msg]
+ lappend v $msg
+} {1 {wrong number of arguments to function count()}}
+
+# ORDER BY expressions
+#
+do_test select1-4.1 {
+ set v [catch {execsql {SELECT f1 FROM test1 ORDER BY f1}} msg]
+ lappend v $msg
+} {0 {11 33}}
+do_test select1-4.2 {
+ set v [catch {execsql {SELECT f1 FROM test1 ORDER BY -f1}} msg]
+ lappend v $msg
+} {0 {33 11}}
+do_test select1-4.3 {
+ set v [catch {execsql {SELECT f1 FROM test1 ORDER BY min(f1,f2)}} msg]
+ lappend v $msg
+} {0 {11 33}}
+do_test select1-4.4 {
+ set v [catch {execsql {SELECT f1 FROM test1 ORDER BY min(f1)}} msg]
+ lappend v $msg
+} {1 {misuse of aggregate function min()}}
+do_test select1-4.5 {
+ catchsql {
+ SELECT f1 FROM test1 ORDER BY 8.4;
+ }
+} {1 {ORDER BY terms must not be non-integer constants}}
+do_test select1-4.6 {
+ catchsql {
+ SELECT f1 FROM test1 ORDER BY '8.4';
+ }
+} {1 {ORDER BY terms must not be non-integer constants}}
+do_test select1-4.7 {
+ catchsql {
+ SELECT f1 FROM test1 ORDER BY 'xyz';
+ }
+} {1 {ORDER BY terms must not be non-integer constants}}
+do_test select1-4.8 {
+ execsql {
+ CREATE TABLE t5(a,b);
+ INSERT INTO t5 VALUES(1,10);
+ INSERT INTO t5 VALUES(2,9);
+ SELECT * FROM t5 ORDER BY 1;
+ }
+} {1 10 2 9}
+do_test select1-4.9 {
+ execsql {
+ SELECT * FROM t5 ORDER BY 2;
+ }
+} {2 9 1 10}
+do_test select1-4.10 {
+ catchsql {
+ SELECT * FROM t5 ORDER BY 3;
+ }
+} {1 {ORDER BY column number 3 out of range - should be between 1 and 2}}
+do_test select1-4.11 {
+ execsql {
+ INSERT INTO t5 VALUES(3,10);
+ SELECT * FROM t5 ORDER BY 2, 1 DESC;
+ }
+} {2 9 3 10 1 10}
+do_test select1-4.12 {
+ execsql {
+ SELECT * FROM t5 ORDER BY 1 DESC, b;
+ }
+} {3 10 2 9 1 10}
+do_test select1-4.13 {
+ execsql {
+ SELECT * FROM t5 ORDER BY b DESC, 1;
+ }
+} {1 10 3 10 2 9}
+
+
+# ORDER BY ignored on an aggregate query
+#
+do_test select1-5.1 {
+ set v [catch {execsql {SELECT max(f1) FROM test1 ORDER BY f2}} msg]
+ lappend v $msg
+} {0 33}
+
+execsql {CREATE TABLE test2(t1 test, t2 text)}
+execsql {INSERT INTO test2 VALUES('abc','xyz')}
+
+# Check for column naming
+#
+do_test select1-6.1 {
+ set v [catch {execsql2 {SELECT f1 FROM test1 ORDER BY f2}} msg]
+ lappend v $msg
+} {0 {f1 11 f1 33}}
+do_test select1-6.1.1 {
+ execsql {PRAGMA full_column_names=on}
+ set v [catch {execsql2 {SELECT f1 FROM test1 ORDER BY f2}} msg]
+ lappend v $msg
+} {0 {test1.f1 11 test1.f1 33}}
+do_test select1-6.1.2 {
+ set v [catch {execsql2 {SELECT f1 as 'f1' FROM test1 ORDER BY f2}} msg]
+ lappend v $msg
+} {0 {f1 11 f1 33}}
+do_test select1-6.1.3 {
+ set v [catch {execsql2 {SELECT * FROM test1 WHERE f1==11}} msg]
+ lappend v $msg
+} {0 {test1.f1 11 test1.f2 22}}
+do_test select1-6.1.4 {
+ set v [catch {execsql2 {SELECT DISTINCT * FROM test1 WHERE f1==11}} msg]
+ execsql {PRAGMA full_column_names=off}
+ lappend v $msg
+} {0 {test1.f1 11 test1.f2 22}}
+do_test select1-6.1.5 {
+ set v [catch {execsql2 {SELECT * FROM test1 WHERE f1==11}} msg]
+ lappend v $msg
+} {0 {f1 11 f2 22}}
+do_test select1-6.1.6 {
+ set v [catch {execsql2 {SELECT DISTINCT * FROM test1 WHERE f1==11}} msg]
+ lappend v $msg
+} {0 {f1 11 f2 22}}
+do_test select1-6.2 {
+ set v [catch {execsql2 {SELECT f1 as xyzzy FROM test1 ORDER BY f2}} msg]
+ lappend v $msg
+} {0 {xyzzy 11 xyzzy 33}}
+do_test select1-6.3 {
+ set v [catch {execsql2 {SELECT f1 as "xyzzy" FROM test1 ORDER BY f2}} msg]
+ lappend v $msg
+} {0 {xyzzy 11 xyzzy 33}}
+do_test select1-6.3.1 {
+ set v [catch {execsql2 {SELECT f1 as 'xyzzy ' FROM test1 ORDER BY f2}} msg]
+ lappend v $msg
+} {0 {{xyzzy } 11 {xyzzy } 33}}
+do_test select1-6.4 {
+ set v [catch {execsql2 {SELECT f1+F2 as xyzzy FROM test1 ORDER BY f2}} msg]
+ lappend v $msg
+} {0 {xyzzy 33 xyzzy 77}}
+do_test select1-6.4a {
+ set v [catch {execsql2 {SELECT f1+F2 FROM test1 ORDER BY f2}} msg]
+ lappend v $msg
+} {0 {f1+F2 33 f1+F2 77}}
+do_test select1-6.5 {
+ set v [catch {execsql2 {SELECT test1.f1+F2 FROM test1 ORDER BY f2}} msg]
+ lappend v $msg
+} {0 {test1.f1+F2 33 test1.f1+F2 77}}
+do_test select1-6.5.1 {
+ execsql2 {PRAGMA full_column_names=on}
+ set v [catch {execsql2 {SELECT test1.f1+F2 FROM test1 ORDER BY f2}} msg]
+ execsql2 {PRAGMA full_column_names=off}
+ lappend v $msg
+} {0 {test1.f1+F2 33 test1.f1+F2 77}}
+do_test select1-6.6 {
+ set v [catch {execsql2 {SELECT test1.f1+F2, t1 FROM test1, test2
+ ORDER BY f2}} msg]
+ lappend v $msg
+} {0 {test1.f1+F2 33 t1 abc test1.f1+F2 77 t1 abc}}
+do_test select1-6.7 {
+ set v [catch {execsql2 {SELECT A.f1, t1 FROM test1 as A, test2
+ ORDER BY f2}} msg]
+ lappend v $msg
+} {0 {A.f1 11 t1 abc A.f1 33 t1 abc}}
+do_test select1-6.8 {
+ set v [catch {execsql2 {SELECT A.f1, f1 FROM test1 as A, test1 as B
+ ORDER BY f2}} msg]
+ lappend v $msg
+} {1 {ambiguous column name: f1}}
+do_test select1-6.8b {
+ set v [catch {execsql2 {SELECT A.f1, B.f1 FROM test1 as A, test1 as B
+ ORDER BY f2}} msg]
+ lappend v $msg
+} {1 {ambiguous column name: f2}}
+do_test select1-6.8c {
+ set v [catch {execsql2 {SELECT A.f1, f1 FROM test1 as A, test1 as A
+ ORDER BY f2}} msg]
+ lappend v $msg
+} {1 {ambiguous column name: A.f1}}
+do_test select1-6.9 {
+ set v [catch {execsql2 {SELECT A.f1, B.f1 FROM test1 as A, test1 as B
+ ORDER BY A.f1, B.f1}} msg]
+ lappend v $msg
+} {0 {A.f1 11 B.f1 11 A.f1 11 B.f1 33 A.f1 33 B.f1 11 A.f1 33 B.f1 33}}
+do_test select1-6.10 {
+ set v [catch {execsql2 {
+ SELECT f1 FROM test1 UNION SELECT f2 FROM test1
+ ORDER BY f2;
+ }} msg]
+ lappend v $msg
+} {0 {f2 11 f2 22 f2 33 f2 44}}
+do_test select1-6.11 {
+ set v [catch {execsql2 {
+ SELECT f1 FROM test1 UNION SELECT f2+100 FROM test1
+ ORDER BY f2+100;
+ }} msg]
+ lappend v $msg
+} {0 {f2+100 11 f2+100 33 f2+100 122 f2+100 144}}
+
+do_test select1-7.1 {
+ set v [catch {execsql {
+ SELECT f1 FROM test1 WHERE f2=;
+ }} msg]
+ lappend v $msg
+} {1 {near ";": syntax error}}
+do_test select1-7.2 {
+ set v [catch {execsql {
+ SELECT f1 FROM test1 UNION SELECT WHERE;
+ }} msg]
+ lappend v $msg
+} {1 {near "WHERE": syntax error}}
+do_test select1-7.3 {
+ set v [catch {execsql {SELECT f1 FROM test1 as 'hi', test2 as}} msg]
+ lappend v $msg
+} {1 {near "as": syntax error}}
+do_test select1-7.4 {
+ set v [catch {execsql {
+ SELECT f1 FROM test1 ORDER BY;
+ }} msg]
+ lappend v $msg
+} {1 {near ";": syntax error}}
+do_test select1-7.5 {
+ set v [catch {execsql {
+ SELECT f1 FROM test1 ORDER BY f1 desc, f2 where;
+ }} msg]
+ lappend v $msg
+} {1 {near "where": syntax error}}
+do_test select1-7.6 {
+ set v [catch {execsql {
+ SELECT count(f1,f2 FROM test1;
+ }} msg]
+ lappend v $msg
+} {1 {near "FROM": syntax error}}
+do_test select1-7.7 {
+ set v [catch {execsql {
+ SELECT count(f1,f2+) FROM test1;
+ }} msg]
+ lappend v $msg
+} {1 {near ")": syntax error}}
+do_test select1-7.8 {
+ set v [catch {execsql {
+ SELECT f1 FROM test1 ORDER BY f2, f1+;
+ }} msg]
+ lappend v $msg
+} {1 {near ";": syntax error}}
+
+do_test select1-8.1 {
+ execsql {SELECT f1 FROM test1 WHERE 4.3+2.4 OR 1 ORDER BY f1}
+} {11 33}
+do_test select1-8.2 {
+ execsql {
+ SELECT f1 FROM test1 WHERE ('x' || f1) BETWEEN 'x10' AND 'x20'
+ ORDER BY f1
+ }
+} {11}
+do_test select1-8.3 {
+ execsql {
+ SELECT f1 FROM test1 WHERE 5-3==2
+ ORDER BY f1
+ }
+} {11 33}
+do_test select1-8.4 {
+ execsql {
+ SELECT coalesce(f1/(f1-11),'x'),
+ coalesce(min(f1/(f1-11),5),'y'),
+ coalesce(max(f1/(f1-33),6),'z')
+ FROM test1 ORDER BY f1
+ }
+} {x y 6 1.5 1.5 z}
+do_test select1-8.5 {
+ execsql {
+ SELECT min(1,2,3), -max(1,2,3)
+ FROM test1 ORDER BY f1
+ }
+} {1 -3 1 -3}
+
+
+# Check the behavior when the result set is empty
+#
+do_test select1-9.1 {
+ catch {unset r}
+ set r(*) {}
+ db eval {SELECT * FROM test1 WHERE f1<0} r {}
+ set r(*)
+} {}
+do_test select1-9.2 {
+ execsql {PRAGMA empty_result_callbacks=on}
+ set r(*) {}
+ db eval {SELECT * FROM test1 WHERE f1<0} r {}
+ set r(*)
+} {f1 f2}
+do_test select1-9.3 {
+ set r(*) {}
+ db eval {SELECT * FROM test1 WHERE f1<(select count(*) from test2)} r {}
+ set r(*)
+} {f1 f2}
+do_test select1-9.4 {
+ set r(*) {}
+ db eval {SELECT * FROM test1 ORDER BY f1} r {}
+ set r(*)
+} {f1 f2}
+do_test select1-9.5 {
+ set r(*) {}
+ db eval {SELECT * FROM test1 WHERE f1<0 ORDER BY f1} r {}
+ set r(*)
+} {f1 f2}
+unset r
+
+# Check for ORDER BY clauses that refer to an AS name in the column list
+#
+do_test select1-10.1 {
+ execsql {
+ SELECT f1 AS x FROM test1 ORDER BY x
+ }
+} {11 33}
+do_test select1-10.2 {
+ execsql {
+ SELECT f1 AS x FROM test1 ORDER BY -x
+ }
+} {33 11}
+do_test select1-10.3 {
+ execsql {
+ SELECT f1-23 AS x FROM test1 ORDER BY abs(x)
+ }
+} {10 -12}
+do_test select1-10.4 {
+ execsql {
+ SELECT f1-23 AS x FROM test1 ORDER BY -abs(x)
+ }
+} {-12 10}
+do_test select1-10.5 {
+ execsql {
+ SELECT f1-22 AS x, f2-22 as y FROM test1
+ }
+} {-11 0 11 22}
+do_test select1-10.6 {
+ execsql {
+ SELECT f1-22 AS x, f2-22 as y FROM test1 WHERE x>0 AND y<50
+ }
+} {11 22}
+
+# Check the ability to specify "TABLE.*" in the result set of a SELECT
+#
+do_test select1-11.1 {
+ execsql {
+ DELETE FROM t3;
+ DELETE FROM t4;
+ INSERT INTO t3 VALUES(1,2);
+ INSERT INTO t4 VALUES(3,4);
+ SELECT * FROM t3, t4;
+ }
+} {1 2 3 4}
+do_test select1-11.2 {
+ execsql2 {
+ SELECT * FROM t3, t4;
+ }
+} {t3.a 1 t3.b 2 t4.a 3 t4.b 4}
+do_test select1-11.3 {
+ execsql2 {
+ SELECT * FROM t3 AS x, t4 AS y;
+ }
+} {x.a 1 x.b 2 y.a 3 y.b 4}
+do_test select1-11.4.1 {
+ execsql {
+ SELECT t3.*, t4.b FROM t3, t4;
+ }
+} {1 2 4}
+do_test select1-11.4.2 {
+ execsql {
+ SELECT "t3".*, t4.b FROM t3, t4;
+ }
+} {1 2 4}
+do_test select1-11.5 {
+ execsql2 {
+ SELECT t3.*, t4.b FROM t3, t4;
+ }
+} {t3.a 1 t3.b 2 t4.b 4}
+do_test select1-11.6 {
+ execsql2 {
+ SELECT x.*, y.b FROM t3 AS x, t4 AS y;
+ }
+} {x.a 1 x.b 2 y.b 4}
+do_test select1-11.7 {
+ execsql {
+ SELECT t3.b, t4.* FROM t3, t4;
+ }
+} {2 3 4}
+do_test select1-11.8 {
+ execsql2 {
+ SELECT t3.b, t4.* FROM t3, t4;
+ }
+} {t3.b 2 t4.a 3 t4.b 4}
+do_test select1-11.9 {
+ execsql2 {
+ SELECT x.b, y.* FROM t3 AS x, t4 AS y;
+ }
+} {x.b 2 y.a 3 y.b 4}
+do_test select1-11.10 {
+ catchsql {
+ SELECT t5.* FROM t3, t4;
+ }
+} {1 {no such table: t5}}
+do_test select1-11.11 {
+ catchsql {
+ SELECT t3.* FROM t3 AS x, t4;
+ }
+} {1 {no such table: t3}}
+do_test select1-11.12 {
+ execsql2 {
+ SELECT t3.* FROM t3, (SELECT max(a), max(b) FROM t4)
+ }
+} {t3.a 1 t3.b 2}
+do_test select1-11.13 {
+ execsql2 {
+ SELECT t3.* FROM (SELECT max(a), max(b) FROM t4), t3
+ }
+} {t3.a 1 t3.b 2}
+do_test select1-11.14 {
+ execsql2 {
+ SELECT * FROM t3, (SELECT max(a), max(b) FROM t4) AS 'tx'
+ }
+} {t3.a 1 t3.b 2 tx.max(a) 3 tx.max(b) 4}
+do_test select1-11.15 {
+ execsql2 {
+ SELECT y.*, t3.* FROM t3, (SELECT max(a), max(b) FROM t4) AS y
+ }
+} {y.max(a) 3 y.max(b) 4 t3.a 1 t3.b 2}
+do_test select1-11.16 {
+ execsql2 {
+ SELECT y.* FROM t3 as y, t4 as z
+ }
+} {y.a 1 y.b 2}
+
+# Tests of SELECT statements without a FROM clause.
+#
+do_test select1-12.1 {
+ execsql2 {
+ SELECT 1+2+3
+ }
+} {1+2+3 6}
+do_test select1-12.2 {
+ execsql2 {
+ SELECT 1,'hello',2
+ }
+} {1 1 'hello' hello 2 2}
+do_test select1-12.3 {
+ execsql2 {
+ SELECT 1 AS 'a','hello' AS 'b',2 AS 'c'
+ }
+} {a 1 b hello c 2}
+do_test select1-12.4 {
+ execsql {
+ DELETE FROM t3;
+ INSERT INTO t3 VALUES(1,2);
+ SELECT * FROM t3 UNION SELECT 3 AS 'a', 4 ORDER BY a;
+ }
+} {1 2 3 4}
+do_test select1-12.5 {
+ execsql {
+ SELECT 3, 4 UNION SELECT * FROM t3;
+ }
+} {1 2 3 4}
+do_test select1-12.6 {
+ execsql {
+ SELECT * FROM t3 WHERE a=(SELECT 1);
+ }
+} {1 2}
+do_test select1-12.7 {
+ execsql {
+ SELECT * FROM t3 WHERE a=(SELECT 2);
+ }
+} {}
+do_test select1-12.8 {
+ execsql2 {
+ SELECT x FROM (
+ SELECT a,b FROM t3 UNION SELECT a AS 'x', b AS 'y' FROM t4 ORDER BY a,b
+ ) ORDER BY x;
+ }
+} {x 1 x 3}
+do_test select1-12.9 {
+ execsql2 {
+ SELECT z.x FROM (
+ SELECT a,b FROM t3 UNION SELECT a AS 'x', b AS 'y' FROM t4 ORDER BY a,b
+ ) AS 'z' ORDER BY x;
+ }
+} {z.x 1 z.x 3}
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/select2.test b/usr/src/cmd/svc/configd/sqlite/test/select2.test
new file mode 100644
index 0000000000..446f1b3e7b
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/select2.test
@@ -0,0 +1,165 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing the SELECT statement.
+#
+# $Id: select2.test,v 1.18 2002/04/02 13:26:11 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Create a table with some data
+#
+execsql {CREATE TABLE tbl1(f1 int, f2 int)}
+set f [open ./testdata1.txt w]
+for {set i 0} {$i<=30} {incr i} {
+ puts $f "[expr {$i%9}]\t[expr {$i%10}]"
+}
+close $f
+execsql {COPY tbl1 FROM './testdata1.txt'}
+file delete -force ./testdata1.txt
+catch {unset data}
+
+# Do a second query inside a first.
+#
+do_test select2-1.1 {
+ set sql {SELECT DISTINCT f1 FROM tbl1 ORDER BY f1}
+ set r {}
+ db eval $sql data {
+ set f1 $data(f1)
+ lappend r $f1:
+ set sql2 "SELECT f2 FROM tbl1 WHERE f1=$f1 ORDER BY f2"
+ db eval $sql2 d2 {
+ lappend r $d2(f2)
+ }
+ }
+ set r
+} {0: 0 7 8 9 1: 0 1 8 9 2: 0 1 2 9 3: 0 1 2 3 4: 2 3 4 5: 3 4 5 6: 4 5 6 7: 5 6 7 8: 6 7 8}
+
+do_test select2-1.2 {
+ set sql {SELECT DISTINCT f1 FROM tbl1 WHERE f1>3 AND f1<5}
+ set r {}
+ db eval $sql data {
+ set f1 $data(f1)
+ lappend r $f1:
+ set sql2 "SELECT f2 FROM tbl1 WHERE f1=$f1 ORDER BY f2"
+ db eval $sql2 d2 {
+ lappend r $d2(f2)
+ }
+ }
+ set r
+} {4: 2 3 4}
+
+# Create a largish table
+#
+do_test select2-2.0 {
+ execsql {CREATE TABLE tbl2(f1 int, f2 int, f3 int)}
+ set f [open ./testdata1.txt w]
+ for {set i 1} {$i<=30000} {incr i} {
+ puts $f "$i\t[expr {$i*2}]\t[expr {$i*3}]"
+ }
+ close $f
+ # execsql {--vdbe-trace-on--}
+ execsql {COPY tbl2 FROM './testdata1.txt'}
+ file delete -force ./testdata1.txt
+} {}
+
+do_test select2-2.1 {
+ execsql {SELECT count(*) FROM tbl2}
+} {30000}
+do_test select2-2.2 {
+ execsql {SELECT count(*) FROM tbl2 WHERE f2>1000}
+} {29500}
+
+do_test select2-3.1 {
+ execsql {SELECT f1 FROM tbl2 WHERE 1000=f2}
+} {500}
+
+do_test select2-3.2a {
+ execsql {CREATE INDEX idx1 ON tbl2(f2)}
+} {}
+
+do_test select2-3.2b {
+ execsql {SELECT f1 FROM tbl2 WHERE 1000=f2}
+} {500}
+do_test select2-3.2c {
+ execsql {SELECT f1 FROM tbl2 WHERE f2=1000}
+} {500}
+do_test select2-3.2d {
+ set sqlite_search_count 0
+ execsql {SELECT * FROM tbl2 WHERE 1000=f2}
+ set sqlite_search_count
+} {3}
+do_test select2-3.2e {
+ set sqlite_search_count 0
+ execsql {SELECT * FROM tbl2 WHERE f2=1000}
+ set sqlite_search_count
+} {3}
+
+# Make sure queries run faster with an index than without
+#
+do_test select2-3.3 {
+ execsql {DROP INDEX idx1}
+ set sqlite_search_count 0
+ execsql {SELECT f1 FROM tbl2 WHERE f2==2000}
+ set sqlite_search_count
+} {29999}
+
+# Make sure we can optimize functions in the WHERE clause that
+# use fields from two or more different tables. (Bug #6)
+#
+do_test select2-4.1 {
+ execsql {
+ CREATE TABLE aa(a);
+ CREATE TABLE bb(b);
+ INSERT INTO aa VALUES(1);
+ INSERT INTO aa VALUES(3);
+ INSERT INTO bb VALUES(2);
+ INSERT INTO bb VALUES(4);
+ SELECT * FROM aa, bb WHERE max(a,b)>2;
+ }
+} {1 4 3 2 3 4}
+do_test select2-4.2 {
+ execsql {
+ INSERT INTO bb VALUES(0);
+ SELECT * FROM aa, bb WHERE b;
+ }
+} {1 2 1 4 3 2 3 4}
+do_test select2-4.3 {
+ execsql {
+ SELECT * FROM aa, bb WHERE NOT b;
+ }
+} {1 0 3 0}
+do_test select2-4.4 {
+ execsql {
+ SELECT * FROM aa, bb WHERE min(a,b);
+ }
+} {1 2 1 4 3 2 3 4}
+do_test select2-4.5 {
+ execsql {
+ SELECT * FROM aa, bb WHERE NOT min(a,b);
+ }
+} {1 0 3 0}
+do_test select2-4.6 {
+ execsql {
+ SELECT * FROM aa, bb WHERE CASE WHEN a=b-1 THEN 1 END;
+ }
+} {1 2 3 4}
+do_test select2-4.7 {
+ execsql {
+ SELECT * FROM aa, bb WHERE CASE WHEN a=b-1 THEN 0 ELSE 1 END;
+ }
+} {1 4 1 0 3 2 3 0}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/select3.test b/usr/src/cmd/svc/configd/sqlite/test/select3.test
new file mode 100644
index 0000000000..c7a232d076
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/select3.test
@@ -0,0 +1,228 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing aggregate functions and the
+# GROUP BY and HAVING clauses of SELECT statements.
+#
+# $Id: select3.test,v 1.8 2003/01/31 17:16:37 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Build some test data
+#
+do_test select3-1.0 {
+ set fd [open data1.txt w]
+ for {set i 1} {$i<32} {incr i} {
+ for {set j 0} {pow(2,$j)<$i} {incr j} {}
+ puts $fd "$i\t$j"
+ }
+ close $fd
+ execsql {
+ CREATE TABLE t1(n int, log int);
+ COPY t1 FROM 'data1.txt'
+ }
+ file delete data1.txt
+ execsql {SELECT DISTINCT log FROM t1 ORDER BY log}
+} {0 1 2 3 4 5}
+
+# Basic aggregate functions.
+#
+do_test select3-1.1 {
+ execsql {SELECT count(*) FROM t1}
+} {31}
+do_test select3-1.2 {
+ execsql {
+ SELECT min(n),min(log),max(n),max(log),sum(n),sum(log),avg(n),avg(log)
+ FROM t1
+ }
+} {1 0 31 5 496 124 16 4}
+do_test select3-1.3 {
+ execsql {SELECT max(n)/avg(n), max(log)/avg(log) FROM t1}
+} {1.9375 1.25}
+
+# Try some basic GROUP BY clauses
+#
+do_test select3-2.1 {
+ execsql {SELECT log, count(*) FROM t1 GROUP BY log ORDER BY log}
+} {0 1 1 1 2 2 3 4 4 8 5 15}
+do_test select3-2.2 {
+ execsql {SELECT log, min(n) FROM t1 GROUP BY log ORDER BY log}
+} {0 1 1 2 2 3 3 5 4 9 5 17}
+do_test select3-2.3 {
+ execsql {SELECT log, avg(n) FROM t1 GROUP BY log ORDER BY log}
+} {0 1 1 2 2 3.5 3 6.5 4 12.5 5 24}
+do_test select3-2.3.1 {
+ execsql {SELECT log, avg(n)+1 FROM t1 GROUP BY log ORDER BY log}
+} {0 2 1 3 2 4.5 3 7.5 4 13.5 5 25}
+do_test select3-2.4 {
+ execsql {SELECT log, avg(n)-min(n) FROM t1 GROUP BY log ORDER BY log}
+} {0 0 1 0 2 0.5 3 1.5 4 3.5 5 7}
+do_test select3-2.5 {
+ execsql {SELECT log*2+1, avg(n)-min(n) FROM t1 GROUP BY log ORDER BY log}
+} {1 0 3 0 5 0.5 7 1.5 9 3.5 11 7}
+do_test select3-2.6 {
+ execsql {
+ SELECT log*2+1 as x, count(*) FROM t1 GROUP BY x ORDER BY x
+ }
+} {1 1 3 1 5 2 7 4 9 8 11 15}
+do_test select3-2.7 {
+ execsql {
+ SELECT log*2+1 AS x, count(*) AS y FROM t1 GROUP BY x ORDER BY y
+ }
+} {1 1 3 1 5 2 7 4 9 8 11 15}
+do_test select3-2.8 {
+ execsql {
+ SELECT log*2+1 AS x, count(*) AS y FROM t1 GROUP BY x ORDER BY 10-(x+y)
+ }
+} {11 15 9 8 7 4 5 2 3 1 1 1}
+do_test select3-2.9 {
+ catchsql {
+ SELECT log, count(*) FROM t1 GROUP BY 'x' ORDER BY log;
+ }
+} {1 {GROUP BY terms must not be non-integer constants}}
+do_test select3-2.10 {
+ catchsql {
+ SELECT log, count(*) FROM t1 GROUP BY 0 ORDER BY log;
+ }
+} {1 {GROUP BY column number 0 out of range - should be between 1 and 2}}
+do_test select3-2.11 {
+ catchsql {
+ SELECT log, count(*) FROM t1 GROUP BY 3 ORDER BY log;
+ }
+} {1 {GROUP BY column number 3 out of range - should be between 1 and 2}}
+do_test select3-2.12 {
+ catchsql {
+ SELECT log, count(*) FROM t1 GROUP BY 1 ORDER BY log;
+ }
+} {0 {0 1 1 1 2 2 3 4 4 8 5 15}}
+#do_test select3-2.13 {
+# catchsql {
+# SELECT log, count(*) FROM t1 GROUP BY 2 ORDER BY log;
+# }
+#} {0 {0 1 1 1 2 2 3 4 4 8 5 15}}
+#do_test select3-2.14 {
+# catchsql {
+# SELECT log, count(*) FROM t1 GROUP BY count(*) ORDER BY log;
+# }
+#} {0 {0 1 1 1 2 2 3 4 4 8 5 15}}
+
+# Cannot have a HAVING without a GROUP BY
+#
+do_test select3-3.1 {
+ set v [catch {execsql {SELECT log, count(*) FROM t1 HAVING log>=4}} msg]
+ lappend v $msg
+} {1 {a GROUP BY clause is required before HAVING}}
+
+# Toss in some HAVING clauses
+#
+do_test select3-4.1 {
+ execsql {SELECT log, count(*) FROM t1 GROUP BY log HAVING log>=4 ORDER BY log}
+} {4 8 5 15}
+do_test select3-4.2 {
+ execsql {
+ SELECT log, count(*) FROM t1
+ GROUP BY log
+ HAVING count(*)>=4
+ ORDER BY log
+ }
+} {3 4 4 8 5 15}
+do_test select3-4.3 {
+ execsql {
+ SELECT log, count(*) FROM t1
+ GROUP BY log
+ HAVING count(*)>=4
+ ORDER BY max(n)
+ }
+} {3 4 4 8 5 15}
+do_test select3-4.4 {
+ execsql {
+ SELECT log AS x, count(*) AS y FROM t1
+ GROUP BY x
+ HAVING y>=4
+ ORDER BY max(n)
+ }
+} {3 4 4 8 5 15}
+do_test select3-4.5 {
+ execsql {
+ SELECT log AS x FROM t1
+ GROUP BY x
+ HAVING count(*)>=4
+ ORDER BY max(n)
+ }
+} {3 4 5}
+
+do_test select3-5.1 {
+ execsql {
+ SELECT log, count(*), avg(n), max(n+log*2) FROM t1
+ GROUP BY log
+ ORDER BY max(n+log*2), avg(n)
+ }
+} {0 1 1 1 1 1 2 4 2 2 3.5 8 3 4 6.5 14 4 8 12.5 24 5 15 24 41}
+do_test select3-5.2 {
+ execsql {
+ SELECT log, count(*), avg(n), max(n+log*2) FROM t1
+ GROUP BY log
+ ORDER BY max(n+log*2), min(log,avg(n))
+ }
+} {0 1 1 1 1 1 2 4 2 2 3.5 8 3 4 6.5 14 4 8 12.5 24 5 15 24 41}
+
+# Test sorting of GROUP BY results in the presence of an index
+# on the GROUP BY column.
+#
+do_test select3-6.1 {
+ execsql {
+ SELECT log, min(n) FROM t1 GROUP BY log ORDER BY log;
+ }
+} {0 1 1 2 2 3 3 5 4 9 5 17}
+do_test select3-6.2 {
+ execsql {
+ SELECT log, min(n) FROM t1 GROUP BY log ORDER BY log DESC;
+ }
+} {5 17 4 9 3 5 2 3 1 2 0 1}
+do_test select3-6.3 {
+ execsql {
+ SELECT log, min(n) FROM t1 GROUP BY log ORDER BY 1;
+ }
+} {0 1 1 2 2 3 3 5 4 9 5 17}
+do_test select3-6.4 {
+ execsql {
+ SELECT log, min(n) FROM t1 GROUP BY log ORDER BY 1 DESC;
+ }
+} {5 17 4 9 3 5 2 3 1 2 0 1}
+do_test select3-6.5 {
+ execsql {
+ CREATE INDEX i1 ON t1(log);
+ SELECT log, min(n) FROM t1 GROUP BY log ORDER BY log;
+ }
+} {0 1 1 2 2 3 3 5 4 9 5 17}
+do_test select3-6.6 {
+ execsql {
+ SELECT log, min(n) FROM t1 GROUP BY log ORDER BY log DESC;
+ }
+} {5 17 4 9 3 5 2 3 1 2 0 1}
+do_test select3-6.7 {
+ execsql {
+ SELECT log, min(n) FROM t1 GROUP BY log ORDER BY 1;
+ }
+} {0 1 1 2 2 3 3 5 4 9 5 17}
+do_test select3-6.8 {
+ execsql {
+ SELECT log, min(n) FROM t1 GROUP BY log ORDER BY 1 DESC;
+ }
+} {5 17 4 9 3 5 2 3 1 2 0 1}
+
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/select4.test b/usr/src/cmd/svc/configd/sqlite/test/select4.test
new file mode 100644
index 0000000000..8fb34a2156
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/select4.test
@@ -0,0 +1,498 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing UNION, INTERSECT and EXCEPT operators
+# in SELECT statements.
+#
+# $Id: select4.test,v 1.13 2003/02/02 12:41:27 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Build some test data
+#
+set fd [open data1.txt w]
+for {set i 1} {$i<32} {incr i} {
+ for {set j 0} {pow(2,$j)<$i} {incr j} {}
+ puts $fd "$i\t$j"
+}
+close $fd
+execsql {
+ CREATE TABLE t1(n int, log int);
+ COPY t1 FROM 'data1.txt'
+}
+file delete data1.txt
+
+do_test select4-1.0 {
+ execsql {SELECT DISTINCT log FROM t1 ORDER BY log}
+} {0 1 2 3 4 5}
+
+# Union All operator
+#
+do_test select4-1.1a {
+ lsort [execsql {SELECT DISTINCT log FROM t1}]
+} {0 1 2 3 4 5}
+do_test select4-1.1b {
+ lsort [execsql {SELECT n FROM t1 WHERE log=3}]
+} {5 6 7 8}
+do_test select4-1.1c {
+ execsql {
+ SELECT DISTINCT log FROM t1
+ UNION ALL
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY log;
+ }
+} {0 1 2 3 4 5 5 6 7 8}
+do_test select4-1.1d {
+ execsql {
+ CREATE TABLE t2 AS
+ SELECT DISTINCT log FROM t1
+ UNION ALL
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY log;
+ SELECT * FROM t2;
+ }
+} {0 1 2 3 4 5 5 6 7 8}
+execsql {DROP TABLE t2}
+do_test select4-1.1e {
+ execsql {
+ CREATE TABLE t2 AS
+ SELECT DISTINCT log FROM t1
+ UNION ALL
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY log DESC;
+ SELECT * FROM t2;
+ }
+} {8 7 6 5 5 4 3 2 1 0}
+execsql {DROP TABLE t2}
+do_test select4-1.1f {
+ execsql {
+ SELECT DISTINCT log FROM t1
+ UNION ALL
+ SELECT n FROM t1 WHERE log=2
+ }
+} {0 1 2 3 4 5 3 4}
+do_test select4-1.1g {
+ execsql {
+ CREATE TABLE t2 AS
+ SELECT DISTINCT log FROM t1
+ UNION ALL
+ SELECT n FROM t1 WHERE log=2;
+ SELECT * FROM t2;
+ }
+} {0 1 2 3 4 5 3 4}
+execsql {DROP TABLE t2}
+do_test select4-1.2 {
+ execsql {
+ SELECT log FROM t1 WHERE n IN
+ (SELECT DISTINCT log FROM t1 UNION ALL
+ SELECT n FROM t1 WHERE log=3)
+ ORDER BY log;
+ }
+} {0 1 2 2 3 3 3 3}
+do_test select4-1.3 {
+ set v [catch {execsql {
+ SELECT DISTINCT log FROM t1 ORDER BY log
+ UNION ALL
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY log;
+ }} msg]
+ lappend v $msg
+} {1 {ORDER BY clause should come after UNION ALL not before}}
+
+# Union operator
+#
+do_test select4-2.1 {
+ execsql {
+ SELECT DISTINCT log FROM t1
+ UNION
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY log;
+ }
+} {0 1 2 3 4 5 6 7 8}
+do_test select4-2.2 {
+ execsql {
+ SELECT log FROM t1 WHERE n IN
+ (SELECT DISTINCT log FROM t1 UNION
+ SELECT n FROM t1 WHERE log=3)
+ ORDER BY log;
+ }
+} {0 1 2 2 3 3 3 3}
+do_test select4-2.3 {
+ set v [catch {execsql {
+ SELECT DISTINCT log FROM t1 ORDER BY log
+ UNION
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY log;
+ }} msg]
+ lappend v $msg
+} {1 {ORDER BY clause should come after UNION not before}}
+
+# Except operator
+#
+do_test select4-3.1.1 {
+ execsql {
+ SELECT DISTINCT log FROM t1
+ EXCEPT
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY log;
+ }
+} {0 1 2 3 4}
+do_test select4-3.1.2 {
+ execsql {
+ CREATE TABLE t2 AS
+ SELECT DISTINCT log FROM t1
+ EXCEPT
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY log;
+ SELECT * FROM t2;
+ }
+} {0 1 2 3 4}
+execsql {DROP TABLE t2}
+do_test select4-3.1.3 {
+ execsql {
+ CREATE TABLE t2 AS
+ SELECT DISTINCT log FROM t1
+ EXCEPT
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY log DESC;
+ SELECT * FROM t2;
+ }
+} {4 3 2 1 0}
+execsql {DROP TABLE t2}
+do_test select4-3.2 {
+ execsql {
+ SELECT log FROM t1 WHERE n IN
+ (SELECT DISTINCT log FROM t1 EXCEPT
+ SELECT n FROM t1 WHERE log=3)
+ ORDER BY log;
+ }
+} {0 1 2 2}
+do_test select4-3.3 {
+ set v [catch {execsql {
+ SELECT DISTINCT log FROM t1 ORDER BY log
+ EXCEPT
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY log;
+ }} msg]
+ lappend v $msg
+} {1 {ORDER BY clause should come after EXCEPT not before}}
+
+# Intersect operator
+#
+do_test select4-4.1.1 {
+ execsql {
+ SELECT DISTINCT log FROM t1
+ INTERSECT
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY log;
+ }
+} {5}
+do_test select4-4.1.2 {
+ execsql {
+ SELECT DISTINCT log FROM t1 UNION ALL SELECT 6
+ INTERSECT
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY log;
+ }
+} {5 6}
+do_test select4-4.1.3 {
+ execsql {
+ CREATE TABLE t2 AS
+ SELECT DISTINCT log FROM t1 UNION ALL SELECT 6
+ INTERSECT
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY log;
+ SELECT * FROM t2;
+ }
+} {5 6}
+execsql {DROP TABLE t2}
+do_test select4-4.1.4 {
+ execsql {
+ CREATE TABLE t2 AS
+ SELECT DISTINCT log FROM t1 UNION ALL SELECT 6
+ INTERSECT
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY log DESC;
+ SELECT * FROM t2;
+ }
+} {6 5}
+execsql {DROP TABLE t2}
+do_test select4-4.2 {
+ execsql {
+ SELECT log FROM t1 WHERE n IN
+ (SELECT DISTINCT log FROM t1 INTERSECT
+ SELECT n FROM t1 WHERE log=3)
+ ORDER BY log;
+ }
+} {3}
+do_test select4-4.3 {
+ set v [catch {execsql {
+ SELECT DISTINCT log FROM t1 ORDER BY log
+ INTERSECT
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY log;
+ }} msg]
+ lappend v $msg
+} {1 {ORDER BY clause should come after INTERSECT not before}}
+
+# Various error messages while processing UNION or INTERSECT
+#
+do_test select4-5.1 {
+ set v [catch {execsql {
+ SELECT DISTINCT log FROM t2
+ UNION ALL
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY log;
+ }} msg]
+ lappend v $msg
+} {1 {no such table: t2}}
+do_test select4-5.2 {
+ set v [catch {execsql {
+ SELECT DISTINCT log AS "xyzzy" FROM t1
+ UNION ALL
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY xyzzy;
+ }} msg]
+ lappend v $msg
+} {0 {0 1 2 3 4 5 5 6 7 8}}
+do_test select4-5.2b {
+ set v [catch {execsql {
+ SELECT DISTINCT log AS xyzzy FROM t1
+ UNION ALL
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY 'xyzzy';
+ }} msg]
+ lappend v $msg
+} {0 {0 1 2 3 4 5 5 6 7 8}}
+do_test select4-5.2c {
+ set v [catch {execsql {
+ SELECT DISTINCT log FROM t1
+ UNION ALL
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY 'xyzzy';
+ }} msg]
+ lappend v $msg
+} {1 {ORDER BY term number 1 does not match any result column}}
+do_test select4-5.2d {
+ set v [catch {execsql {
+ SELECT DISTINCT log FROM t1
+ INTERSECT
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY 'xyzzy';
+ }} msg]
+ lappend v $msg
+} {1 {ORDER BY term number 1 does not match any result column}}
+do_test select4-5.2e {
+ set v [catch {execsql {
+ SELECT DISTINCT log FROM t1
+ UNION ALL
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY n;
+ }} msg]
+ lappend v $msg
+} {0 {0 1 2 3 4 5 5 6 7 8}}
+do_test select4-5.2f {
+ catchsql {
+ SELECT DISTINCT log FROM t1
+ UNION ALL
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY log;
+ }
+} {0 {0 1 2 3 4 5 5 6 7 8}}
+do_test select4-5.2g {
+ catchsql {
+ SELECT DISTINCT log FROM t1
+ UNION ALL
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY 1;
+ }
+} {0 {0 1 2 3 4 5 5 6 7 8}}
+do_test select4-5.2h {
+ catchsql {
+ SELECT DISTINCT log FROM t1
+ UNION ALL
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY 2;
+ }
+} {1 {ORDER BY position 2 should be between 1 and 1}}
+do_test select4-5.2i {
+ catchsql {
+ SELECT DISTINCT 1, log FROM t1
+ UNION ALL
+ SELECT 2, n FROM t1 WHERE log=3
+ ORDER BY 2, 1;
+ }
+} {0 {1 0 1 1 1 2 1 3 1 4 1 5 2 5 2 6 2 7 2 8}}
+do_test select4-5.2j {
+ catchsql {
+ SELECT DISTINCT 1, log FROM t1
+ UNION ALL
+ SELECT 2, n FROM t1 WHERE log=3
+ ORDER BY 1, 2 DESC;
+ }
+} {0 {1 5 1 4 1 3 1 2 1 1 1 0 2 8 2 7 2 6 2 5}}
+do_test select4-5.2k {
+ catchsql {
+ SELECT DISTINCT 1, log FROM t1
+ UNION ALL
+ SELECT 2, n FROM t1 WHERE log=3
+ ORDER BY n, 1;
+ }
+} {0 {1 0 1 1 1 2 1 3 1 4 1 5 2 5 2 6 2 7 2 8}}
+do_test select4-5.3 {
+ set v [catch {execsql {
+ SELECT DISTINCT log, n FROM t1
+ UNION ALL
+ SELECT n FROM t1 WHERE log=3
+ ORDER BY log;
+ }} msg]
+ lappend v $msg
+} {1 {SELECTs to the left and right of UNION ALL do not have the same number of result columns}}
+do_test select4-5.4 {
+ set v [catch {execsql {
+ SELECT log FROM t1 WHERE n=2
+ UNION ALL
+ SELECT log FROM t1 WHERE n=3
+ UNION ALL
+ SELECT log FROM t1 WHERE n=4
+ UNION ALL
+ SELECT log FROM t1 WHERE n=5
+ ORDER BY log;
+ }} msg]
+ lappend v $msg
+} {0 {1 2 2 3}}
+
+do_test select4-6.1 {
+ execsql {
+ SELECT log, count(*) as cnt FROM t1 GROUP BY log
+ UNION
+ SELECT log, n FROM t1 WHERE n=7
+ ORDER BY cnt, log;
+ }
+} {0 1 1 1 2 2 3 4 3 7 4 8 5 15}
+do_test select4-6.2 {
+ execsql {
+ SELECT log, count(*) FROM t1 GROUP BY log
+ UNION
+ SELECT log, n FROM t1 WHERE n=7
+ ORDER BY count(*), log;
+ }
+} {0 1 1 1 2 2 3 4 3 7 4 8 5 15}
+
+# NULLs are indistinct for the UNION operator.
+# Make sure the UNION operator recognizes this
+#
+do_test select4-6.3 {
+ execsql {
+ SELECT NULL UNION SELECT NULL UNION
+ SELECT 1 UNION SELECT 2 AS 'x'
+ ORDER BY x;
+ }
+} {{} 1 2}
+do_test select4-6.3.1 {
+ execsql {
+ SELECT NULL UNION ALL SELECT NULL UNION ALL
+ SELECT 1 UNION ALL SELECT 2 AS 'x'
+ ORDER BY x;
+ }
+} {{} {} 1 2}
+
+# Make sure the DISTINCT keyword treats NULLs as indistinct.
+#
+do_test select4-6.4 {
+ execsql {
+ SELECT * FROM (
+ SELECT NULL, 1 UNION ALL SELECT NULL, 1
+ );
+ }
+} {{} 1 {} 1}
+do_test select4-6.5 {
+ execsql {
+ SELECT DISTINCT * FROM (
+ SELECT NULL, 1 UNION ALL SELECT NULL, 1
+ );
+ }
+} {{} 1}
+do_test select4-6.6 {
+ execsql {
+ SELECT DISTINCT * FROM (
+ SELECT 1,2 UNION ALL SELECT 1,2
+ );
+ }
+} {1 2}
+
+# Test distinctness of NULL in other ways.
+#
+do_test select4-6.7 {
+ execsql {
+ SELECT NULL EXCEPT SELECT NULL
+ }
+} {}
+
+
+# Make sure column names are correct when a compound select appears as
+# an expression in the WHERE clause.
+#
+do_test select4-7.1 {
+ execsql {
+ CREATE TABLE t2 AS SELECT log AS 'x', count(*) AS 'y' FROM t1 GROUP BY log;
+ SELECT * FROM t2 ORDER BY x;
+ }
+} {0 1 1 1 2 2 3 4 4 8 5 15}
+do_test select4-7.2 {
+ execsql2 {
+ SELECT * FROM t1 WHERE n IN (SELECT n FROM t1 INTERSECT SELECT x FROM t2)
+ ORDER BY n
+ }
+} {n 1 log 0 n 2 log 1 n 3 log 2 n 4 log 2 n 5 log 3}
+do_test select4-7.3 {
+ execsql2 {
+ SELECT * FROM t1 WHERE n IN (SELECT n FROM t1 EXCEPT SELECT x FROM t2)
+ ORDER BY n LIMIT 2
+ }
+} {n 6 log 3 n 7 log 3}
+do_test select4-7.4 {
+ execsql2 {
+ SELECT * FROM t1 WHERE n IN (SELECT n FROM t1 UNION SELECT x FROM t2)
+ ORDER BY n LIMIT 2
+ }
+} {n 1 log 0 n 2 log 1}
+
+# Make sure DISTINCT works appropriately on TEXT and NUMERIC columns.
+#
+do_test select4-8.1 {
+ execsql {
+ BEGIN;
+ CREATE TABLE t3(a text, b float, c text);
+ INSERT INTO t3 VALUES(1, 1.1, '1.1');
+ INSERT INTO t3 VALUES(2, 1.10, '1.10');
+ INSERT INTO t3 VALUES(3, 1.10, '1.1');
+ INSERT INTO t3 VALUES(4, 1.1, '1.10');
+ INSERT INTO t3 VALUES(5, 1.2, '1.2');
+ INSERT INTO t3 VALUES(6, 1.3, '1.3');
+ COMMIT;
+ }
+ execsql {
+ SELECT DISTINCT b FROM t3 ORDER BY c;
+ }
+} {1.1 1.2 1.3}
+do_test select4-8.2 {
+ execsql {
+ SELECT DISTINCT c FROM t3 ORDER BY c;
+ }
+} {1.1 1.10 1.2 1.3}
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/select5.test b/usr/src/cmd/svc/configd/sqlite/test/select5.test
new file mode 100644
index 0000000000..7db30d07db
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/select5.test
@@ -0,0 +1,122 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing aggregate functions and the
+# GROUP BY and HAVING clauses of SELECT statements.
+#
+# $Id: select5.test,v 1.6 2001/10/15 00:44:36 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Build some test data
+#
+set fd [open data1.txt w]
+for {set i 1} {$i<32} {incr i} {
+ for {set j 0} {pow(2,$j)<$i} {incr j} {}
+ puts $fd "[expr {32-$i}]\t[expr {10-$j}]"
+}
+close $fd
+execsql {
+ CREATE TABLE t1(x int, y int);
+ COPY t1 FROM 'data1.txt'
+}
+file delete data1.txt
+
+do_test select5-1.0 {
+ execsql {SELECT DISTINCT y FROM t1 ORDER BY y}
+} {5 6 7 8 9 10}
+
+# Sort by an aggregate function.
+#
+do_test select5-1.1 {
+ execsql {SELECT y, count(*) FROM t1 GROUP BY y ORDER BY y}
+} {5 15 6 8 7 4 8 2 9 1 10 1}
+do_test select5-1.2 {
+ execsql {SELECT y, count(*) FROM t1 GROUP BY y ORDER BY count(*), y}
+} {9 1 10 1 8 2 7 4 6 8 5 15}
+do_test select5-1.3 {
+ execsql {SELECT count(*), y FROM t1 GROUP BY y ORDER BY count(*), y}
+} {1 9 1 10 2 8 4 7 8 6 15 5}
+
+# Some error messages associated with aggregates and GROUP BY
+#
+do_test select5-2.1 {
+ set v [catch {execsql {
+ SELECT y, count(*) FROM t1 GROUP BY z ORDER BY y
+ }} msg]
+ lappend v $msg
+} {1 {no such column: z}}
+do_test select5-2.2 {
+ set v [catch {execsql {
+ SELECT y, count(*) FROM t1 GROUP BY z(y) ORDER BY y
+ }} msg]
+ lappend v $msg
+} {1 {no such function: z}}
+do_test select5-2.3 {
+ set v [catch {execsql {
+ SELECT y, count(*) FROM t1 GROUP BY y HAVING count(*)<3 ORDER BY y
+ }} msg]
+ lappend v $msg
+} {0 {8 2 9 1 10 1}}
+do_test select5-2.4 {
+ set v [catch {execsql {
+ SELECT y, count(*) FROM t1 GROUP BY y HAVING z(y)<3 ORDER BY y
+ }} msg]
+ lappend v $msg
+} {1 {no such function: z}}
+do_test select5-2.5 {
+ set v [catch {execsql {
+ SELECT y, count(*) FROM t1 GROUP BY y HAVING count(*)<z ORDER BY y
+ }} msg]
+ lappend v $msg
+} {1 {no such column: z}}
+
+# Get the Agg function to rehash in vdbe.c
+#
+do_test select5-3.1 {
+ execsql {
+ SELECT x, count(*), avg(y) FROM t1 GROUP BY x HAVING x<4 ORDER BY x
+ }
+} {1 1 5 2 1 5 3 1 5}
+
+# Run various aggregate functions when the count is zero.
+#
+do_test select5-4.1 {
+ execsql {
+ SELECT avg(x) FROM t1 WHERE x>100
+ }
+} {{}}
+do_test select5-4.2 {
+ execsql {
+ SELECT count(x) FROM t1 WHERE x>100
+ }
+} {0}
+do_test select5-4.3 {
+ execsql {
+ SELECT min(x) FROM t1 WHERE x>100
+ }
+} {{}}
+do_test select5-4.4 {
+ execsql {
+ SELECT max(x) FROM t1 WHERE x>100
+ }
+} {{}}
+do_test select5-4.5 {
+ execsql {
+ SELECT sum(x) FROM t1 WHERE x>100
+ }
+} {0}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/select6.test b/usr/src/cmd/svc/configd/sqlite/test/select6.test
new file mode 100644
index 0000000000..13ff398e98
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/select6.test
@@ -0,0 +1,438 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing SELECT statements that contain
+# subqueries in their FROM clause.
+#
+# $Id: select6.test,v 1.11 2004/01/24 20:18:13 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+do_test select6-1.0 {
+ execsql {
+ BEGIN;
+ CREATE TABLE t1(x, y);
+ INSERT INTO t1 VALUES(1,1);
+ INSERT INTO t1 VALUES(2,2);
+ INSERT INTO t1 VALUES(3,2);
+ INSERT INTO t1 VALUES(4,3);
+ INSERT INTO t1 VALUES(5,3);
+ INSERT INTO t1 VALUES(6,3);
+ INSERT INTO t1 VALUES(7,3);
+ INSERT INTO t1 VALUES(8,4);
+ INSERT INTO t1 VALUES(9,4);
+ INSERT INTO t1 VALUES(10,4);
+ INSERT INTO t1 VALUES(11,4);
+ INSERT INTO t1 VALUES(12,4);
+ INSERT INTO t1 VALUES(13,4);
+ INSERT INTO t1 VALUES(14,4);
+ INSERT INTO t1 VALUES(15,4);
+ INSERT INTO t1 VALUES(16,5);
+ INSERT INTO t1 VALUES(17,5);
+ INSERT INTO t1 VALUES(18,5);
+ INSERT INTO t1 VALUES(19,5);
+ INSERT INTO t1 VALUES(20,5);
+ COMMIT;
+ SELECT DISTINCT y FROM t1 ORDER BY y;
+ }
+} {1 2 3 4 5}
+
+do_test select6-1.1 {
+ execsql2 {SELECT * FROM (SELECT x, y FROM t1 WHERE x<2)}
+} {x 1 y 1}
+do_test select6-1.2 {
+ execsql {SELECT count(*) FROM (SELECT y FROM t1)}
+} {20}
+do_test select6-1.3 {
+ execsql {SELECT count(*) FROM (SELECT DISTINCT y FROM t1)}
+} {5}
+do_test select6-1.4 {
+ execsql {SELECT count(*) FROM (SELECT DISTINCT * FROM (SELECT y FROM t1))}
+} {5}
+do_test select6-1.5 {
+ execsql {SELECT count(*) FROM (SELECT * FROM (SELECT DISTINCT y FROM t1))}
+} {5}
+
+do_test select6-1.6 {
+ execsql {
+ SELECT *
+ FROM (SELECT count(*),y FROM t1 GROUP BY y) AS a,
+ (SELECT max(x),y FROM t1 GROUP BY y) as b
+ WHERE a.y=b.y ORDER BY a.y
+ }
+} {1 1 1 1 2 2 3 2 4 3 7 3 8 4 15 4 5 5 20 5}
+do_test select6-1.7 {
+ execsql {
+ SELECT a.y, a.[count(*)], [max(x)], [count(*)]
+ FROM (SELECT count(*),y FROM t1 GROUP BY y) AS a,
+ (SELECT max(x),y FROM t1 GROUP BY y) as b
+ WHERE a.y=b.y ORDER BY a.y
+ }
+} {1 1 1 1 2 2 3 2 3 4 7 4 4 8 15 8 5 5 20 5}
+do_test select6-1.8 {
+ execsql {
+ SELECT q, p, r
+ FROM (SELECT count(*) as p , y as q FROM t1 GROUP BY y) AS a,
+ (SELECT max(x) as r, y as s FROM t1 GROUP BY y) as b
+ WHERE q=s ORDER BY s
+ }
+} {1 1 1 2 2 3 3 4 7 4 8 15 5 5 20}
+do_test select6-1.9 {
+ execsql {
+ SELECT q, p, r, b.[min(x)+y]
+ FROM (SELECT count(*) as p , y as q FROM t1 GROUP BY y) AS a,
+ (SELECT max(x) as r, y as s, min(x)+y FROM t1 GROUP BY y) as b
+ WHERE q=s ORDER BY s
+ }
+} {1 1 1 2 2 2 3 4 3 4 7 7 4 8 15 12 5 5 20 21}
+
+do_test select6-2.0 {
+ execsql {
+ CREATE TABLE t2(a INTEGER PRIMARY KEY, b);
+ INSERT INTO t2 SELECT * FROM t1;
+ SELECT DISTINCT b FROM t2 ORDER BY b;
+ }
+} {1 2 3 4 5}
+do_test select6-2.1 {
+ execsql2 {SELECT * FROM (SELECT a, b FROM t2 WHERE a<2)}
+} {a 1 b 1}
+do_test select6-2.2 {
+ execsql {SELECT count(*) FROM (SELECT b FROM t2)}
+} {20}
+do_test select6-2.3 {
+ execsql {SELECT count(*) FROM (SELECT DISTINCT b FROM t2)}
+} {5}
+do_test select6-2.4 {
+ execsql {SELECT count(*) FROM (SELECT DISTINCT * FROM (SELECT b FROM t2))}
+} {5}
+do_test select6-2.5 {
+ execsql {SELECT count(*) FROM (SELECT * FROM (SELECT DISTINCT b FROM t2))}
+} {5}
+
+do_test select6-2.6 {
+ execsql {
+ SELECT *
+ FROM (SELECT count(*),b FROM t2 GROUP BY b) AS a,
+ (SELECT max(a),b FROM t2 GROUP BY b) as b
+ WHERE a.b=b.b ORDER BY a.b
+ }
+} {1 1 1 1 2 2 3 2 4 3 7 3 8 4 15 4 5 5 20 5}
+do_test select6-2.7 {
+ execsql {
+ SELECT a.b, a.[count(*)], [max(a)], [count(*)]
+ FROM (SELECT count(*),b FROM t2 GROUP BY b) AS a,
+ (SELECT max(a),b FROM t2 GROUP BY b) as b
+ WHERE a.b=b.b ORDER BY a.b
+ }
+} {1 1 1 1 2 2 3 2 3 4 7 4 4 8 15 8 5 5 20 5}
+do_test select6-2.8 {
+ execsql {
+ SELECT q, p, r
+ FROM (SELECT count(*) as p , b as q FROM t2 GROUP BY b) AS a,
+ (SELECT max(a) as r, b as s FROM t2 GROUP BY b) as b
+ WHERE q=s ORDER BY s
+ }
+} {1 1 1 2 2 3 3 4 7 4 8 15 5 5 20}
+do_test select6-2.9 {
+ execsql {
+ SELECT a.q, a.p, b.r
+ FROM (SELECT count(*) as p , b as q FROM t2 GROUP BY q) AS a,
+ (SELECT max(a) as r, b as s FROM t2 GROUP BY s) as b
+ WHERE a.q=b.s ORDER BY a.q
+ }
+} {1 1 1 2 2 3 3 4 7 4 8 15 5 5 20}
+
+do_test select6-3.1 {
+ execsql2 {
+ SELECT * FROM (SELECT * FROM (SELECT * FROM t1 WHERE x=3));
+ }
+} {x 3 y 2}
+do_test select6-3.2 {
+ execsql {
+ SELECT * FROM
+ (SELECT a.q, a.p, b.r
+ FROM (SELECT count(*) as p , b as q FROM t2 GROUP BY q) AS a,
+ (SELECT max(a) as r, b as s FROM t2 GROUP BY s) as b
+ WHERE a.q=b.s ORDER BY a.q)
+ ORDER BY q
+ }
+} {1 1 1 2 2 3 3 4 7 4 8 15 5 5 20}
+do_test select6-3.3 {
+ execsql {
+ SELECT a,b,a+b FROM (SELECT avg(x) as 'a', avg(y) as 'b' FROM t1)
+ }
+} {10.5 3.7 14.2}
+do_test select6-3.4 {
+ execsql {
+ SELECT a,b,a+b FROM (SELECT avg(x) as 'a', avg(y) as 'b' FROM t1 WHERE y=4)
+ }
+} {11.5 4 15.5}
+do_test select6-3.5 {
+ execsql {
+ SELECT x,y,x+y FROM (SELECT avg(a) as 'x', avg(b) as 'y' FROM t2 WHERE a=4)
+ }
+} {4 3 7}
+do_test select6-3.6 {
+ execsql {
+ SELECT a,b,a+b FROM (SELECT avg(x) as 'a', avg(y) as 'b' FROM t1)
+ WHERE a>10
+ }
+} {10.5 3.7 14.2}
+do_test select6-3.7 {
+ execsql {
+ SELECT a,b,a+b FROM (SELECT avg(x) as 'a', avg(y) as 'b' FROM t1)
+ WHERE a<10
+ }
+} {}
+do_test select6-3.8 {
+ execsql {
+ SELECT a,b,a+b FROM (SELECT avg(x) as 'a', avg(y) as 'b' FROM t1 WHERE y=4)
+ WHERE a>10
+ }
+} {11.5 4 15.5}
+do_test select6-3.9 {
+ execsql {
+ SELECT a,b,a+b FROM (SELECT avg(x) as 'a', avg(y) as 'b' FROM t1 WHERE y=4)
+ WHERE a<10
+ }
+} {}
+do_test select6-3.10 {
+ execsql {
+ SELECT a,b,a+b FROM (SELECT avg(x) as 'a', y as 'b' FROM t1 GROUP BY b)
+ ORDER BY a
+ }
+} {1 1 2 2.5 2 4.5 5.5 3 8.5 11.5 4 15.5 18 5 23}
+do_test select6-3.11 {
+ execsql {
+ SELECT a,b,a+b FROM
+ (SELECT avg(x) as 'a', y as 'b' FROM t1 GROUP BY b)
+ WHERE b<4 ORDER BY a
+ }
+} {1 1 2 2.5 2 4.5 5.5 3 8.5}
+do_test select6-3.12 {
+ execsql {
+ SELECT a,b,a+b FROM
+ (SELECT avg(x) as 'a', y as 'b' FROM t1 GROUP BY b HAVING a>1)
+ WHERE b<4 ORDER BY a
+ }
+} {2.5 2 4.5 5.5 3 8.5}
+do_test select6-3.13 {
+ execsql {
+ SELECT a,b,a+b FROM
+ (SELECT avg(x) as 'a', y as 'b' FROM t1 GROUP BY b HAVING a>1)
+ ORDER BY a
+ }
+} {2.5 2 4.5 5.5 3 8.5 11.5 4 15.5 18 5 23}
+do_test select6-3.14 {
+ execsql {
+ SELECT [count(*)],y FROM (SELECT count(*), y FROM t1 GROUP BY y)
+ ORDER BY [count(*)]
+ }
+} {1 1 2 2 4 3 5 5 8 4}
+do_test select6-3.15 {
+ execsql {
+ SELECT [count(*)],y FROM (SELECT count(*), y FROM t1 GROUP BY y)
+ ORDER BY y
+ }
+} {1 1 2 2 4 3 8 4 5 5}
+
+do_test select6-4.1 {
+ execsql {
+ SELECT a,b,c FROM
+ (SELECT x AS 'a', y AS 'b', x+y AS 'c' FROM t1 WHERE y=4)
+ WHERE a<10 ORDER BY a;
+ }
+} {8 4 12 9 4 13}
+do_test select6-4.2 {
+ execsql {
+ SELECT y FROM (SELECT DISTINCT y FROM t1) WHERE y<5 ORDER BY y
+ }
+} {1 2 3 4}
+do_test select6-4.3 {
+ execsql {
+ SELECT DISTINCT y FROM (SELECT y FROM t1) WHERE y<5 ORDER BY y
+ }
+} {1 2 3 4}
+do_test select6-4.4 {
+ execsql {
+ SELECT avg(y) FROM (SELECT DISTINCT y FROM t1) WHERE y<5 ORDER BY y
+ }
+} {2.5}
+do_test select6-4.5 {
+ execsql {
+ SELECT avg(y) FROM (SELECT DISTINCT y FROM t1 WHERE y<5) ORDER BY y
+ }
+} {2.5}
+
+do_test select6-5.1 {
+ execsql {
+ SELECT a,x,b FROM
+ (SELECT x+3 AS 'a', x FROM t1 WHERE y=3) AS 'p',
+ (SELECT x AS 'b' FROM t1 WHERE y=4) AS 'q'
+ WHERE a=b
+ ORDER BY a
+ }
+} {8 5 8 9 6 9 10 7 10}
+do_test select6-5.2 {
+ execsql {
+ SELECT a,x,b FROM
+ (SELECT x+3 AS 'a', x FROM t1 WHERE y=3),
+ (SELECT x AS 'b' FROM t1 WHERE y=4)
+ WHERE a=b
+ ORDER BY a
+ }
+} {8 5 8 9 6 9 10 7 10}
+
+# Tests of compound sub-selects
+#
+do_test select6-6.1 {
+ execsql {
+ DELETE FROM t1 WHERE x>4;
+ SELECT * FROM t1
+ }
+} {1 1 2 2 3 2 4 3}
+do_test select6-6.2 {
+ execsql {
+ SELECT * FROM (
+ SELECT x AS 'a' FROM t1 UNION ALL SELECT x+10 AS 'a' FROM t1
+ ) ORDER BY a;
+ }
+} {1 2 3 4 11 12 13 14}
+do_test select6-6.3 {
+ execsql {
+ SELECT * FROM (
+ SELECT x AS 'a' FROM t1 UNION ALL SELECT x+1 AS 'a' FROM t1
+ ) ORDER BY a;
+ }
+} {1 2 2 3 3 4 4 5}
+do_test select6-6.4 {
+ execsql {
+ SELECT * FROM (
+ SELECT x AS 'a' FROM t1 UNION SELECT x+1 AS 'a' FROM t1
+ ) ORDER BY a;
+ }
+} {1 2 3 4 5}
+do_test select6-6.5 {
+ execsql {
+ SELECT * FROM (
+ SELECT x AS 'a' FROM t1 INTERSECT SELECT x+1 AS 'a' FROM t1
+ ) ORDER BY a;
+ }
+} {2 3 4}
+do_test select6-6.6 {
+ execsql {
+ SELECT * FROM (
+ SELECT x AS 'a' FROM t1 EXCEPT SELECT x*2 AS 'a' FROM t1
+ ) ORDER BY a;
+ }
+} {1 3}
+
+# Subselects with no FROM clause
+#
+do_test select6-7.1 {
+ execsql {
+ SELECT * FROM (SELECT 1)
+ }
+} {1}
+do_test select6-7.2 {
+ execsql {
+ SELECT c,b,a,* FROM (SELECT 1 AS 'a', 2 AS 'b', 'abc' AS 'c')
+ }
+} {abc 2 1 1 2 abc}
+do_test select6-7.3 {
+ execsql {
+ SELECT c,b,a,* FROM (SELECT 1 AS 'a', 2 AS 'b', 'abc' AS 'c' WHERE 0)
+ }
+} {}
+do_test select6-7.4 {
+ execsql2 {
+ SELECT c,b,a,* FROM (SELECT 1 AS 'a', 2 AS 'b', 'abc' AS 'c' WHERE 1)
+ }
+} {c abc b 2 a 1 a 1 b 2 c abc}
+
+# The following procedure compiles the SQL given as an argument and returns
+# TRUE if that SQL uses any transient tables and returns FALSE if no
+# transient tables are used. This is used to make sure that the
+# sqliteFlattenSubquery() routine in select.c is doing its job.
+#
+proc is_flat {sql} {
+ return [expr 0>[lsearch [execsql "EXPLAIN $sql"] OpenTemp]]
+}
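+
+# A minimal usage sketch of is_flat, kept as comments rather than numbered
+# tests: a plain single-level subquery is expected to flatten, while an
+# aggregate subquery is expected to be materialized into a transient table.
+#
+#   is_flat {SELECT x FROM (SELECT x FROM t1)}            ;# expected => 1
+#   is_flat {SELECT a FROM (SELECT avg(x) AS a FROM t1)}  ;# expected => 0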
+
+# Check that the flattener works correctly for deeply nested subqueries
+# involving joins.
+#
+do_test select6-8.1 {
+ execsql {
+ BEGIN;
+ CREATE TABLE t3(p,q);
+ INSERT INTO t3 VALUES(1,11);
+ INSERT INTO t3 VALUES(2,22);
+ CREATE TABLE t4(q,r);
+ INSERT INTO t4 VALUES(11,111);
+ INSERT INTO t4 VALUES(22,222);
+ COMMIT;
+ SELECT * FROM t3 NATURAL JOIN t4;
+ }
+} {1 11 111 2 22 222}
+do_test select6-8.2 {
+ execsql {
+ SELECT y, p, q, r FROM
+ (SELECT t1.y AS y, t2.b AS b FROM t1, t2 WHERE t1.x=t2.a) AS m,
+ (SELECT t3.p AS p, t3.q AS q, t4.r AS r FROM t3 NATURAL JOIN t4) as n
+ WHERE y=p
+ }
+} {1 1 11 111 2 2 22 222 2 2 22 222}
+do_test select6-8.3 {
+ is_flat {
+ SELECT y, p, q, r FROM
+ (SELECT t1.y AS y, t2.b AS b FROM t1, t2 WHERE t1.x=t2.a) AS m,
+ (SELECT t3.p AS p, t3.q AS q, t4.r AS r FROM t3 NATURAL JOIN t4) as n
+ WHERE y=p
+ }
+} {1}
+do_test select6-8.4 {
+ execsql {
+ SELECT DISTINCT y, p, q, r FROM
+ (SELECT t1.y AS y, t2.b AS b FROM t1, t2 WHERE t1.x=t2.a) AS m,
+ (SELECT t3.p AS p, t3.q AS q, t4.r AS r FROM t3 NATURAL JOIN t4) as n
+ WHERE y=p
+ }
+} {1 1 11 111 2 2 22 222}
+do_test select6-8.5 {
+ execsql {
+ SELECT * FROM
+ (SELECT y, p, q, r FROM
+ (SELECT t1.y AS y, t2.b AS b FROM t1, t2 WHERE t1.x=t2.a) AS m,
+ (SELECT t3.p AS p, t3.q AS q, t4.r AS r FROM t3 NATURAL JOIN t4) as n
+ WHERE y=p) AS e,
+ (SELECT r AS z FROM t4 WHERE q=11) AS f
+ WHERE e.r=f.z
+ }
+} {1 1 11 111 111}
+do_test select6-8.6 {
+ is_flat {
+ SELECT * FROM
+ (SELECT y, p, q, r FROM
+ (SELECT t1.y AS y, t2.b AS b FROM t1, t2 WHERE t1.x=t2.a) AS m,
+ (SELECT t3.p AS p, t3.q AS q, t4.r AS r FROM t3 NATURAL JOIN t4) as n
+ WHERE y=p) AS e,
+ (SELECT r AS z FROM t4 WHERE q=11) AS f
+ WHERE e.r=f.z
+ }
+} {1}
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/sort.test b/usr/src/cmd/svc/configd/sqlite/test/sort.test
new file mode 100644
index 0000000000..337b15d609
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/sort.test
@@ -0,0 +1,364 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing the ORDER BY clause.
+#
+# $Id: sort.test,v 1.9 2003/04/18 17:45:15 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Create a bunch of data to sort against
+#
+do_test sort-1.0 {
+ set fd [open data.txt w]
+ puts $fd "1\tone\t0\tI\t3.141592653"
+ puts $fd "2\ttwo\t1\tII\t2.15"
+ puts $fd "3\tthree\t1\tIII\t4221.0"
+ puts $fd "4\tfour\t2\tIV\t-0.0013442"
+ puts $fd "5\tfive\t2\tV\t-11"
+ puts $fd "6\tsix\t2\tVI\t0.123"
+ puts $fd "7\tseven\t2\tVII\t123.0"
+ puts $fd "8\teight\t3\tVIII\t-1.6"
+ close $fd
+ execsql {
+ CREATE TABLE t1(
+ n int,
+ v varchar(10),
+ log int,
+ roman varchar(10),
+ flt real
+ );
+ COPY t1 FROM 'data.txt'
+ }
+ file delete data.txt
+ execsql {SELECT count(*) FROM t1}
+} {8}
+
+do_test sort-1.1 {
+ execsql {SELECT n FROM t1 ORDER BY n}
+} {1 2 3 4 5 6 7 8}
+do_test sort-1.1.1 {
+ execsql {SELECT n FROM t1 ORDER BY n ASC}
+} {1 2 3 4 5 6 7 8}
+do_test sort-1.1.2 {
+ execsql {SELECT ALL n FROM t1 ORDER BY n ASC}
+} {1 2 3 4 5 6 7 8}
+do_test sort-1.2 {
+ execsql {SELECT n FROM t1 ORDER BY n DESC}
+} {8 7 6 5 4 3 2 1}
+do_test sort-1.3a {
+ execsql {SELECT v FROM t1 ORDER BY v}
+} {eight five four one seven six three two}
+do_test sort-1.3b {
+ execsql {SELECT n FROM t1 ORDER BY v}
+} {8 5 4 1 7 6 3 2}
+do_test sort-1.4 {
+ execsql {SELECT n FROM t1 ORDER BY v DESC}
+} {2 3 6 7 1 4 5 8}
+do_test sort-1.5 {
+ execsql {SELECT flt FROM t1 ORDER BY flt}
+} {-11 -1.6 -0.0013442 0.123 2.15 3.141592653 123.0 4221.0}
+do_test sort-1.6 {
+ execsql {SELECT flt FROM t1 ORDER BY flt DESC}
+} {4221.0 123.0 3.141592653 2.15 0.123 -0.0013442 -1.6 -11}
+do_test sort-1.7 {
+ execsql {SELECT roman FROM t1 ORDER BY roman}
+} {I II III IV V VI VII VIII}
+do_test sort-1.8 {
+ execsql {SELECT n FROM t1 ORDER BY log, flt}
+} {1 2 3 5 4 6 7 8}
+do_test sort-1.8.1 {
+ execsql {SELECT n FROM t1 ORDER BY log asc, flt}
+} {1 2 3 5 4 6 7 8}
+do_test sort-1.8.2 {
+ execsql {SELECT n FROM t1 ORDER BY log, flt ASC}
+} {1 2 3 5 4 6 7 8}
+do_test sort-1.8.3 {
+ execsql {SELECT n FROM t1 ORDER BY log ASC, flt asc}
+} {1 2 3 5 4 6 7 8}
+do_test sort-1.9 {
+ execsql {SELECT n FROM t1 ORDER BY log, flt DESC}
+} {1 3 2 7 6 4 5 8}
+do_test sort-1.9.1 {
+ execsql {SELECT n FROM t1 ORDER BY log ASC, flt DESC}
+} {1 3 2 7 6 4 5 8}
+do_test sort-1.10 {
+ execsql {SELECT n FROM t1 ORDER BY log DESC, flt}
+} {8 5 4 6 7 2 3 1}
+do_test sort-1.11 {
+ execsql {SELECT n FROM t1 ORDER BY log DESC, flt DESC}
+} {8 7 6 4 5 3 2 1}
+
+# These tests are designed to reach some hard-to-reach places
+# inside the string comparison routines.
+#
+# (Later) The sorting behavior changed in 2.7.0. But we will
+# keep these tests. You can never have too many test cases!
+#
+do_test sort-2.1.1 {
+ execsql {
+ UPDATE t1 SET v='x' || -flt;
+ UPDATE t1 SET v='x-2b' where v=='x-0.123';
+ SELECT v FROM t1 ORDER BY v;
+ }
+} {x-123 x-2.15 x-2b x-3.141592653 x-4221 x0.0013442 x1.6 x11}
+do_test sort-2.1.2 {
+ execsql {
+ SELECT v FROM t1 ORDER BY substr(v,2,999);
+ }
+} {x-123 x-2.15 x-2b x-3.141592653 x-4221 x0.0013442 x1.6 x11}
+do_test sort-2.1.3 {
+ execsql {
+ SELECT v FROM t1 ORDER BY substr(v,2,999)+0.0;
+ }
+} {x-4221 x-123 x-3.141592653 x-2.15 x-2b x0.0013442 x1.6 x11}
+do_test sort-2.1.4 {
+ execsql {
+ SELECT v FROM t1 ORDER BY substr(v,2,999) DESC;
+ }
+} {x11 x1.6 x0.0013442 x-4221 x-3.141592653 x-2b x-2.15 x-123}
+do_test sort-2.1.5 {
+ execsql {
+ SELECT v FROM t1 ORDER BY substr(v,2,999)+0.0 DESC;
+ }
+} {x11 x1.6 x0.0013442 x-2b x-2.15 x-3.141592653 x-123 x-4221}
+
+# This is a bug fix for 2.2.4.
+# Strings are normally mapped to upper-case for a caseless comparison.
+# But this can cause problems for characters in between 'Z' and 'a'.
+#
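+# As an illustrative sketch (assuming ASCII ordering), the backquote used in
+# the data below sits in the gap between the upper-case and lower-case
+# letters, which is the range the upper-case mapping mishandled:
+#
+#   scan Z %c   ;# => 90
+#   scan ` %c   ;# => 96  (between 'Z' and 'a')
+#   scan a %c   ;# => 97
+#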
+do_test sort-3.1 {
+ execsql {
+ CREATE TABLE t2(a,b);
+ INSERT INTO t2 VALUES('AGLIENTU',1);
+ INSERT INTO t2 VALUES('AGLIE`',2);
+ INSERT INTO t2 VALUES('AGNA',3);
+ SELECT a, b FROM t2 ORDER BY a;
+ }
+} {AGLIENTU 1 AGLIE` 2 AGNA 3}
+do_test sort-3.2 {
+ execsql {
+ SELECT a, b FROM t2 ORDER BY a DESC;
+ }
+} {AGNA 3 AGLIE` 2 AGLIENTU 1}
+do_test sort-3.3 {
+ execsql {
+ DELETE FROM t2;
+ INSERT INTO t2 VALUES('aglientu',1);
+ INSERT INTO t2 VALUES('aglie`',2);
+ INSERT INTO t2 VALUES('agna',3);
+ SELECT a, b FROM t2 ORDER BY a;
+ }
+} {aglie` 2 aglientu 1 agna 3}
+do_test sort-3.4 {
+ execsql {
+ SELECT a, b FROM t2 ORDER BY a DESC;
+ }
+} {agna 3 aglientu 1 aglie` 2}
+
+# Version 2.7.0 testing.
+#
+do_test sort-4.1 {
+ execsql {
+ INSERT INTO t1 VALUES(9,'x2.7',3,'IX',4.0e5);
+ INSERT INTO t1 VALUES(10,'x5.0e10',3,'X',-4.0e5);
+ INSERT INTO t1 VALUES(11,'x-4.0e9',3,'XI',4.1e4);
+ INSERT INTO t1 VALUES(12,'x01234567890123456789',3,'XII',-4.2e3);
+ SELECT n FROM t1 ORDER BY n;
+ }
+} {1 2 3 4 5 6 7 8 9 10 11 12}
+do_test sort-4.2 {
+ execsql {
+ SELECT n||'' FROM t1 ORDER BY 1;
+ }
+} {1 10 11 12 2 3 4 5 6 7 8 9}
+do_test sort-4.3 {
+ execsql {
+ SELECT n+0 FROM t1 ORDER BY 1;
+ }
+} {1 2 3 4 5 6 7 8 9 10 11 12}
+do_test sort-4.4 {
+ execsql {
+ SELECT n||'' FROM t1 ORDER BY 1 DESC;
+ }
+} {9 8 7 6 5 4 3 2 12 11 10 1}
+do_test sort-4.5 {
+ execsql {
+ SELECT n+0 FROM t1 ORDER BY 1 DESC;
+ }
+} {12 11 10 9 8 7 6 5 4 3 2 1}
+do_test sort-4.6 {
+ execsql {
+ SELECT v FROM t1 ORDER BY 1;
+ }
+} {x-123 x-2.15 x-2b x-3.141592653 x-4.0e9 x-4221 x0.0013442 x01234567890123456789 x1.6 x11 x2.7 x5.0e10}
+do_test sort-4.7 {
+ execsql {
+ SELECT v FROM t1 ORDER BY 1 DESC;
+ }
+} {x5.0e10 x2.7 x11 x1.6 x01234567890123456789 x0.0013442 x-4221 x-4.0e9 x-3.141592653 x-2b x-2.15 x-123}
+do_test sort-4.8 {
+ execsql {
+ SELECT substr(v,2,99) FROM t1 ORDER BY 1;
+ }
+} {-123 -2.15 -2b -3.141592653 -4.0e9 -4221 0.0013442 01234567890123456789 1.6 11 2.7 5.0e10}
+#do_test sort-4.9 {
+# execsql {
+# SELECT substr(v,2,99)+0.0 FROM t1 ORDER BY 1;
+# }
+#} {-4000000000 -4221 -123 -3.141592653 -2.15 -2 0.0013442 1.6 2.7 11 50000000000 1.23456789012346e+18}
+
+do_test sort-5.1 {
+ execsql {
+ create table t3(a,b);
+ insert into t3 values(5,NULL);
+ insert into t3 values(6,NULL);
+ insert into t3 values(3,NULL);
+ insert into t3 values(4,'cd');
+ insert into t3 values(1,'ab');
+ insert into t3 values(2,NULL);
+ select a from t3 order by b, a;
+ }
+} {2 3 5 6 1 4}
+do_test sort-5.2 {
+ execsql {
+ select a from t3 order by b, a desc;
+ }
+} {6 5 3 2 1 4}
+do_test sort-5.3 {
+ execsql {
+ select a from t3 order by b desc, a;
+ }
+} {4 1 2 3 5 6}
+do_test sort-5.4 {
+ execsql {
+ select a from t3 order by b desc, a desc;
+ }
+} {4 1 6 5 3 2}
+
+do_test sort-6.1 {
+ execsql {
+ create index i3 on t3(b,a);
+ select a from t3 order by b, a;
+ }
+} {2 3 5 6 1 4}
+do_test sort-6.2 {
+ execsql {
+ select a from t3 order by b, a desc;
+ }
+} {6 5 3 2 1 4}
+do_test sort-6.3 {
+ execsql {
+ select a from t3 order by b desc, a;
+ }
+} {4 1 2 3 5 6}
+do_test sort-6.4 {
+ execsql {
+ select a from t3 order by b desc, a desc;
+ }
+} {4 1 6 5 3 2}
+
+do_test sort-7.1 {
+ execsql {
+ CREATE TABLE t4(
+ a INTEGER,
+ b VARCHAR(30)
+ );
+ INSERT INTO t4 VALUES(1,1);
+ INSERT INTO t4 VALUES(2,2);
+ INSERT INTO t4 VALUES(11,11);
+ INSERT INTO t4 VALUES(12,12);
+ SELECT a FROM t4 ORDER BY 1;
+ }
+} {1 2 11 12}
+do_test sort-7.2 {
+ execsql {
+ SELECT b FROM t4 ORDER BY 1
+ }
+} {1 11 12 2}
+do_test sort-7.3 {
+ execsql {
+ CREATE VIEW v4 AS SELECT * FROM t4;
+ SELECT a FROM v4 ORDER BY 1;
+ }
+} {1 2 11 12}
+do_test sort-7.4 {
+ execsql {
+ SELECT b FROM v4 ORDER BY 1;
+ }
+} {1 11 12 2}
+do_test sort-7.5 {
+ execsql {
+ SELECT a FROM t4 UNION SELECT a FROM v4 ORDER BY 1;
+ }
+} {1 2 11 12}
+do_test sort-7.6 {
+ execsql {
+ SELECT b FROM t4 UNION SELECT a FROM v4 ORDER BY 1;
+ }
+} {1 2 11 12}
+do_test sort-7.7 {
+ execsql {
+ SELECT a FROM t4 UNION SELECT b FROM v4 ORDER BY 1;
+ }
+} {1 2 11 12}
+do_test sort-7.8 {
+ execsql {
+ SELECT b FROM t4 UNION SELECT b FROM v4 ORDER BY 1;
+ }
+} {1 11 12 2}
+do_test sort-7.9 {
+ execsql {
+ SELECT b FROM t4 UNION SELECT b FROM v4 ORDER BY 1 COLLATE numeric;
+ }
+} {1 2 11 12}
+do_test sort-7.10 {
+ execsql {
+ SELECT b FROM t4 UNION SELECT b FROM v4 ORDER BY 1 COLLATE integer;
+ }
+} {1 2 11 12}
+do_test sort-7.11 {
+ execsql {
+ SELECT b FROM t4 UNION SELECT b FROM v4 ORDER BY 1 COLLATE text;
+ }
+} {1 11 12 2}
+do_test sort-7.12 {
+ execsql {
+ SELECT b FROM t4 UNION SELECT b FROM v4 ORDER BY 1 COLLATE blob;
+ }
+} {1 11 12 2}
+do_test sort-7.13 {
+ execsql {
+ SELECT b FROM t4 UNION SELECT b FROM v4 ORDER BY 1 COLLATE clob;
+ }
+} {1 11 12 2}
+do_test sort-7.14 {
+ execsql {
+ SELECT b FROM t4 UNION SELECT b FROM v4 ORDER BY 1 COLLATE varchar;
+ }
+} {1 11 12 2}
+
+# Ticket #297
+#
+do_test sort-8.1 {
+ execsql {
+ CREATE TABLE t5(a real, b text);
+ INSERT INTO t5 VALUES(100,'A1');
+ INSERT INTO t5 VALUES(100.0,'A2');
+ SELECT * FROM t5 ORDER BY a, b;
+ }
+} {100 A1 100.0 A2}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/subselect.test b/usr/src/cmd/svc/configd/sqlite/test/subselect.test
new file mode 100644
index 0000000000..85b3911935
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/subselect.test
@@ -0,0 +1,158 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing SELECT statements that are part of
+# expressions.
+#
+# $Id: subselect.test,v 1.7 2002/07/15 18:55:26 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Basic sanity checking. Try a simple subselect.
+#
+do_test subselect-1.1 {
+ execsql {
+ CREATE TABLE t1(a int, b int);
+ INSERT INTO t1 VALUES(1,2);
+ INSERT INTO t1 VALUES(3,4);
+ INSERT INTO t1 VALUES(5,6);
+ }
+ execsql {SELECT * FROM t1 WHERE a = (SELECT count(*) FROM t1)}
+} {3 4}
+
+# Try a select with more than one result column.
+#
+do_test subselect-1.2 {
+ set v [catch {execsql {SELECT * FROM t1 WHERE a = (SELECT * FROM t1)}} msg]
+ lappend v $msg
+} {1 {only a single result allowed for a SELECT that is part of an expression}}
+
+# A subselect without an aggregate.
+#
+do_test subselect-1.3a {
+ execsql {SELECT b from t1 where a = (SELECT a FROM t1 WHERE b=2)}
+} {2}
+do_test subselect-1.3b {
+ execsql {SELECT b from t1 where a = (SELECT a FROM t1 WHERE b=4)}
+} {4}
+do_test subselect-1.3c {
+ execsql {SELECT b from t1 where a = (SELECT a FROM t1 WHERE b=6)}
+} {6}
+do_test subselect-1.3d {
+ execsql {SELECT b from t1 where a = (SELECT a FROM t1 WHERE b=8)}
+} {}
+
+# What if the subselect doesn't return any value? We should get
+# NULL as the result. Check it out.
+#
+do_test subselect-1.4 {
+ execsql {SELECT b from t1 where a = coalesce((SELECT a FROM t1 WHERE b=5),1)}
+} {2}
+
+# Try multiple subselects within a single expression.
+#
+do_test subselect-1.5 {
+ execsql {
+ CREATE TABLE t2(x int, y int);
+ INSERT INTO t2 VALUES(1,2);
+ INSERT INTO t2 VALUES(2,4);
+ INSERT INTO t2 VALUES(3,8);
+ INSERT INTO t2 VALUES(4,16);
+ }
+ execsql {
+ SELECT y from t2
+ WHERE x = (SELECT sum(b) FROM t1 where a notnull) - (SELECT sum(a) FROM t1)
+ }
+} {8}
+
+# Try something useful. Delete every entry from t2 where the
+# x value is less than half of the maximum.
+#
+do_test subselect-1.6 {
+ execsql {DELETE FROM t2 WHERE x < 0.5*(SELECT max(x) FROM t2)}
+ execsql {SELECT x FROM t2 ORDER BY x}
+} {2 3 4}
+
+# Make sure sorting works for SELECTs that are used as scalar expressions.
+#
+do_test subselect-2.1 {
+ execsql {
+ SELECT (SELECT a FROM t1 ORDER BY a), (SELECT a FROM t1 ORDER BY a DESC)
+ }
+} {1 5}
+do_test subselect-2.2 {
+ execsql {
+ SELECT 1 IN (SELECT a FROM t1 ORDER BY a);
+ }
+} {1}
+do_test subselect-2.3 {
+ execsql {
+ SELECT 2 IN (SELECT a FROM t1 ORDER BY a DESC);
+ }
+} {0}
+
+# Verify that the ORDER BY clause is honored in a subquery.
+#
+do_test subselect-3.1 {
+ execsql {
+ CREATE TABLE t3(x int);
+ INSERT INTO t3 SELECT a FROM t1 UNION ALL SELECT b FROM t1;
+ SELECT * FROM t3 ORDER BY x;
+ }
+} {1 2 3 4 5 6}
+do_test subselect-3.2 {
+ execsql {
+ SELECT sum(x) FROM (SELECT x FROM t3 ORDER BY x LIMIT 2);
+ }
+} {3}
+do_test subselect-3.3 {
+ execsql {
+ SELECT sum(x) FROM (SELECT x FROM t3 ORDER BY x DESC LIMIT 2);
+ }
+} {11}
+do_test subselect-3.4 {
+ execsql {
+ SELECT (SELECT x FROM t3 ORDER BY x);
+ }
+} {1}
+do_test subselect-3.5 {
+ execsql {
+ SELECT (SELECT x FROM t3 ORDER BY x DESC);
+ }
+} {6}
+do_test subselect-3.6 {
+ execsql {
+ SELECT (SELECT x FROM t3 ORDER BY x LIMIT 1);
+ }
+} {1}
+do_test subselect-3.7 {
+ execsql {
+ SELECT (SELECT x FROM t3 ORDER BY x DESC LIMIT 1);
+ }
+} {6}
+do_test subselect-3.8 {
+ execsql {
+ SELECT (SELECT x FROM t3 ORDER BY x LIMIT 1 OFFSET 2);
+ }
+} {3}
+do_test subselect-3.9 {
+ execsql {
+ SELECT (SELECT x FROM t3 ORDER BY x DESC LIMIT 1 OFFSET 2);
+ }
+} {4}
+
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/table.test b/usr/src/cmd/svc/configd/sqlite/test/table.test
new file mode 100644
index 0000000000..f04b2a5bad
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/table.test
@@ -0,0 +1,506 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing the CREATE TABLE statement.
+#
+# $Id: table.test,v 1.22 2003/01/29 18:46:54 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Create a basic table and verify it is added to sqlite_master
+#
+do_test table-1.1 {
+ execsql {
+ CREATE TABLE test1 (
+ one varchar(10),
+ two text
+ )
+ }
+ execsql {
+ SELECT sql FROM sqlite_master WHERE type!='meta'
+ }
+} {{CREATE TABLE test1 (
+ one varchar(10),
+ two text
+ )}}
+
+
+# Verify the other fields of the sqlite_master file.
+#
+do_test table-1.3 {
+ execsql {SELECT name, tbl_name, type FROM sqlite_master WHERE type!='meta'}
+} {test1 test1 table}
+
+# Close and reopen the database. Verify that everything is
+# still the same.
+#
+do_test table-1.4 {
+ db close
+ sqlite db test.db
+ execsql {SELECT name, tbl_name, type from sqlite_master WHERE type!='meta'}
+} {test1 test1 table}
+
+# Drop the table and make sure it disappears.
+#
+do_test table-1.5 {
+ execsql {DROP TABLE test1}
+ execsql {SELECT * FROM sqlite_master WHERE type!='meta'}
+} {}
+
+# Close and reopen the database. Verify that the table is
+# still gone.
+#
+do_test table-1.6 {
+ db close
+ sqlite db test.db
+ execsql {SELECT name FROM sqlite_master WHERE type!='meta'}
+} {}
+
+# Repeat the above steps, but this time quote the table name.
+#
+do_test table-1.10 {
+ execsql {CREATE TABLE "create" (f1 int)}
+ execsql {SELECT name FROM sqlite_master WHERE type!='meta'}
+} {create}
+do_test table-1.11 {
+ execsql {DROP TABLE "create"}
+ execsql {SELECT name FROM "sqlite_master" WHERE type!='meta'}
+} {}
+do_test table-1.12 {
+ execsql {CREATE TABLE test1("f1 ho" int)}
+ execsql {SELECT name as "X" FROM sqlite_master WHERE type!='meta'}
+} {test1}
+do_test table-1.13 {
+ execsql {DROP TABLE "TEST1"}
+ execsql {SELECT name FROM "sqlite_master" WHERE type!='meta'}
+} {}
+
+
+
+# Verify that we cannot make two tables with the same name
+#
+do_test table-2.1 {
+ execsql {CREATE TABLE TEST2(one text)}
+ set v [catch {execsql {CREATE TABLE test2(two text)}} msg]
+ lappend v $msg
+} {1 {table test2 already exists}}
+do_test table-2.1b {
+ set v [catch {execsql {CREATE TABLE sqlite_master(two text)}} msg]
+ lappend v $msg
+} {1 {table sqlite_master already exists}}
+do_test table-2.1c {
+ db close
+ sqlite db test.db
+ set v [catch {execsql {CREATE TABLE sqlite_master(two text)}} msg]
+ lappend v $msg
+} {1 {table sqlite_master already exists}}
+do_test table-2.1d {
+ execsql {DROP TABLE test2; SELECT name FROM sqlite_master WHERE type!='meta'}
+} {}
+
+# Verify that we cannot make a table with the same name as an index
+#
+do_test table-2.2a {
+ execsql {CREATE TABLE test2(one text); CREATE INDEX test3 ON test2(one)}
+ set v [catch {execsql {CREATE TABLE test3(two text)}} msg]
+ lappend v $msg
+} {1 {there is already an index named test3}}
+do_test table-2.2b {
+ db close
+ sqlite db test.db
+ set v [catch {execsql {CREATE TABLE test3(two text)}} msg]
+ lappend v $msg
+} {1 {there is already an index named test3}}
+do_test table-2.2c {
+ execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name}
+} {test2 test3}
+do_test table-2.2d {
+ execsql {DROP INDEX test3}
+ set v [catch {execsql {CREATE TABLE test3(two text)}} msg]
+ lappend v $msg
+} {0 {}}
+do_test table-2.2e {
+ execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name}
+} {test2 test3}
+do_test table-2.2f {
+ execsql {DROP TABLE test2; DROP TABLE test3}
+ execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name}
+} {}
+
+# Create a table with many field names
+#
+set big_table \
+{CREATE TABLE big(
+ f1 varchar(20),
+ f2 char(10),
+ f3 varchar(30) primary key,
+ f4 text,
+ f5 text,
+ f6 text,
+ f7 text,
+ f8 text,
+ f9 text,
+ f10 text,
+ f11 text,
+ f12 text,
+ f13 text,
+ f14 text,
+ f15 text,
+ f16 text,
+ f17 text,
+ f18 text,
+ f19 text,
+ f20 text
+)}
+do_test table-3.1 {
+ execsql $big_table
+ execsql {SELECT sql FROM sqlite_master WHERE type=='table'}
+} \{$big_table\}
+do_test table-3.2 {
+ set v [catch {execsql {CREATE TABLE BIG(xyz foo)}} msg]
+ lappend v $msg
+} {1 {table BIG already exists}}
+do_test table-3.3 {
+ set v [catch {execsql {CREATE TABLE biG(xyz foo)}} msg]
+ lappend v $msg
+} {1 {table biG already exists}}
+do_test table-3.4 {
+ set v [catch {execsql {CREATE TABLE bIg(xyz foo)}} msg]
+ lappend v $msg
+} {1 {table bIg already exists}}
+do_test table-3.5 {
+ db close
+ sqlite db test.db
+ set v [catch {execsql {CREATE TABLE Big(xyz foo)}} msg]
+ lappend v $msg
+} {1 {table Big already exists}}
+do_test table-3.6 {
+ execsql {DROP TABLE big}
+ execsql {SELECT name FROM sqlite_master WHERE type!='meta'}
+} {}
+
+# Try creating large numbers of tables
+#
+set r {}
+for {set i 1} {$i<=100} {incr i} {
+ lappend r [format test%03d $i]
+}
+do_test table-4.1 {
+ for {set i 1} {$i<=100} {incr i} {
+ set sql "CREATE TABLE [format test%03d $i] ("
+ for {set k 1} {$k<$i} {incr k} {
+ append sql "field$k text,"
+ }
+ append sql "last_field text)"
+ execsql $sql
+ }
+ execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name}
+} $r
+do_test table-4.1b {
+ db close
+ sqlite db test.db
+ execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name}
+} $r
+
+# Drop the even numbered tables
+#
+set r {}
+for {set i 1} {$i<=100} {incr i 2} {
+ lappend r [format test%03d $i]
+}
+do_test table-4.2 {
+ for {set i 2} {$i<=100} {incr i 2} {
+ # if {$i==38} {execsql {pragma vdbe_trace=on}}
+ set sql "DROP TABLE [format TEST%03d $i]"
+ execsql $sql
+ }
+ execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name}
+} $r
+#exit
+
+# Drop the odd numbered tables
+#
+do_test table-4.3 {
+ for {set i 1} {$i<=100} {incr i 2} {
+ set sql "DROP TABLE [format test%03d $i]"
+ execsql $sql
+ }
+ execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name}
+} {}
+
+# Try to drop a table that does not exist
+#
+do_test table-5.1 {
+ set v [catch {execsql {DROP TABLE test009}} msg]
+ lappend v $msg
+} {1 {no such table: test009}}
+
+# Try to drop sqlite_master
+#
+do_test table-5.2 {
+ set v [catch {execsql {DROP TABLE sqlite_master}} msg]
+ lappend v $msg
+} {1 {table sqlite_master may not be dropped}}
+
+# Make sure an EXPLAIN does not really create a new table
+#
+do_test table-5.3 {
+ execsql {EXPLAIN CREATE TABLE test1(f1 int)}
+ execsql {SELECT name FROM sqlite_master WHERE type!='meta'}
+} {}
+
+# Make sure an EXPLAIN does not really drop an existing table
+#
+do_test table-5.4 {
+ execsql {CREATE TABLE test1(f1 int)}
+ execsql {EXPLAIN DROP TABLE test1}
+ execsql {SELECT name FROM sqlite_master WHERE type!='meta'}
+} {test1}
+
+# Create a table with a goofy name
+#
+#do_test table-6.1 {
+# execsql {CREATE TABLE 'Spaces In This Name!'(x int)}
+# execsql {INSERT INTO 'spaces in this name!' VALUES(1)}
+# set list [glob -nocomplain testdb/spaces*.tbl]
+#} {testdb/spaces+in+this+name+.tbl}
+
+# Try using keywords as table names or column names.
+#
+do_test table-7.1 {
+ set v [catch {execsql {
+ CREATE TABLE weird(
+ desc text,
+ asc text,
+ explain int,
+ [14_vac] boolean,
+ fuzzy_dog_12 varchar(10),
+ begin blob,
+ end clob
+ )
+ }} msg]
+ lappend v $msg
+} {0 {}}
+do_test table-7.2 {
+ execsql {
+ INSERT INTO weird VALUES('a','b',9,0,'xyz','hi','y''all');
+ SELECT * FROM weird;
+ }
+} {a b 9 0 xyz hi y'all}
+do_test table-7.3 {
+ execsql2 {
+ SELECT * FROM weird;
+ }
+} {desc a asc b explain 9 14_vac 0 fuzzy_dog_12 xyz begin hi end y'all}
+
+# Try out the CREATE TABLE AS syntax
+#
+do_test table-8.1 {
+ execsql2 {
+ CREATE TABLE t2 AS SELECT * FROM weird;
+ SELECT * FROM t2;
+ }
+} {desc a asc b explain 9 14_vac 0 fuzzy_dog_12 xyz begin hi end y'all}
+do_test table-8.1.1 {
+ execsql {
+ SELECT sql FROM sqlite_master WHERE name='t2';
+ }
+} {{CREATE TABLE t2(
+ 'desc',
+ 'asc',
+ 'explain',
+ '14_vac',
+ fuzzy_dog_12,
+ 'begin',
+ 'end'
+)}}
+do_test table-8.2 {
+ execsql {
+ CREATE TABLE 't3''xyz'(a,b,c);
+ INSERT INTO [t3'xyz] VALUES(1,2,3);
+ SELECT * FROM [t3'xyz];
+ }
+} {1 2 3}
+do_test table-8.3 {
+ execsql2 {
+ CREATE TABLE [t4'abc] AS SELECT count(*) as cnt, max(b+c) FROM [t3'xyz];
+ SELECT * FROM [t4'abc];
+ }
+} {cnt 1 max(b+c) 5}
+do_test table-8.3.1 {
+ execsql {
+ SELECT sql FROM sqlite_master WHERE name='t4''abc'
+ }
+} {{CREATE TABLE 't4''abc'(cnt,'max(b+c)')}}
+do_test table-8.4 {
+ execsql2 {
+ CREATE TEMPORARY TABLE t5 AS SELECT count(*) AS [y'all] FROM [t3'xyz];
+ SELECT * FROM t5;
+ }
+} {y'all 1}
+do_test table-8.5 {
+ db close
+ sqlite db test.db
+ execsql2 {
+ SELECT * FROM [t4'abc];
+ }
+} {cnt 1 max(b+c) 5}
+do_test table-8.6 {
+ execsql2 {
+ SELECT * FROM t2;
+ }
+} {desc a asc b explain 9 14_vac 0 fuzzy_dog_12 xyz begin hi end y'all}
+do_test table-8.7 {
+ catchsql {
+ SELECT * FROM t5;
+ }
+} {1 {no such table: t5}}
+do_test table-8.8 {
+ catchsql {
+ CREATE TABLE t5 AS SELECT * FROM no_such_table;
+ }
+} {1 {no such table: no_such_table}}
+
+# Make sure we cannot have duplicate column names within a table.
+#
+do_test table-9.1 {
+ catchsql {
+ CREATE TABLE t6(a,b,a);
+ }
+} {1 {duplicate column name: a}}
+
+# Check the foreign key syntax.
+#
+do_test table-10.1 {
+ catchsql {
+ CREATE TABLE t6(a REFERENCES t4(a) NOT NULL);
+ INSERT INTO t6 VALUES(NULL);
+ }
+} {1 {t6.a may not be NULL}}
+do_test table-10.2 {
+ catchsql {
+ DROP TABLE t6;
+ CREATE TABLE t6(a REFERENCES t4(a) MATCH PARTIAL);
+ }
+} {0 {}}
+do_test table-10.3 {
+ catchsql {
+ DROP TABLE t6;
+ CREATE TABLE t6(a REFERENCES t4 MATCH FULL ON DELETE SET NULL NOT NULL);
+ }
+} {0 {}}
+do_test table-10.4 {
+ catchsql {
+ DROP TABLE t6;
+ CREATE TABLE t6(a REFERENCES t4 MATCH FULL ON UPDATE SET DEFAULT DEFAULT 1);
+ }
+} {0 {}}
+do_test table-10.5 {
+ catchsql {
+ DROP TABLE t6;
+ CREATE TABLE t6(a NOT NULL NOT DEFERRABLE INITIALLY IMMEDIATE);
+ }
+} {0 {}}
+do_test table-10.6 {
+ catchsql {
+ DROP TABLE t6;
+ CREATE TABLE t6(a NOT NULL DEFERRABLE INITIALLY DEFERRED);
+ }
+} {0 {}}
+do_test table-10.7 {
+ catchsql {
+ DROP TABLE t6;
+ CREATE TABLE t6(a,
+ FOREIGN KEY (a) REFERENCES t4(b) DEFERRABLE INITIALLY DEFERRED
+ );
+ }
+} {0 {}}
+do_test table-10.8 {
+ catchsql {
+ DROP TABLE t6;
+ CREATE TABLE t6(a,b,c,
+ FOREIGN KEY (b,c) REFERENCES t4(x,y) MATCH PARTIAL
+ ON UPDATE SET NULL ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED
+ );
+ }
+} {0 {}}
+do_test table-10.9 {
+ catchsql {
+ DROP TABLE t6;
+ CREATE TABLE t6(a,b,c,
+ FOREIGN KEY (b,c) REFERENCES t4(x)
+ );
+ }
+} {1 {number of columns in foreign key does not match the number of columns in the referenced table}}
+do_test table-10.10 {
+ catchsql {DROP TABLE t6}
+ catchsql {
+ CREATE TABLE t6(a,b,c,
+ FOREIGN KEY (b,c) REFERENCES t4(x,y,z)
+ );
+ }
+} {1 {number of columns in foreign key does not match the number of columns in the referenced table}}
+do_test table-10.11 {
+ catchsql {DROP TABLE t6}
+ catchsql {
+ CREATE TABLE t6(a,b, c REFERENCES t4(x,y));
+ }
+} {1 {foreign key on c should reference only one column of table t4}}
+do_test table-10.12 {
+ catchsql {DROP TABLE t6}
+ catchsql {
+ CREATE TABLE t6(a,b,c,
+ FOREIGN KEY (b,x) REFERENCES t4(x,y)
+ );
+ }
+} {1 {unknown column "x" in foreign key definition}}
+do_test table-10.13 {
+ catchsql {DROP TABLE t6}
+ catchsql {
+ CREATE TABLE t6(a,b,c,
+ FOREIGN KEY (x,b) REFERENCES t4(x,y)
+ );
+ }
+} {1 {unknown column "x" in foreign key definition}}
+
+
+# Test for the "typeof" function.
+#
+do_test table-11.1 {
+ execsql {
+ CREATE TABLE t7(
+ a integer primary key,
+ b number(5,10),
+ c character varying (8),
+ d VARCHAR(9),
+ e clob,
+ f BLOB,
+ g Text,
+ h
+ );
+ INSERT INTO t7(a) VALUES(1);
+ SELECT typeof(a), typeof(b), typeof(c), typeof(d),
+ typeof(e), typeof(f), typeof(g), typeof(h)
+ FROM t7 LIMIT 1;
+ }
+} {numeric numeric text text text text text numeric}
+do_test table-11.2 {
+ execsql {
+ SELECT typeof(a+b), typeof(a||b), typeof(c+d), typeof(c||d)
+ FROM t7 LIMIT 1;
+ }
+} {numeric text numeric text}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/tableapi.test b/usr/src/cmd/svc/configd/sqlite/test/tableapi.test
new file mode 100644
index 0000000000..a41fd55695
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/tableapi.test
@@ -0,0 +1,204 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing the sqlite_exec_printf() and
+# sqlite_get_table_printf() APIs.
+#
+# $Id: tableapi.test,v 1.7 2004/02/02 12:29:25 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+do_test tableapi-1.0 {
+ set ::dbx [sqlite_open test.db]
+ catch {sqlite_exec_printf $::dbx {DROP TABLE xyz} {}}
+ sqlite_exec_printf $::dbx {CREATE TABLE %s(a int, b text)} xyz
+} {0 {}}
+do_test tableapi-1.1 {
+ sqlite_exec_printf $::dbx {
+ INSERT INTO xyz VALUES(1,'%q')
+ } {Hi Y'all}
+} {0 {}}
+do_test tableapi-1.2 {
+ sqlite_exec_printf $::dbx {SELECT * FROM xyz} {}
+} {0 {a b 1 {Hi Y'all}}}
+
+do_test tableapi-2.1 {
+ sqlite_get_table_printf $::dbx {
+ BEGIN TRANSACTION;
+ SELECT * FROM xyz WHERE b='%q'
+ } {Hi Y'all}
+} {0 1 2 a b 1 {Hi Y'all}}
+do_test tableapi-2.2 {
+ sqlite_get_table_printf $::dbx {
+ SELECT * FROM xyz
+ } {}
+} {0 1 2 a b 1 {Hi Y'all}}
+do_test tableapi-2.3 {
+ for {set i 2} {$i<=50} {incr i} {
+ sqlite_get_table_printf $::dbx \
+ "INSERT INTO xyz VALUES($i,'(%s)')" $i
+ }
+ sqlite_get_table_printf $::dbx {
+ SELECT * FROM xyz ORDER BY a
+ } {}
+} {0 50 2 a b 1 {Hi Y'all} 2 (2) 3 (3) 4 (4) 5 (5) 6 (6) 7 (7) 8 (8) 9 (9) 10 (10) 11 (11) 12 (12) 13 (13) 14 (14) 15 (15) 16 (16) 17 (17) 18 (18) 19 (19) 20 (20) 21 (21) 22 (22) 23 (23) 24 (24) 25 (25) 26 (26) 27 (27) 28 (28) 29 (29) 30 (30) 31 (31) 32 (32) 33 (33) 34 (34) 35 (35) 36 (36) 37 (37) 38 (38) 39 (39) 40 (40) 41 (41) 42 (42) 43 (43) 44 (44) 45 (45) 46 (46) 47 (47) 48 (48) 49 (49) 50 (50)}
+do_test tableapi-2.3.1 {
+ sqlite_get_table_printf $::dbx {
+ SELECT * FROM xyz WHERE a>49 ORDER BY a
+ } {}
+} {0 1 2 a b 50 (50)}
+do_test tableapi-2.3.2 {
+ sqlite_get_table_printf $::dbx {
+ SELECT * FROM xyz WHERE a>47 ORDER BY a
+ } {}
+} {0 3 2 a b 48 (48) 49 (49) 50 (50)}
+do_test tableapi-2.4 {
+ set manyquote ''''''''
+ append manyquote $manyquote
+ append manyquote $manyquote
+ append manyquote $manyquote
+ append manyquote $manyquote
+ append manyquote $manyquote
+ append manyquote $manyquote
+ set ::big_str "$manyquote Hello $manyquote"
+ sqlite_get_table_printf $::dbx {
+ INSERT INTO xyz VALUES(51,'%q')
+ } $::big_str
+} {0 0 0}
+do_test tableapi-2.5 {
+ sqlite_get_table_printf $::dbx {
+ SELECT * FROM xyz WHERE a>49 ORDER BY a;
+ } {}
+} "0 2 2 a b 50 (50) 51 \173$::big_str\175"
+do_test tableapi-2.6 {
+ sqlite_get_table_printf $::dbx {
+ INSERT INTO xyz VALUES(52,NULL)
+ } {}
+ sqlite_get_table_printf $::dbx {
+ SELECT * FROM xyz WHERE a IN (42,50,52) ORDER BY a DESC
+ } {}
+} {0 3 2 a b 52 NULL 50 (50) 42 (42)}
+do_test tableapi-2.7 {
+ sqlite_get_table_printf $::dbx {
+ SELECT * FROM xyz WHERE a>1000
+ } {}
+} {0 0 0}
+
+# Repeat all tests with the empty_result_callbacks pragma turned on
+#
+do_test tableapi-3.1 {
+ sqlite_get_table_printf $::dbx {
+ ROLLBACK;
+ PRAGMA empty_result_callbacks = ON;
+ SELECT * FROM xyz WHERE b='%q'
+ } {Hi Y'all}
+} {0 1 2 a b 1 {Hi Y'all}}
+do_test tableapi-3.2 {
+ sqlite_get_table_printf $::dbx {
+ SELECT * FROM xyz
+ } {}
+} {0 1 2 a b 1 {Hi Y'all}}
+do_test tableapi-3.3 {
+ for {set i 2} {$i<=50} {incr i} {
+ sqlite_get_table_printf $::dbx \
+ "INSERT INTO xyz VALUES($i,'(%s)')" $i
+ }
+ sqlite_get_table_printf $::dbx {
+ SELECT * FROM xyz ORDER BY a
+ } {}
+} {0 50 2 a b 1 {Hi Y'all} 2 (2) 3 (3) 4 (4) 5 (5) 6 (6) 7 (7) 8 (8) 9 (9) 10 (10) 11 (11) 12 (12) 13 (13) 14 (14) 15 (15) 16 (16) 17 (17) 18 (18) 19 (19) 20 (20) 21 (21) 22 (22) 23 (23) 24 (24) 25 (25) 26 (26) 27 (27) 28 (28) 29 (29) 30 (30) 31 (31) 32 (32) 33 (33) 34 (34) 35 (35) 36 (36) 37 (37) 38 (38) 39 (39) 40 (40) 41 (41) 42 (42) 43 (43) 44 (44) 45 (45) 46 (46) 47 (47) 48 (48) 49 (49) 50 (50)}
+do_test tableapi-3.3.1 {
+ sqlite_get_table_printf $::dbx {
+ SELECT * FROM xyz WHERE a>49 ORDER BY a
+ } {}
+} {0 1 2 a b 50 (50)}
+do_test tableapi-3.3.2 {
+ sqlite_get_table_printf $::dbx {
+ SELECT * FROM xyz WHERE a>47 ORDER BY a
+ } {}
+} {0 3 2 a b 48 (48) 49 (49) 50 (50)}
+do_test tableapi-3.4 {
+ sqlite_get_table_printf $::dbx {
+ INSERT INTO xyz VALUES(51,'%q')
+ } $::big_str
+} {0 0 0}
+do_test tableapi-3.5 {
+ sqlite_get_table_printf $::dbx {
+ SELECT * FROM xyz WHERE a>49 ORDER BY a;
+ } {}
+} "0 2 2 a b 50 (50) 51 \173$::big_str\175"
+do_test tableapi-3.6 {
+ sqlite_get_table_printf $::dbx {
+ INSERT INTO xyz VALUES(52,NULL)
+ } {}
+ sqlite_get_table_printf $::dbx {
+ SELECT * FROM xyz WHERE a IN (42,50,52) ORDER BY a DESC
+ } {}
+} {0 3 2 a b 52 NULL 50 (50) 42 (42)}
+do_test tableapi-3.7 {
+ sqlite_get_table_printf $::dbx {
+ SELECT * FROM xyz WHERE a>1000
+ } {}
+} {0 0 2 a b}
+
+do_test tableapi-4.1 {
+ set rc [catch {
+ sqlite_get_table_printf $::dbx {
+ SELECT * FROM xyz; SELECT * FROM sqlite_master
+ } {}
+ } msg]
+ concat $rc $msg
+} {0 1 {sqlite_get_table() called with two or more incompatible queries}}
+
+# A report on the mailing list says that the sqlite_get_table() API fails
+# on queries involving more than 40 columns. The following code attempts
+# to test that complaint
+#
+do_test tableapi-5.1 {
+ set sql "CREATE TABLE t2("
+ set sep ""
+ for {set i 1} {$i<=100} {incr i} {
+ append sql ${sep}x$i
+ set sep ,
+ }
+ append sql )
+ sqlite_get_table_printf $::dbx $sql {}
+ set sql "INSERT INTO t2 VALUES("
+ set sep ""
+ for {set i 1} {$i<=100} {incr i} {
+ append sql ${sep}$i
+ set sep ,
+ }
+ append sql )
+ sqlite_get_table_printf $::dbx $sql {}
+ sqlite_get_table_printf $::dbx {SELECT * FROM t2} {}
+} {0 1 100 x1 x2 x3 x4 x5 x6 x7 x8 x9 x10 x11 x12 x13 x14 x15 x16 x17 x18 x19 x20 x21 x22 x23 x24 x25 x26 x27 x28 x29 x30 x31 x32 x33 x34 x35 x36 x37 x38 x39 x40 x41 x42 x43 x44 x45 x46 x47 x48 x49 x50 x51 x52 x53 x54 x55 x56 x57 x58 x59 x60 x61 x62 x63 x64 x65 x66 x67 x68 x69 x70 x71 x72 x73 x74 x75 x76 x77 x78 x79 x80 x81 x82 x83 x84 x85 x86 x87 x88 x89 x90 x91 x92 x93 x94 x95 x96 x97 x98 x99 x100 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100}
+do_test tableapi-5.2 {
+ set sql "INSERT INTO t2 VALUES("
+ set sep ""
+ for {set i 1} {$i<=100} {incr i} {
+ append sql ${sep}[expr {$i+1000}]
+ set sep ,
+ }
+ append sql )
+ sqlite_get_table_printf $::dbx $sql {}
+ sqlite_get_table_printf $::dbx {SELECT * FROM t2} {}
+} {0 2 100 x1 x2 x3 x4 x5 x6 x7 x8 x9 x10 x11 x12 x13 x14 x15 x16 x17 x18 x19 x20 x21 x22 x23 x24 x25 x26 x27 x28 x29 x30 x31 x32 x33 x34 x35 x36 x37 x38 x39 x40 x41 x42 x43 x44 x45 x46 x47 x48 x49 x50 x51 x52 x53 x54 x55 x56 x57 x58 x59 x60 x61 x62 x63 x64 x65 x66 x67 x68 x69 x70 x71 x72 x73 x74 x75 x76 x77 x78 x79 x80 x81 x82 x83 x84 x85 x86 x87 x88 x89 x90 x91 x92 x93 x94 x95 x96 x97 x98 x99 x100 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100}
+
+do_test tableapi-99.0 {
+ sqlite_close $::dbx
+} {}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/tclsqlite.test b/usr/src/cmd/svc/configd/sqlite/test/tclsqlite.test
new file mode 100644
index 0000000000..d5a4249c7f
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/tclsqlite.test
@@ -0,0 +1,122 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for the TCL interface to the
+# SQLite library.
+#
+# Actually, all tests are based on the TCL interface, so the main
+# interface is pretty well tested.  This file contains some additional
+# tests for fringe issues that the main test suite does not cover.
+#
+# $Id: tclsqlite.test,v 1.20.2.1 2004/07/19 19:30:50 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Check the error messages generated by tclsqlite
+#
+if {[sqlite -has-codec]} {
+ set r "sqlite_orig HANDLE FILENAME ?-key CODEC-KEY?"
+} else {
+ set r "sqlite HANDLE FILENAME ?MODE?"
+}
+do_test tcl-1.1 {
+ set v [catch {sqlite bogus} msg]
+ lappend v $msg
+} [list 1 "wrong # args: should be \"$r\""]
+do_test tcl-1.2 {
+ set v [catch {db bogus} msg]
+ lappend v $msg
+} {1 {bad option "bogus": must be authorizer, busy, changes, close, commit_hook, complete, errorcode, eval, function, last_insert_rowid, last_statement_changes, onecolumn, progress, rekey, timeout, or trace}}
+do_test tcl-1.3 {
+ execsql {CREATE TABLE t1(a int, b int)}
+ execsql {INSERT INTO t1 VALUES(10,20)}
+ set v [catch {
+ db eval {SELECT * FROM t1} data {
+ error "The error message"
+ }
+ } msg]
+ lappend v $msg
+} {1 {The error message}}
+do_test tcl-1.4 {
+ set v [catch {
+ db eval {SELECT * FROM t2} data {
+ error "The error message"
+ }
+ } msg]
+ lappend v $msg
+} {1 {no such table: t2}}
+do_test tcl-1.5 {
+ set v [catch {
+ db eval {SELECT * FROM t1} data {
+ break
+ }
+ } msg]
+ lappend v $msg
+} {0 {}}
+do_test tcl-1.6 {
+ set v [catch {
+ db eval {SELECT * FROM t1} data {
+ expr x*
+ }
+ } msg]
+ regsub {:.*$} $msg {} msg
+ lappend v $msg
+} {1 {syntax error in expression "x*"}}
+
+if {[sqlite -encoding]=="UTF-8" && [sqlite -tcl-uses-utf]} {
+ catch {unset ::result}
+ do_test tcl-2.1 {
+ execsql "CREATE TABLE t\u0123x(a int, b\u1235 float)"
+ execsql "PRAGMA table_info(t\u0123x)"
+ } "0 a int 0 {} 0 1 b\u1235 float 0 {} 0"
+ do_test tcl-2.2 {
+ execsql "INSERT INTO t\u0123x VALUES(1,2.3)"
+ db eval "SELECT * FROM t\u0123x" result break
+ set result(*)
+ } "a b\u1235"
+}
+
+if {[sqlite -encoding]=="iso8859" && [sqlite -tcl-uses-utf]} {
+ do_test tcl-2.1 {
+ execsql "CREATE TABLE t\251x(a int, b\306 float)"
+ execsql "PRAGMA table_info(t\251x)"
+ } "0 a int 0 {} 0 1 b\306 float 0 {} 0"
+ do_test tcl-2.2 {
+ execsql "INSERT INTO t\251x VALUES(1,2.3)"
+ db eval "SELECT * FROM t\251x" result break
+ set result(*)
+ } "a b\306"
+}
+
+# Test the onecolumn method
+#
+do_test tcl-3.1 {
+ execsql {
+ INSERT INTO t1 SELECT a*2, b*2 FROM t1;
+ INSERT INTO t1 SELECT a*2+1, b*2+1 FROM t1;
+ INSERT INTO t1 SELECT a*2+3, b*2+3 FROM t1;
+ }
+ set rc [catch {db onecolumn {SELECT * FROM t1 ORDER BY a}} msg]
+ lappend rc $msg
+} {0 10}
+do_test tcl-3.2 {
+ db onecolumn {SELECT * FROM t1 WHERE a<0}
+} {}
+do_test tcl-3.3 {
+ set rc [catch {db onecolumn} errmsg]
+ lappend rc $errmsg
+} {1 {wrong # args: should be "db onecolumn SQL"}}
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/temptable.test b/usr/src/cmd/svc/configd/sqlite/test/temptable.test
new file mode 100644
index 0000000000..93ff24bc6c
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/temptable.test
@@ -0,0 +1,402 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 October 7
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library.
+#
+# This file implements tests for temporary tables and indices.
+#
+# $Id: temptable.test,v 1.11 2004/02/14 16:31:04 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Create an alternative connection to the database
+#
+do_test temptable-1.0 {
+ sqlite db2 ./test.db
+ set dummy {}
+} {}
+
+# Create a permanent table.
+#
+do_test temptable-1.1 {
+ execsql {CREATE TABLE t1(a,b,c);}
+ execsql {INSERT INTO t1 VALUES(1,2,3);}
+ execsql {SELECT * FROM t1}
+} {1 2 3}
+do_test temptable-1.2 {
+ catch {db2 eval {SELECT * FROM sqlite_master}}
+ db2 eval {SELECT * FROM t1}
+} {1 2 3}
+do_test temptable-1.3 {
+ execsql {SELECT name FROM sqlite_master}
+} {t1}
+do_test temptable-1.4 {
+ db2 eval {SELECT name FROM sqlite_master}
+} {t1}
+
+# Create a temporary table. Verify that only one of the two
+# connections can see it.
+#
+do_test temptable-1.5 {
+ db2 eval {
+ CREATE TEMP TABLE t2(x,y,z);
+ INSERT INTO t2 VALUES(4,5,6);
+ }
+ db2 eval {SELECT * FROM t2}
+} {4 5 6}
+do_test temptable-1.6 {
+ catch {execsql {SELECT * FROM sqlite_master}}
+ catchsql {SELECT * FROM t2}
+} {1 {no such table: t2}}
+do_test temptable-1.7 {
+ catchsql {INSERT INTO t2 VALUES(8,9,0);}
+} {1 {no such table: t2}}
+do_test temptable-1.8 {
+ db2 eval {INSERT INTO t2 VALUES(8,9,0);}
+ db2 eval {SELECT * FROM t2 ORDER BY x}
+} {4 5 6 8 9 0}
+do_test temptable-1.9 {
+ db2 eval {DELETE FROM t2 WHERE x==8}
+ db2 eval {SELECT * FROM t2 ORDER BY x}
+} {4 5 6}
+do_test temptable-1.10 {
+ db2 eval {DELETE FROM t2}
+ db2 eval {SELECT * FROM t2}
+} {}
+do_test temptable-1.11 {
+ db2 eval {
+ INSERT INTO t2 VALUES(7,6,5);
+ INSERT INTO t2 VALUES(4,3,2);
+ SELECT * FROM t2 ORDER BY x;
+ }
+} {4 3 2 7 6 5}
+do_test temptable-1.12 {
+ db2 eval {DROP TABLE t2;}
+ set r [catch {db2 eval {SELECT * FROM t2}} msg]
+ lappend r $msg
+} {1 {no such table: t2}}
+
+# Make sure temporary tables work with transactions
+#
+do_test temptable-2.1 {
+ execsql {
+ BEGIN TRANSACTION;
+ CREATE TEMPORARY TABLE t2(x,y);
+ INSERT INTO t2 VALUES(1,2);
+ SELECT * FROM t2;
+ }
+} {1 2}
+do_test temptable-2.2 {
+ execsql {ROLLBACK}
+ catchsql {SELECT * FROM t2}
+} {1 {no such table: t2}}
+do_test temptable-2.3 {
+ execsql {
+ BEGIN TRANSACTION;
+ CREATE TEMPORARY TABLE t2(x,y);
+ INSERT INTO t2 VALUES(1,2);
+ SELECT * FROM t2;
+ }
+} {1 2}
+do_test temptable-2.4 {
+ execsql {COMMIT}
+ catchsql {SELECT * FROM t2}
+} {0 {1 2}}
+do_test temptable-2.5 {
+ set r [catch {db2 eval {SELECT * FROM t2}} msg]
+ lappend r $msg
+} {1 {no such table: t2}}
+
+# Make sure indices on temporary tables are also temporary.
+#
+do_test temptable-3.1 {
+ execsql {
+ CREATE INDEX i2 ON t2(x);
+ SELECT name FROM sqlite_master WHERE type='index';
+ }
+} {}
+do_test temptable-3.2 {
+ execsql {
+ SELECT y FROM t2 WHERE x=1;
+ }
+} {2}
+do_test temptable-3.3 {
+ execsql {
+ DROP INDEX i2;
+ SELECT y FROM t2 WHERE x=1;
+ }
+} {2}
+do_test temptable-3.4 {
+ execsql {
+ CREATE INDEX i2 ON t2(x);
+ DROP TABLE t2;
+ }
+ catchsql {DROP INDEX i2}
+} {1 {no such index: i2}}
+
+# Check for correct name collision processing. A name collision can
+# occur when process A creates a temporary table T then process B
+# creates a permanent table also named T. The temp table in process A
+# hides the existence of the permanent table.
+#
+do_test temptable-4.1 {
+ execsql {
+ CREATE TEMP TABLE t2(x,y);
+ INSERT INTO t2 VALUES(10,20);
+ SELECT * FROM t2;
+ } db2
+} {10 20}
+do_test temptable-4.2 {
+ execsql {
+ CREATE TABLE t2(x,y,z);
+ INSERT INTO t2 VALUES(9,8,7);
+ SELECT * FROM t2;
+ }
+} {9 8 7}
+do_test temptable-4.3 {
+ catchsql {
+ SELECT * FROM t2;
+ } db2
+} {0 {10 20}}
+do_test temptable-4.4.1 {
+ catchsql {
+ SELECT * FROM temp.t2;
+ } db2
+} {0 {10 20}}
+do_test temptable-4.4.2 {
+ catchsql {
+ SELECT * FROM main.t2;
+ } db2
+} {1 {no such table: main.t2}}
+#do_test temptable-4.4.3 {
+# catchsql {
+# SELECT name FROM main.sqlite_master WHERE type='table';
+# } db2
+#} {1 {database schema has changed}}
+do_test temptable-4.4.4 {
+ catchsql {
+ SELECT name FROM main.sqlite_master WHERE type='table';
+ } db2
+} {0 {t1 t2}}
+do_test temptable-4.4.5 {
+ catchsql {
+ SELECT * FROM main.t2;
+ } db2
+} {0 {9 8 7}}
+do_test temptable-4.4.6 {
+ # TEMP takes precedence over MAIN
+ catchsql {
+ SELECT * FROM t2;
+ } db2
+} {0 {10 20}}
+do_test temptable-4.5 {
+ catchsql {
+ DROP TABLE t2; -- should drop TEMP
+ SELECT * FROM t2; -- data should be from MAIN
+ } db2
+} {0 {9 8 7}}
+do_test temptable-4.6 {
+ db2 close
+ sqlite db2 ./test.db
+ catchsql {
+ SELECT * FROM t2;
+ } db2
+} {0 {9 8 7}}
+do_test temptable-4.7 {
+ catchsql {
+ DROP TABLE t2;
+ SELECT * FROM t2;
+ }
+} {1 {no such table: t2}}
+do_test temptable-4.8 {
+ db2 close
+ sqlite db2 ./test.db
+ execsql {
+ CREATE TEMP TABLE t2(x unique,y);
+ INSERT INTO t2 VALUES(1,2);
+ SELECT * FROM t2;
+ } db2
+} {1 2}
+do_test temptable-4.9 {
+ execsql {
+ CREATE TABLE t2(x unique, y);
+ INSERT INTO t2 VALUES(3,4);
+ SELECT * FROM t2;
+ }
+} {3 4}
+do_test temptable-4.10.1 {
+ catchsql {
+ SELECT * FROM t2;
+ } db2
+} {0 {1 2}}
+#do_test temptable-4.10.2 {
+# catchsql {
+# SELECT name FROM sqlite_master WHERE type='table'
+# } db2
+#} {1 {database schema has changed}}
+do_test temptable-4.10.3 {
+ catchsql {
+ SELECT name FROM sqlite_master WHERE type='table'
+ } db2
+} {0 {t1 t2}}
+do_test temptable-4.11 {
+ execsql {
+ SELECT * FROM t2;
+ } db2
+} {1 2}
+do_test temptable-4.12 {
+ execsql {
+ SELECT * FROM t2;
+ }
+} {3 4}
+do_test temptable-4.13 {
+ catchsql {
+ DROP TABLE t2; -- drops TEMP.T2
+ SELECT * FROM t2; -- uses MAIN.T2
+ } db2
+} {0 {3 4}}
+do_test temptable-4.14 {
+ execsql {
+ SELECT * FROM t2;
+ }
+} {3 4}
+do_test temptable-4.15 {
+ db2 close
+ sqlite db2 ./test.db
+ execsql {
+ SELECT * FROM t2;
+ } db2
+} {3 4}
+
+# Now create a temporary table in db2 and a permanent index in db. The
+# temporary table in db2 should mask the name of the permanent index,
+# but the permanent index should still be accessible and should still
+# be updated when its corresponding table changes.
+#
+do_test temptable-5.1 {
+ execsql {
+ CREATE TEMP TABLE mask(a,b,c)
+ } db2
+ execsql {
+ CREATE INDEX mask ON t2(x);
+ SELECT * FROM t2;
+ }
+} {3 4}
+#do_test temptable-5.2 {
+# catchsql {
+# SELECT * FROM t2;
+# } db2
+#} {1 {database schema has changed}}
+do_test temptable-5.3 {
+ catchsql {
+ SELECT * FROM t2;
+ } db2
+} {0 {3 4}}
+do_test temptable-5.4 {
+ execsql {
+ SELECT y FROM t2 WHERE x=3
+ }
+} {4}
+do_test temptable-5.5 {
+ execsql {
+ SELECT y FROM t2 WHERE x=3
+ } db2
+} {4}
+do_test temptable-5.6 {
+ execsql {
+ INSERT INTO t2 VALUES(1,2);
+ SELECT y FROM t2 WHERE x=1;
+ } db2
+} {2}
+do_test temptable-5.7 {
+ execsql {
+ SELECT y FROM t2 WHERE x=3
+ } db2
+} {4}
+do_test temptable-5.8 {
+ execsql {
+ SELECT y FROM t2 WHERE x=1;
+ }
+} {2}
+do_test temptable-5.9 {
+ execsql {
+ SELECT y FROM t2 WHERE x=3
+ }
+} {4}
+
+db2 close
+
+# Test for correct operation of read-only databases
+#
+do_test temptable-6.1 {
+ execsql {
+ CREATE TABLE t8(x);
+ INSERT INTO t8 VALUES('xyzzy');
+ SELECT * FROM t8;
+ }
+} {xyzzy}
+do_test temptable-6.2 {
+ db close
+ catch {file attributes test.db -permissions 0444}
+ catch {file attributes test.db -readonly 1}
+ sqlite db test.db
+ if {[file writable test.db]} {
+ error "Unable to make the database file test.db readonly - rerun this test as an unprivileged user"
+ }
+ execsql {
+ SELECT * FROM t8;
+ }
+} {xyzzy}
+do_test temptable-6.3 {
+ if {[file writable test.db]} {
+ error "Unable to make the database file test.db readonly - rerun this test as an unprivileged user"
+ }
+ catchsql {
+ CREATE TABLE t9(x,y);
+ }
+} {1 {attempt to write a readonly database}}
+do_test temptable-6.4 {
+ catchsql {
+ CREATE TEMP TABLE t9(x,y);
+ }
+} {0 {}}
+do_test temptable-6.5 {
+ catchsql {
+ INSERT INTO t9 VALUES(1,2);
+ SELECT * FROM t9;
+ }
+} {0 {1 2}}
+do_test temptable-6.6 {
+ if {[file writable test.db]} {
+ error "Unable to make the database file test.db readonly - rerun this test as an unprivileged user"
+ }
+ catchsql {
+ INSERT INTO t8 VALUES('hello');
+ SELECT * FROM t8;
+ }
+} {1 {attempt to write a readonly database}}
+do_test temptable-6.7 {
+ catchsql {
+ SELECT * FROM t8,t9;
+ }
+} {0 {xyzzy 1 2}}
+do_test temptable-6.8 {
+ db close
+ sqlite db test.db
+ catchsql {
+ SELECT * FROM t8,t9;
+ }
+} {1 {no such table: t9}}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/tester.tcl b/usr/src/cmd/svc/configd/sqlite/test/tester.tcl
new file mode 100644
index 0000000000..8cc6951eee
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/tester.tcl
@@ -0,0 +1,267 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements some common TCL routines used for regression
+# testing the SQLite library
+#
+# $Id: tester.tcl,v 1.28 2004/02/14 01:39:50 drh Exp $
+
+# Make sure tclsqlite was compiled correctly. Abort now with an
+# error message if not.
+#
+if {[sqlite -tcl-uses-utf]} {
+ if {"\u1234"=="u1234"} {
+ puts stderr "***** BUILD PROBLEM *****"
+ puts stderr "$argv0 was linked against an older version"
+ puts stderr "of TCL that does not support Unicode, but uses a header"
+ puts stderr "file (\"tcl.h\") from a new TCL version that does support"
+ puts stderr "Unicode. This combination causes internal errors."
+ puts stderr "Recompile using a TCL library and header file that match"
+ puts stderr "and try again.\n**************************"
+ exit 1
+ }
+} else {
+ if {"\u1234"!="u1234"} {
+ puts stderr "***** BUILD PROBLEM *****"
+ puts stderr "$argv0 was linked against an newer version"
+ puts stderr "of TCL that supports Unicode, but uses a header file"
+ puts stderr "(\"tcl.h\") from a old TCL version that does not support"
+ puts stderr "Unicode. This combination causes internal errors."
+ puts stderr "Recompile using a TCL library and header file that match"
+ puts stderr "and try again.\n**************************"
+ exit 1
+ }
+}
+
+# Use the pager codec if it is available
+#
+if {[sqlite -has-codec] && [info command sqlite_orig]==""} {
+ rename sqlite sqlite_orig
+ proc sqlite {args} {
+ if {[llength $args]==2 && [string index [lindex $args 0] 0]!="-"} {
+ lappend args -key {xyzzy}
+ }
+ uplevel 1 sqlite_orig $args
+ }
+}
+
+
+# Create a test database
+#
+catch {db close}
+file delete -force test.db
+file delete -force test.db-journal
+sqlite db ./test.db
+if {[info exists ::SETUP_SQL]} {
+ db eval $::SETUP_SQL
+}
+
+# Abort early if this script has been run before.
+#
+if {[info exists nTest]} return
+
+# Set the test counters to zero
+#
+set nErr 0
+set nTest 0
+set nProb 0
+set skip_test 0
+set failList {}
+
+# Invoke the do_test procedure to run a single test
+#
+proc do_test {name cmd expected} {
+ global argv nErr nTest skip_test
+ if {$skip_test} {
+ set skip_test 0
+ return
+ }
+ if {[llength $argv]==0} {
+ set go 1
+ } else {
+ set go 0
+ foreach pattern $argv {
+ if {[string match $pattern $name]} {
+ set go 1
+ break
+ }
+ }
+ }
+ if {!$go} return
+ incr nTest
+ puts -nonewline $name...
+ flush stdout
+ if {[catch {uplevel #0 "$cmd;\n"} result]} {
+ puts "\nError: $result"
+ incr nErr
+ lappend ::failList $name
+ if {$nErr>100} {puts "*** Giving up..."; finalize_testing}
+ } elseif {[string compare $result $expected]} {
+ puts "\nExpected: \[$expected\]\n Got: \[$result\]"
+ incr nErr
+ lappend ::failList $name
+ if {$nErr>100} {puts "*** Giving up..."; finalize_testing}
+ } else {
+ puts " Ok"
+ }
+}
+
+# Invoke this procedure on a test that is probabilistic
+# and might fail sometimes.
+#
+proc do_probtest {name cmd expected} {
+  global argv nErr nProb nTest skip_test
+ if {$skip_test} {
+ set skip_test 0
+ return
+ }
+ if {[llength $argv]==0} {
+ set go 1
+ } else {
+ set go 0
+ foreach pattern $argv {
+ if {[string match $pattern $name]} {
+ set go 1
+ break
+ }
+ }
+ }
+ if {!$go} return
+ incr nTest
+ puts -nonewline $name...
+ flush stdout
+ if {[catch {uplevel #0 "$cmd;\n"} result]} {
+ puts "\nError: $result"
+ incr nErr
+ } elseif {[string compare $result $expected]} {
+ puts "\nExpected: \[$expected\]\n Got: \[$result\]"
+ puts "NOTE: The results of the previous test depend on system load"
+ puts "and processor speed. The test may sometimes fail even if the"
+ puts "library is working correctly."
+ incr nProb
+ } else {
+ puts " Ok"
+ }
+}
+
+# The procedure uses the special "sqlite_malloc_stat" command
+# (which is only available if SQLite is compiled with -DMEMORY_DEBUG=1)
+# to see how many malloc()s have not been free()ed. The number
+# of surplus malloc()s is stored in the global variable $::Leak.
+# If the value in $::Leak grows, it may mean there is a memory leak
+# in the library.
+#
+proc memleak_check {} {
+ if {[info command sqlite_malloc_stat]!=""} {
+ set r [sqlite_malloc_stat]
+ set ::Leak [expr {[lindex $r 0]-[lindex $r 1]}]
+ }
+}
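+
+# Usage sketch (assuming a -DMEMORY_DEBUG=1 build; kept as comments so the
+# harness behavior is unchanged):
+#
+#   memleak_check
+#   if {[info exists ::Leak]} { puts "outstanding mallocs: $::Leak" }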
+
+# Run this routine last
+#
+proc finish_test {} {
+ finalize_testing
+}
+proc finalize_testing {} {
+ global nTest nErr nProb sqlite_open_file_count
+ if {$nErr==0} memleak_check
+ catch {db close}
+ puts "$nErr errors out of $nTest tests"
+ puts "Failures on these tests: $::failList"
+ if {$nProb>0} {
+ puts "$nProb probabilistic tests also failed, but this does"
+ puts "not necessarily indicate a malfunction."
+ }
+ if {$sqlite_open_file_count} {
+ puts "$sqlite_open_file_count files were left open"
+ incr nErr
+ }
+ exit [expr {$nErr>0}]
+}
+
+# A procedure to execute SQL
+#
+proc execsql {sql {db db}} {
+ # puts "SQL = $sql"
+ return [$db eval $sql]
+}
+
+# Execute SQL and catch exceptions.
+#
+proc catchsql {sql {db db}} {
+ # puts "SQL = $sql"
+ set r [catch {$db eval $sql} msg]
+ lappend r $msg
+ return $r
+}
+
+# Do a VDBE code dump of the given SQL
+#
+proc explain {sql {db db}} {
+ puts ""
+ puts "addr opcode p1 p2 p3 "
+ puts "---- ------------ ------ ------ ---------------"
+ $db eval "explain $sql" {} {
+ puts [format {%-4d %-12.12s %-6d %-6d %s} $addr $opcode $p1 $p2 $p3]
+ }
+}
+
+# Another procedure to execute SQL. This one includes the field
+# names in the returned list.
+#
+proc execsql2 {sql} {
+ set result {}
+ db eval $sql data {
+ foreach f $data(*) {
+ lappend result $f $data($f)
+ }
+ }
+ return $result
+}
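+
+# Usage sketch: for a table t(a,b) holding the single row (1,2),
+# [execsql {SELECT * FROM t}] returns {1 2} while
+# [execsql2 {SELECT * FROM t}] returns {a 1 b 2}.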
+
+# Use the non-callback API to execute multiple SQL statements
+#
+proc stepsql {dbptr sql} {
+ set sql [string trim $sql]
+ set r 0
+ while {[string length $sql]>0} {
+ if {[catch {sqlite_compile $dbptr $sql sqltail} vm]} {
+ return [list 1 $vm]
+ }
+ set sql [string trim $sqltail]
+ while {[sqlite_step $vm N VAL COL]=="SQLITE_ROW"} {
+ foreach v $VAL {lappend r $v}
+ }
+ if {[catch {sqlite_finalize $vm} errmsg]} {
+ return [list 1 $errmsg]
+ }
+ }
+ return $r
+}
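+
+# Sketch of typical stepsql usage (illustrative only; table t1 and the
+# db2 handle are placeholders). Unlike execsql, stepsql wants the raw
+# database pointer rather than a database command name; that pointer is
+# what the "sqlite" command returns when a database is opened:
+#
+#   set DB [sqlite db2 test.db]
+#   stepsql $DB {SELECT a FROM t1; SELECT b FROM t1}
+#   db2 close
+#
+# On success the concatenated column values of every statement are
+# returned; on error the result is a list of 1 and the error message.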
+
+# Delete a file or directory
+#
+proc forcedelete {filename} {
+ if {[catch {file delete -force $filename}]} {
+ exec rm -rf $filename
+ }
+}
+
+# Do an integrity check of the entire database
+#
+proc integrity_check {name} {
+ do_test $name {
+ execsql {PRAGMA integrity_check}
+ } {ok}
+}
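+
+# A minimal sketch of how a test script typically drives this harness
+# (illustrative only; the test names and table ex1 are placeholders):
+#
+#   set testdir [file dirname $argv0]
+#   source $testdir/tester.tcl
+#
+#   do_test example-1.1 {
+#     execsql {
+#       CREATE TABLE ex1(a, b);
+#       INSERT INTO ex1 VALUES(1, 'one');
+#       SELECT * FROM ex1;
+#     }
+#   } {1 one}
+#   integrity_check example-1.2
+#
+#   finish_test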
diff --git a/usr/src/cmd/svc/configd/sqlite/test/thread1.test b/usr/src/cmd/svc/configd/sqlite/test/thread1.test
new file mode 100644
index 0000000000..cbca2e364a
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/thread1.test
@@ -0,0 +1,161 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2003 December 18
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for the SQLite library. The
+# focus of this script is multithreading behavior.
+#
+# $Id: thread1.test,v 1.3 2004/02/11 02:18:07 drh Exp $
+
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Skip this whole file if the thread testing code is not enabled
+#
+if {[llength [info command thread_step]]==0 || [sqlite -has-codec]} {
+ finish_test
+ return
+}
+
+# Create some data to work with
+#
+do_test thread1-1.1 {
+ execsql {
+ CREATE TABLE t1(a,b);
+ INSERT INTO t1 VALUES(1,'abcdefgh');
+ INSERT INTO t1 SELECT a+1, b||b FROM t1;
+ INSERT INTO t1 SELECT a+2, b||b FROM t1;
+ INSERT INTO t1 SELECT a+4, b||b FROM t1;
+ SELECT count(*), max(length(b)) FROM t1;
+ }
+} {8 64}
+
+# Interleave two threads on read access. Then make sure a third
+# thread can write the database. In other words:
+#
+# read-lock A
+# read-lock B
+# unlock A
+# unlock B
+# write-lock C
+#
+# At one point, the write-lock of C would fail on Linux.
+#
+do_test thread1-1.2 {
+ thread_create A test.db
+ thread_create B test.db
+ thread_create C test.db
+ thread_compile A {SELECT a FROM t1}
+ thread_step A
+ thread_result A
+} SQLITE_ROW
+do_test thread1-1.3 {
+ thread_argc A
+} 1
+do_test thread1-1.4 {
+ thread_argv A 0
+} 1
+do_test thread1-1.5 {
+ thread_compile B {SELECT b FROM t1}
+ thread_step B
+ thread_result B
+} SQLITE_ROW
+do_test thread1-1.6 {
+ thread_argc B
+} 1
+do_test thread1-1.7 {
+ thread_argv B 0
+} abcdefgh
+do_test thread1-1.8 {
+ thread_finalize A
+ thread_result A
+} SQLITE_OK
+do_test thread1-1.9 {
+ thread_finalize B
+ thread_result B
+} SQLITE_OK
+do_test thread1-1.10 {
+ thread_compile C {CREATE TABLE t2(x,y)}
+ thread_step C
+ thread_result C
+} SQLITE_DONE
+do_test thread1-1.11 {
+ thread_finalize C
+ thread_result C
+} SQLITE_OK
+do_test thread1-1.12 {
+ catchsql {SELECT name FROM sqlite_master}
+ execsql {SELECT name FROM sqlite_master}
+} {t1 t2}
+
+
+# Under this scenario:
+#
+# read-lock A
+# read-lock B
+# unlock A
+# write-lock C
+#
+# Make sure the write-lock fails with SQLITE_BUSY
+#
+do_test thread1-2.1 {
+ thread_halt *
+ thread_create A test.db
+ thread_compile A {SELECT a FROM t1}
+ thread_step A
+ thread_result A
+} SQLITE_ROW
+do_test thread1-2.2 {
+ thread_create B test.db
+ thread_compile B {SELECT b FROM t1}
+ thread_step B
+ thread_result B
+} SQLITE_ROW
+do_test thread1-2.3 {
+ thread_create C test.db
+ thread_compile C {INSERT INTO t2 VALUES(98,99)}
+ thread_step C
+ thread_result C
+} SQLITE_BUSY
+do_test thread1-2.4 {
+ execsql {SELECT * FROM t2}
+} {}
+do_test thread1-2.5 {
+ thread_finalize A
+ thread_result A
+} SQLITE_OK
+do_test thread1-2.6 {
+ thread_step C
+ thread_result C
+} SQLITE_BUSY
+do_test thread1-2.7 {
+ execsql {SELECT * FROM t2}
+} {}
+do_test thread1-2.8 {
+ thread_finalize B
+ thread_result B
+} SQLITE_OK
+do_test thread1-2.9 {
+ thread_step C
+ thread_result C
+} SQLITE_DONE
+do_test thread1-2.10 {
+ execsql {SELECT * FROM t2}
+} {98 99}
+do_test thread1-2.11 {
+ thread_finalize C
+ thread_result C
+} SQLITE_OK
+
+thread_halt *
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/threadtest1.c b/usr/src/cmd/svc/configd/sqlite/test/threadtest1.c
new file mode 100644
index 0000000000..48f4bf7679
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/threadtest1.c
@@ -0,0 +1,285 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2002 January 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file implements a simple standalone program used to test whether
+** or not the SQLite library is threadsafe.
+**
+** Testing the thread safety of SQLite is difficult because there are very
+** few places in the code that are even potentially unsafe, and those
+** places execute for very short periods of time. So even if the library
+** is compiled with its mutexes disabled, it is likely to work correctly
+** in a multi-threaded program most of the time.
+**
+** This file is NOT part of the standard SQLite library. It is used for
+** testing only.
+*/
+#include "sqlite.h"
+#include <pthread.h>
+#include <sched.h>
+#include <stdarg.h>   /* for va_list, va_start, va_end used below */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+/*
+** Enable for tracing
+*/
+static int verbose = 0;
+
+/*
+** Come here to die.
+*/
+static void Exit(int rc){
+ exit(rc);
+}
+
+extern char *sqlite_mprintf(const char *zFormat, ...);
+extern char *sqlite_vmprintf(const char *zFormat, va_list);
+
+/*
+** When a lock occurs, yield.
+*/
+static int db_is_locked(void *NotUsed, const char *zNotUsed, int iNotUsed){
+ /* sched_yield(); */
+ if( verbose ) printf("BUSY %s\n", (char*)NotUsed);
+ usleep(100);
+ return 1;
+}
+
+/*
+** Used to accumulate query results by db_query()
+*/
+struct QueryResult {
+ const char *zFile; /* Filename - used for error reporting */
+ int nElem; /* Number of used entries in azElem[] */
+ int nAlloc; /* Number of slots allocated for azElem[] */
+ char **azElem; /* The result of the query */
+};
+
+/*
+** The callback function for db_query
+*/
+static int db_query_callback(
+ void *pUser, /* Pointer to the QueryResult structure */
+ int nArg, /* Number of columns in this result row */
+ char **azArg, /* Text of data in all columns */
+ char **NotUsed /* Names of the columns */
+){
+ struct QueryResult *pResult = (struct QueryResult*)pUser;
+ int i;
+ if( pResult->nElem + nArg >= pResult->nAlloc ){
+ if( pResult->nAlloc==0 ){
+ pResult->nAlloc = nArg+1;
+ }else{
+ pResult->nAlloc = pResult->nAlloc*2 + nArg + 1;
+ }
+ pResult->azElem = realloc( pResult->azElem, pResult->nAlloc*sizeof(char*));
+ if( pResult->azElem==0 ){
+ fprintf(stdout,"%s: malloc failed\n", pResult->zFile);
+ return 1;
+ }
+ }
+ if( azArg==0 ) return 0;
+ for(i=0; i<nArg; i++){
+ pResult->azElem[pResult->nElem++] =
+ sqlite_mprintf("%s",azArg[i] ? azArg[i] : "");
+ }
+ return 0;
+}
+
+/*
+** Execute a query against the database. NULL values are returned
+** as an empty string. The list is terminated by a single NULL pointer.
+*/
+char **db_query(sqlite *db, const char *zFile, const char *zFormat, ...){
+ char *zSql;
+ int rc;
+ char *zErrMsg = 0;
+ va_list ap;
+ struct QueryResult sResult;
+ va_start(ap, zFormat);
+ zSql = sqlite_vmprintf(zFormat, ap);
+ va_end(ap);
+ memset(&sResult, 0, sizeof(sResult));
+ sResult.zFile = zFile;
+ if( verbose ) printf("QUERY %s: %s\n", zFile, zSql);
+ rc = sqlite_exec(db, zSql, db_query_callback, &sResult, &zErrMsg);
+ if( rc==SQLITE_SCHEMA ){
+ if( zErrMsg ) free(zErrMsg);
+ rc = sqlite_exec(db, zSql, db_query_callback, &sResult, &zErrMsg);
+ }
+ if( verbose ) printf("DONE %s %s\n", zFile, zSql);
+ if( zErrMsg ){
+ fprintf(stdout,"%s: query failed: %s - %s\n", zFile, zSql, zErrMsg);
+ free(zErrMsg);
+ free(zSql);
+ Exit(1);
+ }
+ sqlite_freemem(zSql);
+ if( sResult.azElem==0 ){
+ db_query_callback(&sResult, 0, 0, 0);
+ }
+ sResult.azElem[sResult.nElem] = 0;
+ return sResult.azElem;
+}
+
+/*
+** Execute an SQL statement.
+*/
+void db_execute(sqlite *db, const char *zFile, const char *zFormat, ...){
+ char *zSql;
+ int rc;
+ char *zErrMsg = 0;
+ va_list ap;
+ va_start(ap, zFormat);
+ zSql = sqlite_vmprintf(zFormat, ap);
+ va_end(ap);
+ if( verbose ) printf("EXEC %s: %s\n", zFile, zSql);
+ rc = sqlite_exec(db, zSql, 0, 0, &zErrMsg);
+ while( rc==SQLITE_SCHEMA ){
+ if( zErrMsg ) free(zErrMsg);
+ rc = sqlite_exec(db, zSql, 0, 0, &zErrMsg);
+ }
+ if( verbose ) printf("DONE %s: %s\n", zFile, zSql);
+ if( zErrMsg ){
+ fprintf(stdout,"%s: command failed: %s - %s\n", zFile, zSql, zErrMsg);
+ free(zErrMsg);
+ sqlite_freemem(zSql);
+ Exit(1);
+ }
+ sqlite_freemem(zSql);
+}
+
+/*
+** Free the results of a db_query() call.
+*/
+void db_query_free(char **az){
+ int i;
+ for(i=0; az[i]; i++){
+ sqlite_freemem(az[i]);
+ }
+ free(az);
+}
+
+/*
+** Check results
+*/
+void db_check(const char *zFile, const char *zMsg, char **az, ...){
+ va_list ap;
+ int i;
+ char *z;
+ va_start(ap, az);
+ for(i=0; (z = va_arg(ap, char*))!=0; i++){
+ if( az[i]==0 || strcmp(az[i],z)!=0 ){
+ fprintf(stdout,"%s: %s: bad result in column %d: %s\n",
+ zFile, zMsg, i+1, az[i]);
+ db_query_free(az);
+ Exit(1);
+ }
+ }
+ va_end(ap);
+ db_query_free(az);
+}
+
+pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+pthread_cond_t sig = PTHREAD_COND_INITIALIZER;
+int thread_cnt = 0;
+
+static void *worker_bee(void *pArg){
+ const char *zFilename = (char*)pArg;
+ char *azErr;
+ int i, cnt;
+ int t = atoi(zFilename);
+ char **az;
+ sqlite *db;
+
+ pthread_mutex_lock(&lock);
+ thread_cnt++;
+ pthread_mutex_unlock(&lock);
+ printf("%s: START\n", zFilename);
+ fflush(stdout);
+ for(cnt=0; cnt<10; cnt++){
+ db = sqlite_open(&zFilename[2], 0, &azErr);
+ if( db==0 ){
+ fprintf(stdout,"%s: can't open\n", zFilename);
+ Exit(1);
+ }
+ sqlite_busy_handler(db, db_is_locked, zFilename);
+ db_execute(db, zFilename, "CREATE TABLE t%d(a,b,c);", t);
+ for(i=1; i<=100; i++){
+ db_execute(db, zFilename, "INSERT INTO t%d VALUES(%d,%d,%d);",
+ t, i, i*2, i*i);
+ }
+ az = db_query(db, zFilename, "SELECT count(*) FROM t%d", t);
+ db_check(zFilename, "tX size", az, "100", 0);
+ az = db_query(db, zFilename, "SELECT avg(b) FROM t%d", t);
+ db_check(zFilename, "tX avg", az, "101", 0);
+ db_execute(db, zFilename, "DELETE FROM t%d WHERE a>50", t);
+ az = db_query(db, zFilename, "SELECT avg(b) FROM t%d", t);
+ db_check(zFilename, "tX avg2", az, "51", 0);
+ for(i=1; i<=50; i++){
+ char z1[30], z2[30];
+ az = db_query(db, zFilename, "SELECT b, c FROM t%d WHERE a=%d", t, i);
+ sprintf(z1, "%d", i*2);
+ sprintf(z2, "%d", i*i);
+ db_check(zFilename, "readback", az, z1, z2, 0);
+ }
+ db_execute(db, zFilename, "DROP TABLE t%d;", t);
+ sqlite_close(db);
+ }
+ printf("%s: END\n", zFilename);
+ /* unlink(zFilename); */
+ fflush(stdout);
+ pthread_mutex_lock(&lock);
+ thread_cnt--;
+ if( thread_cnt<=0 ){
+ pthread_cond_signal(&sig);
+ }
+ pthread_mutex_unlock(&lock);
+ return 0;
+}
+
+int main(int argc, char **argv){
+ char *zFile;
+ int i, n;
+ pthread_t id;
+ if( argc>2 && strcmp(argv[1], "-v")==0 ){
+ verbose = 1;
+ argc--;
+ argv++;
+ }
+ if( argc<2 || (n=atoi(argv[1]))<1 ) n = 10;
+ for(i=0; i<n; i++){
+ char zBuf[200];
+ sprintf(zBuf, "testdb-%d", (i+1)/2);
+ unlink(zBuf);
+ }
+ for(i=0; i<n; i++){
+ zFile = sqlite_mprintf("%d.testdb-%d", i%2+1, (i+2)/2);
+ unlink(zFile);
+ pthread_create(&id, 0, worker_bee, (void*)zFile);
+ pthread_detach(id);
+ }
+ pthread_mutex_lock(&lock);
+ while( thread_cnt>0 ){
+ pthread_cond_wait(&sig, &lock);
+ }
+ pthread_mutex_unlock(&lock);
+ for(i=0; i<n; i++){
+ char zBuf[200];
+ sprintf(zBuf, "testdb-%d", (i+1)/2);
+ unlink(zBuf);
+ }
+ return 0;
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/test/threadtest2.c b/usr/src/cmd/svc/configd/sqlite/test/threadtest2.c
new file mode 100644
index 0000000000..9e49e50af1
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/threadtest2.c
@@ -0,0 +1,127 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** 2004 January 13
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file implements a simple standalone program used to test whether
+** or not the SQLite library is threadsafe.
+**
+** This file is NOT part of the standard SQLite library. It is used for
+** testing only.
+*/
+#include <stdio.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <sched.h>    /* for sched_yield() used by worker() */
+#include <string.h>
+#include <stdlib.h>
+#include "sqlite.h"
+
+/*
+** Name of the database
+*/
+#define DB_FILE "test.db"
+
+/*
+** When this variable becomes non-zero, all threads stop
+** what they are doing.
+*/
+volatile int all_stop = 0;
+
+/*
+** Callback from the integrity check. If the result is anything other
+** than "ok" it means the integrity check has failed. Set the "all_stop"
+** global variable to stop all other activity. Print the error message
+** or print OK if the string "ok" is seen.
+*/
+int check_callback(void *notUsed, int argc, char **argv, char **notUsed2){
+ if( strcmp(argv[0],"ok") ){
+ all_stop = 1;
+ fprintf(stderr,"pid=%d. %s\n", getpid(), argv[0]);
+ }else{
+ /* fprintf(stderr,"pid=%d. OK\n", getpid()); */
+ }
+ return 0;
+}
+
+/*
+** Do an integrity check on the database. If the first integrity check
+** fails, try it a second time.
+*/
+int integrity_check(sqlite *db){
+ int rc;
+ if( all_stop ) return 0;
+ /* fprintf(stderr,"pid=%d: CHECK\n", getpid()); */
+ rc = sqlite_exec(db, "pragma integrity_check", check_callback, 0, 0);
+ if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){
+ fprintf(stderr,"pid=%d, Integrity check returns %d\n", getpid(), rc);
+ }
+ if( all_stop ){
+ sqlite_exec(db, "pragma integrity_check", check_callback, 0, 0);
+ }
+ return 0;
+}
+
+/*
+** This is the worker thread
+*/
+void *worker(void *notUsed){
+ sqlite *db;
+ int rc;
+ int cnt = 0;
+ while( !all_stop && cnt++<10000 ){
+ if( cnt%1000==0 ) printf("pid=%d: %d\n", getpid(), cnt);
+ while( (db = sqlite_open(DB_FILE, 0, 0))==0 ) sched_yield();
+ sqlite_exec(db, "PRAGMA synchronous=OFF", 0, 0, 0);
+ integrity_check(db);
+ if( all_stop ){ sqlite_close(db); break; }
+ /* fprintf(stderr, "pid=%d: BEGIN\n", getpid()); */
+ rc = sqlite_exec(db, "INSERT INTO t1 VALUES('bogus data')", 0, 0, 0);
+ /* fprintf(stderr, "pid=%d: END rc=%d\n", getpid(), rc); */
+ sqlite_close(db);
+ }
+ return 0;
+}
+
+/*
+** Initialize the database and start the threads
+*/
+int main(int argc, char **argv){
+ sqlite *db;
+ int i, rc;
+ pthread_t aThread[5];
+
+ if( strcmp(DB_FILE,":memory:") ) unlink(DB_FILE);
+ db = sqlite_open(DB_FILE, 0, 0);
+ if( db==0 ){
+ fprintf(stderr,"unable to initialize database\n");
+ exit(1);
+ }
+ rc = sqlite_exec(db, "CREATE TABLE t1(x);", 0,0,0);
+ if( rc ){
+ fprintf(stderr,"cannot create table t1: %d\n", rc);
+ exit(1);
+ }
+ sqlite_close(db);
+ for(i=0; i<sizeof(aThread)/sizeof(aThread[0]); i++){
+ pthread_create(&aThread[i], 0, worker, 0);
+ }
+ for(i=0; i<sizeof(aThread)/sizeof(aThread[i]); i++){
+ pthread_join(aThread[i], 0);
+ }
+ if( !all_stop ){
+ printf("Everything seems ok.\n");
+ return 0;
+ }else{
+ printf("We hit an error.\n");
+ return 1;
+ }
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/test/trans.test b/usr/src/cmd/svc/configd/sqlite/test/trans.test
new file mode 100644
index 0000000000..a6b27e4953
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/trans.test
@@ -0,0 +1,905 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for the SQLite library. The
+# focus of this script is database locks.
+#
+# $Id: trans.test,v 1.19 2004/03/08 13:26:18 drh Exp $
+
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+
+# Create several tables to work with.
+#
+do_test trans-1.0 {
+ execsql {
+ CREATE TABLE one(a int PRIMARY KEY, b text);
+ INSERT INTO one VALUES(1,'one');
+ INSERT INTO one VALUES(2,'two');
+ INSERT INTO one VALUES(3,'three');
+ SELECT b FROM one ORDER BY a;
+ }
+} {one two three}
+do_test trans-1.1 {
+ execsql {
+ CREATE TABLE two(a int PRIMARY KEY, b text);
+ INSERT INTO two VALUES(1,'I');
+ INSERT INTO two VALUES(5,'V');
+ INSERT INTO two VALUES(10,'X');
+ SELECT b FROM two ORDER BY a;
+ }
+} {I V X}
+do_test trans-1.9 {
+ sqlite altdb test.db
+ execsql {SELECT b FROM one ORDER BY a} altdb
+} {one two three}
+do_test trans-1.10 {
+ execsql {SELECT b FROM two ORDER BY a} altdb
+} {I V X}
+integrity_check trans-1.11
+
+# Basic transactions
+#
+do_test trans-2.1 {
+ set v [catch {execsql {BEGIN}} msg]
+ lappend v $msg
+} {0 {}}
+do_test trans-2.2 {
+ set v [catch {execsql {END}} msg]
+ lappend v $msg
+} {0 {}}
+do_test trans-2.3 {
+ set v [catch {execsql {BEGIN TRANSACTION}} msg]
+ lappend v $msg
+} {0 {}}
+do_test trans-2.4 {
+ set v [catch {execsql {COMMIT TRANSACTION}} msg]
+ lappend v $msg
+} {0 {}}
+do_test trans-2.5 {
+ set v [catch {execsql {BEGIN TRANSACTION 'foo'}} msg]
+ lappend v $msg
+} {0 {}}
+do_test trans-2.6 {
+ set v [catch {execsql {ROLLBACK TRANSACTION 'foo'}} msg]
+ lappend v $msg
+} {0 {}}
+do_test trans-2.10 {
+ execsql {
+ BEGIN;
+ SELECT a FROM one ORDER BY a;
+ SELECT a FROM two ORDER BY a;
+ END;
+ }
+} {1 2 3 1 5 10}
+integrity_check trans-2.11
+
+# Check the locking behavior
+#
+do_test trans-3.1 {
+ execsql {
+ BEGIN;
+ SELECT a FROM one ORDER BY a;
+ }
+} {1 2 3}
+do_test trans-3.2 {
+ set v [catch {execsql {
+ SELECT a FROM two ORDER BY a;
+ } altdb} msg]
+ lappend v $msg
+} {1 {database is locked}}
+do_test trans-3.3 {
+ set v [catch {execsql {
+ SELECT a FROM one ORDER BY a;
+ } altdb} msg]
+ lappend v $msg
+} {1 {database is locked}}
+do_test trans-3.4 {
+ set v [catch {execsql {
+ INSERT INTO one VALUES(4,'four');
+ }} msg]
+ lappend v $msg
+} {0 {}}
+do_test trans-3.5 {
+ set v [catch {execsql {
+ SELECT a FROM two ORDER BY a;
+ } altdb} msg]
+ lappend v $msg
+} {1 {database is locked}}
+do_test trans-3.6 {
+ set v [catch {execsql {
+ SELECT a FROM one ORDER BY a;
+ } altdb} msg]
+ lappend v $msg
+} {1 {database is locked}}
+do_test trans-3.7 {
+ set v [catch {execsql {
+ INSERT INTO two VALUES(4,'IV');
+ }} msg]
+ lappend v $msg
+} {0 {}}
+do_test trans-3.8 {
+ set v [catch {execsql {
+ SELECT a FROM two ORDER BY a;
+ } altdb} msg]
+ lappend v $msg
+} {1 {database is locked}}
+do_test trans-3.9 {
+ set v [catch {execsql {
+ SELECT a FROM one ORDER BY a;
+ } altdb} msg]
+ lappend v $msg
+} {1 {database is locked}}
+do_test trans-3.10 {
+ execsql {END TRANSACTION}
+} {}
+do_test trans-3.11 {
+ set v [catch {execsql {
+ SELECT a FROM two ORDER BY a;
+ } altdb} msg]
+ lappend v $msg
+} {0 {1 4 5 10}}
+do_test trans-3.12 {
+ set v [catch {execsql {
+ SELECT a FROM one ORDER BY a;
+ } altdb} msg]
+ lappend v $msg
+} {0 {1 2 3 4}}
+do_test trans-3.13 {
+ set v [catch {execsql {
+ SELECT a FROM two ORDER BY a;
+ } db} msg]
+ lappend v $msg
+} {0 {1 4 5 10}}
+do_test trans-3.14 {
+ set v [catch {execsql {
+ SELECT a FROM one ORDER BY a;
+ } db} msg]
+ lappend v $msg
+} {0 {1 2 3 4}}
+integrity_check trans-3.15
+
+do_test trans-4.1 {
+ set v [catch {execsql {
+ COMMIT;
+ } db} msg]
+ lappend v $msg
+} {1 {cannot commit - no transaction is active}}
+do_test trans-4.2 {
+ set v [catch {execsql {
+ ROLLBACK;
+ } db} msg]
+ lappend v $msg
+} {1 {cannot rollback - no transaction is active}}
+do_test trans-4.3 {
+ set v [catch {execsql {
+ BEGIN TRANSACTION;
+ SELECT a FROM two ORDER BY a;
+ } db} msg]
+ lappend v $msg
+} {0 {1 4 5 10}}
+do_test trans-4.4 {
+ set v [catch {execsql {
+ SELECT a FROM two ORDER BY a;
+ } altdb} msg]
+ lappend v $msg
+} {1 {database is locked}}
+do_test trans-4.5 {
+ set v [catch {execsql {
+ SELECT a FROM one ORDER BY a;
+ } altdb} msg]
+ lappend v $msg
+} {1 {database is locked}}
+do_test trans-4.6 {
+ set v [catch {execsql {
+ BEGIN TRANSACTION;
+ SELECT a FROM one ORDER BY a;
+ } db} msg]
+ lappend v $msg
+} {1 {cannot start a transaction within a transaction}}
+do_test trans-4.7 {
+ set v [catch {execsql {
+ SELECT a FROM two ORDER BY a;
+ } altdb} msg]
+ lappend v $msg
+} {1 {database is locked}}
+do_test trans-4.8 {
+ set v [catch {execsql {
+ SELECT a FROM one ORDER BY a;
+ } altdb} msg]
+ lappend v $msg
+} {1 {database is locked}}
+do_test trans-4.9 {
+ set v [catch {execsql {
+ END TRANSACTION;
+ SELECT a FROM two ORDER BY a;
+ } db} msg]
+ lappend v $msg
+} {0 {1 4 5 10}}
+do_test trans-4.10 {
+ set v [catch {execsql {
+ SELECT a FROM two ORDER BY a;
+ } altdb} msg]
+ lappend v $msg
+} {0 {1 4 5 10}}
+do_test trans-4.11 {
+ set v [catch {execsql {
+ SELECT a FROM one ORDER BY a;
+ } altdb} msg]
+ lappend v $msg
+} {0 {1 2 3 4}}
+integrity_check trans-4.12
+do_test trans-4.98 {
+ altdb close
+ execsql {
+ DROP TABLE one;
+ DROP TABLE two;
+ }
+} {}
+integrity_check trans-4.99
+
+# Check out the commit/rollback behavior of the database
+#
+do_test trans-5.1 {
+ execsql {SELECT name FROM sqlite_master WHERE type='table' ORDER BY name}
+} {}
+do_test trans-5.2 {
+ execsql {BEGIN TRANSACTION}
+ execsql {SELECT name FROM sqlite_master WHERE type='table' ORDER BY name}
+} {}
+do_test trans-5.3 {
+ execsql {CREATE TABLE one(a text, b int)}
+ execsql {SELECT name FROM sqlite_master WHERE type='table' ORDER BY name}
+} {one}
+do_test trans-5.4 {
+ execsql {SELECT a,b FROM one ORDER BY b}
+} {}
+do_test trans-5.5 {
+ execsql {INSERT INTO one(a,b) VALUES('hello', 1)}
+ execsql {SELECT a,b FROM one ORDER BY b}
+} {hello 1}
+do_test trans-5.6 {
+ execsql {ROLLBACK}
+ execsql {SELECT name FROM sqlite_master WHERE type='table' ORDER BY name}
+} {}
+do_test trans-5.7 {
+ set v [catch {
+ execsql {SELECT a,b FROM one ORDER BY b}
+ } msg]
+ lappend v $msg
+} {1 {no such table: one}}
+
+# Test commits and rollbacks of CREATE TABLE, CREATE INDEX,
+# DROP TABLE and DROP INDEX statements.
+#
+do_test trans-5.8 {
+ execsql {
+ SELECT name fROM sqlite_master
+ WHERE type='table' OR type='index'
+ ORDER BY name
+ }
+} {}
+do_test trans-5.9 {
+ execsql {
+ BEGIN TRANSACTION;
+ CREATE TABLE t1(a int, b int, c int);
+ SELECT name fROM sqlite_master
+ WHERE type='table' OR type='index'
+ ORDER BY name;
+ }
+} {t1}
+do_test trans-5.10 {
+ execsql {
+ CREATE INDEX i1 ON t1(a);
+ SELECT name fROM sqlite_master
+ WHERE type='table' OR type='index'
+ ORDER BY name;
+ }
+} {i1 t1}
+do_test trans-5.11 {
+ execsql {
+ COMMIT;
+ SELECT name fROM sqlite_master
+ WHERE type='table' OR type='index'
+ ORDER BY name;
+ }
+} {i1 t1}
+do_test trans-5.12 {
+ execsql {
+ BEGIN TRANSACTION;
+ CREATE TABLE t2(a int, b int, c int);
+ CREATE INDEX i2a ON t2(a);
+ CREATE INDEX i2b ON t2(b);
+ DROP TABLE t1;
+ SELECT name fROM sqlite_master
+ WHERE type='table' OR type='index'
+ ORDER BY name;
+ }
+} {i2a i2b t2}
+do_test trans-5.13 {
+ execsql {
+ ROLLBACK;
+ SELECT name fROM sqlite_master
+ WHERE type='table' OR type='index'
+ ORDER BY name;
+ }
+} {i1 t1}
+do_test trans-5.14 {
+ execsql {
+ BEGIN TRANSACTION;
+ DROP INDEX i1;
+ SELECT name fROM sqlite_master
+ WHERE type='table' OR type='index'
+ ORDER BY name;
+ }
+} {t1}
+do_test trans-5.15 {
+ execsql {
+ ROLLBACK;
+ SELECT name fROM sqlite_master
+ WHERE type='table' OR type='index'
+ ORDER BY name;
+ }
+} {i1 t1}
+do_test trans-5.16 {
+ execsql {
+ BEGIN TRANSACTION;
+ DROP INDEX i1;
+ CREATE TABLE t2(x int, y int, z int);
+ CREATE INDEX i2x ON t2(x);
+ CREATE INDEX i2y ON t2(y);
+ INSERT INTO t2 VALUES(1,2,3);
+ SELECT name fROM sqlite_master
+ WHERE type='table' OR type='index'
+ ORDER BY name;
+ }
+} {i2x i2y t1 t2}
+do_test trans-5.17 {
+ execsql {
+ COMMIT;
+ SELECT name fROM sqlite_master
+ WHERE type='table' OR type='index'
+ ORDER BY name;
+ }
+} {i2x i2y t1 t2}
+do_test trans-5.18 {
+ execsql {
+ SELECT * FROM t2;
+ }
+} {1 2 3}
+do_test trans-5.19 {
+ execsql {
+ SELECT x FROM t2 WHERE y=2;
+ }
+} {1}
+do_test trans-5.20 {
+ execsql {
+ BEGIN TRANSACTION;
+ DROP TABLE t1;
+ DROP TABLE t2;
+ SELECT name fROM sqlite_master
+ WHERE type='table' OR type='index'
+ ORDER BY name;
+ }
+} {}
+do_test trans-5.21 {
+ set r [catch {execsql {
+ SELECT * FROM t2
+ }} msg]
+ lappend r $msg
+} {1 {no such table: t2}}
+do_test trans-5.22 {
+ execsql {
+ ROLLBACK;
+ SELECT name fROM sqlite_master
+ WHERE type='table' OR type='index'
+ ORDER BY name;
+ }
+} {i2x i2y t1 t2}
+do_test trans-5.23 {
+ execsql {
+ SELECT * FROM t2;
+ }
+} {1 2 3}
+integrity_check trans-5.23
+
+
+# Try to DROP and CREATE tables and indices with the same name
+# within a transaction. Make sure ROLLBACK works.
+#
+do_test trans-6.1 {
+ execsql2 {
+ INSERT INTO t1 VALUES(1,2,3);
+ BEGIN TRANSACTION;
+ DROP TABLE t1;
+ CREATE TABLE t1(p,q,r);
+ ROLLBACK;
+ SELECT * FROM t1;
+ }
+} {a 1 b 2 c 3}
+do_test trans-6.2 {
+ execsql2 {
+ INSERT INTO t1 VALUES(1,2,3);
+ BEGIN TRANSACTION;
+ DROP TABLE t1;
+ CREATE TABLE t1(p,q,r);
+ COMMIT;
+ SELECT * FROM t1;
+ }
+} {}
+do_test trans-6.3 {
+ execsql2 {
+ INSERT INTO t1 VALUES(1,2,3);
+ SELECT * FROM t1;
+ }
+} {p 1 q 2 r 3}
+do_test trans-6.4 {
+ execsql2 {
+ BEGIN TRANSACTION;
+ DROP TABLE t1;
+ CREATE TABLE t1(a,b,c);
+ INSERT INTO t1 VALUES(4,5,6);
+ SELECT * FROM t1;
+ DROP TABLE t1;
+ }
+} {a 4 b 5 c 6}
+do_test trans-6.5 {
+ execsql2 {
+ ROLLBACK;
+ SELECT * FROM t1;
+ }
+} {p 1 q 2 r 3}
+do_test trans-6.6 {
+ execsql2 {
+ BEGIN TRANSACTION;
+ DROP TABLE t1;
+ CREATE TABLE t1(a,b,c);
+ INSERT INTO t1 VALUES(4,5,6);
+ SELECT * FROM t1;
+ DROP TABLE t1;
+ }
+} {a 4 b 5 c 6}
+do_test trans-6.7 {
+ catchsql {
+ COMMIT;
+ SELECT * FROM t1;
+ }
+} {1 {no such table: t1}}
+
+# Repeat on a table with an automatically generated index.
+#
+do_test trans-6.10 {
+ execsql2 {
+ CREATE TABLE t1(a unique,b,c);
+ INSERT INTO t1 VALUES(1,2,3);
+ BEGIN TRANSACTION;
+ DROP TABLE t1;
+ CREATE TABLE t1(p unique,q,r);
+ ROLLBACK;
+ SELECT * FROM t1;
+ }
+} {a 1 b 2 c 3}
+do_test trans-6.11 {
+ execsql2 {
+ BEGIN TRANSACTION;
+ DROP TABLE t1;
+ CREATE TABLE t1(p unique,q,r);
+ COMMIT;
+ SELECT * FROM t1;
+ }
+} {}
+do_test trans-6.12 {
+ execsql2 {
+ INSERT INTO t1 VALUES(1,2,3);
+ SELECT * FROM t1;
+ }
+} {p 1 q 2 r 3}
+do_test trans-6.13 {
+ execsql2 {
+ BEGIN TRANSACTION;
+ DROP TABLE t1;
+ CREATE TABLE t1(a unique,b,c);
+ INSERT INTO t1 VALUES(4,5,6);
+ SELECT * FROM t1;
+ DROP TABLE t1;
+ }
+} {a 4 b 5 c 6}
+do_test trans-6.14 {
+ execsql2 {
+ ROLLBACK;
+ SELECT * FROM t1;
+ }
+} {p 1 q 2 r 3}
+do_test trans-6.15 {
+ execsql2 {
+ BEGIN TRANSACTION;
+ DROP TABLE t1;
+ CREATE TABLE t1(a unique,b,c);
+ INSERT INTO t1 VALUES(4,5,6);
+ SELECT * FROM t1;
+ DROP TABLE t1;
+ }
+} {a 4 b 5 c 6}
+do_test trans-6.16 {
+ catchsql {
+ COMMIT;
+ SELECT * FROM t1;
+ }
+} {1 {no such table: t1}}
+
+do_test trans-6.20 {
+ execsql {
+ CREATE TABLE t1(a integer primary key,b,c);
+ INSERT INTO t1 VALUES(1,-2,-3);
+ INSERT INTO t1 VALUES(4,-5,-6);
+ SELECT * FROM t1;
+ }
+} {1 -2 -3 4 -5 -6}
+do_test trans-6.21 {
+ execsql {
+ CREATE INDEX i1 ON t1(b);
+ SELECT * FROM t1 WHERE b<1;
+ }
+} {4 -5 -6 1 -2 -3}
+do_test trans-6.22 {
+ execsql {
+ BEGIN TRANSACTION;
+ DROP INDEX i1;
+ SELECT * FROM t1 WHERE b<1;
+ ROLLBACK;
+ }
+} {1 -2 -3 4 -5 -6}
+do_test trans-6.23 {
+ execsql {
+ SELECT * FROM t1 WHERE b<1;
+ }
+} {4 -5 -6 1 -2 -3}
+do_test trans-6.24 {
+ execsql {
+ BEGIN TRANSACTION;
+ DROP TABLE t1;
+ ROLLBACK;
+ SELECT * FROM t1 WHERE b<1;
+ }
+} {4 -5 -6 1 -2 -3}
+
+do_test trans-6.25 {
+ execsql {
+ BEGIN TRANSACTION;
+ DROP INDEX i1;
+ CREATE INDEX i1 ON t1(c);
+ SELECT * FROM t1 WHERE b<1;
+ }
+} {1 -2 -3 4 -5 -6}
+do_test trans-6.26 {
+ execsql {
+ SELECT * FROM t1 WHERE c<1;
+ }
+} {4 -5 -6 1 -2 -3}
+do_test trans-6.27 {
+ execsql {
+ ROLLBACK;
+ SELECT * FROM t1 WHERE b<1;
+ }
+} {4 -5 -6 1 -2 -3}
+do_test trans-6.28 {
+ execsql {
+ SELECT * FROM t1 WHERE c<1;
+ }
+} {1 -2 -3 4 -5 -6}
+
+# The following repeats steps 6.20 through 6.28, but puts a "unique"
+# constraint on the first field of the table in order to generate an
+# automatic index.
+#
+do_test trans-6.30 {
+ execsql {
+ BEGIN TRANSACTION;
+ DROP TABLE t1;
+ CREATE TABLE t1(a int unique,b,c);
+ COMMIT;
+ INSERT INTO t1 VALUES(1,-2,-3);
+ INSERT INTO t1 VALUES(4,-5,-6);
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {1 -2 -3 4 -5 -6}
+do_test trans-6.31 {
+ execsql {
+ CREATE INDEX i1 ON t1(b);
+ SELECT * FROM t1 WHERE b<1;
+ }
+} {4 -5 -6 1 -2 -3}
+do_test trans-6.32 {
+ execsql {
+ BEGIN TRANSACTION;
+ DROP INDEX i1;
+ SELECT * FROM t1 WHERE b<1;
+ ROLLBACK;
+ }
+} {1 -2 -3 4 -5 -6}
+do_test trans-6.33 {
+ execsql {
+ SELECT * FROM t1 WHERE b<1;
+ }
+} {4 -5 -6 1 -2 -3}
+do_test trans-6.34 {
+ execsql {
+ BEGIN TRANSACTION;
+ DROP TABLE t1;
+ ROLLBACK;
+ SELECT * FROM t1 WHERE b<1;
+ }
+} {4 -5 -6 1 -2 -3}
+
+do_test trans-6.35 {
+ execsql {
+ BEGIN TRANSACTION;
+ DROP INDEX i1;
+ CREATE INDEX i1 ON t1(c);
+ SELECT * FROM t1 WHERE b<1;
+ }
+} {1 -2 -3 4 -5 -6}
+do_test trans-6.36 {
+ execsql {
+ SELECT * FROM t1 WHERE c<1;
+ }
+} {4 -5 -6 1 -2 -3}
+do_test trans-6.37 {
+ execsql {
+ DROP INDEX i1;
+ SELECT * FROM t1 WHERE c<1;
+ }
+} {1 -2 -3 4 -5 -6}
+do_test trans-6.38 {
+ execsql {
+ ROLLBACK;
+ SELECT * FROM t1 WHERE b<1;
+ }
+} {4 -5 -6 1 -2 -3}
+do_test trans-6.39 {
+ execsql {
+ SELECT * FROM t1 WHERE c<1;
+ }
+} {1 -2 -3 4 -5 -6}
+integrity_check trans-6.40
+
+# Test to make sure rollback restores the database to its original
+# state.
+#
+do_test trans-7.1 {
+ execsql {BEGIN}
+ for {set i 0} {$i<1000} {incr i} {
+ set r1 [expr {rand()}]
+ set r2 [expr {rand()}]
+ set r3 [expr {rand()}]
+ execsql "INSERT INTO t2 VALUES($r1,$r2,$r3)"
+ }
+ execsql {COMMIT}
+ set ::checksum [execsql {SELECT md5sum(x,y,z) FROM t2}]
+ set ::checksum2 [
+ execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master}
+ ]
+ execsql {SELECT count(*) FROM t2}
+} {1001}
+do_test trans-7.2 {
+ execsql {SELECT md5sum(x,y,z) FROM t2}
+} $checksum
+do_test trans-7.2.1 {
+ execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master}
+} $checksum2
+do_test trans-7.3 {
+ execsql {
+ BEGIN;
+ DELETE FROM t2;
+ ROLLBACK;
+ SELECT md5sum(x,y,z) FROM t2;
+ }
+} $checksum
+do_test trans-7.4 {
+ execsql {
+ BEGIN;
+ INSERT INTO t2 SELECT * FROM t2;
+ ROLLBACK;
+ SELECT md5sum(x,y,z) FROM t2;
+ }
+} $checksum
+do_test trans-7.5 {
+ execsql {
+ BEGIN;
+ DELETE FROM t2;
+ ROLLBACK;
+ SELECT md5sum(x,y,z) FROM t2;
+ }
+} $checksum
+do_test trans-7.6 {
+ execsql {
+ BEGIN;
+ INSERT INTO t2 SELECT * FROM t2;
+ ROLLBACK;
+ SELECT md5sum(x,y,z) FROM t2;
+ }
+} $checksum
+do_test trans-7.7 {
+ execsql {
+ BEGIN;
+ CREATE TABLE t3 AS SELECT * FROM t2;
+ INSERT INTO t2 SELECT * FROM t3;
+ ROLLBACK;
+ SELECT md5sum(x,y,z) FROM t2;
+ }
+} $checksum
+do_test trans-7.8 {
+ execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master}
+} $checksum2
+do_test trans-7.9 {
+ execsql {
+ BEGIN;
+ CREATE TEMP TABLE t3 AS SELECT * FROM t2;
+ INSERT INTO t2 SELECT * FROM t3;
+ ROLLBACK;
+ SELECT md5sum(x,y,z) FROM t2;
+ }
+} $checksum
+do_test trans-7.10 {
+ execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master}
+} $checksum2
+do_test trans-7.11 {
+ execsql {
+ BEGIN;
+ CREATE TEMP TABLE t3 AS SELECT * FROM t2;
+ INSERT INTO t2 SELECT * FROM t3;
+ DROP INDEX i2x;
+ DROP INDEX i2y;
+ CREATE INDEX i3a ON t3(x);
+ ROLLBACK;
+ SELECT md5sum(x,y,z) FROM t2;
+ }
+} $checksum
+do_test trans-7.12 {
+ execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master}
+} $checksum2
+do_test trans-7.13 {
+ execsql {
+ BEGIN;
+ DROP TABLE t2;
+ ROLLBACK;
+ SELECT md5sum(x,y,z) FROM t2;
+ }
+} $checksum
+do_test trans-7.14 {
+ execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master}
+} $checksum2
+integrity_check trans-7.15
+
+# Arrange for another process to begin modifying the database but abort
+# and die in the middle of the modification. Then have this process read
+# the database. This process should detect the journal file and roll it
+# back. Verify that this happens correctly.
+#
+set fd [open test.tcl w]
+puts $fd {
+ sqlite db test.db
+ db eval {
+ PRAGMA default_cache_size=20;
+ BEGIN;
+ CREATE TABLE t3 AS SELECT * FROM t2;
+ DELETE FROM t2;
+ }
+ sqlite_abort
+}
+close $fd
+do_test trans-8.1 {
+ catch {exec [info nameofexec] test.tcl}
+ execsql {SELECT md5sum(x,y,z) FROM t2}
+} $checksum
+do_test trans-8.2 {
+ execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master}
+} $checksum2
+integrity_check trans-8.3
+
+# In the following sequence of tests, compute the MD5 sum of the content
+# of a table, make lots of modifications to that table, then do a rollback.
+# Verify that after the rollback, the MD5 checksum is unchanged.
+#
+do_test trans-9.1 {
+ execsql {
+ PRAGMA default_cache_size=10;
+ }
+ db close
+ sqlite db test.db
+ execsql {
+ BEGIN;
+ CREATE TABLE t3(x TEXT);
+ INSERT INTO t3 VALUES(randstr(10,400));
+ INSERT INTO t3 VALUES(randstr(10,400));
+ INSERT INTO t3 SELECT randstr(10,400) FROM t3;
+ INSERT INTO t3 SELECT randstr(10,400) FROM t3;
+ INSERT INTO t3 SELECT randstr(10,400) FROM t3;
+ INSERT INTO t3 SELECT randstr(10,400) FROM t3;
+ INSERT INTO t3 SELECT randstr(10,400) FROM t3;
+ INSERT INTO t3 SELECT randstr(10,400) FROM t3;
+ INSERT INTO t3 SELECT randstr(10,400) FROM t3;
+ INSERT INTO t3 SELECT randstr(10,400) FROM t3;
+ INSERT INTO t3 SELECT randstr(10,400) FROM t3;
+ COMMIT;
+ SELECT count(*) FROM t3;
+ }
+} {1024}
+
+# The following procedure computes a "signature" for table "t3". If
+# T3 changes in any way, the signature should change.
+#
+# This is used to test ROLLBACK. We gather a signature for t3, then
+# make lots of changes to t3, then rollback and take another signature.
+# The two signatures should be the same.
+#
+proc signature {} {
+ return [db eval {SELECT count(*), md5sum(x) FROM t3}]
+}
+
+# Repeat the following group of tests 20 times for quick testing and
+# 40 times for full testing. Each iteration of the test makes table
+# t3 a little larger, and thus takes a little longer, so doing 40
+# iterations takes considerably more than twice as long as doing 20.
+#
+if {[info exists ISQUICK]} {
+ set limit 20
+} else {
+ set limit 40
+}
+
+# Do rollbacks. Make sure the signature does not change.
+#
+for {set i 2} {$i<=$limit} {incr i} {
+ set ::sig [signature]
+ set cnt [lindex $::sig 0]
+ set ::journal_format [expr {($i%3)+1}]
+ if {$i%2==0} {
+ execsql {PRAGMA synchronous=FULL}
+ } else {
+ execsql {PRAGMA synchronous=NORMAL}
+ }
+ do_test trans-9.$i.1-$cnt {
+ execsql {
+ BEGIN;
+ DELETE FROM t3 WHERE random()%10!=0;
+ INSERT INTO t3 SELECT randstr(10,10)||x FROM t3;
+ INSERT INTO t3 SELECT randstr(10,10)||x FROM t3;
+ ROLLBACK;
+ }
+ signature
+ } $sig
+ do_test trans-9.$i.2-$cnt {
+ execsql {
+ BEGIN;
+ DELETE FROM t3 WHERE random()%10!=0;
+ INSERT INTO t3 SELECT randstr(10,10)||x FROM t3;
+ DELETE FROM t3 WHERE random()%10!=0;
+ INSERT INTO t3 SELECT randstr(10,10)||x FROM t3;
+ ROLLBACK;
+ }
+ signature
+ } $sig
+ if {$i<$limit} {
+ do_test trans-9.$i.9-$cnt {
+ execsql {
+ INSERT INTO t3 SELECT randstr(10,400) FROM t3 WHERE random()%10==0;
+ }
+ } {}
+ }
+ set ::pager_old_format 0
+}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/trigger1.test b/usr/src/cmd/svc/configd/sqlite/test/trigger1.test
new file mode 100644
index 0000000000..44e1091597
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/trigger1.test
@@ -0,0 +1,522 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# This file tests creating and dropping triggers, and interaction thereof
+# with the database COMMIT/ROLLBACK logic.
+#
+# 1. CREATE and DROP TRIGGER tests
+# trig-1.1: Error if table does not exist
+# trig-1.2: Error if trigger already exists
+# trig-1.3: Created triggers are deleted if the transaction is rolled back
+# trig-1.4: DROP TRIGGER removes trigger
+# trig-1.5: Dropped triggers are restored if the transaction is rolled back
+# trig-1.6: Error if dropped trigger doesn't exist
+# trig-1.7: Dropping the table automatically drops all triggers
+# trig-1.8: A trigger created on a TEMP table is not inserted into sqlite_master
+# trig-1.9: Ensure that we cannot create a trigger on sqlite_master
+# trig-1.10:
+# trig-1.11:
+# trig-1.12: Ensure that INSTEAD OF triggers cannot be created on tables
+# trig-1.13: Ensure that AFTER triggers cannot be created on views
+# trig-1.14: Ensure that BEFORE triggers cannot be created on views
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+do_test trigger1-1.1.2 {
+ catchsql {
+ CREATE TRIGGER trig UPDATE ON no_such_table BEGIN
+ SELECT * from sqlite_master;
+ END;
+ }
+} {1 {no such table: no_such_table}}
+do_test trigger1-1.1.2 {
+ catchsql {
+ CREATE TEMP TRIGGER trig UPDATE ON no_such_table BEGIN
+ SELECT * from sqlite_master;
+ END;
+ }
+} {1 {no such table: no_such_table}}
+
+execsql {
+ CREATE TABLE t1(a);
+}
+execsql {
+ CREATE TRIGGER tr1 INSERT ON t1 BEGIN
+ INSERT INTO t1 values(1);
+ END;
+}
+do_test trigger1-1.2 {
+ catchsql {
+ CREATE TRIGGER tr1 DELETE ON t1 BEGIN
+ SELECT * FROM sqlite_master;
+ END
+ }
+} {1 {trigger tr1 already exists}}
+
+do_test trigger1-1.3 {
+ catchsql {
+ BEGIN;
+ CREATE TRIGGER tr2 INSERT ON t1 BEGIN
+ SELECT * from sqlite_master; END;
+ ROLLBACK;
+ CREATE TRIGGER tr2 INSERT ON t1 BEGIN
+ SELECT * from sqlite_master; END;
+ }
+} {0 {}}
+
+do_test trigger1-1.4 {
+ catchsql {
+ DROP TRIGGER tr1;
+ CREATE TRIGGER tr1 DELETE ON t1 BEGIN
+ SELECT * FROM sqlite_master;
+ END
+ }
+} {0 {}}
+
+do_test trigger1-1.5 {
+ execsql {
+ BEGIN;
+ DROP TRIGGER tr2;
+ ROLLBACK;
+ DROP TRIGGER tr2;
+ }
+} {}
+
+do_test trigger1-1.6 {
+ catchsql {
+ DROP TRIGGER biggles;
+ }
+} {1 {no such trigger: biggles}}
+
+do_test trigger1-1.7 {
+ catchsql {
+ DROP TABLE t1;
+ DROP TRIGGER tr1;
+ }
+} {1 {no such trigger: tr1}}
+
+execsql {
+ CREATE TEMP TABLE temp_table(a);
+}
+do_test trigger1-1.8 {
+ execsql {
+ CREATE TRIGGER temp_trig UPDATE ON temp_table BEGIN
+ SELECT * from sqlite_master;
+ END;
+ SELECT count(*) FROM sqlite_master WHERE name = 'temp_trig';
+ }
+} {0}
+
+do_test trigger1-1.9 {
+ catchsql {
+ CREATE TRIGGER tr1 AFTER UPDATE ON sqlite_master BEGIN
+ SELECT * FROM sqlite_master;
+ END;
+ }
+} {1 {cannot create trigger on system table}}
+
+# Check to make sure that a DELETE statement within the body of
+# a trigger does not mess up the DELETE that caused the trigger to
+# run in the first place.
+#
+do_test trigger1-1.10 {
+ execsql {
+ create table t1(a,b);
+ insert into t1 values(1,'a');
+ insert into t1 values(2,'b');
+ insert into t1 values(3,'c');
+ insert into t1 values(4,'d');
+ create trigger r1 after delete on t1 for each row begin
+ delete from t1 WHERE a=old.a+2;
+ end;
+ delete from t1 where a in (1,3);
+ select * from t1;
+ drop table t1;
+ }
+} {2 b 4 d}
+do_test trigger1-1.11 {
+ execsql {
+ create table t1(a,b);
+ insert into t1 values(1,'a');
+ insert into t1 values(2,'b');
+ insert into t1 values(3,'c');
+ insert into t1 values(4,'d');
+ create trigger r1 after update on t1 for each row begin
+ delete from t1 WHERE a=old.a+2;
+ end;
+ update t1 set b='x-' || b where a in (1,3);
+ select * from t1;
+ drop table t1;
+ }
+} {1 x-a 2 b 4 d}
+
+# Ensure that we cannot create INSTEAD OF triggers on tables
+do_test trigger1-1.12 {
+ catchsql {
+ create table t1(a,b);
+ create trigger t1t instead of update on t1 for each row begin
+ delete from t1 WHERE a=old.a+2;
+ end;
+ }
+} {1 {cannot create INSTEAD OF trigger on table: t1}}
+# Ensure that we cannot create BEFORE triggers on views
+do_test trigger1-1.13 {
+ catchsql {
+ create view v1 as select * from t1;
+ create trigger v1t before update on v1 for each row begin
+ delete from t1 WHERE a=old.a+2;
+ end;
+ }
+} {1 {cannot create BEFORE trigger on view: v1}}
+# Ensure that we cannot create AFTER triggers on views
+do_test trigger1-1.14 {
+ catchsql {
+ drop view v1;
+ create view v1 as select * from t1;
+ create trigger v1t AFTER update on v1 for each row begin
+ delete from t1 WHERE a=old.a+2;
+ end;
+ }
+} {1 {cannot create AFTER trigger on view: v1}}
+
+# Check for memory leaks in the trigger parser
+#
+do_test trigger1-2.1 {
+ catchsql {
+ CREATE TRIGGER r1 AFTER INSERT ON t1 BEGIN
+ SELECT * FROM; -- Syntax error
+ END;
+ }
+} {1 {near ";": syntax error}}
+do_test trigger1-2.2 {
+ catchsql {
+ CREATE TRIGGER r1 AFTER INSERT ON t1 BEGIN
+ SELECT * FROM t1;
+ SELECT * FROM; -- Syntax error
+ END;
+ }
+} {1 {near ";": syntax error}}
+
+# Create a trigger that refers to a table that might not exist.
+#
+do_test trigger1-3.1 {
+ execsql {
+ CREATE TEMP TABLE t2(x,y);
+ }
+ catchsql {
+ CREATE TRIGGER r1 AFTER INSERT ON t1 BEGIN
+ INSERT INTO t2 VALUES(NEW.a,NEW.b);
+ END;
+ }
+} {0 {}}
+do_test trigger-3.2 {
+ catchsql {
+ INSERT INTO t1 VALUES(1,2);
+ SELECT * FROM t2;
+ }
+} {1 {no such table: main.t2}}
+do_test trigger-3.3 {
+ db close
+ set rc [catch {sqlite db test.db} err]
+ if {$rc} {lappend rc $err}
+ set rc
+} {0}
+do_test trigger-3.4 {
+ catchsql {
+ INSERT INTO t1 VALUES(1,2);
+ SELECT * FROM t2;
+ }
+} {1 {no such table: main.t2}}
+do_test trigger-3.5 {
+ catchsql {
+ CREATE TEMP TABLE t2(x,y);
+ INSERT INTO t1 VALUES(1,2);
+ SELECT * FROM t2;
+ }
+} {1 {no such table: main.t2}}
+do_test trigger-3.6 {
+ catchsql {
+ DROP TRIGGER r1;
+ CREATE TEMP TRIGGER r1 AFTER INSERT ON t1 BEGIN
+ INSERT INTO t2 VALUES(NEW.a,NEW.b);
+ END;
+ INSERT INTO t1 VALUES(1,2);
+ SELECT * FROM t2;
+ }
+} {0 {1 2}}
+do_test trigger-3.7 {
+ execsql {
+ DROP TABLE t2;
+ CREATE TABLE t2(x,y);
+ SELECT * FROM t2;
+ }
+} {}
+do_test trigger-3.8 {
+ execsql {
+ INSERT INTO t1 VALUES(3,4);
+ SELECT * FROM t1 UNION ALL SELECT * FROM t2;
+ }
+} {1 2 3 4 3 4}
+do_test trigger-3.9 {
+ db close
+ sqlite db test.db
+ execsql {
+ INSERT INTO t1 VALUES(5,6);
+ SELECT * FROM t1 UNION ALL SELECT * FROM t2;
+ }
+} {1 2 3 4 5 6 3 4}
+
+do_test trigger-4.1 {
+ execsql {
+ CREATE TEMP TRIGGER r1 BEFORE INSERT ON t1 BEGIN
+ INSERT INTO t2 VALUES(NEW.a,NEW.b);
+ END;
+ INSERT INTO t1 VALUES(7,8);
+ SELECT * FROM t2;
+ }
+} {3 4 7 8}
+do_test trigger-4.2 {
+ sqlite db2 test.db
+ execsql {
+ INSERT INTO t1 VALUES(9,10);
+ } db2;
+ db2 close
+ execsql {
+ SELECT * FROM t2;
+ }
+} {3 4 7 8}
+do_test trigger-4.3 {
+ execsql {
+ DROP TABLE t1;
+ SELECT * FROM t2;
+ };
+} {3 4 7 8}
+do_test trigger-4.4 {
+ db close
+ sqlite db test.db
+ execsql {
+ SELECT * FROM t2;
+ };
+} {3 4 7 8}
+
+integrity_check trigger-5.1
+
+# Create a trigger with the same name as a table. Make sure the
+# trigger works. Then drop the trigger. Make sure the table is
+# still there.
+#
+do_test trigger-6.1 {
+ execsql {SELECT type, name FROM sqlite_master}
+} {view v1 table t2}
+do_test trigger-6.2 {
+ execsql {
+ CREATE TRIGGER t2 BEFORE DELETE ON t2 BEGIN
+ SELECT RAISE(ABORT,'deletes are not allows');
+ END;
+ SELECT type, name FROM sqlite_master;
+ }
+} {view v1 table t2 trigger t2}
+do_test trigger-6.3 {
+ catchsql {DELETE FROM t2}
+} {1 {deletes are not allows}}
+do_test trigger-6.4 {
+ execsql {SELECT * FROM t2}
+} {3 4 7 8}
+do_test trigger-6.5 {
+ db close
+ sqlite db test.db
+ execsql {SELECT type, name FROM sqlite_master}
+} {view v1 table t2 trigger t2}
+do_test trigger-6.6 {
+ execsql {
+ DROP TRIGGER t2;
+ SELECT type, name FROM sqlite_master;
+ }
+} {view v1 table t2}
+do_test trigger-6.7 {
+ execsql {SELECT * FROM t2}
+} {3 4 7 8}
+do_test trigger-6.8 {
+ db close
+ sqlite db test.db
+ execsql {SELECT * FROM t2}
+} {3 4 7 8}
+
+integrity_check trigger-7.1
+
+# Check to make sure the name of a trigger can be quoted so that keywords
+# can be used as trigger names. Ticket #468
+#
+do_test trigger-8.1 {
+ execsql {
+ CREATE TRIGGER 'trigger' AFTER INSERT ON t2 BEGIN SELECT 1; END;
+ SELECT name FROM sqlite_master WHERE type='trigger';
+ }
+} {trigger}
+do_test trigger-8.2 {
+ execsql {
+ DROP TRIGGER 'trigger';
+ SELECT name FROM sqlite_master WHERE type='trigger';
+ }
+} {}
+do_test trigger-8.3 {
+ execsql {
+ CREATE TRIGGER "trigger" AFTER INSERT ON t2 BEGIN SELECT 1; END;
+ SELECT name FROM sqlite_master WHERE type='trigger';
+ }
+} {trigger}
+do_test trigger-8.4 {
+ execsql {
+ DROP TRIGGER "trigger";
+ SELECT name FROM sqlite_master WHERE type='trigger';
+ }
+} {}
+do_test trigger-8.5 {
+ execsql {
+ CREATE TRIGGER [trigger] AFTER INSERT ON t2 BEGIN SELECT 1; END;
+ SELECT name FROM sqlite_master WHERE type='trigger';
+ }
+} {trigger}
+do_test trigger-8.6 {
+ execsql {
+ DROP TRIGGER [trigger];
+ SELECT name FROM sqlite_master WHERE type='trigger';
+ }
+} {}
+
+# Make sure REPLACE works inside of triggers.
+#
+do_test trigger-9.1 {
+ execsql {
+ CREATE TABLE t3(a,b);
+ CREATE TABLE t4(x UNIQUE, b);
+ CREATE TRIGGER r34 AFTER INSERT ON t3 BEGIN
+ REPLACE INTO t4 VALUES(new.a,new.b);
+ END;
+ INSERT INTO t3 VALUES(1,2);
+ SELECT * FROM t3 UNION ALL SELECT 99, 99 UNION ALL SELECT * FROM t4;
+ }
+} {1 2 99 99 1 2}
+do_test trigger-9.2 {
+ execsql {
+ INSERT INTO t3 VALUES(1,3);
+ SELECT * FROM t3 UNION ALL SELECT 99, 99 UNION ALL SELECT * FROM t4;
+ }
+} {1 2 1 3 99 99 1 3}
+
+execsql {
+ DROP TABLE t2;
+ DROP TABLE t3;
+ DROP TABLE t4;
+}
+
+# Ticket #764. At one stage TEMP triggers would fail to re-install when the
+# schema was reloaded. The following tests ensure that TEMP triggers are
+# correctly re-installed.
+#
+# Also verify that references within trigger programs are resolved at
+# statement compile time, not trigger installation time. This means, for
+# example, that you can drop and re-create tables referenced by triggers.
+do_test trigger-10.0 {
+ file delete -force test2.db
+ file delete -force test2.db-journal
+ sqlite db2 test2.db
+ execsql {CREATE TABLE t3(a, b, c);} db2
+ db2 close
+ execsql {
+ ATTACH 'test2.db' AS aux;
+ }
+} {}
+do_test trigger-10.1 {
+ execsql {
+ CREATE TABLE t1(a, b, c);
+ CREATE temp TABLE t2(a, b, c);
+ CREATE TABLE insert_log(db, a, b, c);
+ }
+} {}
+do_test trigger-10.2 {
+ execsql {
+ CREATE TEMP TRIGGER trig1 AFTER INSERT ON t1 BEGIN
+ INSERT INTO insert_log VALUES('main', new.a, new.b, new.c);
+ END;
+ CREATE TEMP TRIGGER trig2 AFTER INSERT ON t2 BEGIN
+ INSERT INTO insert_log VALUES('temp', new.a, new.b, new.c);
+ END;
+ CREATE TEMP TRIGGER trig3 AFTER INSERT ON t3 BEGIN
+ INSERT INTO insert_log VALUES('aux', new.a, new.b, new.c);
+ END;
+ }
+} {}
+do_test trigger-10.3 {
+ execsql {
+ INSERT INTO t1 VALUES(1, 2, 3);
+ INSERT INTO t2 VALUES(4, 5, 6);
+ INSERT INTO t3 VALUES(7, 8, 9);
+ }
+} {}
+do_test trigger-10.4 {
+ execsql {
+ SELECT * FROM insert_log;
+ }
+} {main 1 2 3 temp 4 5 6 aux 7 8 9}
+do_test trigger-10.5 {
+ execsql {
+ BEGIN;
+ INSERT INTO t1 VALUES(1, 2, 3);
+ INSERT INTO t2 VALUES(4, 5, 6);
+ INSERT INTO t3 VALUES(7, 8, 9);
+ ROLLBACK;
+ }
+} {}
+do_test trigger-10.6 {
+ execsql {
+ SELECT * FROM insert_log;
+ }
+} {main 1 2 3 temp 4 5 6 aux 7 8 9}
+do_test trigger-10.7 {
+ execsql {
+ DELETE FROM insert_log;
+ INSERT INTO t1 VALUES(11, 12, 13);
+ INSERT INTO t2 VALUES(14, 15, 16);
+ INSERT INTO t3 VALUES(17, 18, 19);
+ }
+} {}
+do_test trigger-10.8 {
+ execsql {
+ SELECT * FROM insert_log;
+ }
+} {main 11 12 13 temp 14 15 16 aux 17 18 19}
+do_test trigger-10.8 {
+# Drop and re-create the insert_log table in a different database. Note
+# that we can change the column names because the trigger programs don't
+# use them explicitly.
+ execsql {
+ DROP TABLE insert_log;
+ CREATE temp TABLE insert_log(db, d, e, f);
+ }
+} {}
+do_test trigger-10.10 {
+ execsql {
+ INSERT INTO t1 VALUES(21, 22, 23);
+ INSERT INTO t2 VALUES(24, 25, 26);
+ INSERT INTO t3 VALUES(27, 28, 29);
+ }
+} {}
+do_test trigger-10.11 {
+ execsql {
+ SELECT * FROM insert_log;
+ }
+} {main 21 22 23 temp 24 25 26 aux 27 28 29}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/trigger2.test b/usr/src/cmd/svc/configd/sqlite/test/trigger2.test
new file mode 100644
index 0000000000..7567f01fcd
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/trigger2.test
@@ -0,0 +1,721 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# Regression testing of FOR EACH ROW table triggers
+#
+# 1. Trigger execution order tests.
+# These tests ensure that BEFORE and AFTER triggers are fired at the correct
+# times relative to each other and the triggering statement.
+#
+# trigger2-1.1.*: ON UPDATE trigger execution model.
+# trigger2-1.2.*: DELETE trigger execution model.
+# trigger2-1.3.*: INSERT trigger execution model.
+#
+# 2. Trigger program execution tests.
+# These tests ensure that trigger programs execute correctly (i.e. that a
+# trigger program can correctly execute INSERT, UPDATE, DELETE and SELECT
+# statements, and combinations thereof).
+#
+# 3. Selective trigger execution
+# This tests that conditional triggers (i.e. UPDATE OF triggers and triggers
+# with WHEN clauses) are fired only when they are supposed to be.
+#
+# trigger2-3.1: UPDATE OF triggers
+# trigger2-3.2: WHEN clause
+#
+# 4. Cascaded trigger execution
+# Tests that trigger-programs may cause other triggers to fire. Also that a
+# trigger-program is never executed recursively.
+#
+# trigger2-4.1: Trivial cascading trigger
+# trigger2-4.2: Trivial recursive trigger handling
+#
+# 5. Count changes behaviour.
+# Verify that rows altered by triggers are not included in the return value
+# of the "count changes" interface.
+#
+# 6. ON CONFLICT clause handling
+# trigger2-6.1[a-f]: INSERT statements
+# trigger2-6.2[a-f]: UPDATE statements
+#
+# 7. Triggers on views fire correctly.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# 1.
+set ii 0
+foreach tbl_defn {
+ {CREATE TEMP TABLE tbl (a, b);}
+ {CREATE TABLE tbl (a, b);}
+ {CREATE TABLE tbl (a INTEGER PRIMARY KEY, b);}
+ {CREATE TEMPORARY TABLE tbl (a INTEGER PRIMARY KEY, b);}
+ {CREATE TABLE tbl (a, b PRIMARY KEY);}
+ {CREATE TABLE tbl (a, b); CREATE INDEX tbl_idx ON tbl(b);}
+ {CREATE TEMP TABLE tbl (a, b); CREATE INDEX tbl_idx ON tbl(b);}
+} {
+ incr ii
+ catchsql { DROP INDEX tbl_idx; }
+ catchsql {
+ DROP TABLE rlog;
+ DROP TABLE clog;
+ DROP TABLE tbl;
+ DROP TABLE other_tbl;
+ }
+
+ execsql $tbl_defn
+
+ execsql {
+ INSERT INTO tbl VALUES(1, 2);
+ INSERT INTO tbl VALUES(3, 4);
+
+ CREATE TABLE rlog (idx, old_a, old_b, db_sum_a, db_sum_b, new_a, new_b);
+ CREATE TABLE clog (idx, old_a, old_b, db_sum_a, db_sum_b, new_a, new_b);
+
+ CREATE TRIGGER before_update_row BEFORE UPDATE ON tbl FOR EACH ROW
+ BEGIN
+ INSERT INTO rlog VALUES ( (SELECT coalesce(max(idx),0) + 1 FROM rlog),
+ old.a, old.b,
+ (SELECT sum(a) FROM tbl), (SELECT sum(b) FROM tbl),
+ new.a, new.b);
+ END;
+
+ CREATE TRIGGER after_update_row AFTER UPDATE ON tbl FOR EACH ROW
+ BEGIN
+ INSERT INTO rlog VALUES ( (SELECT coalesce(max(idx),0) + 1 FROM rlog),
+ old.a, old.b,
+ (SELECT sum(a) FROM tbl), (SELECT sum(b) FROM tbl),
+ new.a, new.b);
+ END;
+
+ CREATE TRIGGER conditional_update_row AFTER UPDATE ON tbl FOR EACH ROW
+ WHEN old.a = 1
+ BEGIN
+ INSERT INTO clog VALUES ( (SELECT coalesce(max(idx),0) + 1 FROM clog),
+ old.a, old.b,
+ (SELECT sum(a) FROM tbl), (SELECT sum(b) FROM tbl),
+ new.a, new.b);
+ END;
+ }
+
+ do_test trigger2-1.$ii.1 {
+ execsql {
+ UPDATE tbl SET a = a * 10, b = b * 10;
+ SELECT * FROM rlog ORDER BY idx;
+ SELECT * FROM clog ORDER BY idx;
+ }
+ } [list 1 1 2 4 6 10 20 \
+ 2 1 2 13 24 10 20 \
+ 3 3 4 13 24 30 40 \
+ 4 3 4 40 60 30 40 \
+ 1 1 2 13 24 10 20 ]
+
+ execsql {
+ DELETE FROM rlog;
+ DELETE FROM tbl;
+ INSERT INTO tbl VALUES (100, 100);
+ INSERT INTO tbl VALUES (300, 200);
+ CREATE TRIGGER delete_before_row BEFORE DELETE ON tbl FOR EACH ROW
+ BEGIN
+ INSERT INTO rlog VALUES ( (SELECT coalesce(max(idx),0) + 1 FROM rlog),
+ old.a, old.b,
+ (SELECT sum(a) FROM tbl), (SELECT sum(b) FROM tbl),
+ 0, 0);
+ END;
+
+ CREATE TRIGGER delete_after_row AFTER DELETE ON tbl FOR EACH ROW
+ BEGIN
+ INSERT INTO rlog VALUES ( (SELECT coalesce(max(idx),0) + 1 FROM rlog),
+ old.a, old.b,
+ (SELECT sum(a) FROM tbl), (SELECT sum(b) FROM tbl),
+ 0, 0);
+ END;
+ }
+ do_test trigger2-1.$ii.2 {
+ execsql {
+ DELETE FROM tbl;
+ SELECT * FROM rlog;
+ }
+ } [list 1 100 100 400 300 0 0 \
+ 2 100 100 300 200 0 0 \
+ 3 300 200 300 200 0 0 \
+ 4 300 200 0 0 0 0 ]
+
+ execsql {
+ DELETE FROM rlog;
+ CREATE TRIGGER insert_before_row BEFORE INSERT ON tbl FOR EACH ROW
+ BEGIN
+ INSERT INTO rlog VALUES ( (SELECT coalesce(max(idx),0) + 1 FROM rlog),
+ 0, 0,
+ (SELECT sum(a) FROM tbl), (SELECT sum(b) FROM tbl),
+ new.a, new.b);
+ END;
+
+ CREATE TRIGGER insert_after_row AFTER INSERT ON tbl FOR EACH ROW
+ BEGIN
+ INSERT INTO rlog VALUES ( (SELECT coalesce(max(idx),0) + 1 FROM rlog),
+ 0, 0,
+ (SELECT sum(a) FROM tbl), (SELECT sum(b) FROM tbl),
+ new.a, new.b);
+ END;
+ }
+ do_test trigger2-1.$ii.3 {
+ execsql {
+
+ CREATE TABLE other_tbl(a, b);
+ INSERT INTO other_tbl VALUES(1, 2);
+ INSERT INTO other_tbl VALUES(3, 4);
+ -- INSERT INTO tbl SELECT * FROM other_tbl;
+ INSERT INTO tbl VALUES(5, 6);
+ DROP TABLE other_tbl;
+
+ SELECT * FROM rlog;
+ }
+ } [list 1 0 0 0 0 5 6 \
+ 2 0 0 5 6 5 6 ]
+
+ do_test trigger2-1.$ii.4 {
+ execsql {
+ PRAGMA integrity_check;
+ }
+ } {ok}
+}
+catchsql {
+ DROP TABLE rlog;
+ DROP TABLE clog;
+ DROP TABLE tbl;
+ DROP TABLE other_tbl;
+}
+
+# 2.
+set ii 0
+foreach tr_program {
+ {UPDATE tbl SET b = old.b;}
+ {INSERT INTO log VALUES(new.c, 2, 3);}
+ {DELETE FROM log WHERE a = 1;}
+ {INSERT INTO tbl VALUES(500, new.b * 10, 700);
+ UPDATE tbl SET c = old.c;
+ DELETE FROM log;}
+ {INSERT INTO log select * from tbl;}
+} {
+ foreach test_varset [ list \
+ {
+ set statement {UPDATE tbl SET c = 10 WHERE a = 1;}
+ set prep {INSERT INTO tbl VALUES(1, 2, 3);}
+ set newC 10
+ set newB 2
+ set newA 1
+ set oldA 1
+ set oldB 2
+ set oldC 3
+ } \
+ {
+ set statement {DELETE FROM tbl WHERE a = 1;}
+ set prep {INSERT INTO tbl VALUES(1, 2, 3);}
+ set oldA 1
+ set oldB 2
+ set oldC 3
+ } \
+ {
+ set statement {INSERT INTO tbl VALUES(1, 2, 3);}
+ set newA 1
+ set newB 2
+ set newC 3
+ }
+ ] \
+ {
+ set statement {}
+ set prep {}
+ set newA {''}
+ set newB {''}
+ set newC {''}
+ set oldA {''}
+ set oldB {''}
+ set oldC {''}
+
+ incr ii
+
+ eval $test_varset
+
+ set statement_type [string range $statement 0 5]
+ set tr_program_fixed $tr_program
+ if {$statement_type == "DELETE"} {
+ regsub -all new\.a $tr_program_fixed {''} tr_program_fixed
+ regsub -all new\.b $tr_program_fixed {''} tr_program_fixed
+ regsub -all new\.c $tr_program_fixed {''} tr_program_fixed
+ }
+ if {$statement_type == "INSERT"} {
+ regsub -all old\.a $tr_program_fixed {''} tr_program_fixed
+ regsub -all old\.b $tr_program_fixed {''} tr_program_fixed
+ regsub -all old\.c $tr_program_fixed {''} tr_program_fixed
+ }
+
+
+ set tr_program_cooked $tr_program
+ regsub -all new\.a $tr_program_cooked $newA tr_program_cooked
+ regsub -all new\.b $tr_program_cooked $newB tr_program_cooked
+ regsub -all new\.c $tr_program_cooked $newC tr_program_cooked
+ regsub -all old\.a $tr_program_cooked $oldA tr_program_cooked
+ regsub -all old\.b $tr_program_cooked $oldB tr_program_cooked
+ regsub -all old\.c $tr_program_cooked $oldC tr_program_cooked
+
+ catchsql {
+ DROP TABLE tbl;
+ DROP TABLE log;
+ }
+
+ execsql {
+ CREATE TABLE tbl(a PRIMARY KEY, b, c);
+ CREATE TABLE log(a, b, c);
+ }
+
+ set query {SELECT * FROM tbl; SELECT * FROM log;}
+ set prep "$prep; INSERT INTO log VALUES(1, 2, 3);\
+ INSERT INTO log VALUES(10, 20, 30);"
+
+# Check execution of BEFORE programs:
+
+ set before_data [ execsql "$prep $tr_program_cooked $statement $query" ]
+
+ execsql "DELETE FROM tbl; DELETE FROM log; $prep";
+ execsql "CREATE TRIGGER the_trigger BEFORE [string range $statement 0 6]\
+ ON tbl BEGIN $tr_program_fixed END;"
+
+ do_test trigger2-2.$ii-before "execsql {$statement $query}" $before_data
+
+ execsql "DROP TRIGGER the_trigger;"
+ execsql "DELETE FROM tbl; DELETE FROM log;"
+
+# Check execution of AFTER programs
+ set after_data [ execsql "$prep $statement $tr_program_cooked $query" ]
+
+ execsql "DELETE FROM tbl; DELETE FROM log; $prep";
+ execsql "CREATE TRIGGER the_trigger AFTER [string range $statement 0 6]\
+ ON tbl BEGIN $tr_program_fixed END;"
+
+ do_test trigger2-2.$ii-after "execsql {$statement $query}" $after_data
+ execsql "DROP TRIGGER the_trigger;"
+
+ do_test trigger2-2.$ii-integrity {
+ execsql {
+ PRAGMA integrity_check;
+ }
+ } {ok}
+
+ }
+}
+catchsql {
+ DROP TABLE tbl;
+ DROP TABLE log;
+}
+
+# 3.
+
+# trigger2-3.1: UPDATE OF triggers
+execsql {
+ CREATE TABLE tbl (a, b, c, d);
+ CREATE TABLE log (a);
+ INSERT INTO log VALUES (0);
+ INSERT INTO tbl VALUES (0, 0, 0, 0);
+ INSERT INTO tbl VALUES (1, 0, 0, 0);
+ CREATE TRIGGER tbl_after_update_cd BEFORE UPDATE OF c, d ON tbl
+ BEGIN
+ UPDATE log SET a = a + 1;
+ END;
+}
+do_test trigger2-3.1 {
+ execsql {
+ UPDATE tbl SET b = 1, c = 10; -- 2
+ UPDATE tbl SET b = 10; -- 0
+ UPDATE tbl SET d = 4 WHERE a = 0; --1
+ UPDATE tbl SET a = 4, b = 10; --0
+ SELECT * FROM log;
+ }
+} {3}
+execsql {
+ DROP TABLE tbl;
+ DROP TABLE log;
+}
+
+# trigger2-3.2: WHEN clause
+set when_triggers [ list \
+ {t1 BEFORE INSERT ON tbl WHEN new.a > 20} \
+ {t2 BEFORE INSERT ON tbl WHEN (SELECT count(*) FROM tbl) = 0} ]
+
+execsql {
+ CREATE TABLE tbl (a, b, c, d);
+ CREATE TABLE log (a);
+ INSERT INTO log VALUES (0);
+}
+
+foreach trig $when_triggers {
+ execsql "CREATE TRIGGER $trig BEGIN UPDATE log set a = a + 1; END;"
+}
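+# For example, the first list entry expands to:
+#   CREATE TRIGGER t1 BEFORE INSERT ON tbl WHEN new.a > 20
+#   BEGIN UPDATE log set a = a + 1; END;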
+
+do_test trigger2-3.2 {
+ execsql {
+
+ INSERT INTO tbl VALUES(0, 0, 0, 0); -- 1
+ SELECT * FROM log;
+ UPDATE log SET a = 0;
+
+ INSERT INTO tbl VALUES(0, 0, 0, 0); -- 0
+ SELECT * FROM log;
+ UPDATE log SET a = 0;
+
+ INSERT INTO tbl VALUES(200, 0, 0, 0); -- 1
+ SELECT * FROM log;
+ UPDATE log SET a = 0;
+ }
+} {1 0 1}
+execsql {
+ DROP TABLE tbl;
+ DROP TABLE log;
+}
+do_test trigger2-3.3 {
+ execsql {
+ PRAGMA integrity_check;
+ }
+} {ok}
+
+# Simple cascaded trigger
+execsql {
+ CREATE TABLE tblA(a, b);
+ CREATE TABLE tblB(a, b);
+ CREATE TABLE tblC(a, b);
+
+ CREATE TRIGGER tr1 BEFORE INSERT ON tblA BEGIN
+ INSERT INTO tblB values(new.a, new.b);
+ END;
+
+ CREATE TRIGGER tr2 BEFORE INSERT ON tblB BEGIN
+ INSERT INTO tblC values(new.a, new.b);
+ END;
+}
+do_test trigger2-4.1 {
+ execsql {
+ INSERT INTO tblA values(1, 2);
+ SELECT * FROM tblA;
+ SELECT * FROM tblB;
+ SELECT * FROM tblC;
+ }
+} {1 2 1 2 1 2}
+execsql {
+ DROP TABLE tblA;
+ DROP TABLE tblB;
+ DROP TABLE tblC;
+}
+
+# Simple recursive trigger
+execsql {
+ CREATE TABLE tbl(a, b, c);
+ CREATE TRIGGER tbl_trig BEFORE INSERT ON tbl
+ BEGIN
+ INSERT INTO tbl VALUES (new.a, new.b, new.c);
+ END;
+}
+do_test trigger2-4.2 {
+ execsql {
+ INSERT INTO tbl VALUES (1, 2, 3);
+ select * from tbl;
+ }
+} {1 2 3 1 2 3}
+execsql {
+ DROP TABLE tbl;
+}
+
+# 5.
+execsql {
+ CREATE TABLE tbl(a, b, c);
+ CREATE TRIGGER tbl_trig BEFORE INSERT ON tbl
+ BEGIN
+ INSERT INTO tbl VALUES (1, 2, 3);
+ INSERT INTO tbl VALUES (2, 2, 3);
+ UPDATE tbl set b = 10 WHERE a = 1;
+ DELETE FROM tbl WHERE a = 1;
+ DELETE FROM tbl;
+ END;
+}
+do_test trigger2-5 {
+ execsql {
+ INSERT INTO tbl VALUES(100, 200, 300);
+ }
+ db changes
+} {1}
+execsql {
+ DROP TABLE tbl;
+}
+
+# Handling of ON CONFLICT by INSERT statements inside triggers
+execsql {
+ CREATE TABLE tbl (a primary key, b, c);
+ CREATE TRIGGER ai_tbl AFTER INSERT ON tbl BEGIN
+ INSERT OR IGNORE INTO tbl values (new.a, 0, 0);
+ END;
+}
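+# The tests below run inside a transaction so the conflict algorithms can
+# be compared: ABORT undoes only the failing statement, FAIL keeps the
+# changes already made, REPLACE overwrites the conflicting row, and
+# ROLLBACK abandons the whole transaction (hence the empty result in
+# trigger2-6.1h).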
+do_test trigger2-6.1a {
+ execsql {
+ BEGIN;
+ INSERT INTO tbl values (1, 2, 3);
+ SELECT * from tbl;
+ }
+} {1 2 3}
+do_test trigger2-6.1b {
+ catchsql {
+ INSERT OR ABORT INTO tbl values (2, 2, 3);
+ }
+} {1 {column a is not unique}}
+do_test trigger2-6.1c {
+ execsql {
+ SELECT * from tbl;
+ }
+} {1 2 3}
+do_test trigger2-6.1d {
+ catchsql {
+ INSERT OR FAIL INTO tbl values (2, 2, 3);
+ }
+} {1 {column a is not unique}}
+do_test trigger2-6.1e {
+ execsql {
+ SELECT * from tbl;
+ }
+} {1 2 3 2 2 3}
+do_test trigger2-6.1f {
+ execsql {
+ INSERT OR REPLACE INTO tbl values (2, 2, 3);
+ SELECT * from tbl;
+ }
+} {1 2 3 2 0 0}
+do_test trigger2-6.1g {
+ catchsql {
+ INSERT OR ROLLBACK INTO tbl values (3, 2, 3);
+ }
+} {1 {column a is not unique}}
+do_test trigger2-6.1h {
+ execsql {
+ SELECT * from tbl;
+ }
+} {}
+execsql {DELETE FROM tbl}
+
+
+# Handling of ON CONFLICT by UPDATE statements inside triggers
+execsql {
+ INSERT INTO tbl values (4, 2, 3);
+ INSERT INTO tbl values (6, 3, 4);
+ CREATE TRIGGER au_tbl AFTER UPDATE ON tbl BEGIN
+ UPDATE OR IGNORE tbl SET a = new.a, c = 10;
+ END;
+}
+do_test trigger2-6.2a {
+ execsql {
+ BEGIN;
+ UPDATE tbl SET a = 1 WHERE a = 4;
+ SELECT * from tbl;
+ }
+} {1 2 10 6 3 4}
+do_test trigger2-6.2b {
+ catchsql {
+ UPDATE OR ABORT tbl SET a = 4 WHERE a = 1;
+ }
+} {1 {column a is not unique}}
+do_test trigger2-6.2c {
+ execsql {
+ SELECT * from tbl;
+ }
+} {1 2 10 6 3 4}
+do_test trigger2-6.2d {
+ catchsql {
+ UPDATE OR FAIL tbl SET a = 4 WHERE a = 1;
+ }
+} {1 {column a is not unique}}
+do_test trigger2-6.2e {
+ execsql {
+ SELECT * from tbl;
+ }
+} {4 2 10 6 3 4}
+do_test trigger2-6.2f.1 {
+ execsql {
+ UPDATE OR REPLACE tbl SET a = 1 WHERE a = 4;
+ SELECT * from tbl;
+ }
+} {1 3 10}
+do_test trigger2-6.2f.2 {
+ execsql {
+ INSERT INTO tbl VALUES (2, 3, 4);
+ SELECT * FROM tbl;
+ }
+} {1 3 10 2 3 4}
+do_test trigger2-6.2g {
+ catchsql {
+ UPDATE OR ROLLBACK tbl SET a = 4 WHERE a = 1;
+ }
+} {1 {column a is not unique}}
+do_test trigger2-6.2h {
+ execsql {
+ SELECT * from tbl;
+ }
+} {4 2 3 6 3 4}
+execsql {
+ DROP TABLE tbl;
+}
+
+# 7. Triggers on views
+do_test trigger2-7.1 {
+ execsql {
+ CREATE TABLE ab(a, b);
+ CREATE TABLE cd(c, d);
+ INSERT INTO ab VALUES (1, 2);
+ INSERT INTO ab VALUES (0, 0);
+ INSERT INTO cd VALUES (3, 4);
+
+ CREATE TABLE tlog(ii INTEGER PRIMARY KEY,
+ olda, oldb, oldc, oldd, newa, newb, newc, newd);
+
+ CREATE VIEW abcd AS SELECT a, b, c, d FROM ab, cd;
+
+ CREATE TRIGGER before_update INSTEAD OF UPDATE ON abcd BEGIN
+ INSERT INTO tlog VALUES(NULL,
+ old.a, old.b, old.c, old.d, new.a, new.b, new.c, new.d);
+ END;
+ CREATE TRIGGER after_update INSTEAD OF UPDATE ON abcd BEGIN
+ INSERT INTO tlog VALUES(NULL,
+ old.a, old.b, old.c, old.d, new.a, new.b, new.c, new.d);
+ END;
+
+ CREATE TRIGGER before_delete INSTEAD OF DELETE ON abcd BEGIN
+ INSERT INTO tlog VALUES(NULL,
+ old.a, old.b, old.c, old.d, 0, 0, 0, 0);
+ END;
+ CREATE TRIGGER after_delete INSTEAD OF DELETE ON abcd BEGIN
+ INSERT INTO tlog VALUES(NULL,
+ old.a, old.b, old.c, old.d, 0, 0, 0, 0);
+ END;
+
+ CREATE TRIGGER before_insert INSTEAD OF INSERT ON abcd BEGIN
+ INSERT INTO tlog VALUES(NULL,
+ 0, 0, 0, 0, new.a, new.b, new.c, new.d);
+ END;
+ CREATE TRIGGER after_insert INSTEAD OF INSERT ON abcd BEGIN
+ INSERT INTO tlog VALUES(NULL,
+ 0, 0, 0, 0, new.a, new.b, new.c, new.d);
+ END;
+ }
+} {};
+
+do_test trigger2-7.2 {
+ execsql {
+ UPDATE abcd SET a = 100, b = 5*5 WHERE a = 1;
+ DELETE FROM abcd WHERE a = 1;
+ INSERT INTO abcd VALUES(10, 20, 30, 40);
+ SELECT * FROM tlog;
+ }
+} [ list 1 1 2 3 4 100 25 3 4 \
+ 2 1 2 3 4 100 25 3 4 \
+ 3 1 2 3 4 0 0 0 0 \
+ 4 1 2 3 4 0 0 0 0 \
+ 5 0 0 0 0 10 20 30 40 \
+ 6 0 0 0 0 10 20 30 40 ]
+
+do_test trigger2-7.3 {
+ execsql {
+ DELETE FROM tlog;
+ INSERT INTO abcd VALUES(10, 20, 30, 40);
+ UPDATE abcd SET a = 100, b = 5*5 WHERE a = 1;
+ DELETE FROM abcd WHERE a = 1;
+ SELECT * FROM tlog;
+ }
+} [ list \
+ 1 0 0 0 0 10 20 30 40 \
+ 2 0 0 0 0 10 20 30 40 \
+ 3 1 2 3 4 100 25 3 4 \
+ 4 1 2 3 4 100 25 3 4 \
+ 5 1 2 3 4 0 0 0 0 \
+ 6 1 2 3 4 0 0 0 0 \
+]
+do_test trigger2-7.4 {
+ execsql {
+ DELETE FROM tlog;
+ DELETE FROM abcd WHERE a = 1;
+ INSERT INTO abcd VALUES(10, 20, 30, 40);
+ UPDATE abcd SET a = 100, b = 5*5 WHERE a = 1;
+ SELECT * FROM tlog;
+ }
+} [ list \
+ 1 1 2 3 4 0 0 0 0 \
+ 2 1 2 3 4 0 0 0 0 \
+ 3 0 0 0 0 10 20 30 40 \
+ 4 0 0 0 0 10 20 30 40 \
+ 5 1 2 3 4 100 25 3 4 \
+ 6 1 2 3 4 100 25 3 4 \
+]
+
+do_test trigger2-8.1 {
+ execsql {
+ CREATE TABLE t1(a,b,c);
+ INSERT INTO t1 VALUES(1,2,3);
+ CREATE VIEW v1 AS
+ SELECT a+b AS x, b+c AS y, a+c AS z FROM t1;
+ SELECT * FROM v1;
+ }
+} {3 5 4}
+do_test trigger2-8.2 {
+ execsql {
+ CREATE TABLE v1log(a,b,c,d,e,f);
+ CREATE TRIGGER r1 INSTEAD OF DELETE ON v1 BEGIN
+ INSERT INTO v1log VALUES(OLD.x,NULL,OLD.y,NULL,OLD.z,NULL);
+ END;
+ DELETE FROM v1 WHERE x=1;
+ SELECT * FROM v1log;
+ }
+} {}
+do_test trigger2-8.3 {
+ execsql {
+ DELETE FROM v1 WHERE x=3;
+ SELECT * FROM v1log;
+ }
+} {3 {} 5 {} 4 {}}
+do_test trigger2-8.4 {
+ execsql {
+ INSERT INTO t1 VALUES(4,5,6);
+ DELETE FROM v1log;
+ DELETE FROM v1 WHERE y=11;
+ SELECT * FROM v1log;
+ }
+} {9 {} 11 {} 10 {}}
+do_test trigger2-8.5 {
+ execsql {
+ CREATE TRIGGER r2 INSTEAD OF INSERT ON v1 BEGIN
+ INSERT INTO v1log VALUES(NULL,NEW.x,NULL,NEW.y,NULL,NEW.z);
+ END;
+ DELETE FROM v1log;
+ INSERT INTO v1 VALUES(1,2,3);
+ SELECT * FROM v1log;
+ }
+} {{} 1 {} 2 {} 3}
+do_test trigger2-8.6 {
+ execsql {
+ CREATE TRIGGER r3 INSTEAD OF UPDATE ON v1 BEGIN
+ INSERT INTO v1log VALUES(OLD.x,NEW.x,OLD.y,NEW.y,OLD.z,NEW.z);
+ END;
+ DELETE FROM v1log;
+ UPDATE v1 SET x=x+100, y=y+200, z=z+300;
+ SELECT * FROM v1log;
+ }
+} {3 103 5 205 4 304 9 109 11 211 10 310}
+
+do_test trigger2-9.9 {
+ execsql {PRAGMA integrity_check}
+} {ok}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/trigger3.test b/usr/src/cmd/svc/configd/sqlite/test/trigger3.test
new file mode 100644
index 0000000000..bf4101600b
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/trigger3.test
@@ -0,0 +1,169 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# This file tests the RAISE() function.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Test that we can cause ROLLBACK, FAIL and ABORT correctly
+# catchsql { DROP TABLE tbl; }
+catchsql { CREATE TABLE tbl (a, b, c) }
+
+execsql {
+ CREATE TRIGGER before_tbl_insert BEFORE INSERT ON tbl BEGIN SELECT CASE
+ WHEN (new.a = 4) THEN RAISE(IGNORE) END;
+ END;
+
+ CREATE TRIGGER after_tbl_insert AFTER INSERT ON tbl BEGIN SELECT CASE
+ WHEN (new.a = 1) THEN RAISE(ABORT, 'Trigger abort')
+ WHEN (new.a = 2) THEN RAISE(FAIL, 'Trigger fail')
+ WHEN (new.a = 3) THEN RAISE(ROLLBACK, 'Trigger rollback') END;
+ END;
+}
+# ABORT
+do_test trigger3-1.1 {
+ catchsql {
+ BEGIN;
+ INSERT INTO tbl VALUES (5, 5, 6);
+ INSERT INTO tbl VALUES (1, 5, 6);
+ }
+} {1 {Trigger abort}}
+do_test trigger3-1.2 {
+ execsql {
+ SELECT * FROM tbl;
+ ROLLBACK;
+ }
+} {5 5 6}
+do_test trigger3-1.3 {
+ execsql {SELECT * FROM tbl}
+} {}
+
+# FAIL
+do_test trigger3-2.1 {
+ catchsql {
+ BEGIN;
+ INSERT INTO tbl VALUES (5, 5, 6);
+ INSERT INTO tbl VALUES (2, 5, 6);
+ }
+} {1 {Trigger fail}}
+do_test trigger3-2.2 {
+ execsql {
+ SELECT * FROM tbl;
+ ROLLBACK;
+ }
+} {5 5 6 2 5 6}
+# ROLLBACK
+do_test trigger3-3.1 {
+ catchsql {
+ BEGIN;
+ INSERT INTO tbl VALUES (5, 5, 6);
+ INSERT INTO tbl VALUES (3, 5, 6);
+ }
+} {1 {Trigger rollback}}
+do_test trigger3-3.2 {
+ execsql {
+ SELECT * FROM tbl;
+ }
+} {}
+# IGNORE
+do_test trigger3-4.1 {
+ catchsql {
+ BEGIN;
+ INSERT INTO tbl VALUES (5, 5, 6);
+ INSERT INTO tbl VALUES (4, 5, 6);
+ }
+} {0 {}}
+do_test trigger3-4.2 {
+ execsql {
+ SELECT * FROM tbl;
+ ROLLBACK;
+ }
+} {5 5 6}
+
+# Check that we can also do RAISE(IGNORE) for UPDATE and DELETE
+execsql {DROP TABLE tbl;}
+execsql {CREATE TABLE tbl (a, b, c);}
+execsql {INSERT INTO tbl VALUES(1, 2, 3);}
+execsql {INSERT INTO tbl VALUES(4, 5, 6);}
+execsql {
+ CREATE TRIGGER before_tbl_update BEFORE UPDATE ON tbl BEGIN
+ SELECT CASE WHEN (old.a = 1) THEN RAISE(IGNORE) END;
+ END;
+
+ CREATE TRIGGER before_tbl_delete BEFORE DELETE ON tbl BEGIN
+ SELECT CASE WHEN (old.a = 1) THEN RAISE(IGNORE) END;
+ END;
+}
+do_test trigger3-5.1 {
+ execsql {
+ UPDATE tbl SET c = 10;
+ SELECT * FROM tbl;
+ }
+} {1 2 3 4 5 10}
+do_test trigger3-5.2 {
+ execsql {
+ DELETE FROM tbl;
+ SELECT * FROM tbl;
+ }
+} {1 2 3}
+
+# Check that RAISE(IGNORE) works correctly for nested triggers:
+execsql {CREATE TABLE tbl2(a, b, c)}
+execsql {
+ CREATE TRIGGER after_tbl2_insert AFTER INSERT ON tbl2 BEGIN
+ UPDATE tbl SET c = 10;
+ INSERT INTO tbl2 VALUES (new.a, new.b, new.c);
+ END;
+}
+do_test trigger3-6 {
+ execsql {
+ INSERT INTO tbl2 VALUES (1, 2, 3);
+ SELECT * FROM tbl2;
+ SELECT * FROM tbl;
+ }
+} {1 2 3 1 2 3 1 2 3}
+
+# Check that things also work for view-triggers
+execsql {CREATE VIEW tbl_view AS SELECT * FROM tbl}
+execsql {
+ CREATE TRIGGER tbl_view_insert INSTEAD OF INSERT ON tbl_view BEGIN
+ SELECT CASE WHEN (new.a = 1) THEN RAISE(ROLLBACK, 'View rollback')
+ WHEN (new.a = 2) THEN RAISE(IGNORE)
+ WHEN (new.a = 3) THEN RAISE(ABORT, 'View abort') END;
+ END;
+}
+
+do_test trigger3-7.1 {
+ catchsql {
+ INSERT INTO tbl_view VALUES(1, 2, 3);
+ }
+} {1 {View rollback}}
+do_test trigger3-7.2 {
+ catchsql {
+ INSERT INTO tbl_view VALUES(2, 2, 3);
+ }
+} {0 {}}
+do_test trigger3-7.3 {
+ catchsql {
+ INSERT INTO tbl_view VALUES(3, 2, 3);
+ }
+} {1 {View abort}}
+
+integrity_check trigger3-8.1
+
+catchsql { DROP TABLE tbl; }
+catchsql { DROP TABLE tbl2; }
+catchsql { DROP VIEW tbl_view; }
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/trigger4.test b/usr/src/cmd/svc/configd/sqlite/test/trigger4.test
new file mode 100644
index 0000000000..9f6301bee1
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/trigger4.test
@@ -0,0 +1,130 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# This file tests triggers on views.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+do_test trigger4-1.1 {
+ execsql {
+ create table test1(id integer primary key,a);
+ create table test2(id integer,b);
+ create view test as
+ select test1.id as id,a as a,b as b
+ from test1 join test2 on test2.id = test1.id;
+ create trigger I_test instead of insert on test
+ begin
+ insert into test1 (id,a) values (NEW.id,NEW.a);
+ insert into test2 (id,b) values (NEW.id,NEW.b);
+ end;
+ insert into test values(1,2,3);
+ select * from test1;
+ }
+} {1 2}
+do_test trigger4-1.2 {
+ execsql {
+ select * from test2;
+ }
+} {1 3}
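+# Closing and reopening the database below verifies that the INSTEAD OF
+# trigger still fires once the schema has been re-read from disk.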
+do_test trigger4-1.3 {
+ db close
+ sqlite db test.db
+ execsql {
+ insert into test values(4,5,6);
+ select * from test1;
+ }
+} {1 2 4 5}
+do_test trigger4-1.4 {
+ execsql {
+ select * from test2;
+ }
+} {1 3 4 6}
+
+do_test trigger4-2.1 {
+ execsql {
+ create trigger U_test instead of update on test
+ begin
+ update test1 set a=NEW.a where id=NEW.id;
+ update test2 set b=NEW.b where id=NEW.id;
+ end;
+ update test set a=22 where id=1;
+ select * from test1;
+ }
+} {1 22 4 5}
+do_test trigger4-2.2 {
+ execsql {
+ select * from test2;
+ }
+} {1 3 4 6}
+do_test trigger4-2.3 {
+ db close
+ sqlite db test.db
+ execsql {
+ update test set b=66 where id=4;
+ select * from test1;
+ }
+} {1 22 4 5}
+do_test trigger4-2.4 {
+ execsql {
+ select * from test2;
+ }
+} {1 3 4 66}
+
+do_test trigger4-3.1 {
+ catchsql {
+ drop table test2;
+ insert into test values(7,8,9);
+ }
+} {1 {no such table: main.test2}}
+do_test trigger4-3.2 {
+ db close
+ sqlite db test.db
+ catchsql {
+ insert into test values(7,8,9);
+ }
+} {1 {no such table: main.test2}}
+do_test trigger4-3.3 {
+ catchsql {
+ update test set a=222 where id=1;
+ }
+} {1 {no such table: main.test2}}
+do_test trigger4-3.4 {
+ execsql {
+ select * from test1;
+ }
+} {1 22 4 5}
+do_test trigger4-3.5 {
+ execsql {
+ create table test2(id,b);
+ insert into test values(7,8,9);
+ select * from test1;
+ }
+} {1 22 4 5 7 8}
+do_test trigger4-3.6 {
+ execsql {
+ select * from test2;
+ }
+} {7 9}
+do_test trigger4-3.7 {
+ db close
+ sqlite db test.db
+ execsql {
+ update test set b=99 where id=7;
+ select * from test2;
+ }
+} {7 99}
+
+integrity_check trigger4-4.1
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/unique.test b/usr/src/cmd/svc/configd/sqlite/test/unique.test
new file mode 100644
index 0000000000..9b6977bf71
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/unique.test
@@ -0,0 +1,235 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 27
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing the CREATE UNIQUE INDEX statement,
+# primary keys, and the UNIQUE constraint on table columns.
+#
+# $Id: unique.test,v 1.7 2003/08/05 13:13:39 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Try to create a table with two primary keys.
+# (This is not valid SQL, and SQLite rejects it, as the test below verifies.)
+#
+do_test unique-1.1 {
+ catchsql {
+ CREATE TABLE t1(
+ a int PRIMARY KEY,
+ b int PRIMARY KEY,
+ c text
+ );
+ }
+} {1 {table "t1" has more than one primary key}}
+do_test unique-1.1b {
+ catchsql {
+ CREATE TABLE t1(
+ a int PRIMARY KEY,
+ b int UNIQUE,
+ c text
+ );
+ }
+} {0 {}}
+do_test unique-1.2 {
+ catchsql {
+ INSERT INTO t1(a,b,c) VALUES(1,2,3)
+ }
+} {0 {}}
+do_test unique-1.3 {
+ catchsql {
+ INSERT INTO t1(a,b,c) VALUES(1,3,4)
+ }
+} {1 {column a is not unique}}
+do_test unique-1.4 {
+ execsql {
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {1 2 3}
+do_test unique-1.5 {
+ catchsql {
+ INSERT INTO t1(a,b,c) VALUES(3,2,4)
+ }
+} {1 {column b is not unique}}
+do_test unique-1.6 {
+ execsql {
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {1 2 3}
+do_test unique-1.7 {
+ catchsql {
+ INSERT INTO t1(a,b,c) VALUES(3,4,5)
+ }
+} {0 {}}
+do_test unique-1.8 {
+ execsql {
+ SELECT * FROM t1 ORDER BY a;
+ }
+} {1 2 3 3 4 5}
+integrity_check unique-1.9
+
+do_test unique-2.0 {
+ execsql {
+ DROP TABLE t1;
+ CREATE TABLE t2(a int, b int);
+ INSERT INTO t2(a,b) VALUES(1,2);
+ INSERT INTO t2(a,b) VALUES(3,4);
+ SELECT * FROM t2 ORDER BY a;
+ }
+} {1 2 3 4}
+do_test unique-2.1 {
+ catchsql {
+ CREATE UNIQUE INDEX i2 ON t2(a)
+ }
+} {0 {}}
+do_test unique-2.2 {
+ catchsql {
+ SELECT * FROM t2 ORDER BY a
+ }
+} {0 {1 2 3 4}}
+do_test unique-2.3 {
+ catchsql {
+ INSERT INTO t2 VALUES(1,5);
+ }
+} {1 {column a is not unique}}
+do_test unique-2.4 {
+ catchsql {
+ SELECT * FROM t2 ORDER BY a
+ }
+} {0 {1 2 3 4}}
+do_test unique-2.5 {
+ catchsql {
+ DROP INDEX i2;
+ SELECT * FROM t2 ORDER BY a;
+ }
+} {0 {1 2 3 4}}
+do_test unique-2.6 {
+ catchsql {
+ INSERT INTO t2 VALUES(1,5)
+ }
+} {0 {}}
+do_test unique-2.7 {
+ catchsql {
+ SELECT * FROM t2 ORDER BY a, b;
+ }
+} {0 {1 2 1 5 3 4}}
+do_test unique-2.8 {
+ catchsql {
+ CREATE UNIQUE INDEX i2 ON t2(a);
+ }
+} {1 {indexed columns are not unique}}
+do_test unique-2.9 {
+ catchsql {
+ CREATE INDEX i2 ON t2(a);
+ }
+} {0 {}}
+integrity_check unique-2.10
+
+# Test the UNIQUE keyword as used on two or more fields.
+#
+do_test unique-3.1 {
+ catchsql {
+ CREATE TABLE t3(
+ a int,
+ b int,
+ c int,
+ d int,
+ unique(a,c,d)
+ );
+ }
+} {0 {}}
+do_test unique-3.2 {
+ catchsql {
+ INSERT INTO t3(a,b,c,d) VALUES(1,2,3,4);
+ SELECT * FROM t3 ORDER BY a,b,c,d;
+ }
+} {0 {1 2 3 4}}
+do_test unique-3.3 {
+ catchsql {
+ INSERT INTO t3(a,b,c,d) VALUES(1,2,3,5);
+ SELECT * FROM t3 ORDER BY a,b,c,d;
+ }
+} {0 {1 2 3 4 1 2 3 5}}
+do_test unique-3.4 {
+ catchsql {
+ INSERT INTO t3(a,b,c,d) VALUES(1,4,3,5);
+ SELECT * FROM t3 ORDER BY a,b,c,d;
+ }
+} {1 {columns a, c, d are not unique}}
+integrity_check unique-3.5
+
+# Make sure NULLs are distinct as far as the UNIQUE tests are
+# concerned.
+#
+do_test unique-4.1 {
+ execsql {
+ CREATE TABLE t4(a UNIQUE, b, c, UNIQUE(b,c));
+ INSERT INTO t4 VALUES(1,2,3);
+ INSERT INTO t4 VALUES(NULL, 2, NULL);
+ SELECT * FROM t4;
+ }
+} {1 2 3 {} 2 {}}
+do_test unique-4.2 {
+ catchsql {
+ INSERT INTO t4 VALUES(NULL, 3, 4);
+ }
+} {0 {}}
+do_test unique-4.3 {
+ execsql {
+ SELECT * FROM t4
+ }
+} {1 2 3 {} 2 {} {} 3 4}
+do_test unique-4.4 {
+ catchsql {
+ INSERT INTO t4 VALUES(2, 2, NULL);
+ }
+} {0 {}}
+do_test unique-4.5 {
+ execsql {
+ SELECT * FROM t4
+ }
+} {1 2 3 {} 2 {} {} 3 4 2 2 {}}
+integrity_check unique-4.6
+
+# Test the error message generation logic. In particular, make sure we
+# do not overflow the static buffer used to generate the error message.
+#
+do_test unique-5.1 {
+ execsql {
+ CREATE TABLE t5(
+ first_column_with_long_name,
+ second_column_with_long_name,
+ third_column_with_long_name,
+ fourth_column_with_long_name,
+ fifth_column_with_long_name,
+ sixth_column_with_long_name,
+ UNIQUE(
+ first_column_with_long_name,
+ second_column_with_long_name,
+ third_column_with_long_name,
+ fourth_column_with_long_name,
+ fifth_column_with_long_name,
+ sixth_column_with_long_name
+ )
+ );
+ INSERT INTO t5 VALUES(1,2,3,4,5,6);
+ SELECT * FROM t5;
+ }
+} {1 2 3 4 5 6}
+do_test unique-5.2 {
+ catchsql {
+ INSERT INTO t5 VALUES(1,2,3,4,5,6);
+ }
+} {1 {columns first_column_with_long_name, second_column_with_long_name, third_column_with_long_name, fourth_column_with_long_name, fifth_column_with_long_name, ... are not unique}}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/update.test b/usr/src/cmd/svc/configd/sqlite/test/update.test
new file mode 100644
index 0000000000..48e270f704
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/update.test
@@ -0,0 +1,565 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing the UPDATE statement.
+#
+# $Id: update.test,v 1.15 2004/02/10 13:41:53 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Try to update a non-existent table
+#
+do_test update-1.1 {
+ set v [catch {execsql {UPDATE test1 SET f2=5 WHERE f1<1}} msg]
+ lappend v $msg
+} {1 {no such table: test1}}
+
+# Try to update a read-only table
+#
+do_test update-2.1 {
+ set v [catch \
+ {execsql {UPDATE sqlite_master SET name='xyz' WHERE name='123'}} msg]
+ lappend v $msg
+} {1 {table sqlite_master may not be modified}}
+
+# Create a table to work with
+#
+do_test update-3.1 {
+ execsql {CREATE TABLE test1(f1 int,f2 int)}
+ for {set i 1} {$i<=10} {incr i} {
+ set sql "INSERT INTO test1 VALUES($i,[expr {int(pow(2,$i))}])"
+ execsql $sql
+ }
+ execsql {SELECT * FROM test1 ORDER BY f1}
+} {1 2 2 4 3 8 4 16 5 32 6 64 7 128 8 256 9 512 10 1024}
+
+# Unknown column name in an expression
+#
+do_test update-3.2 {
+ set v [catch {execsql {UPDATE test1 SET f1=f3*2 WHERE f2==32}} msg]
+ lappend v $msg
+} {1 {no such column: f3}}
+do_test update-3.3 {
+ set v [catch {execsql {UPDATE test1 SET f1=test2.f1*2 WHERE f2==32}} msg]
+ lappend v $msg
+} {1 {no such column: test2.f1}}
+do_test update-3.4 {
+ set v [catch {execsql {UPDATE test1 SET f3=f1*2 WHERE f2==32}} msg]
+ lappend v $msg
+} {1 {no such column: f3}}
+
+# Actually do some updates
+#
+do_test update-3.5 {
+ execsql {UPDATE test1 SET f2=f2*3}
+} {}
+do_test update-3.6 {
+ execsql {SELECT * FROM test1 ORDER BY f1}
+} {1 6 2 12 3 24 4 48 5 96 6 192 7 384 8 768 9 1536 10 3072}
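+# With count_changes=on each UPDATE also reports the number of rows it
+# modified, which is what the bare numeric results below are checking.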
+do_test update-3.7 {
+ execsql {PRAGMA count_changes=on}
+ execsql {UPDATE test1 SET f2=f2/3 WHERE f1<=5}
+} {5}
+do_test update-3.8 {
+ execsql {SELECT * FROM test1 ORDER BY f1}
+} {1 2 2 4 3 8 4 16 5 32 6 192 7 384 8 768 9 1536 10 3072}
+do_test update-3.9 {
+ execsql {UPDATE test1 SET f2=f2/3 WHERE f1>5}
+} {5}
+do_test update-3.10 {
+ execsql {SELECT * FROM test1 ORDER BY f1}
+} {1 2 2 4 3 8 4 16 5 32 6 64 7 128 8 256 9 512 10 1024}
+
+# Swap the values of f1 and f2 for all elements
+#
+do_test update-3.11 {
+ execsql {UPDATE test1 SET F2=f1, F1=f2}
+} {10}
+do_test update-3.12 {
+ execsql {SELECT * FROM test1 ORDER BY F1}
+} {2 1 4 2 8 3 16 4 32 5 64 6 128 7 256 8 512 9 1024 10}
+do_test update-3.13 {
+ execsql {PRAGMA count_changes=off}
+ execsql {UPDATE test1 SET F2=f1, F1=f2}
+} {}
+do_test update-3.14 {
+ execsql {SELECT * FROM test1 ORDER BY F1}
+} {1 2 2 4 3 8 4 16 5 32 6 64 7 128 8 256 9 512 10 1024}
+
+# Create duplicate entries and make sure updating still
+# works.
+#
+do_test update-4.0 {
+ execsql {
+ DELETE FROM test1 WHERE f1<=5;
+ INSERT INTO test1(f1,f2) VALUES(8,88);
+ INSERT INTO test1(f1,f2) VALUES(8,888);
+ INSERT INTO test1(f1,f2) VALUES(77,128);
+ INSERT INTO test1(f1,f2) VALUES(777,128);
+ }
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128}
+do_test update-4.1 {
+ execsql {UPDATE test1 SET f2=f2+1 WHERE f1==8}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 7 128 8 89 8 257 8 889 9 512 10 1024 77 128 777 128}
+do_test update-4.2 {
+ execsql {UPDATE test1 SET f2=f2-1 WHERE f1==8 and f2>800}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 7 128 8 89 8 257 8 888 9 512 10 1024 77 128 777 128}
+do_test update-4.3 {
+ execsql {UPDATE test1 SET f2=f2-1 WHERE f1==8 and f2<800}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128}
+do_test update-4.4 {
+ execsql {UPDATE test1 SET f1=f1+1 WHERE f2==128}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 8 88 8 128 8 256 8 888 9 512 10 1024 78 128 778 128}
+do_test update-4.5 {
+ execsql {UPDATE test1 SET f1=f1-1 WHERE f1>100 and f2==128}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 8 88 8 128 8 256 8 888 9 512 10 1024 78 128 777 128}
+do_test update-4.6 {
+ execsql {
+ PRAGMA count_changes=on;
+ UPDATE test1 SET f1=f1-1 WHERE f1<=100 and f2==128;
+ }
+} {2}
+do_test update-4.7 {
+ execsql {
+ PRAGMA count_changes=off;
+ SELECT * FROM test1 ORDER BY f1,f2
+ }
+} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128}
+
+# Repeat the previous sequence of tests with an index.
+#
+do_test update-5.0 {
+ execsql {CREATE INDEX idx1 ON test1(f1)}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128}
+do_test update-5.1 {
+ execsql {UPDATE test1 SET f2=f2+1 WHERE f1==8}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 7 128 8 89 8 257 8 889 9 512 10 1024 77 128 777 128}
+do_test update-5.2 {
+ execsql {UPDATE test1 SET f2=f2-1 WHERE f1==8 and f2>800}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 7 128 8 89 8 257 8 888 9 512 10 1024 77 128 777 128}
+do_test update-5.3 {
+ execsql {UPDATE test1 SET f2=f2-1 WHERE f1==8 and f2<800}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128}
+do_test update-5.4 {
+ execsql {UPDATE test1 SET f1=f1+1 WHERE f2==128}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 8 88 8 128 8 256 8 888 9 512 10 1024 78 128 778 128}
+do_test update-5.4.1 {
+ execsql {SELECT * FROM test1 WHERE f1==78 ORDER BY f1,f2}
+} {78 128}
+do_test update-5.4.2 {
+ execsql {SELECT * FROM test1 WHERE f1==778 ORDER BY f1,f2}
+} {778 128}
+do_test update-5.4.3 {
+ execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2}
+} {8 88 8 128 8 256 8 888}
+do_test update-5.5 {
+ execsql {UPDATE test1 SET f1=f1-1 WHERE f1>100 and f2==128}
+} {}
+do_test update-5.5.1 {
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 8 88 8 128 8 256 8 888 9 512 10 1024 78 128 777 128}
+do_test update-5.5.2 {
+ execsql {SELECT * FROM test1 WHERE f1==78 ORDER BY f1,f2}
+} {78 128}
+do_test update-5.5.3 {
+ execsql {SELECT * FROM test1 WHERE f1==778 ORDER BY f1,f2}
+} {}
+do_test update-5.5.4 {
+ execsql {SELECT * FROM test1 WHERE f1==777 ORDER BY f1,f2}
+} {777 128}
+do_test update-5.5.5 {
+ execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2}
+} {8 88 8 128 8 256 8 888}
+do_test update-5.6 {
+ execsql {
+ PRAGMA count_changes=on;
+ UPDATE test1 SET f1=f1-1 WHERE f1<=100 and f2==128;
+ }
+} {2}
+do_test update-5.6.1 {
+ execsql {
+ PRAGMA count_changes=off;
+ SELECT * FROM test1 ORDER BY f1,f2
+ }
+} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128}
+do_test update-5.6.2 {
+ execsql {SELECT * FROM test1 WHERE f1==77 ORDER BY f1,f2}
+} {77 128}
+do_test update-5.6.3 {
+ execsql {SELECT * FROM test1 WHERE f1==778 ORDER BY f1,f2}
+} {}
+do_test update-5.6.4 {
+ execsql {SELECT * FROM test1 WHERE f1==777 ORDER BY f1,f2}
+} {777 128}
+do_test update-5.6.5 {
+ execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2}
+} {8 88 8 256 8 888}
+
+# Repeat the previous sequence of tests with a different index.
+#
+execsql {PRAGMA synchronous=FULL}
+do_test update-6.0 {
+ execsql {DROP INDEX idx1}
+ execsql {CREATE INDEX idx1 ON test1(f2)}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128}
+do_test update-6.1 {
+ execsql {UPDATE test1 SET f2=f2+1 WHERE f1==8}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 7 128 8 89 8 257 8 889 9 512 10 1024 77 128 777 128}
+do_test update-6.1.1 {
+ execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2}
+} {8 89 8 257 8 889}
+do_test update-6.1.2 {
+ execsql {SELECT * FROM test1 WHERE f2==89 ORDER BY f1,f2}
+} {8 89}
+do_test update-6.1.3 {
+ execsql {SELECT * FROM test1 WHERE f1==88 ORDER BY f1,f2}
+} {}
+do_test update-6.2 {
+ execsql {UPDATE test1 SET f2=f2-1 WHERE f1==8 and f2>800}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 7 128 8 89 8 257 8 888 9 512 10 1024 77 128 777 128}
+do_test update-6.3 {
+ execsql {UPDATE test1 SET f2=f2-1 WHERE f1==8 and f2<800}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128}
+do_test update-6.3.1 {
+ execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2}
+} {8 88 8 256 8 888}
+do_test update-6.3.2 {
+ execsql {SELECT * FROM test1 WHERE f2==89 ORDER BY f1,f2}
+} {}
+do_test update-6.3.3 {
+ execsql {SELECT * FROM test1 WHERE f2==88 ORDER BY f1,f2}
+} {8 88}
+do_test update-6.4 {
+ execsql {UPDATE test1 SET f1=f1+1 WHERE f2==128}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 8 88 8 128 8 256 8 888 9 512 10 1024 78 128 778 128}
+do_test update-6.4.1 {
+ execsql {SELECT * FROM test1 WHERE f1==78 ORDER BY f1,f2}
+} {78 128}
+do_test update-6.4.2 {
+ execsql {SELECT * FROM test1 WHERE f1==778 ORDER BY f1,f2}
+} {778 128}
+do_test update-6.4.3 {
+ execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2}
+} {8 88 8 128 8 256 8 888}
+do_test update-6.5 {
+ execsql {UPDATE test1 SET f1=f1-1 WHERE f1>100 and f2==128}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 8 88 8 128 8 256 8 888 9 512 10 1024 78 128 777 128}
+do_test update-6.5.1 {
+ execsql {SELECT * FROM test1 WHERE f1==78 ORDER BY f1,f2}
+} {78 128}
+do_test update-6.5.2 {
+ execsql {SELECT * FROM test1 WHERE f1==778 ORDER BY f1,f2}
+} {}
+do_test update-6.5.3 {
+ execsql {SELECT * FROM test1 WHERE f1==777 ORDER BY f1,f2}
+} {777 128}
+do_test update-6.5.4 {
+ execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2}
+} {8 88 8 128 8 256 8 888}
+do_test update-6.6 {
+ execsql {UPDATE test1 SET f1=f1-1 WHERE f1<=100 and f2==128}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128}
+do_test update-6.6.1 {
+ execsql {SELECT * FROM test1 WHERE f1==77 ORDER BY f1,f2}
+} {77 128}
+do_test update-6.6.2 {
+ execsql {SELECT * FROM test1 WHERE f1==778 ORDER BY f1,f2}
+} {}
+do_test update-6.6.3 {
+ execsql {SELECT * FROM test1 WHERE f1==777 ORDER BY f1,f2}
+} {777 128}
+do_test update-6.6.4 {
+ execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2}
+} {8 88 8 256 8 888}
+
+# Repeat the previous sequence of tests with multiple
+# indices
+#
+do_test update-7.0 {
+ execsql {CREATE INDEX idx2 ON test1(f2)}
+ execsql {CREATE INDEX idx3 ON test1(f1,f2)}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128}
+do_test update-7.1 {
+ execsql {UPDATE test1 SET f2=f2+1 WHERE f1==8}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 7 128 8 89 8 257 8 889 9 512 10 1024 77 128 777 128}
+do_test update-7.1.1 {
+ execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2}
+} {8 89 8 257 8 889}
+do_test update-7.1.2 {
+ execsql {SELECT * FROM test1 WHERE f2==89 ORDER BY f1,f2}
+} {8 89}
+do_test update-7.1.3 {
+ execsql {SELECT * FROM test1 WHERE f1==88 ORDER BY f1,f2}
+} {}
+do_test update-7.2 {
+ execsql {UPDATE test1 SET f2=f2-1 WHERE f1==8 and f2>800}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 7 128 8 89 8 257 8 888 9 512 10 1024 77 128 777 128}
+do_test update-7.3 {
+ # explain {UPDATE test1 SET f2=f2-1 WHERE f1==8 and F2<300}
+ execsql {UPDATE test1 SET f2=f2-1 WHERE f1==8 and f2<800}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128}
+do_test update-7.3.1 {
+ execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2}
+} {8 88 8 256 8 888}
+do_test update-7.3.2 {
+ execsql {SELECT * FROM test1 WHERE f2==89 ORDER BY f1,f2}
+} {}
+do_test update-7.3.3 {
+ execsql {SELECT * FROM test1 WHERE f2==88 ORDER BY f1,f2}
+} {8 88}
+do_test update-7.4 {
+ execsql {UPDATE test1 SET f1=f1+1 WHERE f2==128}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 8 88 8 128 8 256 8 888 9 512 10 1024 78 128 778 128}
+do_test update-7.4.1 {
+ execsql {SELECT * FROM test1 WHERE f1==78 ORDER BY f1,f2}
+} {78 128}
+do_test update-7.4.2 {
+ execsql {SELECT * FROM test1 WHERE f1==778 ORDER BY f1,f2}
+} {778 128}
+do_test update-7.4.3 {
+ execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2}
+} {8 88 8 128 8 256 8 888}
+do_test update-7.5 {
+ execsql {UPDATE test1 SET f1=f1-1 WHERE f1>100 and f2==128}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 8 88 8 128 8 256 8 888 9 512 10 1024 78 128 777 128}
+do_test update-7.5.1 {
+ execsql {SELECT * FROM test1 WHERE f1==78 ORDER BY f1,f2}
+} {78 128}
+do_test update-7.5.2 {
+ execsql {SELECT * FROM test1 WHERE f1==778 ORDER BY f1,f2}
+} {}
+do_test update-7.5.3 {
+ execsql {SELECT * FROM test1 WHERE f1==777 ORDER BY f1,f2}
+} {777 128}
+do_test update-7.5.4 {
+ execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2}
+} {8 88 8 128 8 256 8 888}
+do_test update-7.6 {
+ execsql {UPDATE test1 SET f1=f1-1 WHERE f1<=100 and f2==128}
+ execsql {SELECT * FROM test1 ORDER BY f1,f2}
+} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128}
+do_test update-7.6.1 {
+ execsql {SELECT * FROM test1 WHERE f1==77 ORDER BY f1,f2}
+} {77 128}
+do_test update-7.6.2 {
+ execsql {SELECT * FROM test1 WHERE f1==778 ORDER BY f1,f2}
+} {}
+do_test update-7.6.3 {
+ execsql {SELECT * FROM test1 WHERE f1==777 ORDER BY f1,f2}
+} {777 128}
+do_test update-7.6.4 {
+ execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2}
+} {8 88 8 256 8 888}
+
+# Error messages
+#
+do_test update-9.1 {
+ set v [catch {execsql {
+ UPDATE test1 SET x=11 WHERE f1=1025
+ }} msg]
+ lappend v $msg
+} {1 {no such column: x}}
+do_test update-9.2 {
+ set v [catch {execsql {
+ UPDATE test1 SET f1=x(11) WHERE f1=1025
+ }} msg]
+ lappend v $msg
+} {1 {no such function: x}}
+do_test update-9.3 {
+ set v [catch {execsql {
+ UPDATE test1 SET f1=11 WHERE x=1025
+ }} msg]
+ lappend v $msg
+} {1 {no such column: x}}
+do_test update-9.4 {
+ set v [catch {execsql {
+ UPDATE test1 SET f1=11 WHERE x(f1)=1025
+ }} msg]
+ lappend v $msg
+} {1 {no such function: x}}
+
+# Try doing updates on a unique column where the value does not
+# really change.
+#
+do_test update-10.1 {
+ execsql {
+ DROP TABLE test1;
+ CREATE TABLE t1(
+ a integer primary key,
+ b UNIQUE,
+ c, d,
+ e, f,
+ UNIQUE(c,d)
+ );
+ INSERT INTO t1 VALUES(1,2,3,4,5,6);
+ INSERT INTO t1 VALUES(2,3,4,4,6,7);
+ SELECT * FROM t1
+ }
+} {1 2 3 4 5 6 2 3 4 4 6 7}
+do_test update-10.2 {
+ catchsql {
+ UPDATE t1 SET a=1, e=9 WHERE f=6;
+ SELECT * FROM t1;
+ }
+} {0 {1 2 3 4 9 6 2 3 4 4 6 7}}
+do_test update-10.3 {
+ catchsql {
+ UPDATE t1 SET a=1, e=10 WHERE f=7;
+ SELECT * FROM t1;
+ }
+} {1 {PRIMARY KEY must be unique}}
+do_test update-10.4 {
+ catchsql {
+ SELECT * FROM t1;
+ }
+} {0 {1 2 3 4 9 6 2 3 4 4 6 7}}
+do_test update-10.5 {
+ catchsql {
+ UPDATE t1 SET b=2, e=11 WHERE f=6;
+ SELECT * FROM t1;
+ }
+} {0 {1 2 3 4 11 6 2 3 4 4 6 7}}
+do_test update-10.6 {
+ catchsql {
+ UPDATE t1 SET b=2, e=12 WHERE f=7;
+ SELECT * FROM t1;
+ }
+} {1 {column b is not unique}}
+do_test update-10.7 {
+ catchsql {
+ SELECT * FROM t1;
+ }
+} {0 {1 2 3 4 11 6 2 3 4 4 6 7}}
+do_test update-10.8 {
+ catchsql {
+ UPDATE t1 SET c=3, d=4, e=13 WHERE f=6;
+ SELECT * FROM t1;
+ }
+} {0 {1 2 3 4 13 6 2 3 4 4 6 7}}
+do_test update-10.9 {
+ catchsql {
+ UPDATE t1 SET c=3, d=4, e=14 WHERE f=7;
+ SELECT * FROM t1;
+ }
+} {1 {columns c, d are not unique}}
+do_test update-10.10 {
+ catchsql {
+ SELECT * FROM t1;
+ }
+} {0 {1 2 3 4 13 6 2 3 4 4 6 7}}
+
+# Make sure we can handle a subquery in the where clause.
+#
+do_test update-11.1 {
+ execsql {
+ UPDATE t1 SET e=e+1 WHERE b IN (SELECT b FROM t1);
+ SELECT b,e FROM t1;
+ }
+} {2 14 3 7}
+do_test update-11.2 {
+ execsql {
+ UPDATE t1 SET e=e+1 WHERE a IN (SELECT a FROM t1);
+ SELECT a,e FROM t1;
+ }
+} {1 15 2 8}
+
+integrity_check update-12.1
+
+# Ticket 602. Updates should occur in the same order as the records
+# were discovered in the WHERE clause.
+#
+do_test update-13.1 {
+ execsql {
+ BEGIN;
+ CREATE TABLE t2(a);
+ INSERT INTO t2 VALUES(1);
+ INSERT INTO t2 VALUES(2);
+ INSERT INTO t2 SELECT a+2 FROM t2;
+ INSERT INTO t2 SELECT a+4 FROM t2;
+ INSERT INTO t2 SELECT a+8 FROM t2;
+ INSERT INTO t2 SELECT a+16 FROM t2;
+ INSERT INTO t2 SELECT a+32 FROM t2;
+ INSERT INTO t2 SELECT a+64 FROM t2;
+ INSERT INTO t2 SELECT a+128 FROM t2;
+ INSERT INTO t2 SELECT a+256 FROM t2;
+ INSERT INTO t2 SELECT a+512 FROM t2;
+ INSERT INTO t2 SELECT a+1024 FROM t2;
+ COMMIT;
+ SELECT count(*) FROM t2;
+ }
+} {2048}
+do_test update-13.2 {
+ execsql {
+ SELECT count(*) FROM t2 WHERE a=rowid;
+ }
+} {2048}
+do_test update-13.3 {
+ execsql {
+ UPDATE t2 SET rowid=rowid-1;
+ SELECT count(*) FROM t2 WHERE a=rowid+1;
+ }
+} {2048}
+do_test update-13.3 {
+ execsql {
+ UPDATE t2 SET rowid=rowid+10000;
+ UPDATE t2 SET rowid=rowid-9999;
+ SELECT count(*) FROM t2 WHERE a=rowid;
+ }
+} {2048}
+do_test update-13.4 {
+ execsql {
+ BEGIN;
+ INSERT INTO t2 SELECT a+2048 FROM t2;
+ INSERT INTO t2 SELECT a+4096 FROM t2;
+ INSERT INTO t2 SELECT a+8192 FROM t2;
+ SELECT count(*) FROM t2 WHERE a=rowid;
+ COMMIT;
+ }
+} 16384
+do_test update-13.5 {
+ execsql {
+ UPDATE t2 SET rowid=rowid-1;
+ SELECT count(*) FROM t2 WHERE a=rowid+1;
+ }
+} 16384
+
+integrity_check update-13.6
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/vacuum.test b/usr/src/cmd/svc/configd/sqlite/test/vacuum.test
new file mode 100644
index 0000000000..4154107c97
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/vacuum.test
@@ -0,0 +1,176 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing the VACUUM statement.
+#
+# $Id: vacuum.test,v 1.15 2004/02/14 16:31:04 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+proc cksum {{db db}} {
+ set txt [$db eval {SELECT name, type, sql FROM sqlite_master}]\n
+ foreach tbl [$db eval {SELECT name FROM sqlite_master WHERE type='table'}] {
+ append txt [$db eval "SELECT * FROM $tbl"]\n
+ }
+ foreach prag {default_synchronous default_cache_size} {
+ append txt $prag-[$db eval "PRAGMA $prag"]\n
+ }
+ set cksum [string length $txt]-[md5 $txt]
+ # puts $cksum-[file size test.db]
+ return $cksum
+}
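+# A VACUUM should leave this checksum unchanged even though the file may
+# shrink.  A minimal usage sketch (mirroring the tests below):
+#   set before [cksum]
+#   execsql {VACUUM}
+#   if {![string equal [cksum] $before]} { error "content changed" }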
+do_test vacuum-1.1 {
+ execsql {
+ BEGIN;
+ CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c);
+ INSERT INTO t1 VALUES(NULL,randstr(10,100),randstr(5,50));
+ INSERT INTO t1 VALUES(123456,randstr(10,100),randstr(5,50));
+ INSERT INTO t1 SELECT NULL, b||'-'||rowid, c||'-'||rowid FROM t1;
+ INSERT INTO t1 SELECT NULL, b||'-'||rowid, c||'-'||rowid FROM t1;
+ INSERT INTO t1 SELECT NULL, b||'-'||rowid, c||'-'||rowid FROM t1;
+ INSERT INTO t1 SELECT NULL, b||'-'||rowid, c||'-'||rowid FROM t1;
+ INSERT INTO t1 SELECT NULL, b||'-'||rowid, c||'-'||rowid FROM t1;
+ INSERT INTO t1 SELECT NULL, b||'-'||rowid, c||'-'||rowid FROM t1;
+ INSERT INTO t1 SELECT NULL, b||'-'||rowid, c||'-'||rowid FROM t1;
+ CREATE INDEX i1 ON t1(b,c);
+ CREATE TABLE t2 AS SELECT * FROM t1;
+ COMMIT;
+ DROP TABLE t2;
+ }
+ set ::size1 [file size test.db]
+ set ::cksum [cksum]
+ expr {$::cksum!=""}
+} {1}
+do_test vacuum-1.2 {
+ execsql {
+ VACUUM;
+ }
+ cksum
+} $cksum
+do_test vacuum-1.3 {
+ expr {[file size test.db]<$::size1}
+} {1}
+do_test vacuum-1.4 {
+ execsql {
+ BEGIN;
+ CREATE TABLE t2 AS SELECT * FROM t1;
+ CREATE TABLE t3 AS SELECT * FROM t1;
+ CREATE VIEW v1 AS SELECT b, c FROM t3;
+ CREATE TRIGGER r1 AFTER DELETE ON t2 BEGIN
+ SELECT 1;
+ END;
+ COMMIT;
+ DROP TABLE t2;
+ }
+ set ::size1 [file size test.db]
+ set ::cksum [cksum]
+ expr {$::cksum!=""}
+} {1}
+do_test vacuum-1.5 {
+ execsql {
+ VACUUM;
+ }
+ cksum
+} $cksum
+do_test vacuum-1.6 {
+ expr {[file size test.db]<$::size1}
+} {1}
+
+do_test vacuum-2.1 {
+ catchsql {
+ BEGIN;
+ VACUUM;
+ COMMIT;
+ }
+} {1 {cannot VACUUM from within a transaction}}
+catch {db eval COMMIT}
+do_test vacuum-2.2 {
+ sqlite db2 test.db
+ execsql {
+ BEGIN;
+ CREATE TABLE t4 AS SELECT * FROM t1;
+ CREATE TABLE t5 AS SELECT * FROM t1;
+ COMMIT;
+ DROP TABLE t4;
+ DROP TABLE t5;
+ } db2
+ set ::cksum [cksum db2]
+ catchsql {
+ VACUUM
+ }
+} {0 {}}
+do_test vacuum-2.3 {
+ cksum
+} $cksum
+do_test vacuum-2.4 {
+ catch {db2 eval {SELECT count(*) FROM sqlite_master}}
+ cksum db2
+} $cksum
+
+# Ticket #427. Make sure VACUUM works when the EMPTY_RESULT_CALLBACKS
+# pragma is turned on.
+#
+do_test vacuum-3.1 {
+ db close
+ db2 close
+ file delete test.db
+ sqlite db test.db
+ execsql {
+ PRAGMA empty_result_callbacks=on;
+ VACUUM;
+ }
+} {}
+
+# Ticket #464. Make sure VACUUM works with the sqlite_compile() API.
+#
+do_test vacuum-4.1 {
+ db close
+ set DB [sqlite db test.db]
+ set VM [sqlite_compile $DB {VACUUM} TAIL]
+ sqlite_step $VM N VALUES COLNAMES
+} {SQLITE_DONE}
+do_test vacuum-4.2 {
+ sqlite_finalize $VM
+} {}
+
+# Ticket #515. VACUUM after deleting and recreating the table that
+# a view refers to.
+#
+do_test vacuum-5.1 {
+ db close
+ file delete -force test.db
+ sqlite db test.db
+ catchsql {
+ CREATE TABLE Test (TestID int primary key);
+ INSERT INTO Test VALUES (NULL);
+ CREATE VIEW viewTest AS SELECT * FROM Test;
+
+ BEGIN;
+ CREATE TEMP TABLE tempTest (TestID int primary key, Test2 int NULL);
+ INSERT INTO tempTest SELECT TestID, 1 FROM Test;
+ DROP TABLE Test;
+ CREATE TABLE Test(TestID int primary key, Test2 int NULL);
+ INSERT INTO Test SELECT * FROM tempTest;
+ COMMIT;
+ VACUUM;
+ }
+} {0 {}}
+do_test vacuum-5.2 {
+ catchsql {
+ VACUUM;
+ }
+} {0 {}}
+
+# finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/version.test b/usr/src/cmd/svc/configd/sqlite/test/version.test
new file mode 100644
index 0000000000..2e44e0b6ab
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/version.test
@@ -0,0 +1,201 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2002 July 17
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing the ability of the library to detect
+# past or future file format version numbers and respond appropriately.
+#
+# $Id: version.test,v 1.9 2004/02/12 19:01:05 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Current file format version
+set VX 4
+
+# Create a new database
+#
+do_test version-1.1 {
+ execsql {
+ CREATE TABLE t1(x);
+ INSERT INTO t1 VALUES(1);
+ INSERT INTO t1 SELECT x+1 FROM t1;
+ INSERT INTO t1 SELECT x+2 FROM t1;
+ INSERT INTO t1 SELECT x+4 FROM t1;
+ SELECT * FROM t1;
+ }
+} {1 2 3 4 5 6 7 8}
+
+# Make sure the version number is set correctly
+#
+do_test version-1.2 {
+ db close
+ set ::bt [btree_open test.db]
+ btree_begin_transaction $::bt
+ set ::meta [btree_get_meta $::bt]
+ btree_rollback $::bt
+ lindex $::meta 2
+} $VX
+
+# Increase the file_format number by one. Verify that the
+# file will refuse to open.
+#
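+# (btree_get_meta returns the database meta values as a Tcl list; the tests
+# treat element 2 as the file format, cf. version-1.2, so the
+# lreplace/btree_update_meta pair changes only that slot.)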
+do_test version-1.3 {
+ set m2 [lreplace $::meta 2 2 [expr {$::VX+1}]]
+ btree_begin_transaction $::bt
+ eval btree_update_meta $::bt $m2
+ btree_commit $::bt
+ set rc [catch {sqlite db test.db} msg]
+ lappend rc $msg
+} {1 {unsupported file format}}
+
+# Decrease the file_format number by one. Verify that the
+# file will open correctly.
+#
+do_test version-1.4 {
+ set m2 [lreplace $::meta 2 2 [expr {$::VX-1}]]
+ btree_begin_transaction $::bt
+ eval btree_update_meta $::bt $m2
+ btree_commit $::bt
+ sqlite db test.db
+ execsql {
+ SELECT * FROM t1;
+ }
+} {1 2 3 4 5 6 7 8}
+
+# Set the file_format number to 2. This should cause the automatic
+# upgrade processing to run.
+#
+do_test version-1.5 {
+ set m2 [lreplace $::meta 2 2 2]
+ btree_begin_transaction $::bt
+ eval btree_update_meta $::bt $m2
+ btree_commit $::bt
+ sqlite db test.db
+ execsql {
+ SELECT * FROM t1;
+ }
+} {1 2 3 4 5 6 7 8}
+do_test version-1.6 {
+ set ::meta [btree_get_meta $::bt]
+ lindex $::meta 2
+} $VX
+
+# Add some triggers, views, and indices to the schema and make sure the
+# automatic upgrade still works.
+#
+do_test version-1.7 {
+ execsql {
+ CREATE INDEX i1 ON t1(x);
+ DELETE FROM t1;
+ CREATE TABLE t2(a INTEGER PRIMARY KEY, b UNIQUE, c);
+ CREATE TABLE cnt(name,ins, del);
+ INSERT INTO cnt VALUES('t1',0,0);
+ INSERT INTO cnt VALUES('t2',0,0);
+ CREATE TRIGGER r1 AFTER INSERT ON t1 FOR EACH ROW BEGIN
+ UPDATE cnt SET ins=ins+1 WHERE name='t1';
+ END;
+ CREATE TRIGGER r2 AFTER DELETE ON t1 FOR EACH ROW BEGIN
+ UPDATE cnt SET del=del+1 WHERE name='t1';
+ END;
+ CREATE TRIGGER r3 AFTER INSERT ON t2 FOR EACH ROW BEGIN
+ UPDATE cnt SET ins=ins+1 WHERE name='t2';
+ END;
+ CREATE TRIGGER r4 AFTER DELETE ON t2 FOR EACH ROW BEGIN
+ UPDATE cnt SET del=del+1 WHERE name='t2';
+ END;
+ CREATE VIEW v1 AS SELECT x+100 FROM t1;
+ CREATE VIEW v2 AS SELECT sum(ins), sum(del) FROM cnt;
+ INSERT INTO t1 VALUES(1);
+ INSERT INTO t1 SELECT x+1 FROM t1;
+ INSERT INTO t1 SELECT x+2 FROM t1;
+ INSERT INTO t1 SELECT x+4 FROM t1;
+ SELECT * FROM t1;
+ }
+} {1 2 3 4 5 6 7 8}
+do_test version-1.8 {
+ execsql {
+ SELECT * FROM v2;
+ }
+} {8 0}
+do_test version-1.9 {
+ execsql {
+ SELECT * FROM cnt;
+ }
+} {t1 8 0 t2 0 0}
+do_test version-1.10 {
+ execsql {
+ INSERT INTO t2 SELECT x*3, x*2, x FROM t1;
+ SELECT * FROM t2;
+ }
+} {3 2 1 6 4 2 9 6 3 12 8 4 15 10 5 18 12 6 21 14 7 24 16 8}
+do_test version-1.11 {
+ execsql {
+ SELECT * FROM cnt;
+ }
+} {t1 8 0 t2 8 0}
+
+# Here we do the upgrade test.
+#
+do_test version-1.12 {
+ db close
+ set m2 [lreplace $::meta 2 2 2]
+ btree_begin_transaction $::bt
+ eval btree_update_meta $::bt $m2
+ btree_commit $::bt
+ sqlite db test.db
+ execsql {
+ SELECT * FROM cnt;
+ }
+} {t1 8 0 t2 8 0}
+do_test version-1.13 {
+ execsql {
+ SELECT * FROM v1;
+ }
+} {101 102 103 104 105 106 107 108}
+do_test version-1.14 {
+ execsql {
+ SELECT * FROM v2;
+ }
+} {16 0}
+
+# Try to do an upgrade where the database file is read-only
+#
+do_test version-2.1 {
+ db close
+ set m2 [lreplace $::meta 2 2 2]
+ btree_begin_transaction $::bt
+ eval btree_update_meta $::bt $m2
+ btree_commit $::bt
+ btree_close $::bt
+ catch {file attributes test.db -permissions 0444}
+ catch {file attributes test.db -readonly 1}
+ if {[file writable test.db]} {
+ error "Unable to make the database file test.db readonly - rerun this test as an unprivileged user"
+ }
+ set rc [catch {sqlite db test.db} msg]
+ lappend rc $msg
+} {1 {unable to upgrade database to the version 2.6 format: attempt to write a readonly database}}
+do_test version-2.2 {
+ file delete -force test.db
+ set fd [open test.db w]
+ set txt "This is not a valid database file\n"
+ while {[string length $txt]<4092} {append txt $txt}
+ puts $fd $txt
+ close $fd
+ set rc [catch {sqlite db test.db} msg]
+ lappend rc $msg
+} {1 {file is encrypted or is not a database}}
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/view.test b/usr/src/cmd/svc/configd/sqlite/test/view.test
new file mode 100644
index 0000000000..2da7e8deb6
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/view.test
@@ -0,0 +1,410 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2002 February 26
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing VIEW statements.
+#
+# $Id: view.test,v 1.16.2.1 2004/07/20 00:20:47 drh Exp $
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+do_test view-1.0 {
+ execsql {
+ CREATE TABLE t1(a,b,c);
+ INSERT INTO t1 VALUES(1,2,3);
+ INSERT INTO t1 VALUES(4,5,6);
+ INSERT INTO t1 VALUES(7,8,9);
+ SELECT * FROM t1;
+ }
+} {1 2 3 4 5 6 7 8 9}
+
+do_test view-1.1 {
+ execsql {
+ BEGIN;
+ CREATE VIEW v1 AS SELECT a,b FROM t1;
+ SELECT * FROM v1 ORDER BY a;
+ }
+} {1 2 4 5 7 8}
+do_test view-1.2 {
+ catchsql {
+ ROLLBACK;
+ SELECT * FROM v1 ORDER BY a;
+ }
+} {1 {no such table: v1}}
+do_test view-1.3 {
+ execsql {
+ CREATE VIEW v1 AS SELECT a,b FROM t1;
+ SELECT * FROM v1 ORDER BY a;
+ }
+} {1 2 4 5 7 8}
+do_test view-1.3.1 {
+ db close
+ sqlite db test.db
+ execsql {
+ SELECT * FROM v1 ORDER BY a;
+ }
+} {1 2 4 5 7 8}
+do_test view-1.4 {
+ catchsql {
+ DROP VIEW v1;
+ SELECT * FROM v1 ORDER BY a;
+ }
+} {1 {no such table: v1}}
+do_test view-1.5 {
+ execsql {
+ CREATE VIEW v1 AS SELECT a,b FROM t1;
+ SELECT * FROM v1 ORDER BY a;
+ }
+} {1 2 4 5 7 8}
+do_test view-1.6 {
+ catchsql {
+ DROP TABLE t1;
+ SELECT * FROM v1 ORDER BY a;
+ }
+} {1 {no such table: main.t1}}
+do_test view-1.7 {
+ execsql {
+ CREATE TABLE t1(x,a,b,c);
+ INSERT INTO t1 VALUES(1,2,3,4);
+ INSERT INTO t1 VALUES(4,5,6,7);
+ INSERT INTO t1 VALUES(7,8,9,10);
+ SELECT * FROM v1 ORDER BY a;
+ }
+} {2 3 5 6 8 9}
+do_test view-1.8 {
+ db close
+ sqlite db test.db
+ execsql {
+ SELECT * FROM v1 ORDER BY a;
+ }
+} {2 3 5 6 8 9}
+
+do_test view-2.1 {
+ execsql {
+ CREATE VIEW v2 AS SELECT * FROM t1 WHERE a>5
+ }; # No semicolon
+ execsql2 {
+ SELECT * FROM v2;
+ }
+} {x 7 a 8 b 9 c 10}
+do_test view-2.2 {
+ catchsql {
+ INSERT INTO v2 VALUES(1,2,3,4);
+ }
+} {1 {cannot modify v2 because it is a view}}
+do_test view-2.3 {
+ catchsql {
+ UPDATE v2 SET a=10 WHERE a=5;
+ }
+} {1 {cannot modify v2 because it is a view}}
+do_test view-2.4 {
+ catchsql {
+ DELETE FROM v2;
+ }
+} {1 {cannot modify v2 because it is a view}}
+do_test view-2.5 {
+ execsql {
+ INSERT INTO t1 VALUES(11,12,13,14);
+ SELECT * FROM v2 ORDER BY x;
+ }
+} {7 8 9 10 11 12 13 14}
+do_test view-2.6 {
+ execsql {
+ SELECT x FROM v2 WHERE a>10
+ }
+} {11}
+
+# Test that the column names of views are generated correctly.
+#
+do_test view-3.1 {
+ execsql2 {
+ SELECT * FROM v1 LIMIT 1
+ }
+} {a 2 b 3}
+do_test view-3.2 {
+ execsql2 {
+ SELECT * FROM v2 LIMIT 1
+ }
+} {x 7 a 8 b 9 c 10}
+do_test view-3.3 {
+ execsql2 {
+ DROP VIEW v1;
+ CREATE VIEW v1 AS SELECT a AS 'xyz', b+c AS 'pqr', c-b FROM t1;
+ SELECT * FROM v1 LIMIT 1
+ }
+} {xyz 2 pqr 7 c-b 1}
+do_test view-3.4 {
+ execsql2 {
+ CREATE VIEW v3 AS SELECT a FROM t1 UNION SELECT b FROM t1 ORDER BY b;
+ SELECT * FROM v3 LIMIT 4;
+ }
+} {b 2 b 3 b 5 b 6}
+do_test view-3.5 {
+ execsql2 {
+ CREATE VIEW v4 AS
+ SELECT a, b FROM t1
+ UNION
+ SELECT b AS 'x', a AS 'y' FROM t1
+ ORDER BY x, y;
+ SELECT y FROM v4 ORDER BY y LIMIT 4;
+ }
+} {y 2 y 3 y 5 y 6}
+
+
+do_test view-4.1 {
+ catchsql {
+ DROP VIEW t1;
+ }
+} {1 {use DROP TABLE to delete table t1}}
+do_test view-4.2 {
+ execsql {
+ SELECT 1 FROM t1 LIMIT 1;
+ }
+} 1
+do_test view-4.3 {
+ catchsql {
+ DROP TABLE v1;
+ }
+} {1 {use DROP VIEW to delete view v1}}
+do_test view-4.4 {
+ execsql {
+ SELECT 1 FROM v1 LIMIT 1;
+ }
+} {1}
+do_test view-4.5 {
+ catchsql {
+ CREATE INDEX i1v1 ON v1(xyz);
+ }
+} {1 {views may not be indexed}}
+
+do_test view-5.1 {
+ execsql {
+ CREATE TABLE t2(y,a);
+ INSERT INTO t2 VALUES(22,2);
+ INSERT INTO t2 VALUES(33,3);
+ INSERT INTO t2 VALUES(44,4);
+ INSERT INTO t2 VALUES(55,5);
+ SELECT * FROM t2;
+ }
+} {22 2 33 3 44 4 55 5}
+do_test view-5.2 {
+ execsql {
+ CREATE VIEW v5 AS
+ SELECT t1.x AS v, t2.y AS w FROM t1 JOIN t2 USING(a);
+ SELECT * FROM v5;
+ }
+} {1 22 4 55}
+
+# Verify that the view v5 gets flattened. See sqliteFlattenSubquery().
+# Ticket #272
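+# (lsearch returns -1 when no OpenTemp opcode appears in the EXPLAIN output,
+# i.e. no transient table was created to hold the view.)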
+do_test view-5.3 {
+ lsearch [execsql {
+ EXPLAIN SELECT * FROM v5;
+ }] OpenTemp
+} {-1}
+do_test view-5.4 {
+ execsql {
+ SELECT * FROM v5 AS a, t2 AS b WHERE a.w=b.y;
+ }
+} {1 22 22 2 4 55 55 5}
+do_test view-5.5 {
+ lsearch [execsql {
+ EXPLAIN SELECT * FROM v5 AS a, t2 AS b WHERE a.w=b.y;
+ }] OpenTemp
+} {-1}
+do_test view-5.6 {
+ execsql {
+ SELECT * FROM t2 AS b, v5 AS a WHERE a.w=b.y;
+ }
+} {22 2 1 22 55 5 4 55}
+do_test view-5.7 {
+ lsearch [execsql {
+ EXPLAIN SELECT * FROM t2 AS b, v5 AS a WHERE a.w=b.y;
+ }] OpenTemp
+} {-1}
+do_test view-5.8 {
+ execsql {
+ SELECT * FROM t1 AS a, v5 AS b, t2 AS c WHERE a.x=b.v AND b.w=c.y;
+ }
+} {1 2 3 4 1 22 22 2 4 5 6 7 4 55 55 5}
+do_test view-5.9 {
+ lsearch [execsql {
+ EXPLAIN SELECT * FROM t1 AS a, v5 AS b, t2 AS c WHERE a.x=b.v AND b.w=c.y;
+ }] OpenTemp
+} {-1}
+
+do_test view-6.1 {
+ execsql {
+ SELECT min(x), min(a), min(b), min(c), min(a+b+c) FROM v2;
+ }
+} {7 8 9 10 27}
+do_test view-6.2 {
+ execsql {
+ SELECT max(x), max(a), max(b), max(c), max(a+b+c) FROM v2;
+ }
+} {11 12 13 14 39}
+
+do_test view-7.1 {
+ execsql {
+ CREATE TABLE test1(id integer primary key, a);
+ CREATE TABLE test2(id integer, b);
+ INSERT INTO test1 VALUES(1,2);
+ INSERT INTO test2 VALUES(1,3);
+ CREATE VIEW test AS
+ SELECT test1.id, a, b
+ FROM test1 JOIN test2 ON test2.id=test1.id;
+ SELECT * FROM test;
+ }
+} {1 2 3}
+do_test view-7.2 {
+ db close
+ sqlite db test.db
+ execsql {
+ SELECT * FROM test;
+ }
+} {1 2 3}
+do_test view-7.3 {
+ execsql {
+ DROP VIEW test;
+ CREATE VIEW test AS
+ SELECT test1.id, a, b
+ FROM test1 JOIN test2 USING(id);
+ SELECT * FROM test;
+ }
+} {1 2 3}
+do_test view-7.4 {
+ db close
+ sqlite db test.db
+ execsql {
+ SELECT * FROM test;
+ }
+} {1 2 3}
+do_test view-7.5 {
+ execsql {
+ DROP VIEW test;
+ CREATE VIEW test AS
+ SELECT test1.id, a, b
+ FROM test1 NATURAL JOIN test2;
+ SELECT * FROM test;
+ }
+} {1 2 3}
+do_test view-7.6 {
+ db close
+ sqlite db test.db
+ execsql {
+ SELECT * FROM test;
+ }
+} {1 2 3}
+
+do_test view-8.1 {
+ execsql {
+ CREATE VIEW v6 AS SELECT pqr, xyz FROM v1;
+ SELECT * FROM v6 ORDER BY xyz;
+ }
+} {7 2 13 5 19 8 27 12}
+do_test view-8.2 {
+ db close
+ sqlite db test.db
+ execsql {
+ SELECT * FROM v6 ORDER BY xyz;
+ }
+} {7 2 13 5 19 8 27 12}
+do_test view-8.3 {
+ execsql {
+ CREATE VIEW v7 AS SELECT pqr+xyz AS a FROM v6;
+ SELECT * FROM v7 ORDER BY a;
+ }
+} {9 18 27 39}
+do_test view-8.4 {
+ execsql {
+ CREATE VIEW v8 AS SELECT max(cnt) AS mx FROM
+ (SELECT a%2 AS eo, count(*) AS cnt FROM t1 GROUP BY eo);
+ SELECT * FROM v8;
+ }
+} 3
+do_test view-8.5 {
+ execsql {
+ SELECT mx+10, mx*2 FROM v8;
+ }
+} {13 6}
+do_test view-8.6 {
+ execsql {
+ SELECT mx+10, pqr FROM v6, v8 WHERE xyz=2;
+ }
+} {13 7}
+do_test view-8.7 {
+ execsql {
+ SELECT mx+10, pqr FROM v6, v8 WHERE xyz>2;
+ }
+} {13 13 13 19 13 27}
+
+# Tests for a bug found by Michiel de Wit involving ORDER BY in a VIEW.
+#
+do_test view-9.1 {
+ execsql {
+ INSERT INTO t2 SELECT * FROM t2 WHERE a<5;
+ INSERT INTO t2 SELECT * FROM t2 WHERE a<4;
+ INSERT INTO t2 SELECT * FROM t2 WHERE a<3;
+ SELECT DISTINCT count(*) FROM t2 GROUP BY a ORDER BY 1;
+ }
+} {1 2 4 8}
+do_test view-9.2 {
+ execsql {
+ SELECT DISTINCT count(*) FROM t2 GROUP BY a ORDER BY 1 LIMIT 3;
+ }
+} {1 2 4}
+do_test view-9.3 {
+ execsql {
+ CREATE VIEW v9 AS
+ SELECT DISTINCT count(*) FROM t2 GROUP BY a ORDER BY 1 LIMIT 3;
+ SELECT * FROM v9;
+ }
+} {1 2 4}
+do_test view-9.4 {
+ execsql {
+ SELECT * FROM v9 ORDER BY 1 DESC;
+ }
+} {4 2 1}
+do_test view-9.5 {
+ execsql {
+ CREATE VIEW v10 AS
+ SELECT DISTINCT a, count(*) FROM t2 GROUP BY a ORDER BY 2 LIMIT 3;
+ SELECT * FROM v10;
+ }
+} {5 1 4 2 3 4}
+do_test view-9.6 {
+ execsql {
+ SELECT * FROM v10 ORDER BY 1;
+ }
+} {3 4 4 2 5 1}
+
+# Tables with columns having peculiar quoted names used in views
+# Ticket #756.
+#
+do_test view-10.1 {
+ execsql {
+ CREATE TABLE t3("9" integer, [4] text);
+ INSERT INTO t3 VALUES(1,2);
+ CREATE VIEW v_t3_a AS SELECT a.[9] FROM t3 AS a;
+ CREATE VIEW v_t3_b AS SELECT "4" FROM t3;
+ SELECT * FROM v_t3_a;
+ }
+} {1}
+do_test view-10.2 {
+ execsql {
+ SELECT * FROM v_t3_b;
+ }
+} {2}
+
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/test/where.test b/usr/src/cmd/svc/configd/sqlite/test/where.test
new file mode 100644
index 0000000000..5671de9b7d
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/test/where.test
@@ -0,0 +1,745 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# 2001 September 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for the SQLite library.  The
+# focus of this file is testing the use of indices in WHERE clauses.
+#
+# $Id: where.test,v 1.17 2003/06/15 23:42:25 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Build some test data
+#
+do_test where-1.0 {
+ execsql {
+ CREATE TABLE t1(w int, x int, y int);
+ CREATE TABLE t2(p int, q int, r int, s int);
+ }
+ for {set i 1} {$i<=100} {incr i} {
+ set w $i
+ set x [expr {int(log($i)/log(2))}]
+ set y [expr {$i*$i + 2*$i + 1}]
+ execsql "INSERT INTO t1 VALUES($w,$x,$y)"
+ }
+ execsql {
+ INSERT INTO t2 SELECT 101-w, x, (SELECT max(y) FROM t1)+1-y, y FROM t1;
+ CREATE INDEX i1w ON t1(w);
+ CREATE INDEX i1xy ON t1(x,y);
+ CREATE INDEX i2p ON t2(p);
+ CREATE INDEX i2r ON t2(r);
+ CREATE INDEX i2qs ON t2(q, s);
+ }
+} {}
+
+# Execute an SQL statement.  Append the search count to the end of the result.
+#
+proc count sql {
+ set ::sqlite_search_count 0
+ return [concat [execsql $sql] $::sqlite_search_count]
+}
+
+# Verify that queries use an index. We are using the special variable
+# "sqlite_search_count" which tallys the number of executions of MoveTo
+# and Next operators in the VDBE. By verifing that the search count is
+# small we can be assured that indices are being used properly.
+#
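+# As an illustrative example (the tests below are the authoritative usage),
+# the call
+#     count {SELECT x, y FROM t1 WHERE w=10}
+# returns the query results followed by the step count, e.g. {3 121 3};
+# the trailing 3 means only a few MoveTo/Next operations ran, so the index
+# i1w on t1(w) must have been used.
+#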
+do_test where-1.1 {
+ count {SELECT x, y FROM t1 WHERE w=10}
+} {3 121 3}
+do_test where-1.2 {
+ count {SELECT x, y FROM t1 WHERE w=11}
+} {3 144 3}
+do_test where-1.3 {
+ count {SELECT x, y FROM t1 WHERE 11=w}
+} {3 144 3}
+do_test where-1.4 {
+ count {SELECT x, y FROM t1 WHERE 11=w AND x>2}
+} {3 144 3}
+do_test where-1.5 {
+ count {SELECT x, y FROM t1 WHERE y<200 AND w=11 AND x>2}
+} {3 144 3}
+do_test where-1.6 {
+ count {SELECT x, y FROM t1 WHERE y<200 AND x>2 AND w=11}
+} {3 144 3}
+do_test where-1.7 {
+ count {SELECT x, y FROM t1 WHERE w=11 AND y<200 AND x>2}
+} {3 144 3}
+do_test where-1.8 {
+ count {SELECT x, y FROM t1 WHERE w>10 AND y=144 AND x=3}
+} {3 144 3}
+do_test where-1.9 {
+ count {SELECT x, y FROM t1 WHERE y=144 AND w>10 AND x=3}
+} {3 144 3}
+do_test where-1.10 {
+ count {SELECT x, y FROM t1 WHERE x=3 AND w>=10 AND y=121}
+} {3 121 3}
+do_test where-1.11 {
+ count {SELECT x, y FROM t1 WHERE x=3 AND y=100 AND w<10}
+} {3 100 3}
+
+# New for SQLite version 2.1: Verify that inequality constraints
+# are used correctly.
+#
+do_test where-1.12 {
+ count {SELECT w FROM t1 WHERE x=3 AND y<100}
+} {8 3}
+do_test where-1.13 {
+ count {SELECT w FROM t1 WHERE x=3 AND 100>y}
+} {8 3}
+do_test where-1.14 {
+ count {SELECT w FROM t1 WHERE 3=x AND y<100}
+} {8 3}
+do_test where-1.15 {
+ count {SELECT w FROM t1 WHERE 3=x AND 100>y}
+} {8 3}
+do_test where-1.16 {
+ count {SELECT w FROM t1 WHERE x=3 AND y<=100}
+} {8 9 5}
+do_test where-1.17 {
+ count {SELECT w FROM t1 WHERE x=3 AND 100>=y}
+} {8 9 5}
+do_test where-1.18 {
+ count {SELECT w FROM t1 WHERE x=3 AND y>225}
+} {15 3}
+do_test where-1.19 {
+ count {SELECT w FROM t1 WHERE x=3 AND 225<y}
+} {15 3}
+do_test where-1.20 {
+ count {SELECT w FROM t1 WHERE x=3 AND y>=225}
+} {14 15 5}
+do_test where-1.21 {
+ count {SELECT w FROM t1 WHERE x=3 AND 225<=y}
+} {14 15 5}
+do_test where-1.22 {
+ count {SELECT w FROM t1 WHERE x=3 AND y>121 AND y<196}
+} {11 12 5}
+do_test where-1.23 {
+ count {SELECT w FROM t1 WHERE x=3 AND y>=121 AND y<=196}
+} {10 11 12 13 9}
+do_test where-1.24 {
+ count {SELECT w FROM t1 WHERE x=3 AND 121<y AND 196>y}
+} {11 12 5}
+do_test where-1.25 {
+ count {SELECT w FROM t1 WHERE x=3 AND 121<=y AND 196>=y}
+} {10 11 12 13 9}
+
+# Need to work on optimizing the BETWEEN operator.
+#
+# do_test where-1.26 {
+# count {SELECT w FROM t1 WHERE x=3 AND y BETWEEN 121 AND 196}
+# } {10 11 12 13 9}
+
+do_test where-1.27 {
+ count {SELECT w FROM t1 WHERE x=3 AND y+1==122}
+} {10 17}
+do_test where-1.28 {
+ count {SELECT w FROM t1 WHERE x+1=4 AND y+1==122}
+} {10 99}
+do_test where-1.29 {
+ count {SELECT w FROM t1 WHERE y==121}
+} {10 99}
+
+
+do_test where-1.30 {
+ count {SELECT w FROM t1 WHERE w>97}
+} {98 99 100 6}
+do_test where-1.31 {
+ count {SELECT w FROM t1 WHERE w>=97}
+} {97 98 99 100 8}
+do_test where-1.33 {
+ count {SELECT w FROM t1 WHERE w==97}
+} {97 3}
+do_test where-1.34 {
+ count {SELECT w FROM t1 WHERE w+1==98}
+} {97 99}
+do_test where-1.35 {
+ count {SELECT w FROM t1 WHERE w<3}
+} {1 2 4}
+do_test where-1.36 {
+ count {SELECT w FROM t1 WHERE w<=3}
+} {1 2 3 6}
+do_test where-1.37 {
+ count {SELECT w FROM t1 WHERE w+1<=4 ORDER BY w}
+} {1 2 3 199}
+
+do_test where-1.38 {
+ count {SELECT (w) FROM t1 WHERE (w)>(97)}
+} {98 99 100 6}
+do_test where-1.39 {
+ count {SELECT (w) FROM t1 WHERE (w)>=(97)}
+} {97 98 99 100 8}
+do_test where-1.40 {
+ count {SELECT (w) FROM t1 WHERE (w)==(97)}
+} {97 3}
+do_test where-1.41 {
+ count {SELECT (w) FROM t1 WHERE ((w)+(1))==(98)}
+} {97 99}
+
+
+# Do the same kind of thing except use a join as the data source.
+#
+do_test where-2.1 {
+ count {
+ SELECT w, p FROM t2, t1
+ WHERE x=q AND y=s AND r=8977
+ }
+} {34 67 6}
+do_test where-2.2 {
+ count {
+ SELECT w, p FROM t2, t1
+ WHERE x=q AND s=y AND r=8977
+ }
+} {34 67 6}
+do_test where-2.3 {
+ count {
+ SELECT w, p FROM t2, t1
+ WHERE x=q AND s=y AND r=8977 AND w>10
+ }
+} {34 67 6}
+do_test where-2.4 {
+ count {
+ SELECT w, p FROM t2, t1
+ WHERE p<80 AND x=q AND s=y AND r=8977 AND w>10
+ }
+} {34 67 6}
+do_test where-2.5 {
+ count {
+ SELECT w, p FROM t2, t1
+ WHERE p<80 AND x=q AND 8977=r AND s=y AND w>10
+ }
+} {34 67 6}
+do_test where-2.6 {
+ count {
+ SELECT w, p FROM t2, t1
+ WHERE x=q AND p=77 AND s=y AND w>5
+ }
+} {24 77 6}
+do_test where-2.7 {
+ count {
+ SELECT w, p FROM t1, t2
+ WHERE x=q AND p>77 AND s=y AND w=5
+ }
+} {5 96 6}
+
+# Let's do a 3-way join.
+#
+do_test where-3.1 {
+ count {
+ SELECT A.w, B.p, C.w FROM t1 as A, t2 as B, t1 as C
+ WHERE C.w=101-B.p AND B.r=10202-A.y AND A.w=11
+ }
+} {11 90 11 9}
+do_test where-3.2 {
+ count {
+ SELECT A.w, B.p, C.w FROM t1 as A, t2 as B, t1 as C
+ WHERE C.w=101-B.p AND B.r=10202-A.y AND A.w=12
+ }
+} {12 89 12 9}
+do_test where-3.3 {
+ count {
+ SELECT A.w, B.p, C.w FROM t1 as A, t2 as B, t1 as C
+ WHERE A.w=15 AND B.p=C.w AND B.r=10202-A.y
+ }
+} {15 86 86 9}
+
+# Test to see that the special case of a constant WHERE clause is
+# handled.
+#
+do_test where-4.1 {
+ count {
+ SELECT * FROM t1 WHERE 0
+ }
+} {0}
+do_test where-4.2 {
+ count {
+ SELECT * FROM t1 WHERE 1 LIMIT 1
+ }
+} {1 0 4 1}
+do_test where-4.3 {
+ execsql {
+ SELECT 99 WHERE 0
+ }
+} {}
+do_test where-4.4 {
+ execsql {
+ SELECT 99 WHERE 1
+ }
+} {99}
+
+# Verify that IN operators in a WHERE clause are handled correctly.
+#
+do_test where-5.1 {
+ count {
+ SELECT * FROM t1 WHERE rowid IN (1,2,3,1234) order by 1;
+ }
+} {1 0 4 2 1 9 3 1 16 0}
+do_test where-5.2 {
+ count {
+ SELECT * FROM t1 WHERE rowid+0 IN (1,2,3,1234) order by 1;
+ }
+} {1 0 4 2 1 9 3 1 16 199}
+do_test where-5.3 {
+ count {
+ SELECT * FROM t1 WHERE w IN (-1,1,2,3) order by 1;
+ }
+} {1 0 4 2 1 9 3 1 16 10}
+do_test where-5.4 {
+ count {
+ SELECT * FROM t1 WHERE w+0 IN (-1,1,2,3) order by 1;
+ }
+} {1 0 4 2 1 9 3 1 16 199}
+do_test where-5.5 {
+ count {
+ SELECT * FROM t1 WHERE rowid IN
+ (select rowid from t1 where rowid IN (-1,2,4))
+ ORDER BY 1;
+ }
+} {2 1 9 4 2 25 1}
+do_test where-5.6 {
+ count {
+ SELECT * FROM t1 WHERE rowid+0 IN
+ (select rowid from t1 where rowid IN (-1,2,4))
+ ORDER BY 1;
+ }
+} {2 1 9 4 2 25 199}
+do_test where-5.7 {
+ count {
+ SELECT * FROM t1 WHERE w IN
+ (select rowid from t1 where rowid IN (-1,2,4))
+ ORDER BY 1;
+ }
+} {2 1 9 4 2 25 7}
+do_test where-5.8 {
+ count {
+ SELECT * FROM t1 WHERE w+0 IN
+ (select rowid from t1 where rowid IN (-1,2,4))
+ ORDER BY 1;
+ }
+} {2 1 9 4 2 25 199}
+do_test where-5.9 {
+ count {
+ SELECT * FROM t1 WHERE x IN (1,7) ORDER BY 1;
+ }
+} {2 1 9 3 1 16 6}
+do_test where-5.10 {
+ count {
+ SELECT * FROM t1 WHERE x+0 IN (1,7) ORDER BY 1;
+ }
+} {2 1 9 3 1 16 199}
+do_test where-5.11 {
+ count {
+ SELECT * FROM t1 WHERE y IN (6400,8100) ORDER BY 1;
+ }
+} {79 6 6400 89 6 8100 199}
+do_test where-5.12 {
+ count {
+ SELECT * FROM t1 WHERE x=6 AND y IN (6400,8100) ORDER BY 1;
+ }
+} {79 6 6400 89 6 8100 74}
+do_test where-5.13 {
+ count {
+ SELECT * FROM t1 WHERE x IN (1,7) AND y NOT IN (6400,8100) ORDER BY 1;
+ }
+} {2 1 9 3 1 16 6}
+do_test where-5.14 {
+ count {
+ SELECT * FROM t1 WHERE x IN (1,7) AND y IN (9,10) ORDER BY 1;
+ }
+} {2 1 9 6}
+
+# This procedure executes the SQL.  Then it checks the generated program
+# for the SQL and appends "sort" to the result if the program contains the
+# SortCallback opcode (meaning a separate sorting pass was needed).  If the
+# program does not contain the SortCallback opcode it appends "nosort".
+#
+proc cksort {sql} {
+ set data [execsql $sql]
+ set prog [execsql "EXPLAIN $sql"]
+ if {[regexp SortCallback $prog]} {set x sort} {set x nosort}
+ lappend data $x
+ return $data
+}
+# Check out the logic that attempts to implement the ORDER BY clause
+# using an index rather than by sorting.
+#
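+# As an illustrative example (see where-6.2 and where-6.3 below), a query
+# whose ORDER BY can be satisfied by an index ends in "nosort":
+#     cksort {SELECT * FROM t3 ORDER BY a LIMIT 3}
+# while a query that defeats the index, such as ORDER BY a+1, ends in "sort".
+#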
+do_test where-6.1 {
+ execsql {
+ CREATE TABLE t3(a,b,c);
+ CREATE INDEX t3a ON t3(a);
+ CREATE INDEX t3bc ON t3(b,c);
+ CREATE INDEX t3acb ON t3(a,c,b);
+ INSERT INTO t3 SELECT w, 101-w, y FROM t1;
+ SELECT count(*), sum(a), sum(b), sum(c) FROM t3;
+ }
+} {100 5050 5050 348550}
+do_test where-6.2 {
+ cksort {
+ SELECT * FROM t3 ORDER BY a LIMIT 3
+ }
+} {1 100 4 2 99 9 3 98 16 nosort}
+do_test where-6.3 {
+ cksort {
+ SELECT * FROM t3 ORDER BY a+1 LIMIT 3
+ }
+} {1 100 4 2 99 9 3 98 16 sort}
+do_test where-6.4 {
+ cksort {
+ SELECT * FROM t3 WHERE a<10 ORDER BY a LIMIT 3
+ }
+} {1 100 4 2 99 9 3 98 16 nosort}
+do_test where-6.5 {
+ cksort {
+ SELECT * FROM t3 WHERE a>0 AND a<10 ORDER BY a LIMIT 3
+ }
+} {1 100 4 2 99 9 3 98 16 nosort}
+do_test where-6.6 {
+ cksort {
+ SELECT * FROM t3 WHERE a>0 ORDER BY a LIMIT 3
+ }
+} {1 100 4 2 99 9 3 98 16 nosort}
+do_test where-6.7 {
+ cksort {
+ SELECT * FROM t3 WHERE b>0 ORDER BY a LIMIT 3
+ }
+} {1 100 4 2 99 9 3 98 16 sort}
+do_test where-6.8 {
+ cksort {
+ SELECT * FROM t3 WHERE a IN (3,5,7,1,9,4,2) ORDER BY a LIMIT 3
+ }
+} {1 100 4 2 99 9 3 98 16 sort}
+do_test where-6.9.1 {
+ cksort {
+ SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY a LIMIT 3
+ }
+} {1 100 4 nosort}
+do_test where-6.9.2 {
+ cksort {
+ SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY a,c LIMIT 3
+ }
+} {1 100 4 nosort}
+do_test where-6.9.3 {
+ cksort {
+ SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY c LIMIT 3
+ }
+} {1 100 4 nosort}
+do_test where-6.9.4 {
+ cksort {
+ SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY a DESC LIMIT 3
+ }
+} {1 100 4 nosort}
+do_test where-6.9.5 {
+ cksort {
+ SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY a DESC, c DESC LIMIT 3
+ }
+} {1 100 4 nosort}
+do_test where-6.9.6 {
+ cksort {
+ SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY c DESC LIMIT 3
+ }
+} {1 100 4 nosort}
+do_test where-6.9.7 {
+ cksort {
+ SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY c,a LIMIT 3
+ }
+} {1 100 4 sort}
+do_test where-6.9.8 {
+ cksort {
+ SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY a DESC, c ASC LIMIT 3
+ }
+} {1 100 4 sort}
+do_test where-6.9.9 {
+ cksort {
+ SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY a ASC, c DESC LIMIT 3
+ }
+} {1 100 4 sort}
+do_test where-6.10 {
+ cksort {
+ SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY a LIMIT 3
+ }
+} {1 100 4 nosort}
+do_test where-6.11 {
+ cksort {
+ SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY a,c LIMIT 3
+ }
+} {1 100 4 nosort}
+do_test where-6.12 {
+ cksort {
+ SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY a,c,b LIMIT 3
+ }
+} {1 100 4 nosort}
+do_test where-6.13 {
+ cksort {
+ SELECT * FROM t3 WHERE a>0 ORDER BY a DESC LIMIT 3
+ }
+} {100 1 10201 99 2 10000 98 3 9801 nosort}
+do_test where-6.13.1 {
+ cksort {
+ SELECT * FROM t3 WHERE a>0 ORDER BY -a LIMIT 3
+ }
+} {100 1 10201 99 2 10000 98 3 9801 sort}
+do_test where-6.14 {
+ cksort {
+ SELECT * FROM t3 ORDER BY b LIMIT 3
+ }
+} {100 1 10201 99 2 10000 98 3 9801 nosort}
+do_test where-6.15 {
+ cksort {
+ SELECT t3.a, t1.x FROM t3, t1 WHERE t3.a=t1.w ORDER BY t3.a LIMIT 3
+ }
+} {1 0 2 1 3 1 nosort}
+do_test where-6.16 {
+ cksort {
+ SELECT t3.a, t1.x FROM t3, t1 WHERE t3.a=t1.w ORDER BY t1.x, t3.a LIMIT 3
+ }
+} {1 0 2 1 3 1 sort}
+do_test where-6.17 {
+ cksort {
+ SELECT y FROM t1 ORDER BY w COLLATE text LIMIT 3;
+ }
+} {4 121 10201 sort}
+do_test where-6.18 {
+ cksort {
+ SELECT y FROM t1 ORDER BY w COLLATE numeric LIMIT 3;
+ }
+} {4 9 16 sort}
+do_test where-6.19 {
+ cksort {
+ SELECT y FROM t1 ORDER BY w LIMIT 3;
+ }
+} {4 9 16 nosort}
+
+# Tests for reverse-order sorting.
+#
+do_test where-7.1 {
+ cksort {
+ SELECT w FROM t1 WHERE x=3 ORDER BY y;
+ }
+} {8 9 10 11 12 13 14 15 nosort}
+do_test where-7.2 {
+ cksort {
+ SELECT w FROM t1 WHERE x=3 ORDER BY y DESC;
+ }
+} {15 14 13 12 11 10 9 8 nosort}
+do_test where-7.3 {
+ cksort {
+ SELECT w FROM t1 WHERE x=3 AND y>100 ORDER BY y LIMIT 3;
+ }
+} {10 11 12 nosort}
+do_test where-7.4 {
+ cksort {
+ SELECT w FROM t1 WHERE x=3 AND y>100 ORDER BY y DESC LIMIT 3;
+ }
+} {15 14 13 nosort}
+do_test where-7.5 {
+ cksort {
+ SELECT w FROM t1 WHERE x=3 AND y>121 ORDER BY y DESC;
+ }
+} {15 14 13 12 11 nosort}
+do_test where-7.6 {
+ cksort {
+ SELECT w FROM t1 WHERE x=3 AND y>=121 ORDER BY y DESC;
+ }
+} {15 14 13 12 11 10 nosort}
+do_test where-7.7 {
+ cksort {
+ SELECT w FROM t1 WHERE x=3 AND y>=121 AND y<196 ORDER BY y DESC;
+ }
+} {12 11 10 nosort}
+do_test where-7.8 {
+ cksort {
+ SELECT w FROM t1 WHERE x=3 AND y>=121 AND y<=196 ORDER BY y DESC;
+ }
+} {13 12 11 10 nosort}
+do_test where-7.9 {
+ cksort {
+ SELECT w FROM t1 WHERE x=3 AND y>121 AND y<=196 ORDER BY y DESC;
+ }
+} {13 12 11 nosort}
+do_test where-7.10 {
+ cksort {
+ SELECT w FROM t1 WHERE x=3 AND y>100 AND y<196 ORDER BY y DESC;
+ }
+} {12 11 10 nosort}
+do_test where-7.11 {
+ cksort {
+ SELECT w FROM t1 WHERE x=3 AND y>=121 AND y<196 ORDER BY y;
+ }
+} {10 11 12 nosort}
+do_test where-7.12 {
+ cksort {
+ SELECT w FROM t1 WHERE x=3 AND y>=121 AND y<=196 ORDER BY y;
+ }
+} {10 11 12 13 nosort}
+do_test where-7.13 {
+ cksort {
+ SELECT w FROM t1 WHERE x=3 AND y>121 AND y<=196 ORDER BY y;
+ }
+} {11 12 13 nosort}
+do_test where-7.14 {
+ cksort {
+ SELECT w FROM t1 WHERE x=3 AND y>100 AND y<196 ORDER BY y;
+ }
+} {10 11 12 nosort}
+do_test where-7.15 {
+ cksort {
+ SELECT w FROM t1 WHERE x=3 AND y<81 ORDER BY y;
+ }
+} {nosort}
+do_test where-7.16 {
+ cksort {
+ SELECT w FROM t1 WHERE x=3 AND y<=81 ORDER BY y;
+ }
+} {8 nosort}
+do_test where-7.17 {
+ cksort {
+ SELECT w FROM t1 WHERE x=3 AND y>256 ORDER BY y;
+ }
+} {nosort}
+do_test where-7.18 {
+ cksort {
+ SELECT w FROM t1 WHERE x=3 AND y>=256 ORDER BY y;
+ }
+} {15 nosort}
+do_test where-7.19 {
+ cksort {
+ SELECT w FROM t1 WHERE x=3 AND y<81 ORDER BY y DESC;
+ }
+} {nosort}
+do_test where-7.20 {
+ cksort {
+ SELECT w FROM t1 WHERE x=3 AND y<=81 ORDER BY y DESC;
+ }
+} {8 nosort}
+do_test where-7.21 {
+ cksort {
+ SELECT w FROM t1 WHERE x=3 AND y>256 ORDER BY y DESC;
+ }
+} {nosort}
+do_test where-7.22 {
+ cksort {
+ SELECT w FROM t1 WHERE x=3 AND y>=256 ORDER BY y DESC;
+ }
+} {15 nosort}
+do_test where-7.23 {
+ cksort {
+ SELECT w FROM t1 WHERE x=0 AND y<4 ORDER BY y;
+ }
+} {nosort}
+do_test where-7.24 {
+ cksort {
+ SELECT w FROM t1 WHERE x=0 AND y<=4 ORDER BY y;
+ }
+} {1 nosort}
+do_test where-7.25 {
+ cksort {
+ SELECT w FROM t1 WHERE x=6 AND y>10201 ORDER BY y;
+ }
+} {nosort}
+do_test where-7.26 {
+ cksort {
+ SELECT w FROM t1 WHERE x=6 AND y>=10201 ORDER BY y;
+ }
+} {100 nosort}
+do_test where-7.27 {
+ cksort {
+ SELECT w FROM t1 WHERE x=0 AND y<4 ORDER BY y DESC;
+ }
+} {nosort}
+do_test where-7.28 {
+ cksort {
+ SELECT w FROM t1 WHERE x=0 AND y<=4 ORDER BY y DESC;
+ }
+} {1 nosort}
+do_test where-7.29 {
+ cksort {
+ SELECT w FROM t1 WHERE x=6 AND y>10201 ORDER BY y DESC;
+ }
+} {nosort}
+do_test where-7.30 {
+ cksort {
+ SELECT w FROM t1 WHERE x=6 AND y>=10201 ORDER BY y DESC;
+ }
+} {100 nosort}
+
+do_test where-8.1 {
+ execsql {
+ CREATE TABLE t4 AS SELECT * FROM t1;
+ CREATE INDEX i4xy ON t4(x,y);
+ }
+ cksort {
+ SELECT w FROM t4 WHERE x=4 and y<1000 ORDER BY y DESC limit 3;
+ }
+} {30 29 28 nosort}
+do_test where-8.2 {
+ execsql {
+ DELETE FROM t4;
+ }
+ cksort {
+ SELECT w FROM t4 WHERE x=4 and y<1000 ORDER BY y DESC limit 3;
+ }
+} {nosort}
+
+# Make sure searches with an index work with an empty table.
+#
+do_test where-9.1 {
+ execsql {
+ CREATE TABLE t5(x PRIMARY KEY);
+ SELECT * FROM t5 WHERE x<10;
+ }
+} {}
+do_test where-9.2 {
+ execsql {
+ SELECT * FROM t5 WHERE x<10 ORDER BY x DESC;
+ }
+} {}
+do_test where-9.3 {
+ execsql {
+ SELECT * FROM t5 WHERE x=10;
+ }
+} {}
+
+do_test where-10.1 {
+ execsql {
+ SELECT 1 WHERE abs(random())<0
+ }
+} {}
+do_test where-10.2 {
+ proc tclvar_func {vname} {return [set ::$vname]}
+ db function tclvar tclvar_func
+ set ::v1 0
+ execsql {
+ SELECT count(*) FROM t1 WHERE tclvar('v1');
+ }
+} {0}
+do_test where-10.3 {
+ set ::v1 1
+ execsql {
+ SELECT count(*) FROM t1 WHERE tclvar('v1');
+ }
+} {100}
+do_test where-10.4 {
+ set ::v1 1
+ proc tclvar_func {vname} {
+ upvar #0 $vname v
+ set v [expr {!$v}]
+ return $v
+ }
+ execsql {
+ SELECT count(*) FROM t1 WHERE tclvar('v1');
+ }
+} {50}
+
+integrity_check {where-99.0}
+
+finish_test
diff --git a/usr/src/cmd/svc/configd/sqlite/tool/diffdb.c b/usr/src/cmd/svc/configd/sqlite/tool/diffdb.c
new file mode 100644
index 0000000000..8b94871fd5
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/tool/diffdb.c
@@ -0,0 +1,47 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** A utility for printing the differences between two SQLite database files.
+*/
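+/*
+** Illustrative usage (a note added here, not part of the original sources):
+**
+**     diffdb old.db new.db
+**
+** prints the 1-based number of each 1024-byte page that differs between the
+** two files, followed by a count of how many pages were compared.
+*/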
+#include <stdio.h>
+#include <ctype.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+
+
+#define PAGESIZE 1024
+static int db1 = -1;
+static int db2 = -1;
+
+int main(int argc, char **argv){
+ int iPg;
+ unsigned char a1[PAGESIZE], a2[PAGESIZE];
+ if( argc!=3 ){
+ fprintf(stderr,"Usage: %s FILENAME FILENAME\n", argv[0]);
+ exit(1);
+ }
+ db1 = open(argv[1], O_RDONLY);
+ if( db1<0 ){
+ fprintf(stderr,"%s: can't open %s\n", argv[0], argv[1]);
+ exit(1);
+ }
+ db2 = open(argv[2], O_RDONLY);
+ if( db2<0 ){
+ fprintf(stderr,"%s: can't open %s\n", argv[0], argv[2]);
+ exit(1);
+ }
+ iPg = 1;
+ while( read(db1, a1, PAGESIZE)==PAGESIZE && read(db2,a2,PAGESIZE)==PAGESIZE ){
+ if( memcmp(a1,a2,PAGESIZE) ){
+ printf("Page %d\n", iPg);
+ }
+ iPg++;
+ }
+ printf("%d pages checked\n", iPg-1);
+ close(db1);
+ close(db2);
+  return 0;
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/tool/lemon.c b/usr/src/cmd/svc/configd/sqlite/tool/lemon.c
new file mode 100644
index 0000000000..d0321b0f55
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/tool/lemon.c
@@ -0,0 +1,4386 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** This file contains all sources (including headers) to the LEMON
+** LALR(1) parser generator. The sources have been combined into a
+** single file to make it easy to include LEMON in the source tree
+** and Makefile of another program.
+**
+** The author of this program disclaims copyright.
+*/
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <ctype.h>
+#include <stdlib.h>
+
+#ifndef __WIN32__
+# if defined(_WIN32) || defined(WIN32)
+# define __WIN32__
+# endif
+#endif
+
+/* #define PRIVATE static */
+#define PRIVATE
+
+#ifdef TEST
+#define MAXRHS 5 /* Set low to exercise exception code */
+#else
+#define MAXRHS 1000
+#endif
+
+char *msort();
+extern void *malloc();
+
+/******** From the file "action.h" *************************************/
+struct action *Action_new();
+struct action *Action_sort();
+
+/********* From the file "assert.h" ************************************/
+void myassert();
+#ifndef NDEBUG
+# define assert(X) if(!(X))myassert(__FILE__,__LINE__)
+#else
+# define assert(X)
+#endif
+
+/********** From the file "build.h" ************************************/
+void FindRulePrecedences();
+void FindFirstSets();
+void FindStates();
+void FindLinks();
+void FindFollowSets();
+void FindActions();
+
+/********* From the file "configlist.h" *********************************/
+void Configlist_init(/* void */);
+struct config *Configlist_add(/* struct rule *, int */);
+struct config *Configlist_addbasis(/* struct rule *, int */);
+void Configlist_closure(/* void */);
+void Configlist_sort(/* void */);
+void Configlist_sortbasis(/* void */);
+struct config *Configlist_return(/* void */);
+struct config *Configlist_basis(/* void */);
+void Configlist_eat(/* struct config * */);
+void Configlist_reset(/* void */);
+
+/********* From the file "error.h" ***************************************/
+void ErrorMsg(const char *, int,const char *, ...);
+
+/****** From the file "option.h" ******************************************/
+struct s_options {
+ enum { OPT_FLAG=1, OPT_INT, OPT_DBL, OPT_STR,
+ OPT_FFLAG, OPT_FINT, OPT_FDBL, OPT_FSTR} type;
+ char *label;
+ char *arg;
+ char *message;
+};
+int OptInit(/* char**,struct s_options*,FILE* */);
+int OptNArgs(/* void */);
+char *OptArg(/* int */);
+void OptErr(/* int */);
+void OptPrint(/* void */);
+
+/******** From the file "parse.h" *****************************************/
+void Parse(/* struct lemon *lemp */);
+
+/********* From the file "plink.h" ***************************************/
+struct plink *Plink_new(/* void */);
+void Plink_add(/* struct plink **, struct config * */);
+void Plink_copy(/* struct plink **, struct plink * */);
+void Plink_delete(/* struct plink * */);
+
+/********** From the file "report.h" *************************************/
+void Reprint(/* struct lemon * */);
+void ReportOutput(/* struct lemon * */);
+void ReportTable(/* struct lemon * */);
+void ReportHeader(/* struct lemon * */);
+void CompressTables(/* struct lemon * */);
+
+/********** From the file "set.h" ****************************************/
+void SetSize(/* int N */); /* All sets will be of size N */
+char *SetNew(/* void */); /* A new set for element 0..N */
+void SetFree(/* char* */); /* Deallocate a set */
+
+int SetAdd(/* char*,int */); /* Add element to a set */
+int SetUnion(/* char *A,char *B */); /* A <- A U B, thru element N */
+
+#define SetFind(X,Y) (X[Y]) /* True if Y is in set X */
+
+/********** From the file "struct.h" *************************************/
+/*
+** Principal data structures for the LEMON parser generator.
+*/
+
+typedef enum {B_FALSE=0, B_TRUE} Boolean;
+
+/* Symbols (terminals and nonterminals) of the grammar are stored
+** in the following: */
+struct symbol {
+ char *name; /* Name of the symbol */
+ int index; /* Index number for this symbol */
+ enum {
+ TERMINAL,
+ NONTERMINAL
+ } type; /* Symbols are all either TERMINALS or NTs */
+ struct rule *rule; /* Linked list of rules of this (if an NT) */
+ struct symbol *fallback; /* fallback token in case this token doesn't parse */
+ int prec; /* Precedence if defined (-1 otherwise) */
+ enum e_assoc {
+ LEFT,
+ RIGHT,
+ NONE,
+ UNK
+  } assoc;                  /* Associativity if precedence is defined */
+ char *firstset; /* First-set for all rules of this symbol */
+ Boolean lambda; /* True if NT and can generate an empty string */
+ char *destructor; /* Code which executes whenever this symbol is
+ ** popped from the stack during error processing */
+ int destructorln; /* Line number of destructor code */
+ char *datatype; /* The data type of information held by this
+ ** object. Only used if type==NONTERMINAL */
+ int dtnum; /* The data type number. In the parser, the value
+ ** stack is a union. The .yy%d element of this
+ ** union is the correct data type for this object */
+};
+
+/* Each production rule in the grammar is stored in the following
+** structure. */
+struct rule {
+ struct symbol *lhs; /* Left-hand side of the rule */
+ char *lhsalias; /* Alias for the LHS (NULL if none) */
+ int ruleline; /* Line number for the rule */
+ int nrhs; /* Number of RHS symbols */
+ struct symbol **rhs; /* The RHS symbols */
+ char **rhsalias; /* An alias for each RHS symbol (NULL if none) */
+ int line; /* Line number at which code begins */
+ char *code; /* The code executed when this rule is reduced */
+ struct symbol *precsym; /* Precedence symbol for this rule */
+ int index; /* An index number for this rule */
+ Boolean canReduce; /* True if this rule is ever reduced */
+ struct rule *nextlhs; /* Next rule with the same LHS */
+ struct rule *next; /* Next rule in the global list */
+};
+
+/* A configuration is a production rule of the grammar together with
+** a mark (dot) showing how much of that rule has been processed so far.
+** Configurations also contain a follow-set which is a list of terminal
+** symbols which are allowed to immediately follow the end of the rule.
+** Every configuration is recorded as an instance of the following: */
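+/*
+** Illustrative example (added here, not part of the original comment): for
+** a grammar rule "expr ::= expr PLUS expr", the configuration with dot==2
+** stands for "expr ::= expr PLUS . expr": the parser has matched "expr PLUS"
+** and is waiting for the trailing expr, and the follow-set (fws) holds the
+** terminals that may legally appear once the whole rule has been matched.
+*/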
+struct config {
+ struct rule *rp; /* The rule upon which the configuration is based */
+ int dot; /* The parse point */
+ char *fws; /* Follow-set for this configuration only */
+ struct plink *fplp; /* Follow-set forward propagation links */
+ struct plink *bplp; /* Follow-set backwards propagation links */
+ struct state *stp; /* Pointer to state which contains this */
+ enum {
+ COMPLETE, /* The status is used during followset and */
+ INCOMPLETE /* shift computations */
+ } status;
+ struct config *next; /* Next configuration in the state */
+ struct config *bp; /* The next basis configuration */
+};
+
+/* Every shift or reduce operation is stored as one of the following */
+struct action {
+ struct symbol *sp; /* The look-ahead symbol */
+ enum e_action {
+ SHIFT,
+ ACCEPT,
+ REDUCE,
+ ERROR,
+ CONFLICT, /* Was a reduce, but part of a conflict */
+ SH_RESOLVED, /* Was a shift. Precedence resolved conflict */
+ RD_RESOLVED, /* Was reduce. Precedence resolved conflict */
+ NOT_USED /* Deleted by compression */
+ } type;
+ union {
+ struct state *stp; /* The new state, if a shift */
+ struct rule *rp; /* The rule, if a reduce */
+ } x;
+ struct action *next; /* Next action for this state */
+ struct action *collide; /* Next action with the same hash */
+};
+
+/* Each state of the generated parser's finite state machine
+** is encoded as an instance of the following structure. */
+struct state {
+ struct config *bp; /* The basis configurations for this state */
+ struct config *cfp; /* All configurations in this set */
+  int index;               /* Sequential number for this state */
+ struct action *ap; /* Array of actions for this state */
+ int nTknAct, nNtAct; /* Number of actions on terminals and nonterminals */
+ int iTknOfst, iNtOfst; /* yy_action[] offset for terminals and nonterms */
+ int iDflt; /* Default action */
+};
+#define NO_OFFSET (-2147483647)
+
+/* A followset propagation link indicates that the contents of one
+** configuration followset should be propagated to another whenever
+** the first changes. */
+struct plink {
+ struct config *cfp; /* The configuration to which linked */
+ struct plink *next; /* The next propagate link */
+};
+
+/* The state vector for the entire parser generator is recorded as
+** follows. (LEMON uses no global variables and makes little use of
+** static variables. Fields in the following structure can be thought
+** of as being global variables in the program.) */
+struct lemon {
+ struct state **sorted; /* Table of states sorted by state number */
+ struct rule *rule; /* List of all rules */
+ int nstate; /* Number of states */
+ int nrule; /* Number of rules */
+ int nsymbol; /* Number of terminal and nonterminal symbols */
+ int nterminal; /* Number of terminal symbols */
+ struct symbol **symbols; /* Sorted array of pointers to symbols */
+ int errorcnt; /* Number of errors */
+ struct symbol *errsym; /* The error symbol */
+ char *name; /* Name of the generated parser */
+  char *arg;               /* Declaration of the 3rd argument to the parser */
+ char *tokentype; /* Type of terminal symbols in the parser stack */
+ char *vartype; /* The default type of non-terminal symbols */
+ char *start; /* Name of the start symbol for the grammar */
+ char *stacksize; /* Size of the parser stack */
+ char *include; /* Code to put at the start of the C file */
+ int includeln; /* Line number for start of include code */
+ char *error; /* Code to execute when an error is seen */
+ int errorln; /* Line number for start of error code */
+ char *overflow; /* Code to execute on a stack overflow */
+ int overflowln; /* Line number for start of overflow code */
+ char *failure; /* Code to execute on parser failure */
+ int failureln; /* Line number for start of failure code */
+  char *accept;            /* Code to execute when the parser accepts */
+ int acceptln; /* Line number for the start of accept code */
+ char *extracode; /* Code appended to the generated file */
+ int extracodeln; /* Line number for the start of the extra code */
+ char *tokendest; /* Code to execute to destroy token data */
+ int tokendestln; /* Line number for token destroyer code */
+ char *vardest; /* Code for the default non-terminal destructor */
+ int vardestln; /* Line number for default non-term destructor code*/
+ char *filename; /* Name of the input file */
+ char *outname; /* Name of the current output file */
+ char *tokenprefix; /* A prefix added to token names in the .h file */
+ int nconflict; /* Number of parsing conflicts */
+ int tablesize; /* Size of the parse tables */
+ int basisflag; /* Print only basis configurations */
+  int has_fallback;        /* True if any %fallback is seen in the grammar */
+ char *argv0; /* Name of the program */
+};
+
+#define MemoryCheck(X) if((X)==0){ \
+ extern void memory_error(); \
+ memory_error(); \
+}
+
+/**************** From the file "table.h" *********************************/
+/*
+** All code in this file has been automatically generated
+** from a specification in the file
+** "table.q"
+** by the associative array code building program "aagen".
+** Do not edit this file! Instead, edit the specification
+** file, then rerun aagen.
+*/
+/*
+** Code for processing tables in the LEMON parser generator.
+*/
+
+/* Routines for handling strings */
+
+char *Strsafe();
+
+void Strsafe_init(/* void */);
+int Strsafe_insert(/* char * */);
+char *Strsafe_find(/* char * */);
+
+/* Routines for handling symbols of the grammar */
+
+struct symbol *Symbol_new();
+int Symbolcmpp(/* struct symbol **, struct symbol ** */);
+void Symbol_init(/* void */);
+int Symbol_insert(/* struct symbol *, char * */);
+struct symbol *Symbol_find(/* char * */);
+struct symbol *Symbol_Nth(/* int */);
+int Symbol_count(/* */);
+struct symbol **Symbol_arrayof(/* */);
+
+/* Routines to manage the state table */
+
+int Configcmp(/* struct config *, struct config * */);
+struct state *State_new();
+void State_init(/* void */);
+int State_insert(/* struct state *, struct config * */);
+struct state *State_find(/* struct config * */);
+struct state **State_arrayof(/* */);
+
+/* Routines used for efficiency in Configlist_add */
+
+void Configtable_init(/* void */);
+int Configtable_insert(/* struct config * */);
+struct config *Configtable_find(/* struct config * */);
+void Configtable_clear(/* int(*)(struct config *) */);
+/****************** From the file "action.c" *******************************/
+/*
+** Routines processing parser actions in the LEMON parser generator.
+*/
+
+/* Allocate a new parser action */
+struct action *Action_new(){
+ static struct action *freelist = 0;
+ struct action *new;
+
+ if( freelist==0 ){
+ int i;
+ int amt = 100;
+ freelist = (struct action *)malloc( sizeof(struct action)*amt );
+ if( freelist==0 ){
+ fprintf(stderr,"Unable to allocate memory for a new parser action.");
+ exit(1);
+ }
+ for(i=0; i<amt-1; i++) freelist[i].next = &freelist[i+1];
+ freelist[amt-1].next = 0;
+ }
+ new = freelist;
+ freelist = freelist->next;
+ return new;
+}
+
+/* Compare two actions */
+static int actioncmp(ap1,ap2)
+struct action *ap1;
+struct action *ap2;
+{
+ int rc;
+ rc = ap1->sp->index - ap2->sp->index;
+ if( rc==0 ) rc = (int)ap1->type - (int)ap2->type;
+ if( rc==0 ){
+ assert( ap1->type==REDUCE || ap1->type==RD_RESOLVED || ap1->type==CONFLICT);
+ assert( ap2->type==REDUCE || ap2->type==RD_RESOLVED || ap2->type==CONFLICT);
+ rc = ap1->x.rp->index - ap2->x.rp->index;
+ }
+ return rc;
+}
+
+/* Sort parser actions */
+struct action *Action_sort(ap)
+struct action *ap;
+{
+ ap = (struct action *)msort((char *)ap,(char **)&ap->next,actioncmp);
+ return ap;
+}
+
+void Action_add(app,type,sp,arg)
+struct action **app;
+enum e_action type;
+struct symbol *sp;
+char *arg;
+{
+ struct action *new;
+ new = Action_new();
+ new->next = *app;
+ *app = new;
+ new->type = type;
+ new->sp = sp;
+ if( type==SHIFT ){
+ new->x.stp = (struct state *)arg;
+ }else{
+ new->x.rp = (struct rule *)arg;
+ }
+}
+/********************** New code to implement the "acttab" module ***********/
+/*
+** This module implements routines used to construct the yy_action[] table.
+*/
+
+/*
+** The state of the yy_action table under construction is an instance of
+** the following structure
+*/
+typedef struct acttab acttab;
+struct acttab {
+ int nAction; /* Number of used slots in aAction[] */
+ int nActionAlloc; /* Slots allocated for aAction[] */
+ struct {
+ int lookahead; /* Value of the lookahead token */
+ int action; /* Action to take on the given lookahead */
+ } *aAction, /* The yy_action[] table under construction */
+ *aLookahead; /* A single new transaction set */
+ int mnLookahead; /* Minimum aLookahead[].lookahead */
+ int mnAction; /* Action associated with mnLookahead */
+ int mxLookahead; /* Maximum aLookahead[].lookahead */
+ int nLookahead; /* Used slots in aLookahead[] */
+ int nLookaheadAlloc; /* Slots allocated in aLookahead[] */
+};
+
+/* Return the number of entries in the yy_action table */
+#define acttab_size(X) ((X)->nAction)
+
+/* The value for the N-th entry in yy_action */
+#define acttab_yyaction(X,N) ((X)->aAction[N].action)
+
+/* The value for the N-th entry in yy_lookahead */
+#define acttab_yylookahead(X,N) ((X)->aAction[N].lookahead)
+
+/* Free all memory associated with the given acttab */
+void acttab_free(acttab *p){
+ free( p->aAction );
+ free( p->aLookahead );
+ free( p );
+}
+
+/* Allocate a new acttab structure */
+acttab *acttab_alloc(void){
+ acttab *p = malloc( sizeof(*p) );
+ if( p==0 ){
+ fprintf(stderr,"Unable to allocate memory for a new acttab.");
+ exit(1);
+ }
+ memset(p, 0, sizeof(*p));
+ return p;
+}
+
+/* Add a new action to the current transaction set
+*/
+void acttab_action(acttab *p, int lookahead, int action){
+ if( p->nLookahead>=p->nLookaheadAlloc ){
+ p->nLookaheadAlloc += 25;
+ p->aLookahead = realloc( p->aLookahead,
+ sizeof(p->aLookahead[0])*p->nLookaheadAlloc );
+ if( p->aLookahead==0 ){
+ fprintf(stderr,"malloc failed\n");
+ exit(1);
+ }
+ }
+ if( p->nLookahead==0 ){
+ p->mxLookahead = lookahead;
+ p->mnLookahead = lookahead;
+ p->mnAction = action;
+ }else{
+ if( p->mxLookahead<lookahead ) p->mxLookahead = lookahead;
+ if( p->mnLookahead>lookahead ){
+ p->mnLookahead = lookahead;
+ p->mnAction = action;
+ }
+ }
+ p->aLookahead[p->nLookahead].lookahead = lookahead;
+ p->aLookahead[p->nLookahead].action = action;
+ p->nLookahead++;
+}
+
+/*
+** Add the transaction set built up with prior calls to acttab_action()
+** into the current action table. Then reset the transaction set back
+** to an empty set in preparation for a new round of acttab_action() calls.
+**
+** Return the offset into the action table of the new transaction.
+*/
+int acttab_insert(acttab *p){
+ int i, j, k, n;
+ assert( p->nLookahead>0 );
+
+ /* Make sure we have enough space to hold the expanded action table
+ ** in the worst case. The worst case occurs if the transaction set
+ ** must be appended to the current action table
+ */
+ n = p->mxLookahead + 1;
+ if( p->nAction + n >= p->nActionAlloc ){
+ int oldAlloc = p->nActionAlloc;
+ p->nActionAlloc = p->nAction + n + p->nActionAlloc + 20;
+ p->aAction = realloc( p->aAction,
+ sizeof(p->aAction[0])*p->nActionAlloc);
+ if( p->aAction==0 ){
+ fprintf(stderr,"malloc failed\n");
+ exit(1);
+ }
+ for(i=oldAlloc; i<p->nActionAlloc; i++){
+ p->aAction[i].lookahead = -1;
+ p->aAction[i].action = -1;
+ }
+ }
+
+ /* Scan the existing action table looking for an offset where we can
+ ** insert the current transaction set. Fall out of the loop when that
+ ** offset is found. In the worst case, we fall out of the loop when
+ ** i reaches p->nAction, which means we append the new transaction set.
+ **
+ ** i is the index in p->aAction[] where p->mnLookahead is inserted.
+ */
+ for(i=0; i<p->nAction+p->mnLookahead; i++){
+ if( p->aAction[i].lookahead<0 ){
+ for(j=0; j<p->nLookahead; j++){
+ k = p->aLookahead[j].lookahead - p->mnLookahead + i;
+ if( k<0 ) break;
+ if( p->aAction[k].lookahead>=0 ) break;
+ }
+ if( j<p->nLookahead ) continue;
+ for(j=0; j<p->nAction; j++){
+ if( p->aAction[j].lookahead==j+p->mnLookahead-i ) break;
+ }
+ if( j==p->nAction ){
+ break; /* Fits in empty slots */
+ }
+ }else if( p->aAction[i].lookahead==p->mnLookahead ){
+ if( p->aAction[i].action!=p->mnAction ) continue;
+ for(j=0; j<p->nLookahead; j++){
+ k = p->aLookahead[j].lookahead - p->mnLookahead + i;
+ if( k<0 || k>=p->nAction ) break;
+ if( p->aLookahead[j].lookahead!=p->aAction[k].lookahead ) break;
+ if( p->aLookahead[j].action!=p->aAction[k].action ) break;
+ }
+ if( j<p->nLookahead ) continue;
+ n = 0;
+ for(j=0; j<p->nAction; j++){
+ if( p->aAction[j].lookahead<0 ) continue;
+ if( p->aAction[j].lookahead==j+p->mnLookahead-i ) n++;
+ }
+ if( n==p->nLookahead ){
+ break; /* Same as a prior transaction set */
+ }
+ }
+ }
+ /* Insert transaction set at index i. */
+ for(j=0; j<p->nLookahead; j++){
+ k = p->aLookahead[j].lookahead - p->mnLookahead + i;
+ p->aAction[k] = p->aLookahead[j];
+ if( k>=p->nAction ) p->nAction = k+1;
+ }
+ p->nLookahead = 0;
+
+ /* Return the offset that is added to the lookahead in order to get the
+ ** index into yy_action of the action */
+ return i - p->mnLookahead;
+}
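+
+/*
+** Illustrative note (an assumption about how a generated parser consumes
+** this table, not a statement from the original comments): if
+** acttab_insert() returned the offset "ofst" for some state, the action for
+** lookahead token "la" is located along the lines of
+**
+**     if( acttab_yylookahead(p, ofst+la)==la ){
+**       act = acttab_yyaction(p, ofst+la);
+**     }
+**
+** which is why the transaction sets of different states may overlap and
+** share slots in yy_action[].
+*/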
+
+/********************** From the file "assert.c" ****************************/
+/*
+** A more efficient way of handling assertions.
+*/
+void myassert(file,line)
+char *file;
+int line;
+{
+ fprintf(stderr,"Assertion failed on line %d of file \"%s\"\n",line,file);
+ exit(1);
+}
+/********************** From the file "build.c" *****************************/
+/*
+** Routines to construct the finite state machine for the LEMON
+** parser generator.
+*/
+
+/* Find a precedence symbol of every rule in the grammar.
+**
+** Those rules which have a precedence symbol coded in the input
+** grammar using the "[symbol]" construct will already have the
+** rp->precsym field filled. Other rules take as their precedence
+** symbol the first RHS symbol with a defined precedence. If there
+** are no RHS symbols with a defined precedence, the precedence
+** symbol field is left blank.
+*/
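+/*
+** Illustrative example (an assumption, not from the original comment): for
+** a rule "expr ::= expr PLUS expr" with no explicit "[symbol]", the first
+** RHS symbol carrying a declared precedence, here the terminal PLUS,
+** becomes the rule's precedence symbol.
+*/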
+void FindRulePrecedences(xp)
+struct lemon *xp;
+{
+ struct rule *rp;
+ for(rp=xp->rule; rp; rp=rp->next){
+ if( rp->precsym==0 ){
+ int i;
+ for(i=0; i<rp->nrhs; i++){
+ if( rp->rhs[i]->prec>=0 ){
+ rp->precsym = rp->rhs[i];
+ break;
+ }
+ }
+ }
+ }
+ return;
+}
+
+/* Find all nonterminals which will generate the empty string.
+** Then go back and compute the first sets of every nonterminal.
+** The first set is the set of all terminal symbols which can begin
+** a string generated by that nonterminal.
+*/
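+/*
+** Illustrative example (an assumption, not from the original comment):
+** given the rules "A ::= ." and "A ::= B c", the first pass below marks A
+** as lambda because it can derive the empty string, and the second pass
+** adds everything in FIRST(B) to A's first-set, plus the terminal c
+** whenever B can itself derive the empty string.
+*/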
+void FindFirstSets(lemp)
+struct lemon *lemp;
+{
+ int i;
+ struct rule *rp;
+ int progress;
+
+ for(i=0; i<lemp->nsymbol; i++){
+ lemp->symbols[i]->lambda = B_FALSE;
+ }
+ for(i=lemp->nterminal; i<lemp->nsymbol; i++){
+ lemp->symbols[i]->firstset = SetNew();
+ }
+
+ /* First compute all lambdas */
+ do{
+ progress = 0;
+ for(rp=lemp->rule; rp; rp=rp->next){
+ if( rp->lhs->lambda ) continue;
+ for(i=0; i<rp->nrhs; i++){
+ if( rp->rhs[i]->lambda==B_FALSE ) break;
+ }
+ if( i==rp->nrhs ){
+ rp->lhs->lambda = B_TRUE;
+ progress = 1;
+ }
+ }
+ }while( progress );
+
+ /* Now compute all first sets */
+ do{
+ struct symbol *s1, *s2;
+ progress = 0;
+ for(rp=lemp->rule; rp; rp=rp->next){
+ s1 = rp->lhs;
+ for(i=0; i<rp->nrhs; i++){
+ s2 = rp->rhs[i];
+ if( s2->type==TERMINAL ){
+ progress += SetAdd(s1->firstset,s2->index);
+ break;
+ }else if( s1==s2 ){
+ if( s1->lambda==B_FALSE ) break;
+ }else{
+ progress += SetUnion(s1->firstset,s2->firstset);
+ if( s2->lambda==B_FALSE ) break;
+ }
+ }
+ }
+ }while( progress );
+ return;
+}
+
+/* Compute all LR(0) states for the grammar. Links
+** are added between some states so that the LR(1) follow sets
+** can be computed later.
+*/
+PRIVATE struct state *getstate(/* struct lemon * */); /* forward reference */
+void FindStates(lemp)
+struct lemon *lemp;
+{
+ struct symbol *sp;
+ struct rule *rp;
+
+ Configlist_init();
+
+ /* Find the start symbol */
+ if( lemp->start ){
+ sp = Symbol_find(lemp->start);
+ if( sp==0 ){
+ ErrorMsg(lemp->filename,0,
+"The specified start symbol \"%s\" is not \
+in a nonterminal of the grammar. \"%s\" will be used as the start \
+symbol instead.",lemp->start,lemp->rule->lhs->name);
+ lemp->errorcnt++;
+ sp = lemp->rule->lhs;
+ }
+ }else{
+ sp = lemp->rule->lhs;
+ }
+
+ /* Make sure the start symbol doesn't occur on the right-hand side of
+ ** any rule. Report an error if it does. (YACC would generate a new
+ ** start symbol in this case.) */
+ for(rp=lemp->rule; rp; rp=rp->next){
+ int i;
+ for(i=0; i<rp->nrhs; i++){
+ if( rp->rhs[i]==sp ){
+ ErrorMsg(lemp->filename,0,
+"The start symbol \"%s\" occurs on the \
+right-hand side of a rule. This will result in a parser which \
+does not work properly.",sp->name);
+ lemp->errorcnt++;
+ }
+ }
+ }
+
+ /* The basis configuration set for the first state
+ ** is all rules which have the start symbol as their
+ ** left-hand side */
+ for(rp=sp->rule; rp; rp=rp->nextlhs){
+ struct config *newcfp;
+ newcfp = Configlist_addbasis(rp,0);
+ SetAdd(newcfp->fws,0);
+ }
+
+ /* Compute the first state. All other states will be
+ ** computed automatically during the computation of the first one.
+ ** The returned pointer to the first state is not used. */
+ (void)getstate(lemp);
+ return;
+}
+
+/* Return a pointer to a state which is described by the configuration
+** list which has been built from calls to Configlist_add.
+*/
+PRIVATE void buildshifts(/* struct lemon *, struct state * */); /* Forwd ref */
+PRIVATE struct state *getstate(lemp)
+struct lemon *lemp;
+{
+ struct config *cfp, *bp;
+ struct state *stp;
+
+ /* Extract the sorted basis of the new state. The basis was constructed
+ ** by prior calls to "Configlist_addbasis()". */
+ Configlist_sortbasis();
+ bp = Configlist_basis();
+
+ /* Get a state with the same basis */
+ stp = State_find(bp);
+ if( stp ){
+ /* A state with the same basis already exists! Copy all the follow-set
+ ** propagation links from the state under construction into the
+ ** preexisting state, then return a pointer to the preexisting state */
+ struct config *x, *y;
+ for(x=bp, y=stp->bp; x && y; x=x->bp, y=y->bp){
+ Plink_copy(&y->bplp,x->bplp);
+ Plink_delete(x->fplp);
+ x->fplp = x->bplp = 0;
+ }
+ cfp = Configlist_return();
+ Configlist_eat(cfp);
+ }else{
+ /* This really is a new state. Construct all the details */
+ Configlist_closure(lemp); /* Compute the configuration closure */
+ Configlist_sort(); /* Sort the configuration closure */
+ cfp = Configlist_return(); /* Get a pointer to the config list */
+ stp = State_new(); /* A new state structure */
+ MemoryCheck(stp);
+ stp->bp = bp; /* Remember the configuration basis */
+ stp->cfp = cfp; /* Remember the configuration closure */
+ stp->index = lemp->nstate++; /* Every state gets a sequence number */
+ stp->ap = 0; /* No actions, yet. */
+ State_insert(stp,stp->bp); /* Add to the state table */
+ buildshifts(lemp,stp); /* Recursively compute successor states */
+ }
+ return stp;
+}
+
+/* Construct all successor states to the given state. A "successor"
+** state is any state which can be reached by a shift action.
+*/
+PRIVATE void buildshifts(lemp,stp)
+struct lemon *lemp;
+struct state *stp; /* The state from which successors are computed */
+{
+ struct config *cfp; /* For looping thru the config closure of "stp" */
+ struct config *bcfp; /* For the inner loop on config closure of "stp" */
+ struct config *new; /* */
+ struct symbol *sp; /* Symbol following the dot in configuration "cfp" */
+ struct symbol *bsp; /* Symbol following the dot in configuration "bcfp" */
+ struct state *newstp; /* A pointer to a successor state */
+
+  /* Each configuration becomes complete after it contributes to a successor
+ ** state. Initially, all configurations are incomplete */
+ for(cfp=stp->cfp; cfp; cfp=cfp->next) cfp->status = INCOMPLETE;
+
+ /* Loop through all configurations of the state "stp" */
+ for(cfp=stp->cfp; cfp; cfp=cfp->next){
+ if( cfp->status==COMPLETE ) continue; /* Already used by inner loop */
+ if( cfp->dot>=cfp->rp->nrhs ) continue; /* Can't shift this config */
+ Configlist_reset(); /* Reset the new config set */
+ sp = cfp->rp->rhs[cfp->dot]; /* Symbol after the dot */
+
+ /* For every configuration in the state "stp" which has the symbol "sp"
+ ** following its dot, add the same configuration to the basis set under
+ ** construction but with the dot shifted one symbol to the right. */
+ for(bcfp=cfp; bcfp; bcfp=bcfp->next){
+ if( bcfp->status==COMPLETE ) continue; /* Already used */
+ if( bcfp->dot>=bcfp->rp->nrhs ) continue; /* Can't shift this one */
+ bsp = bcfp->rp->rhs[bcfp->dot]; /* Get symbol after dot */
+ if( bsp!=sp ) continue; /* Must be same as for "cfp" */
+ bcfp->status = COMPLETE; /* Mark this config as used */
+ new = Configlist_addbasis(bcfp->rp,bcfp->dot+1);
+ Plink_add(&new->bplp,bcfp);
+ }
+
+ /* Get a pointer to the state described by the basis configuration set
+ ** constructed in the preceding loop */
+ newstp = getstate(lemp);
+
+ /* The state "newstp" is reached from the state "stp" by a shift action
+ ** on the symbol "sp" */
+ Action_add(&stp->ap,SHIFT,sp,(char *)newstp);
+ }
+}
+
+/*
+** Construct the propagation links
+*/
+void FindLinks(lemp)
+struct lemon *lemp;
+{
+ int i;
+ struct config *cfp, *other;
+ struct state *stp;
+ struct plink *plp;
+
+ /* Housekeeping detail:
+ ** Add to every propagate link a pointer back to the state to
+ ** which the link is attached. */
+ for(i=0; i<lemp->nstate; i++){
+ stp = lemp->sorted[i];
+ for(cfp=stp->cfp; cfp; cfp=cfp->next){
+ cfp->stp = stp;
+ }
+ }
+
+ /* Convert all backlinks into forward links. Only the forward
+ ** links are used in the follow-set computation. */
+ for(i=0; i<lemp->nstate; i++){
+ stp = lemp->sorted[i];
+ for(cfp=stp->cfp; cfp; cfp=cfp->next){
+ for(plp=cfp->bplp; plp; plp=plp->next){
+ other = plp->cfp;
+ Plink_add(&other->fplp,cfp);
+ }
+ }
+ }
+}
+
+/* Compute all followsets.
+**
+** A followset is the set of all symbols which can come immediately
+** after a configuration.
+*/
+void FindFollowSets(lemp)
+struct lemon *lemp;
+{
+ int i;
+ struct config *cfp;
+ struct plink *plp;
+ int progress;
+ int change;
+
+ for(i=0; i<lemp->nstate; i++){
+ for(cfp=lemp->sorted[i]->cfp; cfp; cfp=cfp->next){
+ cfp->status = INCOMPLETE;
+ }
+ }
+
+ do{
+ progress = 0;
+ for(i=0; i<lemp->nstate; i++){
+ for(cfp=lemp->sorted[i]->cfp; cfp; cfp=cfp->next){
+ if( cfp->status==COMPLETE ) continue;
+ for(plp=cfp->fplp; plp; plp=plp->next){
+ change = SetUnion(plp->cfp->fws,cfp->fws);
+ if( change ){
+ plp->cfp->status = INCOMPLETE;
+ progress = 1;
+ }
+ }
+ cfp->status = COMPLETE;
+ }
+ }
+ }while( progress );
+}
+
+static int resolve_conflict();
+
+/* Compute the reduce actions, and resolve conflicts.
+*/
+void FindActions(lemp)
+struct lemon *lemp;
+{
+ int i,j;
+ struct config *cfp;
+ struct state *stp;
+ struct symbol *sp;
+ struct rule *rp;
+
+ /* Add all of the reduce actions
+ ** A reduce action is added for each element of the followset of
+ ** a configuration which has its dot at the extreme right.
+ */
+ for(i=0; i<lemp->nstate; i++){ /* Loop over all states */
+ stp = lemp->sorted[i];
+ for(cfp=stp->cfp; cfp; cfp=cfp->next){ /* Loop over all configurations */
+ if( cfp->rp->nrhs==cfp->dot ){ /* Is dot at extreme right? */
+ for(j=0; j<lemp->nterminal; j++){
+ if( SetFind(cfp->fws,j) ){
+ /* Add a reduce action to the state "stp" which will reduce by the
+ ** rule "cfp->rp" if the lookahead symbol is "lemp->symbols[j]" */
+ Action_add(&stp->ap,REDUCE,lemp->symbols[j],(char *)cfp->rp);
+ }
+ }
+ }
+ }
+ }
+
+ /* Add the accepting token */
+ if( lemp->start ){
+ sp = Symbol_find(lemp->start);
+ if( sp==0 ) sp = lemp->rule->lhs;
+ }else{
+ sp = lemp->rule->lhs;
+ }
+ /* Add to the first state (which is always the starting state of the
+ ** finite state machine) an action to ACCEPT if the lookahead is the
+ ** start nonterminal. */
+ Action_add(&lemp->sorted[0]->ap,ACCEPT,sp,0);
+
+ /* Resolve conflicts */
+ for(i=0; i<lemp->nstate; i++){
+ struct action *ap, *nap;
+ struct state *stp;
+ stp = lemp->sorted[i];
+ assert( stp->ap );
+ stp->ap = Action_sort(stp->ap);
+ for(ap=stp->ap; ap && ap->next; ap=ap->next){
+ for(nap=ap->next; nap && nap->sp==ap->sp; nap=nap->next){
+ /* The two actions "ap" and "nap" have the same lookahead.
+ ** Figure out which one should be used */
+ lemp->nconflict += resolve_conflict(ap,nap,lemp->errsym);
+ }
+ }
+ }
+
+ /* Report an error for each rule that can never be reduced. */
+ for(rp=lemp->rule; rp; rp=rp->next) rp->canReduce = B_FALSE;
+ for(i=0; i<lemp->nstate; i++){
+ struct action *ap;
+ for(ap=lemp->sorted[i]->ap; ap; ap=ap->next){
+ if( ap->type==REDUCE ) ap->x.rp->canReduce = B_TRUE;
+ }
+ }
+ for(rp=lemp->rule; rp; rp=rp->next){
+ if( rp->canReduce ) continue;
+ ErrorMsg(lemp->filename,rp->ruleline,"This rule can not be reduced.\n");
+ lemp->errorcnt++;
+ }
+}
+
+/* Resolve a conflict between the two given actions. If the
+** conflict can't be resolved, return non-zero.
+**
+** NO LONGER TRUE:
+** To resolve a conflict, first look to see if either action
+** is on an error rule. In that case, take the action which
+** is not associated with the error rule. If neither or both
+** actions are associated with an error rule, then try to
+** use precedence to resolve the conflict.
+**
+** If either action is a SHIFT, then it must be apx. This
+** function won't work if apx->type==REDUCE and apy->type==SHIFT.
+*/
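+/*
+** Illustrative example (an assumption, not from the original comment): with
+** a lookahead of PLUS after "expr ::= expr PLUS expr .", both the shift of
+** PLUS and the reduce by that rule carry the precedence of PLUS; if PLUS
+** was declared %left, the shift is marked SH_RESOLVED and the reduce wins,
+** whereas a higher-precedence lookahead such as TIMES marks the reduce
+** RD_RESOLVED so that the shift wins.
+*/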
+static int resolve_conflict(apx,apy,errsym)
+struct action *apx;
+struct action *apy;
+struct symbol *errsym;   /* The error symbol (if defined; NULL otherwise) */
+{
+ struct symbol *spx, *spy;
+ int errcnt = 0;
+ assert( apx->sp==apy->sp ); /* Otherwise there would be no conflict */
+ if( apx->type==SHIFT && apy->type==REDUCE ){
+ spx = apx->sp;
+ spy = apy->x.rp->precsym;
+ if( spy==0 || spx->prec<0 || spy->prec<0 ){
+ /* Not enough precedence information. */
+ apy->type = CONFLICT;
+ errcnt++;
+    }else if( spx->prec>spy->prec ){    /* Higher precedence wins */
+ apy->type = RD_RESOLVED;
+ }else if( spx->prec<spy->prec ){
+ apx->type = SH_RESOLVED;
+ }else if( spx->prec==spy->prec && spx->assoc==RIGHT ){ /* Use operator */
+ apy->type = RD_RESOLVED; /* associativity */
+ }else if( spx->prec==spy->prec && spx->assoc==LEFT ){ /* to break tie */
+ apx->type = SH_RESOLVED;
+ }else{
+ assert( spx->prec==spy->prec && spx->assoc==NONE );
+ apy->type = CONFLICT;
+ errcnt++;
+ }
+ }else if( apx->type==REDUCE && apy->type==REDUCE ){
+ spx = apx->x.rp->precsym;
+ spy = apy->x.rp->precsym;
+ if( spx==0 || spy==0 || spx->prec<0 ||
+ spy->prec<0 || spx->prec==spy->prec ){
+ apy->type = CONFLICT;
+ errcnt++;
+ }else if( spx->prec>spy->prec ){
+ apy->type = RD_RESOLVED;
+ }else if( spx->prec<spy->prec ){
+ apx->type = RD_RESOLVED;
+ }
+ }else{
+ assert(
+ apx->type==SH_RESOLVED ||
+ apx->type==RD_RESOLVED ||
+ apx->type==CONFLICT ||
+ apy->type==SH_RESOLVED ||
+ apy->type==RD_RESOLVED ||
+ apy->type==CONFLICT
+ );
+ /* The REDUCE/SHIFT case cannot happen because SHIFTs come before
+ ** REDUCEs on the list. If we reach this point it must be because
+ ** the parser conflict had already been resolved. */
+ }
+ return errcnt;
+}
+/********************* From the file "configlist.c" *************************/
+/*
+** Routines for processing a configuration list and building a state
+** in the LEMON parser generator.
+*/
+
+static struct config *freelist = 0; /* List of free configurations */
+static struct config *current = 0; /* Top of list of configurations */
+static struct config **currentend = 0; /* Last on list of configs */
+static struct config *basis = 0; /* Top of list of basis configs */
+static struct config **basisend = 0; /* End of list of basis configs */
+
+/* Return a pointer to a new configuration */
+PRIVATE struct config *newconfig(){
+ struct config *new;
+ if( freelist==0 ){
+ int i;
+ int amt = 3;
+ freelist = (struct config *)malloc( sizeof(struct config)*amt );
+ if( freelist==0 ){
+ fprintf(stderr,"Unable to allocate memory for a new configuration.");
+ exit(1);
+ }
+ for(i=0; i<amt-1; i++) freelist[i].next = &freelist[i+1];
+ freelist[amt-1].next = 0;
+ }
+ new = freelist;
+ freelist = freelist->next;
+ return new;
+}
+
+/* The configuration "old" is no longer used */
+PRIVATE void deleteconfig(old)
+struct config *old;
+{
+ old->next = freelist;
+ freelist = old;
+}
+
+/* Initialize the configuration list builder */
+void Configlist_init(){
+ current = 0;
+ currentend = &current;
+ basis = 0;
+ basisend = &basis;
+ Configtable_init();
+ return;
+}
+
+/* Reset the configuration list builder */
+void Configlist_reset(){
+ current = 0;
+ currentend = &current;
+ basis = 0;
+ basisend = &basis;
+ Configtable_clear(0);
+ return;
+}
+
+/* Add another configuration to the configuration list */
+struct config *Configlist_add(rp,dot)
+struct rule *rp; /* The rule */
+int dot; /* Index into the RHS of the rule where the dot goes */
+{
+ struct config *cfp, model;
+
+ assert( currentend!=0 );
+ model.rp = rp;
+ model.dot = dot;
+ cfp = Configtable_find(&model);
+ if( cfp==0 ){
+ cfp = newconfig();
+ cfp->rp = rp;
+ cfp->dot = dot;
+ cfp->fws = SetNew();
+ cfp->stp = 0;
+ cfp->fplp = cfp->bplp = 0;
+ cfp->next = 0;
+ cfp->bp = 0;
+ *currentend = cfp;
+ currentend = &cfp->next;
+ Configtable_insert(cfp);
+ }
+ return cfp;
+}
+
+/* Add a basis configuration to the configuration list */
+struct config *Configlist_addbasis(rp,dot)
+struct rule *rp;
+int dot;
+{
+ struct config *cfp, model;
+
+ assert( basisend!=0 );
+ assert( currentend!=0 );
+ model.rp = rp;
+ model.dot = dot;
+ cfp = Configtable_find(&model);
+ if( cfp==0 ){
+ cfp = newconfig();
+ cfp->rp = rp;
+ cfp->dot = dot;
+ cfp->fws = SetNew();
+ cfp->stp = 0;
+ cfp->fplp = cfp->bplp = 0;
+ cfp->next = 0;
+ cfp->bp = 0;
+ *currentend = cfp;
+ currentend = &cfp->next;
+ *basisend = cfp;
+ basisend = &cfp->bp;
+ Configtable_insert(cfp);
+ }
+ return cfp;
+}
+
+/* Compute the closure of the configuration list */
+void Configlist_closure(lemp)
+struct lemon *lemp;
+{
+ struct config *cfp, *newcfp;
+ struct rule *rp, *newrp;
+ struct symbol *sp, *xsp;
+ int i, dot;
+
+ assert( currentend!=0 );
+ for(cfp=current; cfp; cfp=cfp->next){
+ rp = cfp->rp;
+ dot = cfp->dot;
+ if( dot>=rp->nrhs ) continue;
+ sp = rp->rhs[dot];
+ if( sp->type==NONTERMINAL ){
+ if( sp->rule==0 && sp!=lemp->errsym ){
+ ErrorMsg(lemp->filename,rp->line,"Nonterminal \"%s\" has no rules.",
+ sp->name);
+ lemp->errorcnt++;
+ }
+ for(newrp=sp->rule; newrp; newrp=newrp->nextlhs){
+ newcfp = Configlist_add(newrp,0);
+ for(i=dot+1; i<rp->nrhs; i++){
+ xsp = rp->rhs[i];
+ if( xsp->type==TERMINAL ){
+ SetAdd(newcfp->fws,xsp->index);
+ break;
+ }else{
+ SetUnion(newcfp->fws,xsp->firstset);
+ if( xsp->lambda==B_FALSE ) break;
+ }
+ }
+ if( i==rp->nrhs ) Plink_add(&cfp->fplp,newcfp);
+ }
+ }
+ }
+ return;
+}
+
+/* Sort the configuration list */
+void Configlist_sort(){
+ current = (struct config *)msort((char *)current,(char **)&(current->next),Configcmp);
+ currentend = 0;
+ return;
+}
+
+/* Sort the basis configuration list */
+void Configlist_sortbasis(){
+ basis = (struct config *)msort((char *)current,(char **)&(current->bp),Configcmp);
+ basisend = 0;
+ return;
+}
+
+/* Return a pointer to the head of the configuration list and
+** reset the list */
+struct config *Configlist_return(){
+ struct config *old;
+ old = current;
+ current = 0;
+ currentend = 0;
+ return old;
+}
+
+/* Return a pointer to the head of the basis configuration list and
+** reset the basis list */
+struct config *Configlist_basis(){
+ struct config *old;
+ old = basis;
+ basis = 0;
+ basisend = 0;
+ return old;
+}
+
+/* Free all elements of the given configuration list */
+void Configlist_eat(cfp)
+struct config *cfp;
+{
+ struct config *nextcfp;
+ for(; cfp; cfp=nextcfp){
+ nextcfp = cfp->next;
+ assert( cfp->fplp==0 );
+ assert( cfp->bplp==0 );
+ if( cfp->fws ) SetFree(cfp->fws);
+ deleteconfig(cfp);
+ }
+ return;
+}
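+
+/*
+** Usage sketch (illustrative only; not called from the routines above).
+** Building the item set for one parser state elsewhere in this program
+** follows roughly this sequence, where "rp" stands for some rule and
+** "lemp" for the global state vector:
+**
+**     Configlist_reset();
+**     Configlist_addbasis(rp,0);      /* seed with the basis items         */
+**     Configlist_closure(lemp);       /* add the closure (non-basis) items */
+**     Configlist_sortbasis();
+**     Configlist_sort();
+**     cfp = Configlist_return();      /* take ownership of the full list   */
+*/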
+/***************** From the file "error.c" *********************************/
+/*
+** Code for printing error messages.
+*/
+
+/* Find a good place to break "msg" so that its length is at least "min"
+** but no more than "max". Make the point as close to max as possible.
+*/
+static int findbreak(msg,min,max)
+char *msg;
+int min;
+int max;
+{
+ int i,spot;
+ char c;
+ for(i=spot=min; i<=max; i++){
+ c = msg[i];
+ if( c=='\t' ) msg[i] = ' ';
+ if( c=='\n' ){ msg[i] = ' '; spot = i; break; }
+ if( c==0 ){ spot = i; break; }
+ if( c=='-' && i<max-1 ) spot = i+1;
+ if( c==' ' ) spot = i;
+ }
+ return spot;
+}
+
+/*
+** The error message is split across multiple lines if necessary. The
+** splits occur at a space, if there is a space available near the end
+** of the line.
+*/
+#define ERRMSGSIZE 10000 /* Hope this is big enough. No way to error check */
+#define LINEWIDTH 79 /* Max width of any output line */
+#define PREFIXLIMIT 30 /* Max width of the prefix on each line */
+void ErrorMsg(const char *filename, int lineno, const char *format, ...){
+ char errmsg[ERRMSGSIZE];
+ char prefix[PREFIXLIMIT+10];
+ int errmsgsize;
+ int prefixsize;
+ int availablewidth;
+ va_list ap;
+ int end, restart, base;
+
+ va_start(ap, format);
+ /* Prepare a prefix to be prepended to every output line */
+ if( lineno>0 ){
+ sprintf(prefix,"%.*s:%d: ",PREFIXLIMIT-10,filename,lineno);
+ }else{
+ sprintf(prefix,"%.*s: ",PREFIXLIMIT-10,filename);
+ }
+ prefixsize = strlen(prefix);
+ availablewidth = LINEWIDTH - prefixsize;
+
+ /* Generate the error message */
+ vsprintf(errmsg,format,ap);
+ va_end(ap);
+ errmsgsize = strlen(errmsg);
+ /* Remove trailing '\n's from the error message. */
+ while( errmsgsize>0 && errmsg[errmsgsize-1]=='\n' ){
+ errmsg[--errmsgsize] = 0;
+ }
+
+ /* Print the error message */
+ base = 0;
+ while( errmsg[base]!=0 ){
+ end = restart = findbreak(&errmsg[base],0,availablewidth);
+ restart += base;
+ while( errmsg[restart]==' ' ) restart++;
+ fprintf(stdout,"%s%.*s\n",prefix,end,&errmsg[base]);
+ base = restart;
+ }
+}
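+
+/*
+** Example of the formatting above (illustrative only).  The hypothetical
+** call
+**
+**     ErrorMsg("gram.y", 12, "Nonterminal \"foo\" has no rules.");
+**
+** writes the single line
+**
+**     gram.y:12: Nonterminal "foo" has no rules.
+**
+** A message longer than LINEWIDTH minus the prefix is split by findbreak()
+** at a space or hyphen, and every continuation line repeats the
+** "file:line:" prefix.
+*/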
+/**************** From the file "main.c" ************************************/
+/*
+** Main program file for the LEMON parser generator.
+*/
+
+/* Report an out-of-memory condition and abort. This function
+** is used mostly by the "MemoryCheck" macro in struct.h
+*/
+void memory_error(){
+ fprintf(stderr,"Out of memory. Aborting...\n");
+ exit(1);
+}
+
+
+/* The main program. Parse the command line and do it... */
+int main(argc,argv)
+int argc;
+char **argv;
+{
+ static int version = 0;
+ static int rpflag = 0;
+ static int basisflag = 0;
+ static int compress = 0;
+ static int quiet = 0;
+ static int statistics = 0;
+ static int mhflag = 0;
+ static struct s_options options[] = {
+ {OPT_FLAG, "b", (char*)&basisflag, "Print only the basis in report."},
+ {OPT_FLAG, "c", (char*)&compress, "Don't compress the action table."},
+ {OPT_FLAG, "g", (char*)&rpflag, "Print grammar without actions."},
+ {OPT_FLAG, "m", (char*)&mhflag, "Output a makeheaders compatible file"},
+ {OPT_FLAG, "q", (char*)&quiet, "(Quiet) Don't print the report file."},
+ {OPT_FLAG, "s", (char*)&statistics, "Print parser stats to standard output."},
+ {OPT_FLAG, "x", (char*)&version, "Print the version number."},
+ {OPT_FLAG,0,0,0}
+ };
+ int i;
+ struct lemon lem;
+
+ OptInit(argv,options,stderr);
+ if( version ){
+ printf("Lemon version 1.0\n");
+ exit(0);
+ }
+ if( OptNArgs()!=1 ){
+ fprintf(stderr,"Exactly one filename argument is required.\n");
+ exit(1);
+ }
+ lem.errorcnt = 0;
+
+ /* Initialize the machine */
+ Strsafe_init();
+ Symbol_init();
+ State_init();
+ lem.argv0 = argv[0];
+ lem.filename = OptArg(0);
+ lem.basisflag = basisflag;
+ lem.has_fallback = 0;
+ lem.nconflict = 0;
+ lem.name = lem.include = lem.arg = lem.tokentype = lem.start = 0;
+ lem.vartype = 0;
+ lem.stacksize = 0;
+ lem.error = lem.overflow = lem.failure = lem.accept = lem.tokendest =
+ lem.tokenprefix = lem.outname = lem.extracode = 0;
+ lem.vardest = 0;
+ lem.tablesize = 0;
+ Symbol_new("$");
+ lem.errsym = Symbol_new("error");
+
+ /* Parse the input file */
+ Parse(&lem);
+ if( lem.errorcnt ) exit(lem.errorcnt);
+ if( lem.rule==0 ){
+ fprintf(stderr,"Empty grammar.\n");
+ exit(1);
+ }
+
+ /* Count and index the symbols of the grammar */
+ lem.nsymbol = Symbol_count();
+ Symbol_new("{default}");
+ lem.symbols = Symbol_arrayof();
+ for(i=0; i<=lem.nsymbol; i++) lem.symbols[i]->index = i;
+ qsort(lem.symbols,lem.nsymbol+1,sizeof(struct symbol*),
+ (int(*)())Symbolcmpp);
+ for(i=0; i<=lem.nsymbol; i++) lem.symbols[i]->index = i;
+ for(i=1; isupper(lem.symbols[i]->name[0]); i++);
+ lem.nterminal = i;
+
+ /* Generate a reprint of the grammar, if requested on the command line */
+ if( rpflag ){
+ Reprint(&lem);
+ }else{
+ /* Initialize the size for all follow and first sets */
+ SetSize(lem.nterminal);
+
+ /* Find the precedence for every production rule (that has one) */
+ FindRulePrecedences(&lem);
+
+ /* Compute the lambda-nonterminals and the first-sets for every
+ ** nonterminal */
+ FindFirstSets(&lem);
+
+ /* Compute all LR(0) states. Also record follow-set propagation
+ ** links so that the follow-set can be computed later */
+ lem.nstate = 0;
+ FindStates(&lem);
+ lem.sorted = State_arrayof();
+
+ /* Tie up loose ends on the propagation links */
+ FindLinks(&lem);
+
+ /* Compute the follow set of every reducible configuration */
+ FindFollowSets(&lem);
+
+ /* Compute the action tables */
+ FindActions(&lem);
+
+ /* Compress the action tables */
+ if( compress==0 ) CompressTables(&lem);
+
+ /* Generate a report of the parser generated. (the "y.output" file) */
+ if( !quiet ) ReportOutput(&lem);
+
+ /* Generate the source code for the parser */
+ ReportTable(&lem, mhflag);
+
+ /* Produce a header file for use by the scanner. (This step is
+ ** omitted if the "-m" option is used because makeheaders will
+ ** generate the file for us.) */
+ if( !mhflag ) ReportHeader(&lem);
+ }
+ if( statistics ){
+ printf("Parser statistics: %d terminals, %d nonterminals, %d rules\n",
+ lem.nterminal, lem.nsymbol - lem.nterminal, lem.nrule);
+ printf(" %d states, %d parser table entries, %d conflicts\n",
+ lem.nstate, lem.tablesize, lem.nconflict);
+ }
+ if( lem.nconflict ){
+ fprintf(stderr,"%d parsing conflicts.\n",lem.nconflict);
+ }
+ exit(lem.errorcnt + lem.nconflict);
+ return (lem.errorcnt + lem.nconflict);
+}
+/******************** From the file "msort.c" *******************************/
+/*
+** A generic merge-sort program.
+**
+** USAGE:
+** Let "ptr" be a pointer to some structure which is at the head of
+** a null-terminated list. Then to sort the list call:
+**
+** ptr = msort(ptr,&(ptr->next),cmpfnc);
+**
+** In the above, "cmpfnc" is a pointer to a function which compares
+** two instances of the structure and returns an integer, as in
+** strcmp. The second argument is a pointer to the pointer to the
+** second element of the linked list. This address is used to compute
+** the offset to the "next" field within the structure. The offset to
+** the "next" field must be constant for all structures in the list.
+**
+** The function returns a new pointer which is the head of the list
+** after sorting.
+**
+** ALGORITHM:
+** Merge-sort.
+*/
+
+/*
+** Return a pointer to the next structure in the linked list.
+*/
+#define NEXT(A) (*(char**)(((unsigned long)A)+offset))
+
+/*
+** Inputs:
+** a: A sorted, null-terminated linked list. (May be null).
+** b: A sorted, null-terminated linked list. (May be null).
+** cmp: A pointer to the comparison function.
+** offset: Offset in the structure to the "next" field.
+**
+** Return Value:
+** A pointer to the head of a sorted list containing the elements
+** of both a and b.
+**
+** Side effects:
+** The "next" pointers for elements in the lists a and b are
+** changed.
+*/
+static char *merge(a,b,cmp,offset)
+char *a;
+char *b;
+int (*cmp)();
+int offset;
+{
+ char *ptr, *head;
+
+ if( a==0 ){
+ head = b;
+ }else if( b==0 ){
+ head = a;
+ }else{
+ if( (*cmp)(a,b)<0 ){
+ ptr = a;
+ a = NEXT(a);
+ }else{
+ ptr = b;
+ b = NEXT(b);
+ }
+ head = ptr;
+ while( a && b ){
+ if( (*cmp)(a,b)<0 ){
+ NEXT(ptr) = a;
+ ptr = a;
+ a = NEXT(a);
+ }else{
+ NEXT(ptr) = b;
+ ptr = b;
+ b = NEXT(b);
+ }
+ }
+ if( a ) NEXT(ptr) = a;
+ else NEXT(ptr) = b;
+ }
+ return head;
+}
+
+/*
+** Inputs:
+** list: Pointer to a singly-linked list of structures.
+** next: Pointer to pointer to the second element of the list.
+** cmp: A comparison function.
+**
+** Return Value:
+** A pointer to the head of a sorted list containing the elements
+**   originally in list.
+**
+** Side effects:
+** The "next" pointers for elements in list are changed.
+*/
+#define LISTSIZE 30
+char *msort(list,next,cmp)
+char *list;
+char **next;
+int (*cmp)();
+{
+ unsigned long offset;
+ char *ep;
+ char *set[LISTSIZE];
+ int i;
+ offset = (unsigned long)next - (unsigned long)list;
+ for(i=0; i<LISTSIZE; i++) set[i] = 0;
+ while( list ){
+ ep = list;
+ list = NEXT(list);
+ NEXT(ep) = 0;
+ for(i=0; i<LISTSIZE-1 && set[i]!=0; i++){
+ ep = merge(ep,set[i],cmp,offset);
+ set[i] = 0;
+ }
+ set[i] = ep;
+ }
+ ep = 0;
+ for(i=0; i<LISTSIZE; i++) if( set[i] ) ep = merge(ep,set[i],cmp,offset);
+ return ep;
+}
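+
+/*
+** Illustrative use of msort() on a hypothetical structure (a sketch only;
+** "struct item" and itemcmp() are not part of this program):
+**
+**     struct item { int value; struct item *next; };
+**
+**     static int itemcmp(struct item *a, struct item *b){
+**       return a->value - b->value;
+**     }
+**
+**     head = (struct item *)msort((char *)head,
+**                                 (char **)&(head->next),
+**                                 (int(*)())itemcmp);
+**
+** The second argument is used only to derive the offset of the "next"
+** field; the list itself is reached through the first argument.
+*/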
+/************************ From the file "option.c" **************************/
+static char **argv;
+static struct s_options *op;
+static FILE *errstream;
+
+#define ISOPT(X) ((X)[0]=='-'||(X)[0]=='+'||strchr((X),'=')!=0)
+
+/*
+** Print the command line with a caret pointing to the k-th character
+** of the n-th field.
+*/
+static void errline(n,k,err)
+int n;
+int k;
+FILE *err;
+{
+ int spcnt, i;
+ spcnt = 0;
+ if( argv[0] ) fprintf(err,"%s",argv[0]);
+ spcnt = strlen(argv[0]) + 1;
+ for(i=1; i<n && argv[i]; i++){
+ fprintf(err," %s",argv[i]);
+    spcnt += strlen(argv[i]) + 1;
+ }
+ spcnt += k;
+ for(; argv[i]; i++) fprintf(err," %s",argv[i]);
+ if( spcnt<20 ){
+ fprintf(err,"\n%*s^-- here\n",spcnt,"");
+ }else{
+ fprintf(err,"\n%*shere --^\n",spcnt-7,"");
+ }
+}
+
+/*
+** Return the index of the N-th non-switch argument. Return -1
+** if N is out of range.
+*/
+static int argindex(n)
+int n;
+{
+ int i;
+ int dashdash = 0;
+ if( argv!=0 && *argv!=0 ){
+ for(i=1; argv[i]; i++){
+ if( dashdash || !ISOPT(argv[i]) ){
+ if( n==0 ) return i;
+ n--;
+ }
+ if( strcmp(argv[i],"--")==0 ) dashdash = 1;
+ }
+ }
+ return -1;
+}
+
+static char emsg[] = "Command line syntax error: ";
+
+/*
+** Process a flag command line argument.
+*/
+static int handleflags(i,err)
+int i;
+FILE *err;
+{
+ int v;
+ int errcnt = 0;
+ int j;
+ for(j=0; op[j].label; j++){
+ if( strcmp(&argv[i][1],op[j].label)==0 ) break;
+ }
+ v = argv[i][0]=='-' ? 1 : 0;
+ if( op[j].label==0 ){
+ if( err ){
+ fprintf(err,"%sundefined option.\n",emsg);
+ errline(i,1,err);
+ }
+ errcnt++;
+ }else if( op[j].type==OPT_FLAG ){
+ *((int*)op[j].arg) = v;
+ }else if( op[j].type==OPT_FFLAG ){
+ (*(void(*)())(op[j].arg))(v);
+ }else{
+ if( err ){
+ fprintf(err,"%smissing argument on switch.\n",emsg);
+ errline(i,1,err);
+ }
+ errcnt++;
+ }
+ return errcnt;
+}
+
+/*
+** Process a command line switch which has an argument.
+*/
+static int handleswitch(i,err)
+int i;
+FILE *err;
+{
+ int lv = 0;
+ double dv = 0.0;
+ char *sv = 0, *end;
+ char *cp;
+ int j;
+ int errcnt = 0;
+ cp = strchr(argv[i],'=');
+ *cp = 0;
+ for(j=0; op[j].label; j++){
+ if( strcmp(argv[i],op[j].label)==0 ) break;
+ }
+ *cp = '=';
+ if( op[j].label==0 ){
+ if( err ){
+ fprintf(err,"%sundefined option.\n",emsg);
+ errline(i,0,err);
+ }
+ errcnt++;
+ }else{
+ cp++;
+ switch( op[j].type ){
+ case OPT_FLAG:
+ case OPT_FFLAG:
+ if( err ){
+ fprintf(err,"%soption requires an argument.\n",emsg);
+ errline(i,0,err);
+ }
+ errcnt++;
+ break;
+ case OPT_DBL:
+ case OPT_FDBL:
+ dv = strtod(cp,&end);
+ if( *end ){
+ if( err ){
+ fprintf(err,"%sillegal character in floating-point argument.\n",emsg);
+ errline(i,((unsigned long)end)-(unsigned long)argv[i],err);
+ }
+ errcnt++;
+ }
+ break;
+ case OPT_INT:
+ case OPT_FINT:
+ lv = strtol(cp,&end,0);
+ if( *end ){
+ if( err ){
+ fprintf(err,"%sillegal character in integer argument.\n",emsg);
+ errline(i,((unsigned long)end)-(unsigned long)argv[i],err);
+ }
+ errcnt++;
+ }
+ break;
+ case OPT_STR:
+ case OPT_FSTR:
+ sv = cp;
+ break;
+ }
+ switch( op[j].type ){
+ case OPT_FLAG:
+ case OPT_FFLAG:
+ break;
+ case OPT_DBL:
+ *(double*)(op[j].arg) = dv;
+ break;
+ case OPT_FDBL:
+ (*(void(*)())(op[j].arg))(dv);
+ break;
+ case OPT_INT:
+ *(int*)(op[j].arg) = lv;
+ break;
+ case OPT_FINT:
+ (*(void(*)())(op[j].arg))((int)lv);
+ break;
+ case OPT_STR:
+ *(char**)(op[j].arg) = sv;
+ break;
+ case OPT_FSTR:
+ (*(void(*)())(op[j].arg))(sv);
+ break;
+ }
+ }
+ return errcnt;
+}
+
+int OptInit(a,o,err)
+char **a;
+struct s_options *o;
+FILE *err;
+{
+ int errcnt = 0;
+ argv = a;
+ op = o;
+ errstream = err;
+ if( argv && *argv && op ){
+ int i;
+ for(i=1; argv[i]; i++){
+ if( argv[i][0]=='+' || argv[i][0]=='-' ){
+ errcnt += handleflags(i,err);
+ }else if( strchr(argv[i],'=') ){
+ errcnt += handleswitch(i,err);
+ }
+ }
+ }
+ if( errcnt>0 ){
+ fprintf(err,"Valid command line options for \"%s\" are:\n",*a);
+ OptPrint();
+ exit(1);
+ }
+ return 0;
+}
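+
+/*
+** Usage sketch for the option routines (illustrative only; the "align"
+** and "out" options are invented for the example):
+**
+**     static int align;
+**     static char *out;
+**     static struct s_options opts[] = {
+**       {OPT_INT, "align", (char*)&align, "Alignment in bytes."},
+**       {OPT_STR, "out",   (char*)&out,   "Name of the output file."},
+**       {OPT_FLAG,0,0,0}
+**     };
+**
+**     OptInit(argv,opts,stderr);   /* e.g. "prog align=4 out=x.c in.y"  */
+**     n = OptNArgs();              /* number of non-option arguments: 1 */
+**     file = OptArg(0);            /* here: "in.y"                      */
+*/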
+
+int OptNArgs(){
+ int cnt = 0;
+ int dashdash = 0;
+ int i;
+ if( argv!=0 && argv[0]!=0 ){
+ for(i=1; argv[i]; i++){
+ if( dashdash || !ISOPT(argv[i]) ) cnt++;
+ if( strcmp(argv[i],"--")==0 ) dashdash = 1;
+ }
+ }
+ return cnt;
+}
+
+char *OptArg(n)
+int n;
+{
+ int i;
+ i = argindex(n);
+ return i>=0 ? argv[i] : 0;
+}
+
+void OptErr(n)
+int n;
+{
+ int i;
+ i = argindex(n);
+ if( i>=0 ) errline(i,0,errstream);
+}
+
+void OptPrint(){
+ int i;
+ int max, len;
+ max = 0;
+ for(i=0; op[i].label; i++){
+ len = strlen(op[i].label) + 1;
+ switch( op[i].type ){
+ case OPT_FLAG:
+ case OPT_FFLAG:
+ break;
+ case OPT_INT:
+ case OPT_FINT:
+ len += 9; /* length of "<integer>" */
+ break;
+ case OPT_DBL:
+ case OPT_FDBL:
+ len += 6; /* length of "<real>" */
+ break;
+ case OPT_STR:
+ case OPT_FSTR:
+ len += 8; /* length of "<string>" */
+ break;
+ }
+ if( len>max ) max = len;
+ }
+ for(i=0; op[i].label; i++){
+ switch( op[i].type ){
+ case OPT_FLAG:
+ case OPT_FFLAG:
+ fprintf(errstream," -%-*s %s\n",max,op[i].label,op[i].message);
+ break;
+ case OPT_INT:
+ case OPT_FINT:
+ fprintf(errstream," %s=<integer>%*s %s\n",op[i].label,
+ (int)(max-strlen(op[i].label)-9),"",op[i].message);
+ break;
+ case OPT_DBL:
+ case OPT_FDBL:
+ fprintf(errstream," %s=<real>%*s %s\n",op[i].label,
+ (int)(max-strlen(op[i].label)-6),"",op[i].message);
+ break;
+ case OPT_STR:
+ case OPT_FSTR:
+ fprintf(errstream," %s=<string>%*s %s\n",op[i].label,
+ (int)(max-strlen(op[i].label)-8),"",op[i].message);
+ break;
+ }
+ }
+}
+/*********************** From the file "parse.c" ****************************/
+/*
+** Input file parser for the LEMON parser generator.
+*/
+
+/* The state of the parser */
+struct pstate {
+ char *filename; /* Name of the input file */
+ int tokenlineno; /* Linenumber at which current token starts */
+ int errorcnt; /* Number of errors so far */
+ char *tokenstart; /* Text of current token */
+ struct lemon *gp; /* Global state vector */
+ enum e_state {
+ INITIALIZE,
+ WAITING_FOR_DECL_OR_RULE,
+ WAITING_FOR_DECL_KEYWORD,
+ WAITING_FOR_DECL_ARG,
+ WAITING_FOR_PRECEDENCE_SYMBOL,
+ WAITING_FOR_ARROW,
+ IN_RHS,
+ LHS_ALIAS_1,
+ LHS_ALIAS_2,
+ LHS_ALIAS_3,
+ RHS_ALIAS_1,
+ RHS_ALIAS_2,
+ PRECEDENCE_MARK_1,
+ PRECEDENCE_MARK_2,
+ RESYNC_AFTER_RULE_ERROR,
+ RESYNC_AFTER_DECL_ERROR,
+ WAITING_FOR_DESTRUCTOR_SYMBOL,
+ WAITING_FOR_DATATYPE_SYMBOL,
+ WAITING_FOR_FALLBACK_ID
+ } state; /* The state of the parser */
+ struct symbol *fallback; /* The fallback token */
+ struct symbol *lhs; /* Left-hand side of current rule */
+ char *lhsalias; /* Alias for the LHS */
+ int nrhs; /* Number of right-hand side symbols seen */
+ struct symbol *rhs[MAXRHS]; /* RHS symbols */
+ char *alias[MAXRHS]; /* Aliases for each RHS symbol (or NULL) */
+ struct rule *prevrule; /* Previous rule parsed */
+ char *declkeyword; /* Keyword of a declaration */
+ char **declargslot; /* Where the declaration argument should be put */
+ int *decllnslot; /* Where the declaration linenumber is put */
+ enum e_assoc declassoc; /* Assign this association to decl arguments */
+ int preccounter; /* Assign this precedence to decl arguments */
+ struct rule *firstrule; /* Pointer to first rule in the grammar */
+ struct rule *lastrule; /* Pointer to the most recently parsed rule */
+};
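+
+/*
+** Illustrative walk through the state machine below (a sketch, not extra
+** behaviour).  For the grammar line
+**
+**     expr(A) ::= expr(B) PLUS expr(C).  { A = B + C; }
+**
+** parseonetoken() moves through roughly these states:
+**
+**     "expr"  WAITING_FOR_DECL_OR_RULE -> WAITING_FOR_ARROW
+**     "("     WAITING_FOR_ARROW        -> LHS_ALIAS_1
+**     "A"     LHS_ALIAS_1              -> LHS_ALIAS_2
+**     ")"     LHS_ALIAS_2              -> LHS_ALIAS_3
+**     "::="   LHS_ALIAS_3              -> IN_RHS
+**     ...     RHS symbols and their aliases, staying in IN_RHS
+**     "."     IN_RHS                   -> WAITING_FOR_DECL_OR_RULE
+**
+** The "{ ... }" block that follows is attached to the rule just finished
+** (psp->prevrule).
+*/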
+
+/* Parse a single token */
+static void parseonetoken(psp)
+struct pstate *psp;
+{
+ char *x;
+ x = Strsafe(psp->tokenstart); /* Save the token permanently */
+#if 0
+ printf("%s:%d: Token=[%s] state=%d\n",psp->filename,psp->tokenlineno,
+ x,psp->state);
+#endif
+ switch( psp->state ){
+ case INITIALIZE:
+ psp->prevrule = 0;
+ psp->preccounter = 0;
+ psp->firstrule = psp->lastrule = 0;
+ psp->gp->nrule = 0;
+ /* Fall thru to next case */
+ case WAITING_FOR_DECL_OR_RULE:
+ if( x[0]=='%' ){
+ psp->state = WAITING_FOR_DECL_KEYWORD;
+ }else if( islower(x[0]) ){
+ psp->lhs = Symbol_new(x);
+ psp->nrhs = 0;
+ psp->lhsalias = 0;
+ psp->state = WAITING_FOR_ARROW;
+ }else if( x[0]=='{' ){
+ if( psp->prevrule==0 ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+"There is not prior rule opon which to attach the code \
+fragment which begins on this line.");
+ psp->errorcnt++;
+ }else if( psp->prevrule->code!=0 ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+"Code fragment beginning on this line is not the first \
+to follow the previous rule.");
+ psp->errorcnt++;
+ }else{
+ psp->prevrule->line = psp->tokenlineno;
+ psp->prevrule->code = &x[1];
+ }
+ }else if( x[0]=='[' ){
+ psp->state = PRECEDENCE_MARK_1;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Token \"%s\" should be either \"%%\" or a nonterminal name.",
+ x);
+ psp->errorcnt++;
+ }
+ break;
+ case PRECEDENCE_MARK_1:
+ if( !isupper(x[0]) ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "The precedence symbol must be a terminal.");
+ psp->errorcnt++;
+ }else if( psp->prevrule==0 ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "There is no prior rule to assign precedence \"[%s]\".",x);
+ psp->errorcnt++;
+ }else if( psp->prevrule->precsym!=0 ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+"Precedence mark on this line is not the first \
+to follow the previous rule.");
+ psp->errorcnt++;
+ }else{
+ psp->prevrule->precsym = Symbol_new(x);
+ }
+ psp->state = PRECEDENCE_MARK_2;
+ break;
+ case PRECEDENCE_MARK_2:
+ if( x[0]!=']' ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Missing \"]\" on precedence mark.");
+ psp->errorcnt++;
+ }
+ psp->state = WAITING_FOR_DECL_OR_RULE;
+ break;
+ case WAITING_FOR_ARROW:
+ if( x[0]==':' && x[1]==':' && x[2]=='=' ){
+ psp->state = IN_RHS;
+ }else if( x[0]=='(' ){
+ psp->state = LHS_ALIAS_1;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Expected to see a \":\" following the LHS symbol \"%s\".",
+ psp->lhs->name);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }
+ break;
+ case LHS_ALIAS_1:
+ if( isalpha(x[0]) ){
+ psp->lhsalias = x;
+ psp->state = LHS_ALIAS_2;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "\"%s\" is not a valid alias for the LHS \"%s\"\n",
+ x,psp->lhs->name);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }
+ break;
+ case LHS_ALIAS_2:
+ if( x[0]==')' ){
+ psp->state = LHS_ALIAS_3;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Missing \")\" following LHS alias name \"%s\".",psp->lhsalias);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }
+ break;
+ case LHS_ALIAS_3:
+ if( x[0]==':' && x[1]==':' && x[2]=='=' ){
+ psp->state = IN_RHS;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Missing \"->\" following: \"%s(%s)\".",
+ psp->lhs->name,psp->lhsalias);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }
+ break;
+ case IN_RHS:
+ if( x[0]=='.' ){
+ struct rule *rp;
+ rp = (struct rule *)malloc( sizeof(struct rule) +
+ sizeof(struct symbol*)*psp->nrhs + sizeof(char*)*psp->nrhs );
+ if( rp==0 ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Can't allocate enough memory for this rule.");
+ psp->errorcnt++;
+ psp->prevrule = 0;
+ }else{
+ int i;
+ rp->ruleline = psp->tokenlineno;
+ rp->rhs = (struct symbol**)&rp[1];
+ rp->rhsalias = (char**)&(rp->rhs[psp->nrhs]);
+ for(i=0; i<psp->nrhs; i++){
+ rp->rhs[i] = psp->rhs[i];
+ rp->rhsalias[i] = psp->alias[i];
+ }
+ rp->lhs = psp->lhs;
+ rp->lhsalias = psp->lhsalias;
+ rp->nrhs = psp->nrhs;
+ rp->code = 0;
+ rp->precsym = 0;
+ rp->index = psp->gp->nrule++;
+ rp->nextlhs = rp->lhs->rule;
+ rp->lhs->rule = rp;
+ rp->next = 0;
+ if( psp->firstrule==0 ){
+ psp->firstrule = psp->lastrule = rp;
+ }else{
+ psp->lastrule->next = rp;
+ psp->lastrule = rp;
+ }
+ psp->prevrule = rp;
+ }
+ psp->state = WAITING_FOR_DECL_OR_RULE;
+ }else if( isalpha(x[0]) ){
+ if( psp->nrhs>=MAXRHS ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+        "Too many symbols on RHS of rule beginning at \"%s\".",
+ x);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }else{
+ psp->rhs[psp->nrhs] = Symbol_new(x);
+ psp->alias[psp->nrhs] = 0;
+ psp->nrhs++;
+ }
+ }else if( x[0]=='(' && psp->nrhs>0 ){
+ psp->state = RHS_ALIAS_1;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Illegal character on RHS of rule: \"%s\".",x);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }
+ break;
+ case RHS_ALIAS_1:
+ if( isalpha(x[0]) ){
+ psp->alias[psp->nrhs-1] = x;
+ psp->state = RHS_ALIAS_2;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "\"%s\" is not a valid alias for the RHS symbol \"%s\"\n",
+ x,psp->rhs[psp->nrhs-1]->name);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }
+ break;
+ case RHS_ALIAS_2:
+ if( x[0]==')' ){
+ psp->state = IN_RHS;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Missing \")\" following LHS alias name \"%s\".",psp->lhsalias);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }
+ break;
+ case WAITING_FOR_DECL_KEYWORD:
+ if( isalpha(x[0]) ){
+ psp->declkeyword = x;
+ psp->declargslot = 0;
+ psp->decllnslot = 0;
+ psp->state = WAITING_FOR_DECL_ARG;
+ if( strcmp(x,"name")==0 ){
+ psp->declargslot = &(psp->gp->name);
+ }else if( strcmp(x,"include")==0 ){
+ psp->declargslot = &(psp->gp->include);
+ psp->decllnslot = &psp->gp->includeln;
+ }else if( strcmp(x,"code")==0 ){
+ psp->declargslot = &(psp->gp->extracode);
+ psp->decllnslot = &psp->gp->extracodeln;
+ }else if( strcmp(x,"token_destructor")==0 ){
+ psp->declargslot = &psp->gp->tokendest;
+ psp->decllnslot = &psp->gp->tokendestln;
+ }else if( strcmp(x,"default_destructor")==0 ){
+ psp->declargslot = &psp->gp->vardest;
+ psp->decllnslot = &psp->gp->vardestln;
+ }else if( strcmp(x,"token_prefix")==0 ){
+ psp->declargslot = &psp->gp->tokenprefix;
+ }else if( strcmp(x,"syntax_error")==0 ){
+ psp->declargslot = &(psp->gp->error);
+ psp->decllnslot = &psp->gp->errorln;
+ }else if( strcmp(x,"parse_accept")==0 ){
+ psp->declargslot = &(psp->gp->accept);
+ psp->decllnslot = &psp->gp->acceptln;
+ }else if( strcmp(x,"parse_failure")==0 ){
+ psp->declargslot = &(psp->gp->failure);
+ psp->decllnslot = &psp->gp->failureln;
+ }else if( strcmp(x,"stack_overflow")==0 ){
+ psp->declargslot = &(psp->gp->overflow);
+ psp->decllnslot = &psp->gp->overflowln;
+ }else if( strcmp(x,"extra_argument")==0 ){
+ psp->declargslot = &(psp->gp->arg);
+ }else if( strcmp(x,"token_type")==0 ){
+ psp->declargslot = &(psp->gp->tokentype);
+ }else if( strcmp(x,"default_type")==0 ){
+ psp->declargslot = &(psp->gp->vartype);
+ }else if( strcmp(x,"stack_size")==0 ){
+ psp->declargslot = &(psp->gp->stacksize);
+ }else if( strcmp(x,"start_symbol")==0 ){
+ psp->declargslot = &(psp->gp->start);
+ }else if( strcmp(x,"left")==0 ){
+ psp->preccounter++;
+ psp->declassoc = LEFT;
+ psp->state = WAITING_FOR_PRECEDENCE_SYMBOL;
+ }else if( strcmp(x,"right")==0 ){
+ psp->preccounter++;
+ psp->declassoc = RIGHT;
+ psp->state = WAITING_FOR_PRECEDENCE_SYMBOL;
+ }else if( strcmp(x,"nonassoc")==0 ){
+ psp->preccounter++;
+ psp->declassoc = NONE;
+ psp->state = WAITING_FOR_PRECEDENCE_SYMBOL;
+ }else if( strcmp(x,"destructor")==0 ){
+ psp->state = WAITING_FOR_DESTRUCTOR_SYMBOL;
+ }else if( strcmp(x,"type")==0 ){
+ psp->state = WAITING_FOR_DATATYPE_SYMBOL;
+ }else if( strcmp(x,"fallback")==0 ){
+ psp->fallback = 0;
+ psp->state = WAITING_FOR_FALLBACK_ID;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Unknown declaration keyword: \"%%%s\".",x);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_DECL_ERROR;
+ }
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Illegal declaration keyword: \"%s\".",x);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_DECL_ERROR;
+ }
+ break;
+ case WAITING_FOR_DESTRUCTOR_SYMBOL:
+ if( !isalpha(x[0]) ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Symbol name missing after %destructor keyword");
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_DECL_ERROR;
+ }else{
+ struct symbol *sp = Symbol_new(x);
+ psp->declargslot = &sp->destructor;
+ psp->decllnslot = &sp->destructorln;
+ psp->state = WAITING_FOR_DECL_ARG;
+ }
+ break;
+ case WAITING_FOR_DATATYPE_SYMBOL:
+ if( !isalpha(x[0]) ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+        "Symbol name missing after %type keyword");
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_DECL_ERROR;
+ }else{
+ struct symbol *sp = Symbol_new(x);
+ psp->declargslot = &sp->datatype;
+ psp->decllnslot = 0;
+ psp->state = WAITING_FOR_DECL_ARG;
+ }
+ break;
+ case WAITING_FOR_PRECEDENCE_SYMBOL:
+ if( x[0]=='.' ){
+ psp->state = WAITING_FOR_DECL_OR_RULE;
+ }else if( isupper(x[0]) ){
+ struct symbol *sp;
+ sp = Symbol_new(x);
+ if( sp->prec>=0 ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+            "Symbol \"%s\" has already been given a precedence.",x);
+ psp->errorcnt++;
+ }else{
+ sp->prec = psp->preccounter;
+ sp->assoc = psp->declassoc;
+ }
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Can't assign a precedence to \"%s\".",x);
+ psp->errorcnt++;
+ }
+ break;
+ case WAITING_FOR_DECL_ARG:
+ if( (x[0]=='{' || x[0]=='\"' || isalnum(x[0])) ){
+ if( *(psp->declargslot)!=0 ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "The argument \"%s\" to declaration \"%%%s\" is not the first.",
+ x[0]=='\"' ? &x[1] : x,psp->declkeyword);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_DECL_ERROR;
+ }else{
+ *(psp->declargslot) = (x[0]=='\"' || x[0]=='{') ? &x[1] : x;
+ if( psp->decllnslot ) *psp->decllnslot = psp->tokenlineno;
+ psp->state = WAITING_FOR_DECL_OR_RULE;
+ }
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Illegal argument to %%%s: %s",psp->declkeyword,x);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_DECL_ERROR;
+ }
+ break;
+ case WAITING_FOR_FALLBACK_ID:
+ if( x[0]=='.' ){
+ psp->state = WAITING_FOR_DECL_OR_RULE;
+ }else if( !isupper(x[0]) ){
+ ErrorMsg(psp->filename, psp->tokenlineno,
+ "%%fallback argument \"%s\" should be a token", x);
+ psp->errorcnt++;
+ }else{
+ struct symbol *sp = Symbol_new(x);
+ if( psp->fallback==0 ){
+ psp->fallback = sp;
+ }else if( sp->fallback ){
+ ErrorMsg(psp->filename, psp->tokenlineno,
+ "More than one fallback assigned to token %s", x);
+ psp->errorcnt++;
+ }else{
+ sp->fallback = psp->fallback;
+ psp->gp->has_fallback = 1;
+ }
+ }
+ break;
+ case RESYNC_AFTER_RULE_ERROR:
+/* if( x[0]=='.' ) psp->state = WAITING_FOR_DECL_OR_RULE;
+** break; */
+ case RESYNC_AFTER_DECL_ERROR:
+ if( x[0]=='.' ) psp->state = WAITING_FOR_DECL_OR_RULE;
+ if( x[0]=='%' ) psp->state = WAITING_FOR_DECL_KEYWORD;
+ break;
+ }
+}
+
+/* In spite of its name, this function is really a scanner.  It reads
+** in the entire input file (all at once) and then tokenizes it.  Each
+** token is passed to the function "parseonetoken" which builds all
+** the appropriate data structures in the global state vector "gp".
+*/
+void Parse(gp)
+struct lemon *gp;
+{
+ struct pstate ps;
+ FILE *fp;
+ char *filebuf;
+ int filesize;
+ int lineno;
+ int c;
+ char *cp, *nextcp;
+ int startline = 0;
+
+ ps.gp = gp;
+ ps.filename = gp->filename;
+ ps.errorcnt = 0;
+ ps.state = INITIALIZE;
+
+ /* Begin by reading the input file */
+ fp = fopen(ps.filename,"rb");
+ if( fp==0 ){
+ ErrorMsg(ps.filename,0,"Can't open this file for reading.");
+ gp->errorcnt++;
+ return;
+ }
+ fseek(fp,0,2);
+ filesize = ftell(fp);
+ rewind(fp);
+ filebuf = (char *)malloc( filesize+1 );
+ if( filebuf==0 ){
+    ErrorMsg(ps.filename,0,"Can't allocate %d bytes of memory to hold this file.",
+ filesize+1);
+ gp->errorcnt++;
+ return;
+ }
+ if( fread(filebuf,1,filesize,fp)!=filesize ){
+ ErrorMsg(ps.filename,0,"Can't read in all %d bytes of this file.",
+ filesize);
+ free(filebuf);
+ gp->errorcnt++;
+ return;
+ }
+ fclose(fp);
+ filebuf[filesize] = 0;
+
+ /* Now scan the text of the input file */
+ lineno = 1;
+ for(cp=filebuf; (c= *cp)!=0; ){
+ if( c=='\n' ) lineno++; /* Keep track of the line number */
+ if( isspace(c) ){ cp++; continue; } /* Skip all white space */
+ if( c=='/' && cp[1]=='/' ){ /* Skip C++ style comments */
+ cp+=2;
+ while( (c= *cp)!=0 && c!='\n' ) cp++;
+ continue;
+ }
+ if( c=='/' && cp[1]=='*' ){ /* Skip C style comments */
+ cp+=2;
+ while( (c= *cp)!=0 && (c!='/' || cp[-1]!='*') ){
+ if( c=='\n' ) lineno++;
+ cp++;
+ }
+ if( c ) cp++;
+ continue;
+ }
+ ps.tokenstart = cp; /* Mark the beginning of the token */
+ ps.tokenlineno = lineno; /* Linenumber on which token begins */
+ if( c=='\"' ){ /* String literals */
+ cp++;
+ while( (c= *cp)!=0 && c!='\"' ){
+ if( c=='\n' ) lineno++;
+ cp++;
+ }
+ if( c==0 ){
+ ErrorMsg(ps.filename,startline,
+"String starting on this line is not terminated before the end of the file.");
+ ps.errorcnt++;
+ nextcp = cp;
+ }else{
+ nextcp = cp+1;
+ }
+ }else if( c=='{' ){ /* A block of C code */
+ int level;
+ cp++;
+ for(level=1; (c= *cp)!=0 && (level>1 || c!='}'); cp++){
+ if( c=='\n' ) lineno++;
+ else if( c=='{' ) level++;
+ else if( c=='}' ) level--;
+ else if( c=='/' && cp[1]=='*' ){ /* Skip comments */
+ int prevc;
+ cp = &cp[2];
+ prevc = 0;
+ while( (c= *cp)!=0 && (c!='/' || prevc!='*') ){
+ if( c=='\n' ) lineno++;
+ prevc = c;
+ cp++;
+ }
+ }else if( c=='/' && cp[1]=='/' ){ /* Skip C++ style comments too */
+ cp = &cp[2];
+ while( (c= *cp)!=0 && c!='\n' ) cp++;
+ if( c ) lineno++;
+        }else if( c=='\'' || c=='\"' ){    /* String and character literals */
+ int startchar, prevc;
+ startchar = c;
+ prevc = 0;
+ for(cp++; (c= *cp)!=0 && (c!=startchar || prevc=='\\'); cp++){
+ if( c=='\n' ) lineno++;
+ if( prevc=='\\' ) prevc = 0;
+ else prevc = c;
+ }
+ }
+ }
+ if( c==0 ){
+ ErrorMsg(ps.filename,ps.tokenlineno,
+"C code starting on this line is not terminated before the end of the file.");
+ ps.errorcnt++;
+ nextcp = cp;
+ }else{
+ nextcp = cp+1;
+ }
+ }else if( isalnum(c) ){ /* Identifiers */
+ while( (c= *cp)!=0 && (isalnum(c) || c=='_') ) cp++;
+ nextcp = cp;
+ }else if( c==':' && cp[1]==':' && cp[2]=='=' ){ /* The operator "::=" */
+ cp += 3;
+ nextcp = cp;
+ }else{ /* All other (one character) operators */
+ cp++;
+ nextcp = cp;
+ }
+ c = *cp;
+ *cp = 0; /* Null terminate the token */
+ parseonetoken(&ps); /* Parse the token */
+ *cp = c; /* Restore the buffer */
+ cp = nextcp;
+ }
+ free(filebuf); /* Release the buffer after parsing */
+ gp->rule = ps.firstrule;
+ gp->errorcnt = ps.errorcnt;
+}
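+
+/*
+** Summary of the token forms produced by the scanner above (a restatement
+** for reference, not additional behaviour):
+**
+**     "..."      a string literal (the token text begins at the open quote)
+**     { ... }    a brace-balanced block of C code (begins at the open brace)
+**     ident_1    an identifier built from alphanumerics and '_'
+**     ::=        the rule-definition operator
+**     (other)    any other single character, such as '.', '%', '(' or '['
+**
+** C and C++ style comments are skipped before tokens are formed.
+*/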
+/*************************** From the file "plink.c" *********************/
+/*
+** Routines processing configuration follow-set propagation links
+** in the LEMON parser generator.
+*/
+static struct plink *plink_freelist = 0;
+
+/* Allocate a new plink */
+struct plink *Plink_new(){
+ struct plink *new;
+
+ if( plink_freelist==0 ){
+ int i;
+ int amt = 100;
+ plink_freelist = (struct plink *)malloc( sizeof(struct plink)*amt );
+ if( plink_freelist==0 ){
+ fprintf(stderr,
+ "Unable to allocate memory for a new follow-set propagation link.\n");
+ exit(1);
+ }
+ for(i=0; i<amt-1; i++) plink_freelist[i].next = &plink_freelist[i+1];
+ plink_freelist[amt-1].next = 0;
+ }
+ new = plink_freelist;
+ plink_freelist = plink_freelist->next;
+ return new;
+}
+
+/* Add a plink to a plink list */
+void Plink_add(plpp,cfp)
+struct plink **plpp;
+struct config *cfp;
+{
+ struct plink *new;
+ new = Plink_new();
+ new->next = *plpp;
+ *plpp = new;
+ new->cfp = cfp;
+}
+
+/* Transfer every plink on the list "from" to the list "to" */
+void Plink_copy(to,from)
+struct plink **to;
+struct plink *from;
+{
+ struct plink *nextpl;
+ while( from ){
+ nextpl = from->next;
+ from->next = *to;
+ *to = from;
+ from = nextpl;
+ }
+}
+
+/* Delete every plink on the list */
+void Plink_delete(plp)
+struct plink *plp;
+{
+ struct plink *nextpl;
+
+ while( plp ){
+ nextpl = plp->next;
+ plp->next = plink_freelist;
+ plink_freelist = plp;
+ plp = nextpl;
+ }
+}
+/*********************** From the file "report.c" **************************/
+/*
+** Procedures for generating reports and tables in the LEMON parser generator.
+*/
+
+/* Generate a filename with the given suffix. Space to hold the
+** name comes from malloc() and must be freed by the calling
+** function.
+*/
+PRIVATE char *file_makename(lemp,suffix)
+struct lemon *lemp;
+char *suffix;
+{
+ char *name;
+ char *cp;
+
+ name = malloc( strlen(lemp->filename) + strlen(suffix) + 5 );
+ if( name==0 ){
+ fprintf(stderr,"Can't allocate space for a filename.\n");
+ exit(1);
+ }
+ strcpy(name,lemp->filename);
+ cp = strrchr(name,'.');
+ if( cp ) *cp = 0;
+ strcat(name,suffix);
+ return name;
+}
+
+/* Open a file with a name based on the name of the input file,
+** but with a different (specified) suffix, and return a pointer
+** to the stream */
+PRIVATE FILE *file_open(lemp,suffix,mode)
+struct lemon *lemp;
+char *suffix;
+char *mode;
+{
+ FILE *fp;
+
+ if( lemp->outname ) free(lemp->outname);
+ lemp->outname = file_makename(lemp, suffix);
+ fp = fopen(lemp->outname,mode);
+ if( fp==0 && *mode=='w' ){
+ fprintf(stderr,"Can't open file \"%s\".\n",lemp->outname);
+ lemp->errorcnt++;
+ return 0;
+ }
+ return fp;
+}
+
+/* Duplicate the input file without comments and without actions
+** on rules */
+void Reprint(lemp)
+struct lemon *lemp;
+{
+ struct rule *rp;
+ struct symbol *sp;
+ int i, j, maxlen, len, ncolumns, skip;
+ printf("// Reprint of input file \"%s\".\n// Symbols:\n",lemp->filename);
+ maxlen = 10;
+ for(i=0; i<lemp->nsymbol; i++){
+ sp = lemp->symbols[i];
+ len = strlen(sp->name);
+ if( len>maxlen ) maxlen = len;
+ }
+ ncolumns = 76/(maxlen+5);
+ if( ncolumns<1 ) ncolumns = 1;
+ skip = (lemp->nsymbol + ncolumns - 1)/ncolumns;
+ for(i=0; i<skip; i++){
+ printf("//");
+ for(j=i; j<lemp->nsymbol; j+=skip){
+ sp = lemp->symbols[j];
+ assert( sp->index==j );
+ printf(" %3d %-*.*s",j,maxlen,maxlen,sp->name);
+ }
+ printf("\n");
+ }
+ for(rp=lemp->rule; rp; rp=rp->next){
+ printf("%s",rp->lhs->name);
+/* if( rp->lhsalias ) printf("(%s)",rp->lhsalias); */
+ printf(" ::=");
+ for(i=0; i<rp->nrhs; i++){
+ printf(" %s",rp->rhs[i]->name);
+/* if( rp->rhsalias[i] ) printf("(%s)",rp->rhsalias[i]); */
+ }
+ printf(".");
+ if( rp->precsym ) printf(" [%s]",rp->precsym->name);
+/* if( rp->code ) printf("\n %s",rp->code); */
+ printf("\n");
+ }
+}
+
+void ConfigPrint(fp,cfp)
+FILE *fp;
+struct config *cfp;
+{
+ struct rule *rp;
+ int i;
+ rp = cfp->rp;
+ fprintf(fp,"%s ::=",rp->lhs->name);
+ for(i=0; i<=rp->nrhs; i++){
+ if( i==cfp->dot ) fprintf(fp," *");
+ if( i==rp->nrhs ) break;
+ fprintf(fp," %s",rp->rhs[i]->name);
+ }
+}
+
+/* #define TEST */
+#ifdef TEST
+/* Print a set */
+PRIVATE void SetPrint(out,set,lemp)
+FILE *out;
+char *set;
+struct lemon *lemp;
+{
+ int i;
+ char *spacer;
+ spacer = "";
+ fprintf(out,"%12s[","");
+ for(i=0; i<lemp->nterminal; i++){
+ if( SetFind(set,i) ){
+ fprintf(out,"%s%s",spacer,lemp->symbols[i]->name);
+ spacer = " ";
+ }
+ }
+ fprintf(out,"]\n");
+}
+
+/* Print a plink chain */
+PRIVATE void PlinkPrint(out,plp,tag)
+FILE *out;
+struct plink *plp;
+char *tag;
+{
+ while( plp ){
+ fprintf(out,"%12s%s (state %2d) ","",tag,plp->cfp->stp->index);
+ ConfigPrint(out,plp->cfp);
+ fprintf(out,"\n");
+ plp = plp->next;
+ }
+}
+#endif
+
+/* Print an action to the given file descriptor. Return FALSE if
+** nothing was actually printed.
+*/
+int PrintAction(struct action *ap, FILE *fp, int indent){
+ int result = 1;
+ switch( ap->type ){
+ case SHIFT:
+ fprintf(fp,"%*s shift %d",indent,ap->sp->name,ap->x.stp->index);
+ break;
+ case REDUCE:
+ fprintf(fp,"%*s reduce %d",indent,ap->sp->name,ap->x.rp->index);
+ break;
+ case ACCEPT:
+ fprintf(fp,"%*s accept",indent,ap->sp->name);
+ break;
+ case ERROR:
+ fprintf(fp,"%*s error",indent,ap->sp->name);
+ break;
+ case CONFLICT:
+ fprintf(fp,"%*s reduce %-3d ** Parsing conflict **",
+ indent,ap->sp->name,ap->x.rp->index);
+ break;
+ case SH_RESOLVED:
+ case RD_RESOLVED:
+ case NOT_USED:
+ result = 0;
+ break;
+ }
+ return result;
+}
+
+/* Generate the "y.output" log file */
+void ReportOutput(lemp)
+struct lemon *lemp;
+{
+ int i;
+ struct state *stp;
+ struct config *cfp;
+ struct action *ap;
+ FILE *fp;
+
+ fp = file_open(lemp,".out","w");
+ if( fp==0 ) return;
+ fprintf(fp," \b");
+ for(i=0; i<lemp->nstate; i++){
+ stp = lemp->sorted[i];
+ fprintf(fp,"State %d:\n",stp->index);
+ if( lemp->basisflag ) cfp=stp->bp;
+ else cfp=stp->cfp;
+ while( cfp ){
+ char buf[20];
+ if( cfp->dot==cfp->rp->nrhs ){
+ sprintf(buf,"(%d)",cfp->rp->index);
+ fprintf(fp," %5s ",buf);
+ }else{
+ fprintf(fp," ");
+ }
+ ConfigPrint(fp,cfp);
+ fprintf(fp,"\n");
+#ifdef TEST
+ SetPrint(fp,cfp->fws,lemp);
+ PlinkPrint(fp,cfp->fplp,"To ");
+ PlinkPrint(fp,cfp->bplp,"From");
+#endif
+ if( lemp->basisflag ) cfp=cfp->bp;
+ else cfp=cfp->next;
+ }
+ fprintf(fp,"\n");
+ for(ap=stp->ap; ap; ap=ap->next){
+ if( PrintAction(ap,fp,30) ) fprintf(fp,"\n");
+ }
+ fprintf(fp,"\n");
+ }
+ fclose(fp);
+ return;
+}
+
+/* Search for the file "name" which is in the same directory as
+** the executable */
+PRIVATE char *pathsearch(argv0,name,modemask)
+char *argv0;
+char *name;
+int modemask;
+{
+ char *pathlist;
+ char *path,*cp;
+ char c;
+ extern int access();
+
+#ifdef __WIN32__
+ cp = strrchr(argv0,'\\');
+#else
+ cp = strrchr(argv0,'/');
+#endif
+ if( cp ){
+ c = *cp;
+ *cp = 0;
+ path = (char *)malloc( strlen(argv0) + strlen(name) + 2 );
+ if( path ) sprintf(path,"%s/%s",argv0,name);
+ *cp = c;
+ }else{
+ extern char *getenv();
+ pathlist = getenv("PATH");
+ if( pathlist==0 ) pathlist = ".:/bin:/usr/bin";
+ path = (char *)malloc( strlen(pathlist)+strlen(name)+2 );
+ if( path!=0 ){
+ while( *pathlist ){
+ cp = strchr(pathlist,':');
+ if( cp==0 ) cp = &pathlist[strlen(pathlist)];
+ c = *cp;
+ *cp = 0;
+ sprintf(path,"%s/%s",pathlist,name);
+ *cp = c;
+ if( c==0 ) pathlist = "";
+ else pathlist = &cp[1];
+ if( access(path,modemask)==0 ) break;
+ }
+ }
+ }
+ return path;
+}
+
+/* Given an action, compute the integer value for that action
+** which is to be put in the action table of the generated machine.
+** Return negative if no action should be generated.
+*/
+PRIVATE int compute_action(lemp,ap)
+struct lemon *lemp;
+struct action *ap;
+{
+ int act;
+ switch( ap->type ){
+ case SHIFT: act = ap->x.stp->index; break;
+ case REDUCE: act = ap->x.rp->index + lemp->nstate; break;
+ case ERROR: act = lemp->nstate + lemp->nrule; break;
+ case ACCEPT: act = lemp->nstate + lemp->nrule + 1; break;
+ default: act = -1; break;
+ }
+ return act;
+}
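+
+/*
+** The resulting numbering, for a grammar with N states and R rules (a
+** restatement of the switch above):
+**
+**     0     .. N-1      shift and move to that state
+**     N     .. N+R-1    reduce by rule number (act - N)
+**     N+R               error
+**     N+R+1             accept
+**
+** A negative value means that no table entry should be generated.
+*/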
+
+#define LINESIZE 1000
+/* The next cluster of routines are for reading the template file
+** and writing the results to the generated parser */
+/* The first function transfers data from "in" to "out" until
+** a line is seen which begins with "%%". The line number is
+** tracked.
+**
+** if name!=0, then any word that begins with "Parse" is changed to
+** begin with *name instead.
+*/
+PRIVATE void tplt_xfer(name,in,out,lineno)
+char *name;
+FILE *in;
+FILE *out;
+int *lineno;
+{
+ int i, iStart;
+ char line[LINESIZE];
+ while( fgets(line,LINESIZE,in) && (line[0]!='%' || line[1]!='%') ){
+ (*lineno)++;
+ iStart = 0;
+ if( name ){
+ for(i=0; line[i]; i++){
+ if( line[i]=='P' && strncmp(&line[i],"Parse",5)==0
+ && (i==0 || !isalpha(line[i-1]))
+ ){
+ if( i>iStart ) fprintf(out,"%.*s",i-iStart,&line[iStart]);
+ fprintf(out,"%s",name);
+ i += 4;
+ iStart = i+1;
+ }
+ }
+ }
+ fprintf(out,"%s",&line[iStart]);
+ }
+}
+
+/* The next function finds the template file and opens it, returning
+** a pointer to the opened file. */
+PRIVATE FILE *tplt_open(lemp)
+struct lemon *lemp;
+{
+ static char templatename[] = "lempar.c";
+ char buf[1000];
+ FILE *in;
+ char *tpltname;
+ char *cp;
+
+ cp = strrchr(lemp->filename,'.');
+ if( cp ){
+ sprintf(buf,"%.*s.lt",(int)(cp-lemp->filename),lemp->filename);
+ }else{
+ sprintf(buf,"%s.lt",lemp->filename);
+ }
+ if( access(buf,004)==0 ){
+ tpltname = buf;
+ }else if( access(templatename,004)==0 ){
+ tpltname = templatename;
+ }else{
+ tpltname = pathsearch(lemp->argv0,templatename,0);
+ }
+ if( tpltname==0 ){
+ fprintf(stderr,"Can't find the parser driver template file \"%s\".\n",
+ templatename);
+ lemp->errorcnt++;
+ return 0;
+ }
+ in = fopen(tpltname,"r");
+ if( in==0 ){
+ fprintf(stderr,"Can't open the template file \"%s\".\n",templatename);
+ lemp->errorcnt++;
+ return 0;
+ }
+ return in;
+}
+
+/* Print a string to the file and keep the linenumber up to date */
+PRIVATE void tplt_print(out,lemp,str,strln,lineno)
+FILE *out;
+struct lemon *lemp;
+char *str;
+int strln;
+int *lineno;
+{
+ if( str==0 ) return;
+ fprintf(out,"#line %d \"%s\"\n",strln,lemp->filename); (*lineno)++;
+ while( *str ){
+ if( *str=='\n' ) (*lineno)++;
+ putc(*str,out);
+ str++;
+ }
+ fprintf(out,"\n#line %d \"%s\"\n",*lineno+2,lemp->outname); (*lineno)+=2;
+ return;
+}
+
+/*
+** The following routine emits code for the destructor for the
+** symbol sp
+*/
+void emit_destructor_code(out,sp,lemp,lineno)
+FILE *out;
+struct symbol *sp;
+struct lemon *lemp;
+int *lineno;
+{
+ char *cp = 0;
+
+ int linecnt = 0;
+ if( sp->type==TERMINAL ){
+ cp = lemp->tokendest;
+ if( cp==0 ) return;
+ fprintf(out,"#line %d \"%s\"\n{",lemp->tokendestln,lemp->filename);
+ }else if( sp->destructor ){
+ cp = sp->destructor;
+ fprintf(out,"#line %d \"%s\"\n{",sp->destructorln,lemp->filename);
+ }else if( lemp->vardest ){
+ cp = lemp->vardest;
+ if( cp==0 ) return;
+ fprintf(out,"#line %d \"%s\"\n{",lemp->vardestln,lemp->filename);
+ }else{
+ assert( 0 ); /* Cannot happen */
+ }
+ for(; *cp; cp++){
+ if( *cp=='$' && cp[1]=='$' ){
+ fprintf(out,"(yypminor->yy%d)",sp->dtnum);
+ cp++;
+ continue;
+ }
+ if( *cp=='\n' ) linecnt++;
+ fputc(*cp,out);
+ }
+ (*lineno) += 3 + linecnt;
+ fprintf(out,"}\n#line %d \"%s\"\n",*lineno,lemp->outname);
+ return;
+}
+
+/*
+** Return TRUE (non-zero) if the given symbol has a destructor.
+*/
+int has_destructor(sp, lemp)
+struct symbol *sp;
+struct lemon *lemp;
+{
+ int ret;
+ if( sp->type==TERMINAL ){
+ ret = lemp->tokendest!=0;
+ }else{
+ ret = lemp->vardest!=0 || sp->destructor!=0;
+ }
+ return ret;
+}
+
+/*
+** Generate code which executes when the rule "rp" is reduced. Write
+** the code to "out". Make sure lineno stays up-to-date.
+*/
+PRIVATE void emit_code(out,rp,lemp,lineno)
+FILE *out;
+struct rule *rp;
+struct lemon *lemp;
+int *lineno;
+{
+ char *cp, *xp;
+ int linecnt = 0;
+ int i;
+ char lhsused = 0; /* True if the LHS element has been used */
+ char used[MAXRHS]; /* True for each RHS element which is used */
+
+ for(i=0; i<rp->nrhs; i++) used[i] = 0;
+ lhsused = 0;
+
+ /* Generate code to do the reduce action */
+ if( rp->code ){
+ fprintf(out,"#line %d \"%s\"\n{",rp->line,lemp->filename);
+ for(cp=rp->code; *cp; cp++){
+ if( isalpha(*cp) && (cp==rp->code || (!isalnum(cp[-1]) && cp[-1]!='_')) ){
+ char saved;
+ for(xp= &cp[1]; isalnum(*xp) || *xp=='_'; xp++);
+ saved = *xp;
+ *xp = 0;
+ if( rp->lhsalias && strcmp(cp,rp->lhsalias)==0 ){
+ fprintf(out,"yygotominor.yy%d",rp->lhs->dtnum);
+ cp = xp;
+ lhsused = 1;
+ }else{
+ for(i=0; i<rp->nrhs; i++){
+ if( rp->rhsalias[i] && strcmp(cp,rp->rhsalias[i])==0 ){
+ fprintf(out,"yymsp[%d].minor.yy%d",i-rp->nrhs+1,rp->rhs[i]->dtnum);
+ cp = xp;
+ used[i] = 1;
+ break;
+ }
+ }
+ }
+ *xp = saved;
+ }
+ if( *cp=='\n' ) linecnt++;
+ fputc(*cp,out);
+ } /* End loop */
+ (*lineno) += 3 + linecnt;
+ fprintf(out,"}\n#line %d \"%s\"\n",*lineno,lemp->outname);
+ } /* End if( rp->code ) */
+
+ /* Check to make sure the LHS has been used */
+ if( rp->lhsalias && !lhsused ){
+ ErrorMsg(lemp->filename,rp->ruleline,
+ "Label \"%s\" for \"%s(%s)\" is never used.",
+ rp->lhsalias,rp->lhs->name,rp->lhsalias);
+ lemp->errorcnt++;
+ }
+
+ /* Generate destructor code for RHS symbols which are not used in the
+ ** reduce code */
+ for(i=0; i<rp->nrhs; i++){
+ if( rp->rhsalias[i] && !used[i] ){
+ ErrorMsg(lemp->filename,rp->ruleline,
+ "Label %s for \"%s(%s)\" is never used.",
+ rp->rhsalias[i],rp->rhs[i]->name,rp->rhsalias[i]);
+ lemp->errorcnt++;
+ }else if( rp->rhsalias[i]==0 ){
+ if( has_destructor(rp->rhs[i],lemp) ){
+ fprintf(out," yy_destructor(%d,&yymsp[%d].minor);\n",
+ rp->rhs[i]->index,i-rp->nrhs+1); (*lineno)++;
+ }else{
+ fprintf(out," /* No destructor defined for %s */\n",
+ rp->rhs[i]->name);
+ (*lineno)++;
+ }
+ }
+ }
+ return;
+}
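+
+/*
+** Example of the substitution performed above (illustrative).  For the
+** rule
+**
+**     expr(A) ::= expr(B) PLUS expr(C).  { A = B + C; }
+**
+** the reduce code that gets emitted is, apart from the #line directives,
+**
+**     { yygotominor.yyN = yymsp[-2].minor.yyN + yymsp[0].minor.yyN; }
+**
+** where N is the ".dtnum" assigned to the "expr" datatype and the yymsp[]
+** index for RHS position i is i-nrhs+1, counting back from the top of the
+** parser stack.
+*/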
+
+/*
+** Print the definition of the union used for the parser's data stack.
+** This union contains fields for every possible data type for tokens
+** and nonterminals. In the process of computing and printing this
+** union, also set the ".dtnum" field of every terminal and nonterminal
+** symbol.
+*/
+void print_stack_union(out,lemp,plineno,mhflag)
+FILE *out; /* The output stream */
+struct lemon *lemp; /* The main info structure for this parser */
+int *plineno; /* Pointer to the line number */
+int mhflag; /* True if generating makeheaders output */
+{
+ int lineno = *plineno; /* The line number of the output */
+ char **types; /* A hash table of datatypes */
+ int arraysize; /* Size of the "types" array */
+ int maxdtlength; /* Maximum length of any ".datatype" field. */
+ char *stddt; /* Standardized name for a datatype */
+ int i,j; /* Loop counters */
+ int hash; /* For hashing the name of a type */
+ char *name; /* Name of the parser */
+
+ /* Allocate and initialize types[] and allocate stddt[] */
+ arraysize = lemp->nsymbol * 2;
+ types = (char**)malloc( arraysize * sizeof(char*) );
+ for(i=0; i<arraysize; i++) types[i] = 0;
+ maxdtlength = 0;
+ if( lemp->vartype ){
+ maxdtlength = strlen(lemp->vartype);
+ }
+ for(i=0; i<lemp->nsymbol; i++){
+ int len;
+ struct symbol *sp = lemp->symbols[i];
+ if( sp->datatype==0 ) continue;
+ len = strlen(sp->datatype);
+ if( len>maxdtlength ) maxdtlength = len;
+ }
+ stddt = (char*)malloc( maxdtlength*2 + 1 );
+ if( types==0 || stddt==0 ){
+ fprintf(stderr,"Out of memory.\n");
+ exit(1);
+ }
+
+ /* Build a hash table of datatypes. The ".dtnum" field of each symbol
+ ** is filled in with the hash index plus 1. A ".dtnum" value of 0 is
+ ** used for terminal symbols. If there is no %default_type defined then
+ ** 0 is also used as the .dtnum value for nonterminals which do not specify
+ ** a datatype using the %type directive.
+ */
+ for(i=0; i<lemp->nsymbol; i++){
+ struct symbol *sp = lemp->symbols[i];
+ char *cp;
+ if( sp==lemp->errsym ){
+ sp->dtnum = arraysize+1;
+ continue;
+ }
+ if( sp->type!=NONTERMINAL || (sp->datatype==0 && lemp->vartype==0) ){
+ sp->dtnum = 0;
+ continue;
+ }
+ cp = sp->datatype;
+ if( cp==0 ) cp = lemp->vartype;
+ j = 0;
+ while( isspace(*cp) ) cp++;
+ while( *cp ) stddt[j++] = *cp++;
+ while( j>0 && isspace(stddt[j-1]) ) j--;
+ stddt[j] = 0;
+ hash = 0;
+ for(j=0; stddt[j]; j++){
+ hash = hash*53 + stddt[j];
+ }
+ hash = (hash & 0x7fffffff)%arraysize;
+ while( types[hash] ){
+ if( strcmp(types[hash],stddt)==0 ){
+ sp->dtnum = hash + 1;
+ break;
+ }
+ hash++;
+ if( hash>=arraysize ) hash = 0;
+ }
+ if( types[hash]==0 ){
+ sp->dtnum = hash + 1;
+ types[hash] = (char*)malloc( strlen(stddt)+1 );
+ if( types[hash]==0 ){
+ fprintf(stderr,"Out of memory.\n");
+ exit(1);
+ }
+ strcpy(types[hash],stddt);
+ }
+ }
+
+ /* Print out the definition of YYTOKENTYPE and YYMINORTYPE */
+ name = lemp->name ? lemp->name : "Parse";
+ lineno = *plineno;
+ if( mhflag ){ fprintf(out,"#if INTERFACE\n"); lineno++; }
+ fprintf(out,"#define %sTOKENTYPE %s\n",name,
+ lemp->tokentype?lemp->tokentype:"void*"); lineno++;
+ if( mhflag ){ fprintf(out,"#endif\n"); lineno++; }
+ fprintf(out,"typedef union {\n"); lineno++;
+ fprintf(out," %sTOKENTYPE yy0;\n",name); lineno++;
+ for(i=0; i<arraysize; i++){
+ if( types[i]==0 ) continue;
+ fprintf(out," %s yy%d;\n",types[i],i+1); lineno++;
+ free(types[i]);
+ }
+ fprintf(out," int yy%d;\n",lemp->errsym->dtnum); lineno++;
+ free(stddt);
+ free(types);
+ fprintf(out,"} YYMINORTYPE;\n"); lineno++;
+ *plineno = lineno;
+}
+
+/*
+** Return the name of a C datatype able to represent values between
+** lwr and upr, inclusive.
+*/
+static const char *minimum_size_type(int lwr, int upr){
+ if( lwr>=0 ){
+ if( upr<=255 ){
+ return "unsigned char";
+ }else if( upr<65535 ){
+ return "unsigned short int";
+ }else{
+ return "unsigned int";
+ }
+ }else if( lwr>=-127 && upr<=127 ){
+ return "signed char";
+ }else if( lwr>=-32767 && upr<32767 ){
+ return "short";
+ }else{
+ return "int";
+ }
+}
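+
+/*
+** Sample results of the mapping above (restating the code):
+**
+**     minimum_size_type(0, 200)        ->  "unsigned char"
+**     minimum_size_type(0, 1000)       ->  "unsigned short int"
+**     minimum_size_type(-50, 50)       ->  "signed char"
+**     minimum_size_type(-1000, 1000)   ->  "short"
+*/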
+
+/*
+** Each state contains a set of token transactions and a set of
+** nonterminal transactions. Each of these sets makes an instance
+** of the following structure. An array of these structures is used
+** to order the creation of entries in the yy_action[] table.
+*/
+struct axset {
+ struct state *stp; /* A pointer to a state */
+ int isTkn; /* True to use tokens. False for non-terminals */
+ int nAction; /* Number of actions */
+};
+
+/*
+** Compare two axset structures for sorting purposes
+*/
+static int axset_compare(const void *a, const void *b){
+ struct axset *p1 = (struct axset*)a;
+ struct axset *p2 = (struct axset*)b;
+ return p2->nAction - p1->nAction;
+}
+
+/* Generate C source code for the parser */
+void ReportTable(lemp, mhflag)
+struct lemon *lemp;
+int mhflag; /* Output in makeheaders format if true */
+{
+ FILE *out, *in;
+ char line[LINESIZE];
+ int lineno;
+ struct state *stp;
+ struct action *ap;
+ struct rule *rp;
+ struct acttab *pActtab;
+ int i, j, n;
+ char *name;
+ int mnTknOfst, mxTknOfst;
+ int mnNtOfst, mxNtOfst;
+ struct axset *ax;
+
+ in = tplt_open(lemp);
+ if( in==0 ) return;
+ out = file_open(lemp,".c","w");
+ if( out==0 ){
+ fclose(in);
+ return;
+ }
+ lineno = 1;
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate the include code, if any */
+ tplt_print(out,lemp,lemp->include,lemp->includeln,&lineno);
+ if( mhflag ){
+ char *name = file_makename(lemp, ".h");
+ fprintf(out,"#include \"%s\"\n", name); lineno++;
+ free(name);
+ }
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate #defines for all tokens */
+ if( mhflag ){
+ char *prefix;
+ fprintf(out,"#if INTERFACE\n"); lineno++;
+ if( lemp->tokenprefix ) prefix = lemp->tokenprefix;
+ else prefix = "";
+ for(i=1; i<lemp->nterminal; i++){
+ fprintf(out,"#define %s%-30s %2d\n",prefix,lemp->symbols[i]->name,i);
+ lineno++;
+ }
+ fprintf(out,"#endif\n"); lineno++;
+ }
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate the defines */
+ fprintf(out,"/* \001 */\n");
+ fprintf(out,"#define YYCODETYPE %s\n",
+ minimum_size_type(0, lemp->nsymbol+5)); lineno++;
+ fprintf(out,"#define YYNOCODE %d\n",lemp->nsymbol+1); lineno++;
+ fprintf(out,"#define YYACTIONTYPE %s\n",
+ minimum_size_type(0, lemp->nstate+lemp->nrule+5)); lineno++;
+ print_stack_union(out,lemp,&lineno,mhflag);
+ if( lemp->stacksize ){
+ if( atoi(lemp->stacksize)<=0 ){
+ ErrorMsg(lemp->filename,0,
+"Illegal stack size: [%s]. The stack size should be an integer constant.",
+ lemp->stacksize);
+ lemp->errorcnt++;
+ lemp->stacksize = "100";
+ }
+ fprintf(out,"#define YYSTACKDEPTH %s\n",lemp->stacksize); lineno++;
+ }else{
+ fprintf(out,"#define YYSTACKDEPTH 100\n"); lineno++;
+ }
+ if( mhflag ){
+ fprintf(out,"#if INTERFACE\n"); lineno++;
+ }
+ name = lemp->name ? lemp->name : "Parse";
+ if( lemp->arg && lemp->arg[0] ){
+ int i;
+ i = strlen(lemp->arg);
+ while( i>=1 && isspace(lemp->arg[i-1]) ) i--;
+ while( i>=1 && (isalnum(lemp->arg[i-1]) || lemp->arg[i-1]=='_') ) i--;
+ fprintf(out,"#define %sARG_SDECL %s;\n",name,lemp->arg); lineno++;
+ fprintf(out,"#define %sARG_PDECL ,%s\n",name,lemp->arg); lineno++;
+ fprintf(out,"#define %sARG_FETCH %s = yypParser->%s\n",
+ name,lemp->arg,&lemp->arg[i]); lineno++;
+ fprintf(out,"#define %sARG_STORE yypParser->%s = %s\n",
+ name,&lemp->arg[i],&lemp->arg[i]); lineno++;
+ }else{
+ fprintf(out,"#define %sARG_SDECL\n",name); lineno++;
+ fprintf(out,"#define %sARG_PDECL\n",name); lineno++;
+ fprintf(out,"#define %sARG_FETCH\n",name); lineno++;
+ fprintf(out,"#define %sARG_STORE\n",name); lineno++;
+ }
+ if( mhflag ){
+ fprintf(out,"#endif\n"); lineno++;
+ }
+ fprintf(out,"#define YYNSTATE %d\n",lemp->nstate); lineno++;
+ fprintf(out,"#define YYNRULE %d\n",lemp->nrule); lineno++;
+ fprintf(out,"#define YYERRORSYMBOL %d\n",lemp->errsym->index); lineno++;
+ fprintf(out,"#define YYERRSYMDT yy%d\n",lemp->errsym->dtnum); lineno++;
+ if( lemp->has_fallback ){
+ fprintf(out,"#define YYFALLBACK 1\n"); lineno++;
+ }
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate the action table and its associates:
+ **
+ ** yy_action[] A single table containing all actions.
+ ** yy_lookahead[] A table containing the lookahead for each entry in
+ ** yy_action. Used to detect hash collisions.
+ ** yy_shift_ofst[] For each state, the offset into yy_action for
+ ** shifting terminals.
+ ** yy_reduce_ofst[] For each state, the offset into yy_action for
+ ** shifting non-terminals after a reduce.
+ ** yy_default[] Default action for each state.
+ */
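+
+  /*
+  ** Sketch of how the generated parser (the lempar.c driver template) is
+  ** expected to consult these tables for lookahead token "la" in state "s"
+  ** (illustrative pseudo-code, not code emitted here):
+  **
+  **     i = yy_shift_ofst[s];
+  **     if( i==YY_SHIFT_USE_DFLT || yy_lookahead[i+la]!=la ){
+  **       act = yy_default[s];
+  **     }else{
+  **       act = yy_action[i+la];
+  **     }
+  **
+  ** yy_reduce_ofst[] plays the same role for the nonterminal pushed after
+  ** a reduce.
+  */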
+
+ /* Compute the actions on all states and count them up */
+ ax = malloc( sizeof(ax[0])*lemp->nstate*2 );
+ if( ax==0 ){
+ fprintf(stderr,"malloc failed\n");
+ exit(1);
+ }
+ for(i=0; i<lemp->nstate; i++){
+ stp = lemp->sorted[i];
+ stp->nTknAct = stp->nNtAct = 0;
+ stp->iDflt = lemp->nstate + lemp->nrule;
+ stp->iTknOfst = NO_OFFSET;
+ stp->iNtOfst = NO_OFFSET;
+ for(ap=stp->ap; ap; ap=ap->next){
+ if( compute_action(lemp,ap)>=0 ){
+ if( ap->sp->index<lemp->nterminal ){
+ stp->nTknAct++;
+ }else if( ap->sp->index<lemp->nsymbol ){
+ stp->nNtAct++;
+ }else{
+ stp->iDflt = compute_action(lemp, ap);
+ }
+ }
+ }
+ ax[i*2].stp = stp;
+ ax[i*2].isTkn = 1;
+ ax[i*2].nAction = stp->nTknAct;
+ ax[i*2+1].stp = stp;
+ ax[i*2+1].isTkn = 0;
+ ax[i*2+1].nAction = stp->nNtAct;
+ }
+ mxTknOfst = mnTknOfst = 0;
+ mxNtOfst = mnNtOfst = 0;
+
+ /* Compute the action table. In order to try to keep the size of the
+ ** action table to a minimum, the heuristic of placing the largest action
+ ** sets first is used.
+ */
+ qsort(ax, lemp->nstate*2, sizeof(ax[0]), axset_compare);
+ pActtab = acttab_alloc();
+ for(i=0; i<lemp->nstate*2 && ax[i].nAction>0; i++){
+ stp = ax[i].stp;
+ if( ax[i].isTkn ){
+ for(ap=stp->ap; ap; ap=ap->next){
+ int action;
+ if( ap->sp->index>=lemp->nterminal ) continue;
+ action = compute_action(lemp, ap);
+ if( action<0 ) continue;
+ acttab_action(pActtab, ap->sp->index, action);
+ }
+ stp->iTknOfst = acttab_insert(pActtab);
+ if( stp->iTknOfst<mnTknOfst ) mnTknOfst = stp->iTknOfst;
+ if( stp->iTknOfst>mxTknOfst ) mxTknOfst = stp->iTknOfst;
+ }else{
+ for(ap=stp->ap; ap; ap=ap->next){
+ int action;
+ if( ap->sp->index<lemp->nterminal ) continue;
+ if( ap->sp->index==lemp->nsymbol ) continue;
+ action = compute_action(lemp, ap);
+ if( action<0 ) continue;
+ acttab_action(pActtab, ap->sp->index, action);
+ }
+ stp->iNtOfst = acttab_insert(pActtab);
+ if( stp->iNtOfst<mnNtOfst ) mnNtOfst = stp->iNtOfst;
+ if( stp->iNtOfst>mxNtOfst ) mxNtOfst = stp->iNtOfst;
+ }
+ }
+ free(ax);
+
+ /* Output the yy_action table */
+ fprintf(out,"static YYACTIONTYPE yy_action[] = {\n"); lineno++;
+ n = acttab_size(pActtab);
+ for(i=j=0; i<n; i++){
+ int action = acttab_yyaction(pActtab, i);
+ if( action<0 ) action = lemp->nsymbol + lemp->nrule + 2;
+ if( j==0 ) fprintf(out," /* %5d */ ", i);
+ fprintf(out, " %4d,", action);
+ if( j==9 || i==n-1 ){
+ fprintf(out, "\n"); lineno++;
+ j = 0;
+ }else{
+ j++;
+ }
+ }
+ fprintf(out, "};\n"); lineno++;
+
+ /* Output the yy_lookahead table */
+ fprintf(out,"static YYCODETYPE yy_lookahead[] = {\n"); lineno++;
+ for(i=j=0; i<n; i++){
+ int la = acttab_yylookahead(pActtab, i);
+ if( la<0 ) la = lemp->nsymbol;
+ if( j==0 ) fprintf(out," /* %5d */ ", i);
+ fprintf(out, " %4d,", la);
+ if( j==9 || i==n-1 ){
+ fprintf(out, "\n"); lineno++;
+ j = 0;
+ }else{
+ j++;
+ }
+ }
+ fprintf(out, "};\n"); lineno++;
+
+ /* Output the yy_shift_ofst[] table */
+ fprintf(out, "#define YY_SHIFT_USE_DFLT (%d)\n", mnTknOfst-1); lineno++;
+ fprintf(out, "static %s yy_shift_ofst[] = {\n",
+ minimum_size_type(mnTknOfst-1, mxTknOfst)); lineno++;
+ n = lemp->nstate;
+ for(i=j=0; i<n; i++){
+ int ofst;
+ stp = lemp->sorted[i];
+ ofst = stp->iTknOfst;
+ if( ofst==NO_OFFSET ) ofst = mnTknOfst - 1;
+ if( j==0 ) fprintf(out," /* %5d */ ", i);
+ fprintf(out, " %4d,", ofst);
+ if( j==9 || i==n-1 ){
+ fprintf(out, "\n"); lineno++;
+ j = 0;
+ }else{
+ j++;
+ }
+ }
+ fprintf(out, "};\n"); lineno++;
+
+ /* Output the yy_reduce_ofst[] table */
+ fprintf(out, "#define YY_REDUCE_USE_DFLT (%d)\n", mnNtOfst-1); lineno++;
+ fprintf(out, "static %s yy_reduce_ofst[] = {\n",
+ minimum_size_type(mnNtOfst-1, mxNtOfst)); lineno++;
+ n = lemp->nstate;
+ for(i=j=0; i<n; i++){
+ int ofst;
+ stp = lemp->sorted[i];
+ ofst = stp->iNtOfst;
+ if( ofst==NO_OFFSET ) ofst = mnNtOfst - 1;
+ if( j==0 ) fprintf(out," /* %5d */ ", i);
+ fprintf(out, " %4d,", ofst);
+ if( j==9 || i==n-1 ){
+ fprintf(out, "\n"); lineno++;
+ j = 0;
+ }else{
+ j++;
+ }
+ }
+ fprintf(out, "};\n"); lineno++;
+
+ /* Output the default action table */
+ fprintf(out, "static YYACTIONTYPE yy_default[] = {\n"); lineno++;
+ n = lemp->nstate;
+ for(i=j=0; i<n; i++){
+ stp = lemp->sorted[i];
+ if( j==0 ) fprintf(out," /* %5d */ ", i);
+ fprintf(out, " %4d,", stp->iDflt);
+ if( j==9 || i==n-1 ){
+ fprintf(out, "\n"); lineno++;
+ j = 0;
+ }else{
+ j++;
+ }
+ }
+ fprintf(out, "};\n"); lineno++;
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate the table of fallback tokens.
+ */
+ if( lemp->has_fallback ){
+ for(i=0; i<lemp->nterminal; i++){
+ struct symbol *p = lemp->symbols[i];
+ if( p->fallback==0 ){
+ fprintf(out, " 0, /* %10s => nothing */\n", p->name);
+ }else{
+ fprintf(out, " %3d, /* %10s => %s */\n", p->fallback->index,
+ p->name, p->fallback->name);
+ }
+ lineno++;
+ }
+ }
+ tplt_xfer(lemp->name, in, out, &lineno);
+
+ /* Generate a table containing the symbolic name of every symbol
+ */
+ for(i=0; i<lemp->nsymbol; i++){
+ sprintf(line,"\"%s\",",lemp->symbols[i]->name);
+ fprintf(out," %-15s",line);
+ if( (i&3)==3 ){ fprintf(out,"\n"); lineno++; }
+ }
+ if( (i&3)!=0 ){ fprintf(out,"\n"); lineno++; }
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate a table containing a text string that describes every
+** rule in the rule set of the grammar. This information is used
+ ** when tracing REDUCE actions.
+ */
+ for(i=0, rp=lemp->rule; rp; rp=rp->next, i++){
+ assert( rp->index==i );
+ fprintf(out," /* %3d */ \"%s ::=", i, rp->lhs->name);
+ for(j=0; j<rp->nrhs; j++) fprintf(out," %s",rp->rhs[j]->name);
+ fprintf(out,"\",\n"); lineno++;
+ }
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate code which executes every time a symbol is popped from
+ ** the stack while processing errors or while destroying the parser.
+ ** (In other words, generate the %destructor actions)
+ */
+ if( lemp->tokendest ){
+ for(i=0; i<lemp->nsymbol; i++){
+ struct symbol *sp = lemp->symbols[i];
+ if( sp==0 || sp->type!=TERMINAL ) continue;
+ fprintf(out," case %d:\n",sp->index); lineno++;
+ }
+ for(i=0; i<lemp->nsymbol && lemp->symbols[i]->type!=TERMINAL; i++);
+ if( i<lemp->nsymbol ){
+ emit_destructor_code(out,lemp->symbols[i],lemp,&lineno);
+ fprintf(out," break;\n"); lineno++;
+ }
+ }
+ for(i=0; i<lemp->nsymbol; i++){
+ struct symbol *sp = lemp->symbols[i];
+ if( sp==0 || sp->type==TERMINAL || sp->destructor==0 ) continue;
+ fprintf(out," case %d:\n",sp->index); lineno++;
+ emit_destructor_code(out,lemp->symbols[i],lemp,&lineno);
+ fprintf(out," break;\n"); lineno++;
+ }
+ if( lemp->vardest ){
+ struct symbol *dflt_sp = 0;
+ for(i=0; i<lemp->nsymbol; i++){
+ struct symbol *sp = lemp->symbols[i];
+ if( sp==0 || sp->type==TERMINAL ||
+ sp->index<=0 || sp->destructor!=0 ) continue;
+ fprintf(out," case %d:\n",sp->index); lineno++;
+ dflt_sp = sp;
+ }
+ if( dflt_sp!=0 ){
+ emit_destructor_code(out,dflt_sp,lemp,&lineno);
+ fprintf(out," break;\n"); lineno++;
+ }
+ }
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate code which executes whenever the parser stack overflows */
+ tplt_print(out,lemp,lemp->overflow,lemp->overflowln,&lineno);
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate the table of rule information
+ **
+ ** Note: This code depends on the fact that rules are numbered
+ ** sequentially beginning with 0.
+ */
+ for(rp=lemp->rule; rp; rp=rp->next){
+ fprintf(out," { %d, %d },\n",rp->lhs->index,rp->nrhs); lineno++;
+ }
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate code which executes during each REDUCE action */
+ for(rp=lemp->rule; rp; rp=rp->next){
+ fprintf(out," case %d:\n",rp->index); lineno++;
+ emit_code(out,rp,lemp,&lineno);
+ fprintf(out," break;\n"); lineno++;
+ }
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate code which executes if a parse fails */
+ tplt_print(out,lemp,lemp->failure,lemp->failureln,&lineno);
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate code which executes when a syntax error occurs */
+ tplt_print(out,lemp,lemp->error,lemp->errorln,&lineno);
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate code which executes when the parser accepts its input */
+ tplt_print(out,lemp,lemp->accept,lemp->acceptln,&lineno);
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Append any additional code the user desires */
+ tplt_print(out,lemp,lemp->extracode,lemp->extracodeln,&lineno);
+
+ fclose(in);
+ fclose(out);
+ return;
+}
+
+/* Generate a header file for the parser */
+void ReportHeader(lemp)
+struct lemon *lemp;
+{
+ FILE *out, *in;
+ char *prefix;
+ char line[LINESIZE];
+ char pattern[LINESIZE];
+ int i;
+
+ if( lemp->tokenprefix ) prefix = lemp->tokenprefix;
+ else prefix = "";
+ in = file_open(lemp,".h","r");
+ if( in ){
+ for(i=1; i<lemp->nterminal && fgets(line,LINESIZE,in); i++){
+ sprintf(pattern,"#define %s%-30s %2d\n",prefix,lemp->symbols[i]->name,i);
+ if( strcmp(line,pattern) ) break;
+ }
+ fclose(in);
+ if( i==lemp->nterminal ){
+ /* No change in the file. Don't rewrite it. */
+ return;
+ }
+ }
+ out = file_open(lemp,".h","w");
+ if( out ){
+ for(i=1; i<lemp->nterminal; i++){
+ fprintf(out,"#define %s%-30s %2d\n",prefix,lemp->symbols[i]->name,i);
+ }
+ fclose(out);
+ }
+ return;
+}
+
+/* Reduce the size of the action tables, if possible, by making use
+** of defaults.
+**
+** In this version, we take the most frequent REDUCE action and make
+** it the default.  A REDUCE action becomes the default only if it
+** occurs more than once in a state.
+*/
+void CompressTables(lemp)
+struct lemon *lemp;
+{
+ struct state *stp;
+ struct action *ap, *ap2;
+ struct rule *rp, *rp2, *rbest;
+ int nbest, n;
+ int i;
+
+ for(i=0; i<lemp->nstate; i++){
+ stp = lemp->sorted[i];
+ nbest = 0;
+ rbest = 0;
+
+ for(ap=stp->ap; ap; ap=ap->next){
+ if( ap->type!=REDUCE ) continue;
+ rp = ap->x.rp;
+ if( rp==rbest ) continue;
+ n = 1;
+ for(ap2=ap->next; ap2; ap2=ap2->next){
+ if( ap2->type!=REDUCE ) continue;
+ rp2 = ap2->x.rp;
+ if( rp2==rbest ) continue;
+ if( rp2==rp ) n++;
+ }
+ if( n>nbest ){
+ nbest = n;
+ rbest = rp;
+ }
+ }
+
+ /* Do not make a default if the number of rules to default
+ ** is not at least 2 */
+ if( nbest<2 ) continue;
+
+
+ /* Combine matching REDUCE actions into a single default */
+ for(ap=stp->ap; ap; ap=ap->next){
+ if( ap->type==REDUCE && ap->x.rp==rbest ) break;
+ }
+ assert( ap );
+ ap->sp = Symbol_new("{default}");
+ for(ap=ap->next; ap; ap=ap->next){
+ if( ap->type==REDUCE && ap->x.rp==rbest ) ap->type = NOT_USED;
+ }
+ stp->ap = Action_sort(stp->ap);
+ }
+}
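+
+/* For illustration only (roughly what the loop above does): if a state's
+** actions were
+**
+**     A  shift 7     B  reduce 3     C  reduce 3     D  reduce 5
+**
+** then "reduce 3" is the most common REDUCE, so those two entries are
+** collapsed into a single {default} reduce-3 action, leaving only the
+** shift on A and the reduce-5 on D as explicit entries.
+*/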
+
+/***************** From the file "set.c" ************************************/
+/*
+** Set manipulation routines for the LEMON parser generator.
+*/
+
+static int size = 0;
+
+/* Set the set size */
+void SetSize(n)
+int n;
+{
+ size = n+1;
+}
+
+/* Allocate a new set */
+char *SetNew(){
+ char *s;
+ int i;
+ s = (char*)malloc( size );
+ if( s==0 ){
+ extern void memory_error();
+ memory_error();
+ }
+ for(i=0; i<size; i++) s[i] = 0;
+ return s;
+}
+
+/* Deallocate a set */
+void SetFree(s)
+char *s;
+{
+ free(s);
+}
+
+/* Add a new element to the set. Return TRUE if the element was added
+** and FALSE if it was already there. */
+int SetAdd(s,e)
+char *s;
+int e;
+{
+ int rv;
+ rv = s[e];
+ s[e] = 1;
+ return !rv;
+}
+
+/* Add every element of s2 to s1. Return TRUE if s1 changes. */
+int SetUnion(s1,s2)
+char *s1;
+char *s2;
+{
+ int i, progress;
+ progress = 0;
+ for(i=0; i<size; i++){
+ if( s2[i]==0 ) continue;
+ if( s1[i]==0 ){
+ progress = 1;
+ s1[i] = 1;
+ }
+ }
+ return progress;
+}
+/********************** From the file "table.c" ****************************/
+/*
+** All code in this file has been automatically generated
+** from a specification in the file
+** "table.q"
+** by the associative array code building program "aagen".
+** Do not edit this file! Instead, edit the specification
+** file, then rerun aagen.
+*/
+/*
+** Code for processing tables in the LEMON parser generator.
+*/
+
+PRIVATE int strhash(x)
+char *x;
+{
+ int h = 0;
+ while( *x) h = h*13 + *(x++);
+ return h;
+}
+
+/* Works like strdup, sort of. Save a string in malloced memory, but
+** keep strings in a table so that the same string is not in more
+** than one place.
+*/
+char *Strsafe(y)
+char *y;
+{
+ char *z;
+
+ z = Strsafe_find(y);
+ if( z==0 && (z=malloc( strlen(y)+1 ))!=0 ){
+ strcpy(z,y);
+ Strsafe_insert(z);
+ }
+ MemoryCheck(z);
+ return z;
+}
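+
+/* A minimal usage sketch of Strsafe() (illustrative only; the variable
+** names are hypothetical).  Once Strsafe_init() has been called, equal
+** strings are interned in a single table, so equal text yields the very
+** same pointer:
+**
+**     char *a = Strsafe("state");
+**     char *b = Strsafe("state");
+**     assert( a==b );      (pointer equality, not merely equal contents)
+*/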
+
+/* There is one instance of the following structure for each
+** associative array of type "x1".
+*/
+struct s_x1 {
+ int size; /* The number of available slots. */
+ /* Must be a power of 2 greater than or */
+ /* equal to 1 */
+ int count; /* Number of slots currently filled */
+ struct s_x1node *tbl; /* The data stored here */
+ struct s_x1node **ht; /* Hash table for lookups */
+};
+
+/* There is one instance of this structure for every data element
+** in an associative array of type "x1".
+*/
+typedef struct s_x1node {
+ char *data; /* The data */
+ struct s_x1node *next; /* Next entry with the same hash */
+ struct s_x1node **from; /* Previous link */
+} x1node;
+
+/* There is only one instance of the array, which is the following */
+static struct s_x1 *x1a;
+
+/* Allocate a new associative array */
+void Strsafe_init(){
+ if( x1a ) return;
+ x1a = (struct s_x1*)malloc( sizeof(struct s_x1) );
+ if( x1a ){
+ x1a->size = 1024;
+ x1a->count = 0;
+ x1a->tbl = (x1node*)malloc(
+ (sizeof(x1node) + sizeof(x1node*))*1024 );
+ if( x1a->tbl==0 ){
+ free(x1a);
+ x1a = 0;
+ }else{
+ int i;
+ x1a->ht = (x1node**)&(x1a->tbl[1024]);
+ for(i=0; i<1024; i++) x1a->ht[i] = 0;
+ }
+ }
+}
+/* Insert a new record into the array. Return TRUE if successful.
+** Prior data with the same key is NOT overwritten */
+int Strsafe_insert(data)
+char *data;
+{
+ x1node *np;
+ int h;
+ int ph;
+
+ if( x1a==0 ) return 0;
+ ph = strhash(data);
+ h = ph & (x1a->size-1);
+ np = x1a->ht[h];
+ while( np ){
+ if( strcmp(np->data,data)==0 ){
+ /* An existing entry with the same key is found. */
+ /* Fail because overwrite is not allowed. */
+ return 0;
+ }
+ np = np->next;
+ }
+ if( x1a->count>=x1a->size ){
+ /* Need to make the hash table bigger */
+ int i,size;
+ struct s_x1 array;
+ array.size = size = x1a->size*2;
+ array.count = x1a->count;
+ array.tbl = (x1node*)malloc(
+ (sizeof(x1node) + sizeof(x1node*))*size );
+ if( array.tbl==0 ) return 0; /* Fail due to malloc failure */
+ array.ht = (x1node**)&(array.tbl[size]);
+ for(i=0; i<size; i++) array.ht[i] = 0;
+ for(i=0; i<x1a->count; i++){
+ x1node *oldnp, *newnp;
+ oldnp = &(x1a->tbl[i]);
+ h = strhash(oldnp->data) & (size-1);
+ newnp = &(array.tbl[i]);
+ if( array.ht[h] ) array.ht[h]->from = &(newnp->next);
+ newnp->next = array.ht[h];
+ newnp->data = oldnp->data;
+ newnp->from = &(array.ht[h]);
+ array.ht[h] = newnp;
+ }
+ free(x1a->tbl);
+ *x1a = array;
+ }
+ /* Insert the new data */
+ h = ph & (x1a->size-1);
+ np = &(x1a->tbl[x1a->count++]);
+ np->data = data;
+ if( x1a->ht[h] ) x1a->ht[h]->from = &(np->next);
+ np->next = x1a->ht[h];
+ x1a->ht[h] = np;
+ np->from = &(x1a->ht[h]);
+ return 1;
+}
+
+/* Return a pointer to data assigned to the given key. Return NULL
+** if no such key. */
+char *Strsafe_find(key)
+char *key;
+{
+ int h;
+ x1node *np;
+
+ if( x1a==0 ) return 0;
+ h = strhash(key) & (x1a->size-1);
+ np = x1a->ht[h];
+ while( np ){
+ if( strcmp(np->data,key)==0 ) break;
+ np = np->next;
+ }
+ return np ? np->data : 0;
+}
+
+/* Return a pointer to the (terminal or nonterminal) symbol "x".
+** Create a new symbol if this is the first time "x" has been seen.
+*/
+struct symbol *Symbol_new(x)
+char *x;
+{
+ struct symbol *sp;
+
+ sp = Symbol_find(x);
+ if( sp==0 ){
+ sp = (struct symbol *)malloc( sizeof(struct symbol) );
+ MemoryCheck(sp);
+ sp->name = Strsafe(x);
+ sp->type = isupper(*x) ? TERMINAL : NONTERMINAL;
+ sp->rule = 0;
+ sp->fallback = 0;
+ sp->prec = -1;
+ sp->assoc = UNK;
+ sp->firstset = 0;
+ sp->lambda = B_FALSE;
+ sp->destructor = 0;
+ sp->datatype = 0;
+ Symbol_insert(sp,sp->name);
+ }
+ return sp;
+}
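+
+/* A brief usage sketch of Symbol_new() (illustrative only; the symbol
+** names are hypothetical).  After Symbol_init(), the case of the first
+** letter selects the symbol type, and repeated requests for the same
+** name return the same structure:
+**
+**     struct symbol *t = Symbol_new("SEMI");     (upper case: TERMINAL)
+**     struct symbol *n = Symbol_new("cmdlist");  (lower case: NONTERMINAL)
+**     assert( Symbol_new("SEMI")==t );
+*/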
+
+/* Compare two symbols for sorting purposes
+**
+** Symbols that begin with upper case letters (terminals or tokens)
+** must sort before symbols that begin with lower case letters
+** (non-terminals). Other than that, the order does not matter.
+**
+** We find experimentally that leaving the symbols in their original
+** order (the order they appeared in the grammar file) gives the
+** smallest parser tables in SQLite.
+*/
+int Symbolcmpp(struct symbol **a, struct symbol **b){
+ int i1 = (**a).index + 10000000*((**a).name[0]>'Z');
+ int i2 = (**b).index + 10000000*((**b).name[0]>'Z');
+ return i1-i2;
+}
+
+/* There is one instance of the following structure for each
+** associative array of type "x2".
+*/
+struct s_x2 {
+ int size; /* The number of available slots. */
+ /* Must be a power of 2 greater than or */
+ /* equal to 1 */
+ int count; /* Number of slots currently filled */
+ struct s_x2node *tbl; /* The data stored here */
+ struct s_x2node **ht; /* Hash table for lookups */
+};
+
+/* There is one instance of this structure for every data element
+** in an associative array of type "x2".
+*/
+typedef struct s_x2node {
+ struct symbol *data; /* The data */
+ char *key; /* The key */
+ struct s_x2node *next; /* Next entry with the same hash */
+ struct s_x2node **from; /* Previous link */
+} x2node;
+
+/* There is only one instance of the array, which is the following */
+static struct s_x2 *x2a;
+
+/* Allocate a new associative array */
+void Symbol_init(){
+ if( x2a ) return;
+ x2a = (struct s_x2*)malloc( sizeof(struct s_x2) );
+ if( x2a ){
+ x2a->size = 128;
+ x2a->count = 0;
+ x2a->tbl = (x2node*)malloc(
+ (sizeof(x2node) + sizeof(x2node*))*128 );
+ if( x2a->tbl==0 ){
+ free(x2a);
+ x2a = 0;
+ }else{
+ int i;
+ x2a->ht = (x2node**)&(x2a->tbl[128]);
+ for(i=0; i<128; i++) x2a->ht[i] = 0;
+ }
+ }
+}
+/* Insert a new record into the array. Return TRUE if successful.
+** Prior data with the same key is NOT overwritten */
+int Symbol_insert(data,key)
+struct symbol *data;
+char *key;
+{
+ x2node *np;
+ int h;
+ int ph;
+
+ if( x2a==0 ) return 0;
+ ph = strhash(key);
+ h = ph & (x2a->size-1);
+ np = x2a->ht[h];
+ while( np ){
+ if( strcmp(np->key,key)==0 ){
+ /* An existing entry with the same key is found. */
+ /* Fail because overwrite is not allowed. */
+ return 0;
+ }
+ np = np->next;
+ }
+ if( x2a->count>=x2a->size ){
+ /* Need to make the hash table bigger */
+ int i,size;
+ struct s_x2 array;
+ array.size = size = x2a->size*2;
+ array.count = x2a->count;
+ array.tbl = (x2node*)malloc(
+ (sizeof(x2node) + sizeof(x2node*))*size );
+ if( array.tbl==0 ) return 0; /* Fail due to malloc failure */
+ array.ht = (x2node**)&(array.tbl[size]);
+ for(i=0; i<size; i++) array.ht[i] = 0;
+ for(i=0; i<x2a->count; i++){
+ x2node *oldnp, *newnp;
+ oldnp = &(x2a->tbl[i]);
+ h = strhash(oldnp->key) & (size-1);
+ newnp = &(array.tbl[i]);
+ if( array.ht[h] ) array.ht[h]->from = &(newnp->next);
+ newnp->next = array.ht[h];
+ newnp->key = oldnp->key;
+ newnp->data = oldnp->data;
+ newnp->from = &(array.ht[h]);
+ array.ht[h] = newnp;
+ }
+ free(x2a->tbl);
+ *x2a = array;
+ }
+ /* Insert the new data */
+ h = ph & (x2a->size-1);
+ np = &(x2a->tbl[x2a->count++]);
+ np->key = key;
+ np->data = data;
+ if( x2a->ht[h] ) x2a->ht[h]->from = &(np->next);
+ np->next = x2a->ht[h];
+ x2a->ht[h] = np;
+ np->from = &(x2a->ht[h]);
+ return 1;
+}
+
+/* Return a pointer to data assigned to the given key. Return NULL
+** if no such key. */
+struct symbol *Symbol_find(key)
+char *key;
+{
+ int h;
+ x2node *np;
+
+ if( x2a==0 ) return 0;
+ h = strhash(key) & (x2a->size-1);
+ np = x2a->ht[h];
+ while( np ){
+ if( strcmp(np->key,key)==0 ) break;
+ np = np->next;
+ }
+ return np ? np->data : 0;
+}
+
+/* Return the n-th data. Return NULL if n is out of range. */
+struct symbol *Symbol_Nth(n)
+int n;
+{
+ struct symbol *data;
+ if( x2a && n>0 && n<=x2a->count ){
+ data = x2a->tbl[n-1].data;
+ }else{
+ data = 0;
+ }
+ return data;
+}
+
+/* Return the size of the array */
+int Symbol_count()
+{
+ return x2a ? x2a->count : 0;
+}
+
+/* Return an array of pointers to all data in the table.
+** The array is obtained from malloc.  Return NULL if there is a memory
+** allocation problem or if the array is empty. */
+struct symbol **Symbol_arrayof()
+{
+ struct symbol **array;
+ int i,size;
+ if( x2a==0 ) return 0;
+ size = x2a->count;
+ array = (struct symbol **)malloc( sizeof(struct symbol *)*size );
+ if( array ){
+ for(i=0; i<size; i++) array[i] = x2a->tbl[i].data;
+ }
+ return array;
+}
+
+/* Compare two configurations */
+int Configcmp(a,b)
+struct config *a;
+struct config *b;
+{
+ int x;
+ x = a->rp->index - b->rp->index;
+ if( x==0 ) x = a->dot - b->dot;
+ return x;
+}
+
+/* Compare two states */
+PRIVATE int statecmp(a,b)
+struct config *a;
+struct config *b;
+{
+ int rc;
+ for(rc=0; rc==0 && a && b; a=a->bp, b=b->bp){
+ rc = a->rp->index - b->rp->index;
+ if( rc==0 ) rc = a->dot - b->dot;
+ }
+ if( rc==0 ){
+ if( a ) rc = 1;
+ if( b ) rc = -1;
+ }
+ return rc;
+}
+
+/* Hash a state */
+PRIVATE int statehash(a)
+struct config *a;
+{
+ int h=0;
+ while( a ){
+ h = h*571 + a->rp->index*37 + a->dot;
+ a = a->bp;
+ }
+ return h;
+}
+
+/* Allocate a new state structure */
+struct state *State_new()
+{
+ struct state *new;
+ new = (struct state *)malloc( sizeof(struct state) );
+ MemoryCheck(new);
+ return new;
+}
+
+/* There is one instance of the following structure for each
+** associative array of type "x3".
+*/
+struct s_x3 {
+ int size; /* The number of available slots. */
+ /* Must be a power of 2 greater than or */
+ /* equal to 1 */
+ int count; /* Number of slots currently filled */
+ struct s_x3node *tbl; /* The data stored here */
+ struct s_x3node **ht; /* Hash table for lookups */
+};
+
+/* There is one instance of this structure for every data element
+** in an associative array of type "x3".
+*/
+typedef struct s_x3node {
+ struct state *data; /* The data */
+ struct config *key; /* The key */
+ struct s_x3node *next; /* Next entry with the same hash */
+ struct s_x3node **from; /* Previous link */
+} x3node;
+
+/* There is only one instance of the array, which is the following */
+static struct s_x3 *x3a;
+
+/* Allocate a new associative array */
+void State_init(){
+ if( x3a ) return;
+ x3a = (struct s_x3*)malloc( sizeof(struct s_x3) );
+ if( x3a ){
+ x3a->size = 128;
+ x3a->count = 0;
+ x3a->tbl = (x3node*)malloc(
+ (sizeof(x3node) + sizeof(x3node*))*128 );
+ if( x3a->tbl==0 ){
+ free(x3a);
+ x3a = 0;
+ }else{
+ int i;
+ x3a->ht = (x3node**)&(x3a->tbl[128]);
+ for(i=0; i<128; i++) x3a->ht[i] = 0;
+ }
+ }
+}
+/* Insert a new record into the array. Return TRUE if successful.
+** Prior data with the same key is NOT overwritten */
+int State_insert(data,key)
+struct state *data;
+struct config *key;
+{
+ x3node *np;
+ int h;
+ int ph;
+
+ if( x3a==0 ) return 0;
+ ph = statehash(key);
+ h = ph & (x3a->size-1);
+ np = x3a->ht[h];
+ while( np ){
+ if( statecmp(np->key,key)==0 ){
+ /* An existing entry with the same key is found. */
+ /* Fail because overwrite is not allowed. */
+ return 0;
+ }
+ np = np->next;
+ }
+ if( x3a->count>=x3a->size ){
+ /* Need to make the hash table bigger */
+ int i,size;
+ struct s_x3 array;
+ array.size = size = x3a->size*2;
+ array.count = x3a->count;
+ array.tbl = (x3node*)malloc(
+ (sizeof(x3node) + sizeof(x3node*))*size );
+ if( array.tbl==0 ) return 0; /* Fail due to malloc failure */
+ array.ht = (x3node**)&(array.tbl[size]);
+ for(i=0; i<size; i++) array.ht[i] = 0;
+ for(i=0; i<x3a->count; i++){
+ x3node *oldnp, *newnp;
+ oldnp = &(x3a->tbl[i]);
+ h = statehash(oldnp->key) & (size-1);
+ newnp = &(array.tbl[i]);
+ if( array.ht[h] ) array.ht[h]->from = &(newnp->next);
+ newnp->next = array.ht[h];
+ newnp->key = oldnp->key;
+ newnp->data = oldnp->data;
+ newnp->from = &(array.ht[h]);
+ array.ht[h] = newnp;
+ }
+ free(x3a->tbl);
+ *x3a = array;
+ }
+ /* Insert the new data */
+ h = ph & (x3a->size-1);
+ np = &(x3a->tbl[x3a->count++]);
+ np->key = key;
+ np->data = data;
+ if( x3a->ht[h] ) x3a->ht[h]->from = &(np->next);
+ np->next = x3a->ht[h];
+ x3a->ht[h] = np;
+ np->from = &(x3a->ht[h]);
+ return 1;
+}
+
+/* Return a pointer to data assigned to the given key. Return NULL
+** if no such key. */
+struct state *State_find(key)
+struct config *key;
+{
+ int h;
+ x3node *np;
+
+ if( x3a==0 ) return 0;
+ h = statehash(key) & (x3a->size-1);
+ np = x3a->ht[h];
+ while( np ){
+ if( statecmp(np->key,key)==0 ) break;
+ np = np->next;
+ }
+ return np ? np->data : 0;
+}
+
+/* Return an array of pointers to all data in the table.
+** The array is obtained from malloc.  Return NULL if there is a memory
+** allocation problem or if the array is empty. */
+struct state **State_arrayof()
+{
+ struct state **array;
+ int i,size;
+ if( x3a==0 ) return 0;
+ size = x3a->count;
+ array = (struct state **)malloc( sizeof(struct state *)*size );
+ if( array ){
+ for(i=0; i<size; i++) array[i] = x3a->tbl[i].data;
+ }
+ return array;
+}
+
+/* Hash a configuration */
+PRIVATE int confighash(a)
+struct config *a;
+{
+ int h=0;
+ h = h*571 + a->rp->index*37 + a->dot;
+ return h;
+}
+
+/* There is one instance of the following structure for each
+** associative array of type "x4".
+*/
+struct s_x4 {
+ int size; /* The number of available slots. */
+ /* Must be a power of 2 greater than or */
+ /* equal to 1 */
+ int count; /* Number of slots currently filled */
+ struct s_x4node *tbl; /* The data stored here */
+ struct s_x4node **ht; /* Hash table for lookups */
+};
+
+/* There is one instance of this structure for every data element
+** in an associative array of type "x4".
+*/
+typedef struct s_x4node {
+ struct config *data; /* The data */
+ struct s_x4node *next; /* Next entry with the same hash */
+ struct s_x4node **from; /* Previous link */
+} x4node;
+
+/* There is only one instance of the array, which is the following */
+static struct s_x4 *x4a;
+
+/* Allocate a new associative array */
+void Configtable_init(){
+ if( x4a ) return;
+ x4a = (struct s_x4*)malloc( sizeof(struct s_x4) );
+ if( x4a ){
+ x4a->size = 64;
+ x4a->count = 0;
+ x4a->tbl = (x4node*)malloc(
+ (sizeof(x4node) + sizeof(x4node*))*64 );
+ if( x4a->tbl==0 ){
+ free(x4a);
+ x4a = 0;
+ }else{
+ int i;
+ x4a->ht = (x4node**)&(x4a->tbl[64]);
+ for(i=0; i<64; i++) x4a->ht[i] = 0;
+ }
+ }
+}
+/* Insert a new record into the array. Return TRUE if successful.
+** Prior data with the same key is NOT overwritten */
+int Configtable_insert(data)
+struct config *data;
+{
+ x4node *np;
+ int h;
+ int ph;
+
+ if( x4a==0 ) return 0;
+ ph = confighash(data);
+ h = ph & (x4a->size-1);
+ np = x4a->ht[h];
+ while( np ){
+ if( Configcmp(np->data,data)==0 ){
+ /* An existing entry with the same key is found. */
+ /* Fail because overwrite is not allowed. */
+ return 0;
+ }
+ np = np->next;
+ }
+ if( x4a->count>=x4a->size ){
+ /* Need to make the hash table bigger */
+ int i,size;
+ struct s_x4 array;
+ array.size = size = x4a->size*2;
+ array.count = x4a->count;
+ array.tbl = (x4node*)malloc(
+ (sizeof(x4node) + sizeof(x4node*))*size );
+ if( array.tbl==0 ) return 0; /* Fail due to malloc failure */
+ array.ht = (x4node**)&(array.tbl[size]);
+ for(i=0; i<size; i++) array.ht[i] = 0;
+ for(i=0; i<x4a->count; i++){
+ x4node *oldnp, *newnp;
+ oldnp = &(x4a->tbl[i]);
+ h = confighash(oldnp->data) & (size-1);
+ newnp = &(array.tbl[i]);
+ if( array.ht[h] ) array.ht[h]->from = &(newnp->next);
+ newnp->next = array.ht[h];
+ newnp->data = oldnp->data;
+ newnp->from = &(array.ht[h]);
+ array.ht[h] = newnp;
+ }
+ free(x4a->tbl);
+ *x4a = array;
+ }
+ /* Insert the new data */
+ h = ph & (x4a->size-1);
+ np = &(x4a->tbl[x4a->count++]);
+ np->data = data;
+ if( x4a->ht[h] ) x4a->ht[h]->from = &(np->next);
+ np->next = x4a->ht[h];
+ x4a->ht[h] = np;
+ np->from = &(x4a->ht[h]);
+ return 1;
+}
+
+/* Return a pointer to data assigned to the given key. Return NULL
+** if no such key. */
+struct config *Configtable_find(key)
+struct config *key;
+{
+ int h;
+ x4node *np;
+
+ if( x4a==0 ) return 0;
+ h = confighash(key) & (x4a->size-1);
+ np = x4a->ht[h];
+ while( np ){
+ if( Configcmp(np->data,key)==0 ) break;
+ np = np->next;
+ }
+ return np ? np->data : 0;
+}
+
+/* Remove all data from the table. Pass each data to the function "f"
+** as it is removed. ("f" may be null to avoid this step.) */
+void Configtable_clear(f)
+int(*f)(/* struct config * */);
+{
+ int i;
+ if( x4a==0 || x4a->count==0 ) return;
+ if( f ) for(i=0; i<x4a->count; i++) (*f)(x4a->tbl[i].data);
+ for(i=0; i<x4a->size; i++) x4a->ht[i] = 0;
+ x4a->count = 0;
+ return;
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/tool/lempar.c b/usr/src/cmd/svc/configd/sqlite/tool/lempar.c
new file mode 100644
index 0000000000..dcdf5fe31a
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/tool/lempar.c
@@ -0,0 +1,690 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/* Driver template for the LEMON parser generator.
+** The author disclaims copyright to this source code.
+*/
+/* First off, code is included which follows the "include" declaration
+** in the input file. */
+#include <stdio.h>
+%%
+/* Next is all token values, in a form suitable for use by makeheaders.
+** This section will be null unless lemon is run with the -m switch.
+*/
+/*
+** These constants (all generated automatically by the parser generator)
+** specify the various kinds of tokens (terminals) that the parser
+** understands.
+**
+** Each symbol here is a terminal symbol in the grammar.
+*/
+%%
+/* Make sure the INTERFACE macro is defined.
+*/
+#ifndef INTERFACE
+# define INTERFACE 1
+#endif
+/* The next thing included is a series of defines which control
+** various aspects of the generated parser.
+** YYCODETYPE is the data type used for storing terminal
+** and nonterminal numbers. "unsigned char" is
+** used if there are fewer than 250 terminals
+** and nonterminals. "int" is used otherwise.
+** YYNOCODE is a number of type YYCODETYPE which corresponds
+** to no legal terminal or nonterminal number. This
+** number is used to fill in empty slots of the hash
+** table.
+** YYFALLBACK If defined, this indicates that one or more tokens
+** have fall-back values which should be used if the
+** original value of the token will not parse.
+** YYACTIONTYPE is the data type used for storing terminal
+** and nonterminal numbers. "unsigned char" is
+** used if there are fewer than 250 rules and
+** states combined. "int" is used otherwise.
+** ParseTOKENTYPE is the data type used for minor tokens given
+** directly to the parser from the tokenizer.
+** YYMINORTYPE is the data type used for all minor tokens.
+** This is typically a union of many types, one of
+** which is ParseTOKENTYPE. The entry in the union
+** for base tokens is called "yy0".
+** YYSTACKDEPTH is the maximum depth of the parser's stack.
+** ParseARG_SDECL A static variable declaration for the %extra_argument
+** ParseARG_PDECL A parameter declaration for the %extra_argument
+** ParseARG_STORE Code to store %extra_argument into yypParser
+** ParseARG_FETCH Code to extract %extra_argument from yypParser
+** YYNSTATE the combined number of states.
+** YYNRULE the number of rules in the grammar
+** YYERRORSYMBOL is the code number of the error symbol. If not
+** defined, then do no error processing.
+*/
+%%
+#define YY_NO_ACTION (YYNSTATE+YYNRULE+2)
+#define YY_ACCEPT_ACTION (YYNSTATE+YYNRULE+1)
+#define YY_ERROR_ACTION (YYNSTATE+YYNRULE)
+
+/* Next are the tables used to determine what action to take based on the
+** current state and lookahead token. These tables are used to implement
+** functions that take a state number and lookahead value and return an
+** action integer.
+**
+** Suppose the action integer is N. Then the action is determined as
+** follows
+**
+** 0 <= N < YYNSTATE Shift N. That is, push the lookahead
+** token onto the stack and goto state N.
+**
+** YYNSTATE <= N < YYNSTATE+YYNRULE Reduce by rule N-YYNSTATE.
+**
+** N == YYNSTATE+YYNRULE A syntax error has occurred.
+**
+** N == YYNSTATE+YYNRULE+1 The parser accepts its input.
+**
+** N == YYNSTATE+YYNRULE+2 No such action. Denotes unused
+** slots in the yy_action[] table.
+**
+** The action table is constructed as a single large table named yy_action[].
+** Given state S and lookahead X, the action is computed as
+**
+** yy_action[ yy_shift_ofst[S] + X ]
+**
+** If the index value yy_shift_ofst[S]+X is out of range or if the value
+** yy_lookahead[yy_shift_ofst[S]+X] is not equal to X or if yy_shift_ofst[S]
+** is equal to YY_SHIFT_USE_DFLT, it means that the action is not in the table
+** and that yy_default[S] should be used instead.
+**
+** The formula above is for computing the action when the lookahead is
+** a terminal symbol. If the lookahead is a non-terminal (as occurs after
+** a reduce action) then the yy_reduce_ofst[] array is used in place of
+** the yy_shift_ofst[] array and YY_REDUCE_USE_DFLT is used in place of
+** YY_SHIFT_USE_DFLT.
+**
+** The following are the tables generated in this section:
+**
+** yy_action[] A single table containing all actions.
+** yy_lookahead[] A table containing the lookahead for each entry in
+** yy_action. Used to detect hash collisions.
+** yy_shift_ofst[] For each state, the offset into yy_action for
+** shifting terminals.
+** yy_reduce_ofst[] For each state, the offset into yy_action for
+** shifting non-terminals after a reduce.
+** yy_default[] Default action for each state.
+*/
+%%
+#define YY_SZ_ACTTAB (sizeof(yy_action)/sizeof(yy_action[0]))
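+
+/* A worked example of the lookup described above (the numbers are made up,
+** purely for illustration, and the YY_SHIFT_USE_DFLT special case is
+** ignored).  Suppose yy_shift_ofst[S]==12 for the current state S and the
+** lookahead token X has code 5.  Then:
+**
+**     i = 12 + 5;
+**     if( i>=0 && i<YY_SZ_ACTTAB && yy_lookahead[i]==5 ){
+**       act = yy_action[i];
+**     }else{
+**       act = yy_default[S];
+**     }
+**
+** This is exactly the computation performed by yy_find_shift_action()
+** further below.
+*/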
+
+/* The next table maps tokens into fallback tokens. If a construct
+** like the following:
+**
+** %fallback ID X Y Z.
+**
+** appears in the grammar, then ID becomes a fallback token for X, Y,
+** and Z. Whenever one of the tokens X, Y, or Z is input to the parser
+** but it does not parse, the type of the token is changed to ID and
+** the parse is retried before an error is thrown.
+*/
+#ifdef YYFALLBACK
+static const YYCODETYPE yyFallback[] = {
+%%
+};
+#endif /* YYFALLBACK */
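+
+/* For illustration only: with the hypothetical directive
+**
+**     %fallback ID X Y Z.
+**
+** the slots of yyFallback[] for X, Y, and Z would hold the token code of
+** ID, and every token without a fallback would hold 0.
+*/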
+
+/* The following structure represents a single element of the
+** parser's stack. Information stored includes:
+**
+** + The state number for the parser at this level of the stack.
+**
+** + The value of the token stored at this level of the stack.
+** (In other words, the "major" token.)
+**
+** + The semantic value stored at this level of the stack. This is
+** the information used by the action routines in the grammar.
+** It is sometimes called the "minor" token.
+*/
+struct yyStackEntry {
+ int stateno; /* The state-number */
+ int major; /* The major token value. This is the code
+ ** number for the token at this stack level */
+ YYMINORTYPE minor; /* The user-supplied minor token value. This
+ ** is the value of the token */
+};
+typedef struct yyStackEntry yyStackEntry;
+
+/* The state of the parser is completely contained in an instance of
+** the following structure */
+struct yyParser {
+ int yyidx; /* Index of top element in stack */
+ int yyerrcnt; /* Shifts left before leaving error-recovery mode */
+ ParseARG_SDECL /* A place to hold %extra_argument */
+ yyStackEntry yystack[YYSTACKDEPTH]; /* The parser's stack */
+};
+typedef struct yyParser yyParser;
+
+#ifndef NDEBUG
+#include <stdio.h>
+static FILE *yyTraceFILE = 0;
+static char *yyTracePrompt = 0;
+#endif /* NDEBUG */
+
+#ifndef NDEBUG
+/*
+** Turn parser tracing on by giving a stream to which to write the trace
+** and a prompt to preface each trace message. Tracing is turned off
+** by making either argument NULL
+**
+** Inputs:
+** <ul>
+** <li> A FILE* to which trace output should be written.
+** If NULL, then tracing is turned off.
+** <li> A prefix string written at the beginning of every
+** line of trace output. If NULL, then tracing is
+** turned off.
+** </ul>
+**
+** Outputs:
+** None.
+*/
+void ParseTrace(FILE *TraceFILE, char *zTracePrompt){
+ yyTraceFILE = TraceFILE;
+ yyTracePrompt = zTracePrompt;
+ if( yyTraceFILE==0 ) yyTracePrompt = 0;
+ else if( yyTracePrompt==0 ) yyTraceFILE = 0;
+}
+#endif /* NDEBUG */
+
+#ifndef NDEBUG
+/* For tracing shifts, the names of all terminals and nonterminals
+** are required. The following table supplies these names */
+static const char *yyTokenName[] = {
+%%
+};
+#endif /* NDEBUG */
+
+#ifndef NDEBUG
+/* For tracing reduce actions, the names of all rules are required.
+*/
+static const char *yyRuleName[] = {
+%%
+};
+#endif /* NDEBUG */
+
+/*
+** This function returns the symbolic name associated with a token
+** value.
+*/
+const char *ParseTokenName(int tokenType){
+#ifndef NDEBUG
+ if( tokenType>0 && tokenType<(sizeof(yyTokenName)/sizeof(yyTokenName[0])) ){
+ return yyTokenName[tokenType];
+ }else{
+ return "Unknown";
+ }
+#else
+ return "";
+#endif
+}
+
+/*
+** This function allocates a new parser.
+** The only argument is a pointer to a function which works like
+** malloc.
+**
+** Inputs:
+** A pointer to the function used to allocate memory.
+**
+** Outputs:
+** A pointer to a parser. This pointer is used in subsequent calls
+** to Parse and ParseFree.
+*/
+void *ParseAlloc(void *(*mallocProc)(size_t)){
+ yyParser *pParser;
+ pParser = (yyParser*)(*mallocProc)( (size_t)sizeof(yyParser) );
+ if( pParser ){
+ pParser->yyidx = -1;
+ }
+ return pParser;
+}
+
+/* The following function deletes the value associated with a
+** symbol. The symbol can be either a terminal or nonterminal.
+** "yymajor" is the symbol code, and "yypminor" is a pointer to
+** the value.
+*/
+static void yy_destructor(YYCODETYPE yymajor, YYMINORTYPE *yypminor){
+ switch( yymajor ){
+ /* Here are inserted the actions which take place when a
+ ** terminal or non-terminal is destroyed. This can happen
+ ** when the symbol is popped from the stack during a
+ ** reduce or during error processing or when a parser is
+ ** being destroyed before it is finished parsing.
+ **
+ ** Note: during a reduce, the only symbols destroyed are those
+ ** which appear on the RHS of the rule, but which are not used
+ ** inside the C code.
+ */
+%%
+ default: break; /* If no destructor action specified: do nothing */
+ }
+}
+
+/*
+** Pop the parser's stack once.
+**
+** If there is a destructor routine associated with the token which
+** is popped from the stack, then call it.
+**
+** Return the major token number for the symbol popped.
+*/
+static int yy_pop_parser_stack(yyParser *pParser){
+ YYCODETYPE yymajor;
+ yyStackEntry *yytos = &pParser->yystack[pParser->yyidx];
+
+ if( pParser->yyidx<0 ) return 0;
+#ifndef NDEBUG
+ if( yyTraceFILE && pParser->yyidx>=0 ){
+ fprintf(yyTraceFILE,"%sPopping %s\n",
+ yyTracePrompt,
+ yyTokenName[yytos->major]);
+ }
+#endif
+ yymajor = yytos->major;
+ yy_destructor( yymajor, &yytos->minor);
+ pParser->yyidx--;
+ return yymajor;
+}
+
+/*
+** Deallocate and destroy a parser. Destructors are all called for
+** all stack elements before shutting the parser down.
+**
+** Inputs:
+** <ul>
+** <li> A pointer to the parser. This should be a pointer
+** obtained from ParseAlloc.
+** <li> A pointer to a function used to reclaim memory obtained
+** from malloc.
+** </ul>
+*/
+void ParseFree(
+ void *p, /* The parser to be deleted */
+ void (*freeProc)(void*) /* Function used to reclaim memory */
+){
+ yyParser *pParser = (yyParser*)p;
+ if( pParser==0 ) return;
+ while( pParser->yyidx>=0 ) yy_pop_parser_stack(pParser);
+ (*freeProc)((void*)pParser);
+}
+
+/*
+** Find the appropriate action for a parser given the terminal
+** look-ahead token iLookAhead.
+**
+** If the look-ahead token is YYNOCODE, then check to see if the action is
+** independent of the look-ahead. If it is, return the action, otherwise
+** return YY_NO_ACTION.
+*/
+static int yy_find_shift_action(
+ yyParser *pParser, /* The parser */
+ int iLookAhead /* The look-ahead token */
+){
+ int i;
+ int stateno = pParser->yystack[pParser->yyidx].stateno;
+
+ /* if( pParser->yyidx<0 ) return YY_NO_ACTION; */
+ i = yy_shift_ofst[stateno];
+ if( i==YY_SHIFT_USE_DFLT ){
+ return yy_default[stateno];
+ }
+ if( iLookAhead==YYNOCODE ){
+ return YY_NO_ACTION;
+ }
+ i += iLookAhead;
+ if( i<0 || i>=YY_SZ_ACTTAB || yy_lookahead[i]!=iLookAhead ){
+#ifdef YYFALLBACK
+ int iFallback; /* Fallback token */
+ if( iLookAhead<sizeof(yyFallback)/sizeof(yyFallback[0])
+ && (iFallback = yyFallback[iLookAhead])!=0 ){
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE, "%sFALLBACK %s => %s\n",
+ yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[iFallback]);
+ }
+#endif
+ return yy_find_shift_action(pParser, iFallback);
+ }
+#endif
+ return yy_default[stateno];
+ }else{
+ return yy_action[i];
+ }
+}
+
+/*
+** Find the appropriate action for a parser given the non-terminal
+** look-ahead token iLookAhead.
+**
+** If the look-ahead token is YYNOCODE, then check to see if the action is
+** independent of the look-ahead. If it is, return the action, otherwise
+** return YY_NO_ACTION.
+*/
+static int yy_find_reduce_action(
+ yyParser *pParser, /* The parser */
+ int iLookAhead /* The look-ahead token */
+){
+ int i;
+ int stateno = pParser->yystack[pParser->yyidx].stateno;
+
+ i = yy_reduce_ofst[stateno];
+ if( i==YY_REDUCE_USE_DFLT ){
+ return yy_default[stateno];
+ }
+ if( iLookAhead==YYNOCODE ){
+ return YY_NO_ACTION;
+ }
+ i += iLookAhead;
+ if( i<0 || i>=YY_SZ_ACTTAB || yy_lookahead[i]!=iLookAhead ){
+ return yy_default[stateno];
+ }else{
+ return yy_action[i];
+ }
+}
+
+/*
+** Perform a shift action.
+*/
+static void yy_shift(
+ yyParser *yypParser, /* The parser to be shifted */
+ int yyNewState, /* The new state to shift in */
+ int yyMajor, /* The major token to shift in */
+ YYMINORTYPE *yypMinor /* Pointer to the minor token to shift in */
+){
+ yyStackEntry *yytos;
+ yypParser->yyidx++;
+ if( yypParser->yyidx>=YYSTACKDEPTH ){
+ ParseARG_FETCH;
+ yypParser->yyidx--;
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sStack Overflow!\n",yyTracePrompt);
+ }
+#endif
+ while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser);
+ /* Here code is inserted which will execute if the parser
+ ** stack ever overflows */
+%%
+ ParseARG_STORE; /* Suppress warning about unused %extra_argument var */
+ return;
+ }
+ yytos = &yypParser->yystack[yypParser->yyidx];
+ yytos->stateno = yyNewState;
+ yytos->major = yyMajor;
+ yytos->minor = *yypMinor;
+#ifndef NDEBUG
+ if( yyTraceFILE && yypParser->yyidx>0 ){
+ int i;
+ fprintf(yyTraceFILE,"%sShift %d\n",yyTracePrompt,yyNewState);
+ fprintf(yyTraceFILE,"%sStack:",yyTracePrompt);
+ for(i=1; i<=yypParser->yyidx; i++)
+ fprintf(yyTraceFILE," %s",yyTokenName[yypParser->yystack[i].major]);
+ fprintf(yyTraceFILE,"\n");
+ }
+#endif
+}
+
+/* The following table contains information about every rule that
+** is used during the reduce.
+*/
+static struct {
+ YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */
+ unsigned char nrhs; /* Number of right-hand side symbols in the rule */
+} yyRuleInfo[] = {
+%%
+};
+
+static void yy_accept(yyParser*); /* Forward Declaration */
+
+/*
+** Perform a reduce action and the shift that must immediately
+** follow the reduce.
+*/
+static void yy_reduce(
+ yyParser *yypParser, /* The parser */
+ int yyruleno /* Number of the rule by which to reduce */
+){
+ int yygoto; /* The next state */
+ int yyact; /* The next action */
+ YYMINORTYPE yygotominor; /* The LHS of the rule reduced */
+ yyStackEntry *yymsp; /* The top of the parser's stack */
+ int yysize; /* Amount to pop the stack */
+ ParseARG_FETCH;
+ yymsp = &yypParser->yystack[yypParser->yyidx];
+#ifndef NDEBUG
+ if( yyTraceFILE && yyruleno>=0
+ && yyruleno<sizeof(yyRuleName)/sizeof(yyRuleName[0]) ){
+ fprintf(yyTraceFILE, "%sReduce [%s].\n", yyTracePrompt,
+ yyRuleName[yyruleno]);
+ }
+#endif /* NDEBUG */
+
+ switch( yyruleno ){
+ /* Beginning here are the reduction cases. A typical example
+ ** follows:
+ ** case 0:
+ ** #line <lineno> <grammarfile>
+ ** { ... } // User supplied code
+ ** #line <lineno> <thisfile>
+ ** break;
+ */
+%%
+ };
+ yygoto = yyRuleInfo[yyruleno].lhs;
+ yysize = yyRuleInfo[yyruleno].nrhs;
+ yypParser->yyidx -= yysize;
+ yyact = yy_find_reduce_action(yypParser,yygoto);
+ if( yyact < YYNSTATE ){
+ yy_shift(yypParser,yyact,yygoto,&yygotominor);
+ }else if( yyact == YYNSTATE + YYNRULE + 1 ){
+ yy_accept(yypParser);
+ }
+}
+
+/*
+** The following code executes when the parse fails
+*/
+static void yy_parse_failed(
+ yyParser *yypParser /* The parser */
+){
+ ParseARG_FETCH;
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sFail!\n",yyTracePrompt);
+ }
+#endif
+ while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser);
+ /* Here code is inserted which will be executed whenever the
+ ** parser fails */
+%%
+ ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */
+}
+
+/*
+** The following code executes when a syntax error first occurs.
+*/
+static void yy_syntax_error(
+ yyParser *yypParser, /* The parser */
+ int yymajor, /* The major type of the error token */
+ YYMINORTYPE yyminor /* The minor type of the error token */
+){
+ ParseARG_FETCH;
+#define TOKEN (yyminor.yy0)
+%%
+ ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */
+}
+
+/*
+** The following is executed when the parser accepts
+*/
+static void yy_accept(
+ yyParser *yypParser /* The parser */
+){
+ ParseARG_FETCH;
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sAccept!\n",yyTracePrompt);
+ }
+#endif
+ while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser);
+ /* Here code is inserted which will be executed whenever the
+ ** parser accepts */
+%%
+ ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */
+}
+
+/* The main parser program.
+** The first argument is a pointer to a structure obtained from
+** "ParseAlloc" which describes the current state of the parser.
+** The second argument is the major token number. The third is
+** the minor token. The fourth optional argument is whatever the
+** user wants (and specified in the grammar) and is available for
+** use by the action routines.
+**
+** Inputs:
+** <ul>
+** <li> A pointer to the parser (an opaque structure.)
+** <li> The major token number.
+** <li> The minor token number.
+** <li> An optional argument of a grammar-specified type.
+** </ul>
+**
+** Outputs:
+** None.
+*/
+void Parse(
+ void *yyp, /* The parser */
+ int yymajor, /* The major token code number */
+ ParseTOKENTYPE yyminor /* The value for the token */
+ ParseARG_PDECL /* Optional %extra_argument parameter */
+){
+ YYMINORTYPE yyminorunion;
+ int yyact; /* The parser action. */
+ int yyendofinput; /* True if we are at the end of input */
+ int yyerrorhit = 0; /* True if yymajor has invoked an error */
+ yyParser *yypParser; /* The parser */
+
+ /* (re)initialize the parser, if necessary */
+ yypParser = (yyParser*)yyp;
+ if( yypParser->yyidx<0 ){
+ if( yymajor==0 ) return;
+ yypParser->yyidx = 0;
+ yypParser->yyerrcnt = -1;
+ yypParser->yystack[0].stateno = 0;
+ yypParser->yystack[0].major = 0;
+ }
+ yyminorunion.yy0 = yyminor;
+ yyendofinput = (yymajor==0);
+ ParseARG_STORE;
+
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sInput %s\n",yyTracePrompt,yyTokenName[yymajor]);
+ }
+#endif
+
+ do{
+ yyact = yy_find_shift_action(yypParser,yymajor);
+ if( yyact<YYNSTATE ){
+ yy_shift(yypParser,yyact,yymajor,&yyminorunion);
+ yypParser->yyerrcnt--;
+ if( yyendofinput && yypParser->yyidx>=0 ){
+ yymajor = 0;
+ }else{
+ yymajor = YYNOCODE;
+ }
+ }else if( yyact < YYNSTATE + YYNRULE ){
+ yy_reduce(yypParser,yyact-YYNSTATE);
+ }else if( yyact == YY_ERROR_ACTION ){
+ int yymx;
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sSyntax Error!\n",yyTracePrompt);
+ }
+#endif
+#ifdef YYERRORSYMBOL
+ /* A syntax error has occurred.
+ ** The response to an error depends upon whether or not the
+ ** grammar defines an error token "ERROR".
+ **
+ ** This is what we do if the grammar does define ERROR:
+ **
+ ** * Call the %syntax_error function.
+ **
+ ** * Begin popping the stack until we enter a state where
+ ** it is legal to shift the error symbol, then shift
+ ** the error symbol.
+ **
+ ** * Set the error count to three.
+ **
+ ** * Begin accepting and shifting new tokens. No new error
+ ** processing will occur until three tokens have been
+ ** shifted successfully.
+ **
+ */
+ if( yypParser->yyerrcnt<0 ){
+ yy_syntax_error(yypParser,yymajor,yyminorunion);
+ }
+ yymx = yypParser->yystack[yypParser->yyidx].major;
+ if( yymx==YYERRORSYMBOL || yyerrorhit ){
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sDiscard input token %s\n",
+ yyTracePrompt,yyTokenName[yymajor]);
+ }
+#endif
+ yy_destructor(yymajor,&yyminorunion);
+ yymajor = YYNOCODE;
+ }else{
+ while(
+ yypParser->yyidx >= 0 &&
+ yymx != YYERRORSYMBOL &&
+ (yyact = yy_find_shift_action(yypParser,YYERRORSYMBOL)) >= YYNSTATE
+ ){
+ yy_pop_parser_stack(yypParser);
+ }
+ if( yypParser->yyidx < 0 || yymajor==0 ){
+ yy_destructor(yymajor,&yyminorunion);
+ yy_parse_failed(yypParser);
+ yymajor = YYNOCODE;
+ }else if( yymx!=YYERRORSYMBOL ){
+ YYMINORTYPE u2;
+ u2.YYERRSYMDT = 0;
+ yy_shift(yypParser,yyact,YYERRORSYMBOL,&u2);
+ }
+ }
+ yypParser->yyerrcnt = 3;
+ yyerrorhit = 1;
+#else /* YYERRORSYMBOL is not defined */
+ /* This is what we do if the grammar does not define ERROR:
+ **
+ ** * Report an error message, and throw away the input token.
+ **
+ ** * If the input token is $, then fail the parse.
+ **
+ ** As before, subsequent error messages are suppressed until
+ ** three input tokens have been successfully shifted.
+ */
+ if( yypParser->yyerrcnt<=0 ){
+ yy_syntax_error(yypParser,yymajor,yyminorunion);
+ }
+ yypParser->yyerrcnt = 3;
+ yy_destructor(yymajor,&yyminorunion);
+ if( yyendofinput ){
+ yy_parse_failed(yypParser);
+ }
+ yymajor = YYNOCODE;
+#endif
+ }else{
+ yy_accept(yypParser);
+ yymajor = YYNOCODE;
+ }
+ }while( yymajor!=YYNOCODE && yypParser->yyidx>=0 );
+ return;
+}
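+
+/* A minimal sketch of a typical caller (illustrative only; GetNextToken()
+** and its arguments are hypothetical and not part of this template):
+**
+**     void *p = ParseAlloc( malloc );
+**     while( (tok = GetNextToken(&minor))!=0 ){
+**       Parse(p, tok, minor);
+**     }
+**     Parse(p, 0, minor);            (token number 0 signals end of input)
+**     ParseFree(p, free);
+*/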
diff --git a/usr/src/cmd/svc/configd/sqlite/tool/memleak.awk b/usr/src/cmd/svc/configd/sqlite/tool/memleak.awk
new file mode 100644
index 0000000000..fa2a22ef89
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/tool/memleak.awk
@@ -0,0 +1,32 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#
+# This script looks for memory leaks by analyzing the output of "sqlite"
+# when compiled with the MEMORY_DEBUG=2 option.
+#
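+# A typical invocation might look like this (the file name is only an
+# example):
+#
+#     awk -f memleak.awk mem.out
+#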
+/[0-9]+ malloc / {
+ mem[$6] = $0
+}
+/[0-9]+ realloc / {
+ mem[$8] = "";
+ mem[$10] = $0
+}
+/[0-9]+ free / {
+ if (mem[$6]=="") {
+ print "*** free without a malloc at",$6
+ }
+ mem[$6] = "";
+ str[$6] = ""
+}
+/^string at / {
+ addr = $4
+ sub("string at " addr " is ","")
+ str[addr] = $0
+}
+END {
+ for(addr in mem){
+ if( mem[addr]=="" ) continue
+ print mem[addr], str[addr]
+ }
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/tool/memleak2.awk b/usr/src/cmd/svc/configd/sqlite/tool/memleak2.awk
new file mode 100644
index 0000000000..51b3a1fa7a
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/tool/memleak2.awk
@@ -0,0 +1,32 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# This AWK script reads the output of testfixture when compiled for memory
+# debugging. It generates SQL commands that can be fed into an sqlite
+# instance to determine what memory is never freed. A typical usage would
+# be as follows:
+#
+# make -f memleak.mk fulltest 2>mem.out
+# awk -f ../sqlite/tool/memleak2.awk mem.out | ./sqlite :memory:
+#
+# The job performed by this script is the same as that done by memleak.awk.
+# The difference is that this script uses much less memory when the size
+# of the mem.out file is huge.
+#
+BEGIN {
+ print "CREATE TABLE mem(loc INTEGER PRIMARY KEY, src);"
+}
+/[0-9]+ malloc / {
+ print "INSERT INTO mem VALUES(" strtonum($6) ",'" $0 "');"
+}
+/[0-9]+ realloc / {
+ print "INSERT INTO mem VALUES(" strtonum($10) \
+ ",(SELECT src FROM mem WHERE loc=" strtonum($8) "));"
+ print "DELETE FROM mem WHERE loc=" strtonum($8) ";"
+}
+/[0-9]+ free / {
+ print "DELETE FROM mem WHERE loc=" strtonum($6) ";"
+}
+END {
+ print "SELECT src FROM mem;"
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/tool/mkopts.tcl b/usr/src/cmd/svc/configd/sqlite/tool/mkopts.tcl
new file mode 100644
index 0000000000..fb906f5df7
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/tool/mkopts.tcl
@@ -0,0 +1,54 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#!/usr/bin/tclsh
+#
+# This script is used to generate the array of strings and the enum
+# that appear at the beginning of the C code implementation of a
+# TCL command and that define the available subcommands for that
+# TCL command.
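+#
+# For illustration only (the subcommand names are hypothetical): given the
+# input tokens "DB_open, DB_close" on stdin, the script emits roughly
+#
+#     static const char *DB_strs[] = { "close", "open", 0 };
+#     enum DB_enum { DB_CLOSE, DB_OPEN, };
+#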
+
+set prefix {}
+while {![eof stdin]} {
+ set line [gets stdin]
+ if {$line==""} continue
+ regsub -all "\[ \t\n,\]+" [string trim $line] { } line
+ foreach token [split $line { }] {
+ if {![regexp {(([a-zA-Z]+)_)?([_a-zA-Z]+)} $token all px p2 name]} continue
+ lappend namelist [string tolower $name]
+ if {$px!=""} {set prefix $p2}
+ }
+}
+
+puts " static const char *${prefix}_strs\[\] = \173"
+set col 0
+proc put_item x {
+ global col
+ if {$col==0} {puts -nonewline " "}
+ if {$col<2} {
+ puts -nonewline [format " %-21s" $x]
+ incr col
+ } else {
+ puts $x
+ set col 0
+ }
+}
+proc finalize {} {
+ global col
+ if {$col>0} {puts {}}
+ set col 0
+}
+
+foreach name [lsort $namelist] {
+ put_item \"$name\",
+}
+put_item 0
+finalize
+puts " \175;"
+puts " enum ${prefix}_enum \173"
+foreach name [lsort $namelist] {
+ regsub -all {@} $name {} name
+ put_item ${prefix}_[string toupper $name],
+}
+finalize
+puts " \175;"
diff --git a/usr/src/cmd/svc/configd/sqlite/tool/opcodeDoc.awk b/usr/src/cmd/svc/configd/sqlite/tool/opcodeDoc.awk
new file mode 100644
index 0000000000..19f824bfac
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/tool/opcodeDoc.awk
@@ -0,0 +1,26 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#
+# Extract opcode documentation from sqliteVdbe.c and generate HTML
+#
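+# A typical invocation might look like this (the paths are only examples):
+#
+#     awk -f opcodeDoc.awk sqliteVdbe.c > opcodes.html
+#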
+BEGIN {
+ print "<html><body bgcolor=white>"
+ print "<h1>SQLite Virtual Database Engine Opcodes</h1>"
+ print "<table>"
+}
+/ Opcode: /,/\*\// {
+ if( $2=="Opcode:" ){
+ printf "<tr><td>%s&nbsp;%s&nbsp;%s&nbsp;%s</td>\n<td>\n", $3, $4, $5, $6
+ }else if( $1=="*/" ){
+ printf "</td></tr>\n"
+ }else if( NF>1 ){
+ sub(/^ *\*\* /,"")
+ gsub(/</,"&lt;")
+ gsub(/&/,"&amp;")
+ print
+ }
+}
+END {
+ print "</table></body></html>"
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/tool/report1.txt b/usr/src/cmd/svc/configd/sqlite/tool/report1.txt
new file mode 100644
index 0000000000..d1eb9364a0
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/tool/report1.txt
@@ -0,0 +1,69 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+The SQL database used for ACD contains 113 tables and indices implemented
+in GDBM. The following are statistics on the sizes of keys and data
+within these tables and indices.
+
+Entries: 962080
+Size: 45573853
+Avg Size: 48
+Key Size: 11045299
+Avg Key Size: 12
+Max Key Size: 99
+
+
+ Size of key Cumulative
+ and data Instances Percentage
+------------ ---------- -----------
+ 0..8 266 0%
+ 9..12 5485 0%
+ 13..16 73633 8%
+ 17..24 180918 27%
+ 25..32 209823 48%
+ 33..40 148995 64%
+ 41..48 76304 72%
+ 49..56 14346 73%
+ 57..64 15725 75%
+ 65..80 44916 80%
+ 81..96 127815 93%
+ 97..112 34769 96%
+ 113..128 13314 98%
+ 129..144 8098 99%
+ 145..160 3355 99%
+ 161..176 1159 99%
+ 177..192 629 99%
+ 193..208 221 99%
+ 209..224 210 99%
+ 225..240 129 99%
+ 241..256 57 99%
+ 257..288 496 99%
+ 289..320 60 99%
+ 321..352 37 99%
+ 353..384 46 99%
+ 385..416 22 99%
+ 417..448 24 99%
+ 449..480 26 99%
+ 481..512 27 99%
+ 513..1024 471 99%
+ 1025..2048 389 99%
+ 2049..4096 182 99%
+ 4097..8192 74 99%
+ 8193..16384 34 99%
+16385..32768 17 99%
+32769..65536 5 99%
+65537..131073 3 100%
+
+
+This information is gathered to help design the new built-in
+backend for sqlite 2.0. Note in particular that 99% of all
+database entries have a combined key and data size of less than
+144 bytes. So if a leaf node in the new database is able to
+store 144 bytes of combined key and data, only 1% of the leaves
+will require overflow pages. Furthermore, note that no key
+is larger than 99 bytes, so the key will never be on an
+overflow page.
+
+The average combined size of key+data is 48. Add in 16 bytes of
+overhead for a total of 64. That means that a 1K page will
+store (on average) about 16 entries.
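+(That is, roughly 1024 bytes per page / 64 bytes per entry = 16 entries.)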
diff --git a/usr/src/cmd/svc/configd/sqlite/tool/showdb.c b/usr/src/cmd/svc/configd/sqlite/tool/showdb.c
new file mode 100644
index 0000000000..6df9a73df8
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/tool/showdb.c
@@ -0,0 +1,88 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** A utility for printing all or part of an SQLite database file.
+*/
+#include <stdio.h>
+#include <ctype.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+
+static int pagesize = 1024;
+static int db = -1;
+static int mxPage = 0;
+
+static void out_of_memory(void){
+ fprintf(stderr,"Out of memory...\n");
+ exit(1);
+}
+
+static void print_page(int iPg){
+ unsigned char *aData;
+ int i, j;
+ aData = malloc(pagesize);
+ if( aData==0 ) out_of_memory();
+ lseek(db, (iPg-1)*pagesize, SEEK_SET);
+ read(db, aData, pagesize);
+ fprintf(stdout, "Page %d:\n", iPg);
+ for(i=0; i<pagesize; i += 16){
+ fprintf(stdout, " %03x: ",i);
+ for(j=0; j<16; j++){
+ fprintf(stdout,"%02x ", aData[i+j]);
+ }
+ for(j=0; j<16; j++){
+ fprintf(stdout,"%c", isprint(aData[i+j]) ? aData[i+j] : '.');
+ }
+ fprintf(stdout,"\n");
+ }
+ free(aData);
+}
+
+int main(int argc, char **argv){
+ struct stat sbuf;
+ if( argc<2 ){
+ fprintf(stderr,"Usage: %s FILENAME ?PAGE? ...\n", argv[0]);
+ exit(1);
+ }
+ db = open(argv[1], O_RDONLY);
+ if( db<0 ){
+ fprintf(stderr,"%s: can't open %s\n", argv[0], argv[1]);
+ exit(1);
+ }
+ fstat(db, &sbuf);
+ mxPage = sbuf.st_size/pagesize + 1;
+ if( argc==2 ){
+ int i;
+ for(i=1; i<=mxPage; i++) print_page(i);
+ }else{
+ int i;
+ for(i=2; i<argc; i++){
+ int iStart, iEnd;
+ char *zLeft;
+ iStart = strtol(argv[i], &zLeft, 0);
+ if( zLeft && strcmp(zLeft,"..end")==0 ){
+ iEnd = mxPage;
+ }else if( zLeft && zLeft[0]=='.' && zLeft[1]=='.' ){
+ iEnd = strtol(&zLeft[2], 0, 0);
+ }else{
+ iEnd = iStart;
+ }
+ if( iStart<1 || iEnd<iStart || iEnd>mxPage ){
+ fprintf(stderr,
+ "Page argument should be LOWER?..UPPER?. Range 1 to %d\n",
+ mxPage);
+ exit(1);
+ }
+ while( iStart<=iEnd ){
+ print_page(iStart);
+ iStart++;
+ }
+ }
+ }
+ close(db);
+}
diff --git a/usr/src/cmd/svc/configd/sqlite/tool/showjournal.c b/usr/src/cmd/svc/configd/sqlite/tool/showjournal.c
new file mode 100644
index 0000000000..61a9327ae6
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/tool/showjournal.c
@@ -0,0 +1,79 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+** A utility for printing an SQLite database journal.
+*/
+#include <stdio.h>
+#include <ctype.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+
+static int pagesize = 1024;
+static int db = -1;
+static int mxPage = 0;
+
+static void out_of_memory(void){
+ fprintf(stderr,"Out of memory...\n");
+ exit(1);
+}
+
+static void print_page(int iPg){
+ unsigned char *aData;
+ int i, j;
+ aData = malloc(pagesize);
+ if( aData==0 ) out_of_memory();
+ read(db, aData, pagesize);
+ fprintf(stdout, "Page %d:\n", iPg);
+ for(i=0; i<pagesize; i += 16){
+ fprintf(stdout, " %03x: ",i);
+ for(j=0; j<16; j++){
+ fprintf(stdout,"%02x ", aData[i+j]);
+ }
+ for(j=0; j<16; j++){
+ fprintf(stdout,"%c", isprint(aData[i+j]) ? aData[i+j] : '.');
+ }
+ fprintf(stdout,"\n");
+ }
+ free(aData);
+}
+
+int main(int argc, char **argv){
+ struct stat sbuf;
+ unsigned int u;
+ int rc;
+ unsigned char zBuf[10];
+ unsigned char zBuf2[sizeof(u)];
+ if( argc!=2 ){
+ fprintf(stderr,"Usage: %s FILENAME\n", argv[0]);
+ exit(1);
+ }
+ db = open(argv[1], O_RDONLY);
+ if( db<0 ){
+ fprintf(stderr,"%s: can't open %s\n", argv[0], argv[1]);
+ exit(1);
+ }
+ read(db, zBuf, 8);
+ if( zBuf[7]==0xd6 ){
+ read(db, &u, sizeof(u));
+ printf("Records in Journal: %u\n", u);
+ read(db, &u, sizeof(u));
+ printf("Magic Number: 0x%08x\n", u);
+ }
+ read(db, zBuf2, sizeof(zBuf2));
+ u = zBuf2[0]<<24 | zBuf2[1]<<16 | zBuf2[2]<<8 | zBuf2[3];
+ printf("Database Size: %u\n", u);
+ while( read(db, zBuf2, sizeof(zBuf2))==sizeof(zBuf2) ){
+ u = zBuf2[0]<<24 | zBuf2[1]<<16 | zBuf2[2]<<8 | zBuf2[3];
+ print_page(u);
+ if( zBuf[7]==0xd6 ){
+ read(db, &u, sizeof(u));
+ printf("Checksum: 0x%08x\n", u);
+ }
+ }
+ close(db);
+}
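
The journal reader above recovers the database size and each page number as
4-byte big-endian fields, assembling them a byte at a time so the result does
not depend on the host byte order. A standalone sketch of that decode (the
helper name is invented for illustration):

    #include <stdio.h>

    /* Assemble a 4-byte big-endian field, as showjournal.c does inline. */
    static unsigned int
    decode_be32(const unsigned char *p)
    {
        return ((unsigned int)p[0] << 24 | (unsigned int)p[1] << 16 |
            (unsigned int)p[2] << 8 | (unsigned int)p[3]);
    }

    int
    main(void)
    {
        unsigned char buf[4] = { 0x00, 0x00, 0x04, 0x00 };

        (void) printf("%u\n", decode_be32(buf));    /* prints 1024 */
        return (0);
    }
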
diff --git a/usr/src/cmd/svc/configd/sqlite/tool/space_used.tcl b/usr/src/cmd/svc/configd/sqlite/tool/space_used.tcl
new file mode 100644
index 0000000000..bbcc30b43c
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/tool/space_used.tcl
@@ -0,0 +1,114 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# Run this TCL script using "testfixture" in order to get a report that shows
+# how much disk space a particular database file uses to actually store data
+# versus how much space is unused.
+#
+
+# Get the name of the database to analyze
+#
+if {[llength $argv]!=1} {
+ puts stderr "Usage: $argv0 database-name"
+ exit 1
+}
+set file_to_analyze [lindex $argv 0]
+
+# Open the database
+#
+sqlite db [lindex $argv 0]
+set DB [btree_open [lindex $argv 0]]
+
+# Output the schema for the generated report
+#
+puts \
+{BEGIN;
+CREATE TABLE space_used(
+ name clob, -- Name of a table or index in the database file
+ is_index boolean, -- TRUE if it is an index, false for a table
+ payload int, -- Total amount of data stored in this table or index
+ pri_pages int, -- Number of primary pages used
+ ovfl_pages int, -- Number of overflow pages used
+ pri_unused int, -- Number of unused bytes on primary pages
+ ovfl_unused int -- Number of unused bytes on overflow pages
+);}
+
+# This query will be used to find the root page number for every index and
+# table in the database.
+#
+set sql {
+ SELECT name, type, rootpage FROM sqlite_master
+ UNION ALL
+ SELECT 'sqlite_master', 'table', 2
+ ORDER BY 1
+}
+
+# Initialize variables used for summary statistics.
+#
+set total_size 0
+set total_primary 0
+set total_overflow 0
+set total_unused_primary 0
+set total_unused_ovfl 0
+
+# Analyze every table in the database, one at a time.
+#
+foreach {name type rootpage} [db eval $sql] {
+ set cursor [btree_cursor $DB $rootpage 0]
+ set go [btree_first $cursor]
+ set size 0
+ catch {unset pg_used}
+ set unused_ovfl 0
+ set n_overflow 0
+ while {$go==0} {
+ set payload [btree_payload_size $cursor]
+ incr size $payload
+ set stat [btree_cursor_dump $cursor]
+ set pgno [lindex $stat 0]
+ set freebytes [lindex $stat 4]
+ set pg_used($pgno) $freebytes
+ if {$payload>238} {
+ set n [expr {($payload-238+1019)/1020}]
+ incr n_overflow $n
+ incr unused_ovfl [expr {$n*1020+238-$payload}]
+ }
+ set go [btree_next $cursor]
+ }
+ btree_close_cursor $cursor
+ set n_primary [llength [array names pg_used]]
+ set unused_primary 0
+ foreach x [array names pg_used] {incr unused_primary $pg_used($x)}
+ regsub -all ' $name '' name
+ puts -nonewline "INSERT INTO space_used VALUES('$name'"
+ puts -nonewline ",[expr {$type=="index"}]"
+ puts ",$size,$n_primary,$n_overflow,$unused_primary,$unused_ovfl);"
+ incr total_size $size
+ incr total_primary $n_primary
+ incr total_overflow $n_overflow
+ incr total_unused_primary $unused_primary
+ incr total_unused_ovfl $unused_ovfl
+}
+
+# Output summary statistics:
+#
+puts "-- Total payload size: $total_size"
+puts "-- Total pages used: $total_primary primary and $total_overflow overflow"
+set file_pgcnt [expr {[file size [lindex $argv 0]]/1024}]
+puts -nonewline "-- Total unused bytes on primary pages: $total_unused_primary"
+if {$total_primary>0} {
+ set upp [expr {$total_unused_primary/$total_primary}]
+ puts " (avg $upp bytes/page)"
+} else {
+ puts ""
+}
+puts -nonewline "-- Total unused bytes on overflow pages: $total_unused_ovfl"
+if {$total_overflow>0} {
+ set upp [expr {$total_unused_ovfl/$total_overflow}]
+ puts " (avg $upp bytes/page)"
+} else {
+ puts ""
+}
+set n_free [expr {$file_pgcnt-$total_primary-$total_overflow}]
+if {$n_free>0} {incr n_free -1}
+puts "-- Total pages on freelist: $n_free"
+puts "COMMIT;"
diff --git a/usr/src/cmd/svc/configd/sqlite/tool/spaceanal.tcl b/usr/src/cmd/svc/configd/sqlite/tool/spaceanal.tcl
new file mode 100644
index 0000000000..093a99a648
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/tool/spaceanal.tcl
@@ -0,0 +1,439 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# Run this TCL script using "testfixture" in order to get a report that shows
+# how much disk space a particular database file uses to actually store data
+# versus how much space is unused.
+#
+
+# Get the name of the database to analyze
+#
+if {[llength $argv]!=1} {
+ puts stderr "Usage: $argv0 database-name"
+ exit 1
+}
+set file_to_analyze [lindex $argv 0]
+if {![file exists $file_to_analyze]} {
+ puts stderr "No such file: $file_to_analyze"
+ exit 1
+}
+if {![file readable $file_to_analyze]} {
+ puts stderr "File is not readable: $file_to_analyze"
+ exit 1
+}
+if {[file size $file_to_analyze]<2048} {
+ puts stderr "Empty or malformed database: $file_to_analyze"
+ exit 1
+}
+
+# Open the database
+#
+sqlite db [lindex $argv 0]
+set DB [btree_open [lindex $argv 0]]
+
+# In-memory database for collecting statistics
+#
+sqlite mem :memory:
+set tabledef\
+{CREATE TABLE space_used(
+ name clob, -- Name of a table or index in the database file
+ tblname clob, -- Name of associated table
+ is_index boolean, -- TRUE if it is an index, false for a table
+ nentry int, -- Number of entries in the BTree
+ payload int, -- Total amount of data stored in this table or index
+ mx_payload int, -- Maximum payload size
+ n_ovfl int, -- Number of entries that overflow
+ pri_pages int, -- Number of primary pages used
+ ovfl_pages int, -- Number of overflow pages used
+ pri_unused int, -- Number of unused bytes on primary pages
+ ovfl_unused int -- Number of unused bytes on overflow pages
+);}
+mem eval $tabledef
+
+# This query will be used to find the root page number for every index and
+# table in the database.
+#
+set sql {
+ SELECT name, tbl_name, type, rootpage
+ FROM sqlite_master WHERE type IN ('table','index')
+ UNION ALL
+ SELECT 'sqlite_master', 'sqlite_master', 'table', 2
+ ORDER BY 1
+}
+
+# Analyze every table in the database, one at a time.
+#
+foreach {name tblname type rootpage} [db eval $sql] {
+ puts stderr "Analyzing $name..."
+ set cursor [btree_cursor $DB $rootpage 0]
+ set go [btree_first $cursor]
+ set size 0
+ catch {unset pg_used}
+ set unused_ovfl 0
+ set n_overflow 0
+ set cnt_ovfl 0
+ set n_entry 0
+ set mx_size 0
+ set pg_used($rootpage) 1016
+ while {$go==0} {
+ incr n_entry
+ set payload [btree_payload_size $cursor]
+ incr size $payload
+ set stat [btree_cursor_dump $cursor]
+ set pgno [lindex $stat 0]
+ set freebytes [lindex $stat 4]
+ set pg_used($pgno) $freebytes
+ if {$payload>236} {
+ # if {[lindex $stat 8]==0} {error "overflow is empty with $payload"}
+ set n [expr {($payload-236+1019)/1020}]
+ incr n_overflow $n
+ incr cnt_ovfl
+ incr unused_ovfl [expr {$n*1020+236-$payload}]
+ } else {
+ # if {[lindex $stat 8]!=0} {error "overflow not empty with $payload"}
+ }
+ if {$payload>$mx_size} {set mx_size $payload}
+ set go [btree_next $cursor]
+ }
+ btree_close_cursor $cursor
+ set n_primary [llength [array names pg_used]]
+ set unused_primary 0
+ foreach x [array names pg_used] {incr unused_primary $pg_used($x)}
+ regsub -all ' $name '' name
+ set sql "INSERT INTO space_used VALUES('$name'"
+ regsub -all ' $tblname '' tblname
+ append sql ",'$tblname',[expr {$type=="index"}],$n_entry"
+ append sql ",$size,$mx_size,$cnt_ovfl,"
+ append sql "$n_primary,$n_overflow,$unused_primary,$unused_ovfl);"
+ mem eval $sql
+}
+
+# Generate a single line of output in the statistics section of the
+# report.
+#
+proc statline {title value {extra {}}} {
+ set len [string length $title]
+ set dots [string range {......................................} $len end]
+ set len [string length $value]
+ set sp2 [string range { } $len end]
+ if {$extra ne ""} {
+ set extra " $extra"
+ }
+ puts "$title$dots $value$sp2$extra"
+}
+
+# Generate a formatted percentage value for $num/$denom
+#
+proc percent {num denom} {
+ if {$denom==0.0} {return ""}
+ set v [expr {$num*100.0/$denom}]
+ if {$v>1.0 && $v<99.0} {
+ return [format %4.1f%% $v]
+ } elseif {$v<0.1 || $v>99.9} {
+ return [format %6.3f%% $v]
+ } else {
+ return [format %5.2f%% $v]
+ }
+}
+
+# Generate a subreport that covers some subset of the database.
+# The $where clause determines which subset to analyze.
+#
+proc subreport {title where} {
+ set hit 0
+ mem eval "SELECT sum(nentry) AS nentry, \
+ sum(payload) AS payload, \
+ sum(CASE is_index WHEN 1 THEN 0 ELSE payload-4*nentry END) \
+ AS data, \
+ max(mx_payload) AS mx_payload, \
+ sum(n_ovfl) as n_ovfl, \
+ sum(pri_pages) AS pri_pages, \
+ sum(ovfl_pages) AS ovfl_pages, \
+ sum(pri_unused) AS pri_unused, \
+ sum(ovfl_unused) AS ovfl_unused \
+ FROM space_used WHERE $where" {} {set hit 1}
+ if {!$hit} {return 0}
+ puts ""
+ set len [string length $title]
+ incr len 5
+ set stars "***********************************"
+ append stars $stars
+ set stars [string range $stars $len end]
+ puts "*** $title $stars"
+ puts ""
+ statline "Percentage of total database" \
+ [percent [expr {$pri_pages+$ovfl_pages}] $::file_pgcnt]
+ statline "Number of entries" $nentry
+ set storage [expr {($pri_pages+$ovfl_pages)*1024}]
+ statline "Bytes of storage consumed" $storage
+ statline "Bytes of payload" $payload [percent $payload $storage]
+ statline "Bytes of data" $data [percent $data $storage]
+ set key [expr {$payload-$data}]
+ statline "Bytes of key" $key [percent $key $storage]
+ set avgpay [expr {$nentry>0?$payload/$nentry:0}]
+ statline "Average payload per entry" $avgpay
+ set avgunused [expr {$nentry>0?($pri_unused+$ovfl_unused)/$nentry:0}]
+ statline "Average unused bytes per entry" $avgunused
+ statline "Average fanout" \
+ [format %.2f [expr {$pri_pages==0?0:($nentry+0.0)/$pri_pages}]]
+ statline "Maximum payload per entry" $mx_payload
+ statline "Entries that use overflow" $n_ovfl [percent $n_ovfl $nentry]
+ statline "Total pages used" [set allpgs [expr {$pri_pages+$ovfl_pages}]]
+ statline "Primary pages used" $pri_pages ;# [percent $pri_pages $allpgs]
+ statline "Overflow pages used" $ovfl_pages ;# [percent $ovfl_pages $allpgs]
+ statline "Unused bytes on primary pages" $pri_unused \
+ [percent $pri_unused [expr {$pri_pages*1024}]]
+ statline "Unused bytes on overflow pages" $ovfl_unused \
+ [percent $ovfl_unused [expr {$ovfl_pages*1024}]]
+ set allunused [expr {$ovfl_unused+$pri_unused}]
+ statline "Unused bytes on all pages" $allunused \
+ [percent $allunused [expr {$allpgs*1024}]]
+ return 1
+}
+
+# Output summary statistics:
+#
+puts "/** Disk-Space Utilization Report For $file_to_analyze"
+puts "*** As of [clock format [clock seconds] -format {%Y-%b-%d %H:%M:%S}]"
+puts ""
+set fsize [file size [lindex $argv 0]]
+set file_pgcnt [expr {$fsize/1024}]
+set usedcnt [mem eval {SELECT sum(pri_pages+ovfl_pages) FROM space_used}]
+set freecnt [expr {$file_pgcnt-$usedcnt-1}]
+set freecnt2 [lindex [btree_get_meta $DB] 0]
+statline {Pages in the whole file (measured)} $file_pgcnt
+set file_pgcnt2 [expr {$usedcnt+$freecnt2+1}]
+statline {Pages in the whole file (calculated)} $file_pgcnt2
+statline {Pages that store data} $usedcnt [percent $usedcnt $file_pgcnt]
+statline {Pages on the freelist (per header)}\
+ $freecnt2 [percent $freecnt2 $file_pgcnt]
+statline {Pages on the freelist (calculated)}\
+ $freecnt [percent $freecnt $file_pgcnt]
+statline {Header pages} 1 [percent 1 $file_pgcnt]
+
+set ntable [db eval {SELECT count(*)+1 FROM sqlite_master WHERE type='table'}]
+statline {Number of tables in the database} $ntable
+set nindex [db eval {SELECT count(*) FROM sqlite_master WHERE type='index'}]
+set autoindex [db eval {SELECT count(*) FROM sqlite_master
+ WHERE type='index' AND name LIKE '(% autoindex %)'}]
+set manindex [expr {$nindex-$autoindex}]
+statline {Number of indices} $nindex
+statline {Number of named indices} $manindex [percent $manindex $nindex]
+statline {Automatically generated indices} $autoindex \
+ [percent $autoindex $nindex]
+
+set bytes_data [mem eval "SELECT sum(payload-4*nentry) FROM space_used
+ WHERE NOT is_index AND name!='sqlite_master'"]
+set total_payload [mem eval "SELECT sum(payload) FROM space_used"]
+statline "Size of the file in bytes" $fsize
+statline "Bytes of payload stored" $total_payload \
+ [percent $total_payload $fsize]
+statline "Bytes of user data stored" $bytes_data \
+ [percent $bytes_data $fsize]
+
+# Output table rankings
+#
+puts ""
+puts "*** Page counts for all tables with their indices ********************"
+puts ""
+mem eval {SELECT tblname, count(*) AS cnt, sum(pri_pages+ovfl_pages) AS size
+ FROM space_used GROUP BY tblname ORDER BY size DESC, tblname} {} {
+ statline [string toupper $tblname] $size [percent $size $file_pgcnt]
+}
+
+# Output subreports
+#
+if {$nindex>0} {
+ subreport {All tables and indices} 1
+}
+subreport {All tables} {NOT is_index}
+if {$nindex>0} {
+ subreport {All indices} {is_index}
+}
+foreach tbl [mem eval {SELECT name FROM space_used WHERE NOT is_index
+ ORDER BY name}] {
+ regsub ' $tbl '' qn
+ set name [string toupper $tbl]
+ set n [mem eval "SELECT count(*) FROM space_used WHERE tblname='$qn'"]
+ if {$n>1} {
+ subreport "Table $name and all its indices" "tblname='$qn'"
+ subreport "Table $name w/o any indices" "name='$qn'"
+ subreport "Indices of table $name" "tblname='$qn' AND is_index"
+ } else {
+ subreport "Table $name" "name='$qn'"
+ }
+}
+
+# Output instructions on what the numbers above mean.
+#
+puts {
+*** Definitions ******************************************************
+
+Number of pages in the whole file
+
+ The number of 1024-byte pages that go into forming the complete database.
+
+Pages that store data
+
+ The number of pages that store data, either as primary B*Tree pages or
+ as overflow pages. The number at the right is the data pages divided by
+ the total number of pages in the file.
+
+Pages on the freelist
+
+ The number of pages that are not currently in use but are reserved for
+ future use. The percentage at the right is the number of freelist pages
+ divided by the total number of pages in the file.
+
+Header pages
+
+ The number of pages of header overhead in the database. This value is
+ always 1. The percentage at the right is the number of header pages
+ divided by the total number of pages in the file.
+
+Number of tables in the database
+
+ The number of tables in the database, including the SQLITE_MASTER table
+ used to store schema information.
+
+Number of indices
+
+ The total number of indices in the database.
+
+Number of named indices
+
+ The number of indices created using an explicit CREATE INDEX statement.
+
+Automatically generated indices
+
+ The number of indices used to implement PRIMARY KEY or UNIQUE constraints
+ on tables.
+
+Size of the file in bytes
+
+ The total amount of disk space used by the entire database file.
+
+Bytes of payload stored
+
+ The total number of bytes of payload stored in the database. Payload
+ includes both key and data. The content of the SQLITE_MASTER table is
+ counted when computing this number. The percentage at the right shows
+ the payload divided by the total file size.
+
+Bytes of user data stored
+
+ The total number of bytes of data stored in the database, not counting
+ the database schema information stored in the SQLITE_MASTER table. The
+ percentage at the right is the user data size divided by the total file
+ size.
+
+Percentage of total database
+
+ The amount of the complete database file that is devoted to storing
+ information described by this category.
+
+Number of entries
+
+ The total number of B*Tree key/value pairs stored under this category.
+
+Bytes of storage consumed
+
+ The total amount of disk space required to store all B*Tree entries
+ under this category. This is the total number of pages used times
+ the page size (1024).
+
+Bytes of payload
+
+ The amount of payload stored under this category. Payload is the sum
+ of keys and data. Each table entry has 4 bytes of key and an arbitrary
+ amount of data. Each index entry has 4 or more bytes of key and no
+ data. The percentage at the right is the bytes of payload divided by
+ the bytes of storage consumed.
+
+Bytes of data
+
+ The amount of data stored under this category. The data space reported
+ includes formatting information such as nul-terminators and field-lengths
+ that are stored with the data. The percentage at the right is the bytes
+ of data divided by bytes of storage consumed.
+
+Bytes of key
+
+ The sum of the sizes of all keys under this category. The percentage at
+ the right is the bytes of key divided by the bytes of storage consumed.
+
+Average payload per entry
+
+ The average amount of payload on each entry. This is just the bytes of
+ payload divided by the number of entries.
+
+Average unused bytes per entry
+
+ The average amount of free space remaining on all pages under this
+ category on a per-entry basis. This is the number of unused bytes on
+ all pages divided by the number of entries.
+
+Maximum payload per entry
+
+ The largest payload size of any entry.
+
+Entries that use overflow
+
+ Up to 236 bytes of payload for each entry are stored directly in the
+ primary B*Tree page. Any additional payload is stored on a linked list
+ of overflow pages. This is the number of entries that exceed 236 bytes
+ in size. The value to the right is the number of entries that overflow
+ divided by the total number of entries.
+
+Total pages used
+
+ This is the number of 1024 byte pages used to hold all information in
+ the current category. This is the sum of primary and overflow pages.
+
+Primary pages used
+
+ This is the number of primary B*Tree pages used.
+
+Overflow pages used
+
+ The total number of overflow pages used for this category.
+
+Unused bytes on primary pages
+
+ The total number of bytes of unused space on all primary pages. The
+ percentage at the right is the number of unused bytes divided by the
+ total number of bytes on primary pages.
+
+Unused bytes on overflow pages
+
+ The total number of bytes of unused space on all overflow pages. The
+ percentage at the right is the number of unused bytes divided by the
+ total number of bytes on overflow pages.
+
+Unused bytes on all pages
+
+ The total number of bytes of unused space on all primary and overflow
+ pages. The percentage at the right is the number of unused bytes
+ divided by the total number of bytes.
+}
+
+# Output the database
+#
+puts "**********************************************************************"
+puts "The entire text of this report can be sourced into any SQL database"
+puts "engine for further analysis. All of the text above is an SQL comment."
+puts "The data used to generate this report follows:"
+puts "*/"
+puts "BEGIN;"
+puts $tabledef
+unset -nocomplain x
+mem eval {SELECT * FROM space_used} x {
+ puts -nonewline "INSERT INTO space_used VALUES("
+ regsub ' $x(name) '' qn
+ regsub ' $x(tblname) '' qtn
+ puts -nonewline "'$qn','$qtn',"
+ puts -nonewline "$x(is_index),$x(nentry),$x(payload),$x(mx_payload),"
+ puts -nonewline "$x(n_ovfl),$x(pri_pages),$x(ovfl_pages),$x(pri_unused),"
+ puts "$x(ovfl_unused));"
+}
+puts "COMMIT;"
diff --git a/usr/src/cmd/svc/configd/sqlite/tool/speedtest.tcl b/usr/src/cmd/svc/configd/sqlite/tool/speedtest.tcl
new file mode 100644
index 0000000000..e0ce4e91dd
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/tool/speedtest.tcl
@@ -0,0 +1,278 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#!/usr/bin/tclsh
+#
+# Run this script using TCLSH to do a speed comparison between
+# various versions of SQLite and PostgreSQL and MySQL
+#
+
+# Run a test
+#
+set cnt 1
+proc runtest {title} {
+ global cnt
+ set sqlfile test$cnt.sql
+ puts "<h2>Test $cnt: $title</h2>"
+ incr cnt
+ set fd [open $sqlfile r]
+ set sql [string trim [read $fd [file size $sqlfile]]]
+ close $fd
+ set sx [split $sql \n]
+ set n [llength $sx]
+ if {$n>8} {
+ set sql {}
+ for {set i 0} {$i<3} {incr i} {append sql [lindex $sx $i]<br>\n}
+ append sql "<i>... [expr {$n-6}] lines omitted</i><br>\n"
+ for {set i [expr {$n-3}]} {$i<$n} {incr i} {
+ append sql [lindex $sx $i]<br>\n
+ }
+ } else {
+ regsub -all \n [string trim $sql] <br> sql
+ }
+ puts "<blockquote>"
+ puts "$sql"
+ puts "</blockquote><table border=0 cellpadding=0 cellspacing=0>"
+ set format {<tr><td>%s</td><td align="right">&nbsp;&nbsp;&nbsp;%.3f</td></tr>}
+ set delay 1000
+# exec sync; after $delay;
+# set t [time "exec psql drh <$sqlfile" 1]
+# set t [expr {[lindex $t 0]/1000000.0}]
+# puts [format $format PostgreSQL: $t]
+ exec sync; after $delay;
+ set t [time "exec mysql -f drh <$sqlfile" 1]
+ set t [expr {[lindex $t 0]/1000000.0}]
+ puts [format $format MySQL: $t]
+# set t [time "exec ./sqlite232 s232.db <$sqlfile" 1]
+# set t [expr {[lindex $t 0]/1000000.0}]
+# puts [format $format {SQLite 2.3.2:} $t]
+# set t [time "exec ./sqlite-100 s100.db <$sqlfile" 1]
+# set t [expr {[lindex $t 0]/1000000.0}]
+# puts [format $format {SQLite 2.4 (cache=100):} $t]
+ exec sync; after $delay;
+ set t [time "exec ./sqlite248 s2k.db <$sqlfile" 1]
+ set t [expr {[lindex $t 0]/1000000.0}]
+ puts [format $format {SQLite 2.4.8:} $t]
+ exec sync; after $delay;
+ set t [time "exec ./sqlite248 sns.db <$sqlfile" 1]
+ set t [expr {[lindex $t 0]/1000000.0}]
+ puts [format $format {SQLite 2.4.8 (nosync):} $t]
+ exec sync; after $delay;
+ set t [time "exec ./sqlite2412 s2kb.db <$sqlfile" 1]
+ set t [expr {[lindex $t 0]/1000000.0}]
+ puts [format $format {SQLite 2.4.12:} $t]
+ exec sync; after $delay;
+ set t [time "exec ./sqlite2412 snsb.db <$sqlfile" 1]
+ set t [expr {[lindex $t 0]/1000000.0}]
+ puts [format $format {SQLite 2.4.12 (nosync):} $t]
+# set t [time "exec ./sqlite-t1 st1.db <$sqlfile" 1]
+# set t [expr {[lindex $t 0]/1000000.0}]
+# puts [format $format {SQLite 2.4 (test):} $t]
+ puts "</table>"
+}
+
+# Initialize the environment
+#
+expr srand(1)
+catch {exec /bin/sh -c {rm -f s*.db}}
+set fd [open clear.sql w]
+puts $fd {
+ drop table t1;
+ drop table t2;
+}
+close $fd
+catch {exec psql drh <clear.sql}
+catch {exec mysql drh <clear.sql}
+set fd [open 2kinit.sql w]
+puts $fd {
+ PRAGMA default_cache_size=2000;
+ PRAGMA default_synchronous=on;
+}
+close $fd
+exec ./sqlite248 s2k.db <2kinit.sql
+exec ./sqlite2412 s2kb.db <2kinit.sql
+set fd [open nosync-init.sql w]
+puts $fd {
+ PRAGMA default_cache_size=2000;
+ PRAGMA default_synchronous=off;
+}
+close $fd
+exec ./sqlite248 sns.db <nosync-init.sql
+exec ./sqlite2412 snsb.db <nosync-init.sql
+set ones {zero one two three four five six seven eight nine
+ ten eleven twelve thirteen fourteen fifteen sixteen seventeen
+ eighteen nineteen}
+set tens {{} ten twenty thirty forty fifty sixty seventy eighty ninety}
+proc number_name {n} {
+ if {$n>=1000} {
+ set txt "[number_name [expr {$n/1000}]] thousand"
+ set n [expr {$n%1000}]
+ } else {
+ set txt {}
+ }
+ if {$n>=100} {
+ append txt " [lindex $::ones [expr {$n/100}]] hundred"
+ set n [expr {$n%100}]
+ }
+ if {$n>=20} {
+ append txt " [lindex $::tens [expr {$n/10}]]"
+ set n [expr {$n%10}]
+ }
+ if {$n>0} {
+ append txt " [lindex $::ones $n]"
+ }
+ set txt [string trim $txt]
+ if {$txt==""} {set txt zero}
+ return $txt
+}
+
+
+
+set fd [open test$cnt.sql w]
+puts $fd "CREATE TABLE t1(a INTEGER, b INTEGER, c VARCHAR(100));"
+for {set i 1} {$i<=1000} {incr i} {
+ set r [expr {int(rand()*100000)}]
+ puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
+}
+close $fd
+runtest {1000 INSERTs}
+
+
+
+set fd [open test$cnt.sql w]
+puts $fd "BEGIN;"
+puts $fd "CREATE TABLE t2(a INTEGER, b INTEGER, c VARCHAR(100));"
+for {set i 1} {$i<=25000} {incr i} {
+ set r [expr {int(rand()*500000)}]
+ puts $fd "INSERT INTO t2 VALUES($i,$r,'[number_name $r]');"
+}
+puts $fd "COMMIT;"
+close $fd
+runtest {25000 INSERTs in a transaction}
+
+
+
+set fd [open test$cnt.sql w]
+for {set i 0} {$i<100} {incr i} {
+ set lwr [expr {$i*100}]
+ set upr [expr {($i+10)*100}]
+ puts $fd "SELECT count(*), avg(b) FROM t2 WHERE b>=$lwr AND b<$upr;"
+}
+close $fd
+runtest {100 SELECTs without an index}
+
+
+
+set fd [open test$cnt.sql w]
+for {set i 1} {$i<=100} {incr i} {
+ puts $fd "SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%[number_name $i]%';"
+}
+close $fd
+runtest {100 SELECTs on a string comparison}
+
+
+
+set fd [open test$cnt.sql w]
+puts $fd {CREATE INDEX i2a ON t2(a);}
+puts $fd {CREATE INDEX i2b ON t2(b);}
+close $fd
+runtest {Creating an index}
+
+
+
+set fd [open test$cnt.sql w]
+for {set i 0} {$i<5000} {incr i} {
+ set lwr [expr {$i*100}]
+ set upr [expr {($i+1)*100}]
+ puts $fd "SELECT count(*), avg(b) FROM t2 WHERE b>=$lwr AND b<$upr;"
+}
+close $fd
+runtest {5000 SELECTs with an index}
+
+
+
+set fd [open test$cnt.sql w]
+puts $fd "BEGIN;"
+for {set i 0} {$i<1000} {incr i} {
+ set lwr [expr {$i*10}]
+ set upr [expr {($i+1)*10}]
+ puts $fd "UPDATE t1 SET b=b*2 WHERE a>=$lwr AND a<$upr;"
+}
+puts $fd "COMMIT;"
+close $fd
+runtest {1000 UPDATEs without an index}
+
+
+
+set fd [open test$cnt.sql w]
+puts $fd "BEGIN;"
+for {set i 1} {$i<=25000} {incr i} {
+ set r [expr {int(rand()*500000)}]
+ puts $fd "UPDATE t2 SET b=$r WHERE a=$i;"
+}
+puts $fd "COMMIT;"
+close $fd
+runtest {25000 UPDATEs with an index}
+
+
+set fd [open test$cnt.sql w]
+puts $fd "BEGIN;"
+for {set i 1} {$i<=25000} {incr i} {
+ set r [expr {int(rand()*500000)}]
+ puts $fd "UPDATE t2 SET c='[number_name $r]' WHERE a=$i;"
+}
+puts $fd "COMMIT;"
+close $fd
+runtest {25000 text UPDATEs with an index}
+
+
+
+set fd [open test$cnt.sql w]
+puts $fd "BEGIN;"
+puts $fd "INSERT INTO t1 SELECT * FROM t2;"
+puts $fd "INSERT INTO t2 SELECT * FROM t1;"
+puts $fd "COMMIT;"
+close $fd
+runtest {INSERTs from a SELECT}
+
+
+
+set fd [open test$cnt.sql w]
+puts $fd {DELETE FROM t2 WHERE c LIKE '%fifty%';}
+close $fd
+runtest {DELETE without an index}
+
+
+
+set fd [open test$cnt.sql w]
+puts $fd {DELETE FROM t2 WHERE a>10 AND a<20000;}
+close $fd
+runtest {DELETE with an index}
+
+
+
+set fd [open test$cnt.sql w]
+puts $fd {INSERT INTO t2 SELECT * FROM t1;}
+close $fd
+runtest {A big INSERT after a big DELETE}
+
+
+
+set fd [open test$cnt.sql w]
+puts $fd {BEGIN;}
+puts $fd {DELETE FROM t1;}
+for {set i 1} {$i<=3000} {incr i} {
+ set r [expr {int(rand()*100000)}]
+ puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
+}
+puts $fd {COMMIT;}
+close $fd
+runtest {A big DELETE followed by many small INSERTs}
+
+
+
+set fd [open test$cnt.sql w]
+puts $fd {DROP TABLE t1;}
+puts $fd {DROP TABLE t2;}
+close $fd
+runtest {DROP TABLE}
diff --git a/usr/src/cmd/svc/configd/sqlite/tool/speedtest2.tcl b/usr/src/cmd/svc/configd/sqlite/tool/speedtest2.tcl
new file mode 100644
index 0000000000..fed7efb7cc
--- /dev/null
+++ b/usr/src/cmd/svc/configd/sqlite/tool/speedtest2.tcl
@@ -0,0 +1,210 @@
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#!/usr/bin/tclsh
+#
+# Run this script using TCLSH to do a speed comparison between
+# various versions of SQLite and PostgreSQL and MySQL
+#
+
+# Run a test
+#
+set cnt 1
+proc runtest {title} {
+ global cnt
+ set sqlfile test$cnt.sql
+ puts "<h2>Test $cnt: $title</h2>"
+ incr cnt
+ set fd [open $sqlfile r]
+ set sql [string trim [read $fd [file size $sqlfile]]]
+ close $fd
+ set sx [split $sql \n]
+ set n [llength $sx]
+ if {$n>8} {
+ set sql {}
+ for {set i 0} {$i<3} {incr i} {append sql [lindex $sx $i]<br>\n}
+ append sql "<i>... [expr {$n-6}] lines omitted</i><br>\n"
+ for {set i [expr {$n-3}]} {$i<$n} {incr i} {
+ append sql [lindex $sx $i]<br>\n
+ }
+ } else {
+ regsub -all \n [string trim $sql] <br> sql
+ }
+ puts "<blockquote>"
+ puts "$sql"
+ puts "</blockquote><table border=0 cellpadding=0 cellspacing=0>"
+ set format {<tr><td>%s</td><td align="right">&nbsp;&nbsp;&nbsp;%.3f</td></tr>}
+ set delay 1000
+ exec sync; after $delay;
+ set t [time "exec psql drh <$sqlfile" 1]
+ set t [expr {[lindex $t 0]/1000000.0}]
+ puts [format $format PostgreSQL: $t]
+ exec sync; after $delay;
+ set t [time "exec mysql -f drh <$sqlfile" 1]
+ set t [expr {[lindex $t 0]/1000000.0}]
+ puts [format $format MySQL: $t]
+# set t [time "exec ./sqlite232 s232.db <$sqlfile" 1]
+# set t [expr {[lindex $t 0]/1000000.0}]
+# puts [format $format {SQLite 2.3.2:} $t]
+# set t [time "exec ./sqlite-100 s100.db <$sqlfile" 1]
+# set t [expr {[lindex $t 0]/1000000.0}]
+# puts [format $format {SQLite 2.4 (cache=100):} $t]
+ exec sync; after $delay;
+ set t [time "exec ./sqlite240 s2k.db <$sqlfile" 1]
+ set t [expr {[lindex $t 0]/1000000.0}]
+ puts [format $format {SQLite 2.4:} $t]
+ exec sync; after $delay;
+ set t [time "exec ./sqlite240 sns.db <$sqlfile" 1]
+ set t [expr {[lindex $t 0]/1000000.0}]
+ puts [format $format {SQLite 2.4 (nosync):} $t]
+# set t [time "exec ./sqlite-t1 st1.db <$sqlfile" 1]
+# set t [expr {[lindex $t 0]/1000000.0}]
+# puts [format $format {SQLite 2.4 (test):} $t]
+ puts "</table>"
+}
+
+# Initialize the environment
+#
+expr srand(1)
+catch {exec /bin/sh -c {rm -f s*.db}}
+set fd [open clear.sql w]
+puts $fd {
+ drop table t1;
+ drop table t2;
+}
+close $fd
+catch {exec psql drh <clear.sql}
+catch {exec mysql drh <clear.sql}
+set fd [open 2kinit.sql w]
+puts $fd {
+ PRAGMA default_cache_size=2000;
+ PRAGMA default_synchronous=on;
+}
+close $fd
+exec ./sqlite240 s2k.db <2kinit.sql
+exec ./sqlite-t1 st1.db <2kinit.sql
+set fd [open nosync-init.sql w]
+puts $fd {
+ PRAGMA default_cache_size=2000;
+ PRAGMA default_synchronous=off;
+}
+close $fd
+exec ./sqlite240 sns.db <nosync-init.sql
+set ones {zero one two three four five six seven eight nine
+ ten eleven twelve thirteen fourteen fifteen sixteen seventeen
+ eighteen nineteen}
+set tens {{} ten twenty thirty forty fifty sixty seventy eighty ninety}
+proc number_name {n} {
+ if {$n>=1000} {
+ set txt "[number_name [expr {$n/1000}]] thousand"
+ set n [expr {$n%1000}]
+ } else {
+ set txt {}
+ }
+ if {$n>=100} {
+ append txt " [lindex $::ones [expr {$n/100}]] hundred"
+ set n [expr {$n%100}]
+ }
+ if {$n>=20} {
+ append txt " [lindex $::tens [expr {$n/10}]]"
+ set n [expr {$n%10}]
+ }
+ if {$n>0} {
+ append txt " [lindex $::ones $n]"
+ }
+ set txt [string trim $txt]
+ if {$txt==""} {set txt zero}
+ return $txt
+}
+
+
+set fd [open test$cnt.sql w]
+puts $fd "BEGIN;"
+puts $fd "CREATE TABLE t1(a INTEGER, b INTEGER, c VARCHAR(100));"
+for {set i 1} {$i<=25000} {incr i} {
+ set r [expr {int(rand()*500000)}]
+ puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
+}
+puts $fd "COMMIT;"
+close $fd
+runtest {25000 INSERTs in a transaction}
+
+
+set fd [open test$cnt.sql w]
+puts $fd "DELETE FROM t1;"
+close $fd
+runtest {DELETE everything}
+
+
+set fd [open test$cnt.sql w]
+puts $fd "BEGIN;"
+for {set i 1} {$i<=25000} {incr i} {
+ set r [expr {int(rand()*500000)}]
+ puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
+}
+puts $fd "COMMIT;"
+close $fd
+runtest {25000 INSERTs in a transaction}
+
+
+set fd [open test$cnt.sql w]
+puts $fd "DELETE FROM t1;"
+close $fd
+runtest {DELETE everything}
+
+
+set fd [open test$cnt.sql w]
+puts $fd "BEGIN;"
+for {set i 1} {$i<=25000} {incr i} {
+ set r [expr {int(rand()*500000)}]
+ puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
+}
+puts $fd "COMMIT;"
+close $fd
+runtest {25000 INSERTs in a transaction}
+
+
+set fd [open test$cnt.sql w]
+puts $fd "DELETE FROM t1;"
+close $fd
+runtest {DELETE everything}
+
+
+set fd [open test$cnt.sql w]
+puts $fd "BEGIN;"
+for {set i 1} {$i<=25000} {incr i} {
+ set r [expr {int(rand()*500000)}]
+ puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
+}
+puts $fd "COMMIT;"
+close $fd
+runtest {25000 INSERTs in a transaction}
+
+
+set fd [open test$cnt.sql w]
+puts $fd "DELETE FROM t1;"
+close $fd
+runtest {DELETE everything}
+
+
+set fd [open test$cnt.sql w]
+puts $fd "BEGIN;"
+for {set i 1} {$i<=25000} {incr i} {
+ set r [expr {int(rand()*500000)}]
+ puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
+}
+puts $fd "COMMIT;"
+close $fd
+runtest {25000 INSERTs in a transaction}
+
+
+set fd [open test$cnt.sql w]
+puts $fd "DELETE FROM t1;"
+close $fd
+runtest {DELETE everything}
+
+
+set fd [open test$cnt.sql w]
+puts $fd {DROP TABLE t1;}
+close $fd
+runtest {DROP TABLE}
diff --git a/usr/src/cmd/svc/dtd/service_bundle.dtd.1 b/usr/src/cmd/svc/dtd/service_bundle.dtd.1
new file mode 100644
index 0000000000..c7f549bfb7
--- /dev/null
+++ b/usr/src/cmd/svc/dtd/service_bundle.dtd.1
@@ -0,0 +1,777 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+-->
+
+<!--
+ Service description DTD
+
+ Most attributes are string values (or an individual string from a
+ restricted set), but attributes with a specific type requirement are
+ noted in the comment describing the element.
+-->
+
+<!--
+ XInclude support
+
+ A series of service bundles may be composed via the xi:include tag.
+ smf(5) tools enforce that all bundles be of the same type.
+-->
+<!ELEMENT xi:include
+ (xi:fallback)
+ >
+<!ATTLIST xi:include
+ href CDATA #REQUIRED
+ parse (xml|text) "xml"
+ encoding CDATA #IMPLIED
+ xmlns:xi CDATA #FIXED "http://www.w3.org/2001/XInclude"
+ >
+
+<!ELEMENT xi:fallback
+ ANY
+ >
+<!ATTLIST xi:fallback
+ xmlns:xi CDATA #FIXED "http://www.w3.org/2001/XInclude"
+ >
+
+<!--
+ stability
+
+ This element associates an SMI stability level with the parent
+ element. See attributes(5) for an explanation of interface
+ stability levels.
+
+ Its attribute is
+
+ value The stability level of the parent element.
+-->
+
+<!ELEMENT stability EMPTY>
+
+<!ATTLIST stability
+ value ( Standard | Stable | Evolving | Unstable |
+ External | Obsolete ) #REQUIRED >
+
+<!-- Property value lists -->
+
+<!--
+ value_node
+
+ This element represents a single value within any of the typed
+ property value lists.
+
+ Its attribute is
+
+ value The value for this node in the list.
+-->
+
+<!ELEMENT value_node EMPTY>
+
+<!ATTLIST value_node
+ value CDATA #REQUIRED>
+
+<!--
+ count_list
+ integer_list
+ opaque_list
+ host_list
+ hostname_list
+ net_address_v4_list
+ net_address_v6_list
+ time_list
+ astring_list
+ ustring_list
+ boolean_list
+ fmri_list
+ uri_list
+
+ These elements represent the typed lists of values for a property.
+ Each contains one or more value_node elements representing each
+ value on the list.
+
+ None of these elements has attributes.
+-->
+
+<!ELEMENT count_list
+ ( value_node+ )>
+
+<!ATTLIST count_list>
+
+<!ELEMENT integer_list
+ ( value_node+ )>
+
+<!ATTLIST integer_list>
+
+<!ELEMENT opaque_list
+ ( value_node+ )>
+
+<!ATTLIST opaque_list>
+
+<!ELEMENT host_list
+ ( value_node+ )>
+
+<!ATTLIST host_list>
+
+<!ELEMENT hostname_list
+ ( value_node+ )>
+
+<!ATTLIST hostname_list>
+
+<!ELEMENT net_address_v4_list
+ ( value_node+ )>
+
+<!ATTLIST net_address_v4_list>
+
+<!ELEMENT net_address_v6_list
+ ( value_node+ )>
+
+<!ATTLIST net_address_v6_list>
+
+<!ELEMENT time_list
+ ( value_node+ )>
+
+<!ATTLIST time_list>
+
+<!ELEMENT astring_list
+ ( value_node+ )>
+
+<!ATTLIST astring_list>
+
+<!ELEMENT ustring_list
+ ( value_node+ )>
+
+<!ATTLIST ustring_list>
+
+<!ELEMENT boolean_list
+ ( value_node+ )>
+
+<!ATTLIST boolean_list>
+
+<!ELEMENT fmri_list
+ ( value_node+ )>
+
+<!ATTLIST fmri_list>
+
+<!ELEMENT uri_list
+ ( value_node+ )>
+
+<!ATTLIST uri_list>
+
+<!-- Properties and property groups -->
+
+<!--
+ property
+
+ This element is for a singly or multiply valued property within a
+ property group. It contains an appropriate value list element,
+ which is expected to be consistent with the type attribute.
+
+ Its attributes are
+
+ name The name of this property.
+
+ type The data type for this property.
+
+ override These values should replace values already in the
+ repository.
+-->
+
+<!ELEMENT property
+ ( count_list | integer_list | opaque_list | host_list | hostname_list |
+ net_address_v4_list | net_address_v6_list | time_list |
+ astring_list | ustring_list | boolean_list | fmri_list |
+ uri_list )? >
+
+<!ATTLIST property
+ name CDATA #REQUIRED
+ type ( count | integer | opaque | host | hostname |
+ net_address_v4 | net_address_v6 | time |
+ astring | ustring | boolean | fmri | uri ) #REQUIRED
+ override ( true | false ) "false" >
+
+<!--
+ propval
+
+ This element is for a singly valued property within a property
+ group. List-valued properties must use the property element above.
+
+ Its attributes are
+
+ name The name of this property.
+
+ type The data type for this property.
+
+ value The value for this property. Must match type
+ restriction of type attribute.
+
+ override This value should replace any values already in the
+ repository.
+-->
+
+<!ELEMENT propval EMPTY>
+
+<!ATTLIST propval
+ name CDATA #REQUIRED
+ type ( count | integer | opaque | host | hostname |
+ net_address_v4 | net_address_v6 | time | astring |
+ ustring | boolean | fmri | uri ) #REQUIRED
+ value CDATA #REQUIRED
+ override ( true | false ) "false" >
+
+<!--
+ property_group
+
+ This element is for a set of related properties on a service or
+ instance. It contains an optional stability element, as well as
+ zero or more property-containing elements.
+
+ Its attributes are
+
+ name The name of this property group.
+
+ type A category for this property group. Groups of type
+ "framework", "implementation" or "template" are primarily
+ of interest to the service management facility, while
+ groups of type "application" are expected to be only of
+ interest to the service to which this group is attached.
+ Other types may be introduced using the service symbol
+ namespace conventions.
+
+ delete If in the repository, this property group should be removed.
+-->
+
+<!ELEMENT property_group
+ ( stability?, ( propval | property )* )>
+
+<!ATTLIST property_group
+ name CDATA #REQUIRED
+ type CDATA #REQUIRED
+ delete ( true | false ) "false" >
+
+<!--
+ service_fmri
+
+ This element defines a reference to a service FMRI (for either a
+ service or an instance).
+
+ Its attribute is
+
+ value The FMRI.
+-->
+
+<!ELEMENT service_fmri EMPTY>
+
+<!ATTLIST service_fmri
+ value CDATA #REQUIRED>
+
+<!-- Dependencies -->
+
+<!--
+ dependency
+
+ This element identifies a group of FMRIs upon which the service is
+ in some sense dependent. Its interpretation is left to the
+ restarter to which a particular service instance is delegated. It
+ contains a group of service FMRIs, as well as a block of properties.
+
+ Its attributes are
+
+ name The name of this dependency.
+
+ grouping The relationship between the various FMRIs grouped
+ here; "require_all" of the FMRIs to be online, "require_any"
+ of the FMRIs to be online, or "exclude_all" of the FMRIs
+ from being online for the dependency to be satisfied.
+ "optional_all" dependencies are satisfied when all
+ of the FMRIs are either online or unable to come
+ online (because they are disabled, misconfigured, or one
+ of their dependencies is unable to come online).
+
+ restart_on The type of events from the FMRIs that the service should
+ be restarted for. "error" restarts the service if the
+ dependency is restarted due to hardware fault. "restart"
+ restarts the service if the dependency is restarted for
+ any reason, including hardware fault. "refresh" restarts
+ the service if the dependency is refreshed or restarted for
+ any reason. "none" will never restart the service due to
+ dependency state changes.
+
+ type The type of dependency: on another service ('service'), on
+ a filesystem path ('path'), or another dependency type.
+
+ delete This dependency should be deleted.
+-->
+
+<!ELEMENT dependency
+ ( service_fmri*, stability?, ( propval | property )* ) >
+
+<!ATTLIST dependency
+ name CDATA #REQUIRED
+ grouping ( require_all | require_any | exclude_all |
+ optional_all ) #REQUIRED
+ restart_on ( error | restart | refresh | none ) #REQUIRED
+ type CDATA #REQUIRED
+ delete ( true | false ) "false" >
+
+<!-- Dependents -->
+
+<!--
+ dependent
+
+ This element identifies a service which should depend on this service. It
+ corresponds to a dependency in the named service. The grouping and type
+ attributes of that dependency are implied to be "require_all" and
+ "service", respectively.
+
+ Its attributes are
+
+ name The name of the dependency property group to create in the
+ dependent entity.
+
+ grouping The grouping relationship of the dependency property
+ group to create in the dependent entity. See "grouping"
+ attribute on the dependency element.
+
+ restart_on The type of events from this service that the named service
+ should be restarted for.
+
+ delete True if this dependent should be deleted.
+
+ override Whether to replace an existing dependent of the same name.
+
+-->
+
+<!ELEMENT dependent
+ ( service_fmri, stability?, ( propval | property )* ) >
+
+<!ATTLIST dependent
+ name CDATA #REQUIRED
+ grouping ( require_all | require_any | exclude_all |
+ optional_all) #REQUIRED
+ restart_on ( error | restart | refresh | none) #REQUIRED
+ delete ( true | false ) "false"
+ override ( true | false ) "false" >
+
+<!-- Method execution context, security profile, and credential definitions -->
+
+<!--
+ envvar
+
+ An environment variable. It has two attributes:
+
+ name The name of the environment variable.
+ value The value of the environment variable.
+-->
+
+<!ELEMENT envvar EMPTY>
+
+<!ATTLIST envvar
+ name CDATA #REQUIRED
+ value CDATA #REQUIRED >
+
+<!--
+ method_environment
+
+ This element defines the environment for a method. It has no
+ attributes, and one or more envvar child elements.
+-->
+
+<!ELEMENT method_environment (envvar+) >
+
+<!ATTLIST method_environment>
+
+<!--
+ method_profile
+
+ This element indicates which exec_attr(5) profile applies to the
+ method context being defined.
+
+ Its attribute is
+
+ name The name of the profile.
+-->
+
+<!ELEMENT method_profile EMPTY>
+
+<!ATTLIST method_profile
+ name CDATA #REQUIRED >
+
+<!--
+ method_credential
+
+ This element specifies credential attributes for the execution
+ method to use.
+
+ Its attributes are
+
+ user The user ID, in numeric or text form.
+
+ group The group ID, in numeric or text form. If absent or
+ ":default", the group associated with the user in the
+ passwd database.
+
+ supp_groups Supplementary group IDs to be associated with the
+ method, separated by commas or spaces. If absent or
+ ":default", initgroups(3C) will be used.
+
+ privileges An optional string specifying the privilege set.
+
+ limit_privileges An optional string specifying the limit
+ privilege set.
+-->
+
+<!ELEMENT method_credential EMPTY>
+
+<!ATTLIST method_credential
+ user CDATA #REQUIRED
+ group CDATA ":default"
+ supp_groups CDATA ":default"
+ privileges CDATA ":default"
+ limit_privileges CDATA ":default" >
+
+<!--
+ method_context
+
+ This element combines credential and resource management attributes
+ for execution methods. It may contain a method_environment, or
+ a method_profile or method_credential element.
+
+ Its attributes are
+
+ working_directory The home directory to launch the method from.
+ ":default" can be used as a token to indicate use of the
+ user specified by the credential or profile specified.
+
+ project The project ID, in numeric or text form. ":default" can
+ be used as a token to indicate use of the project
+ identified by getdefaultproj(3PROJECT) for the non-root
+ user specified by the credential or profile specified.
+ If the user is root, ":default" designates the project
+ the restarter is running in.
+
+ resource_pool The resource pool name to launch the method on.
+ ":default" can be used as a token to indicate use of the
+ pool specified in the project(4) entry given in the
+ "project" attribute above.
+-->
+<!ELEMENT method_context
+ ( (method_profile | method_credential)?, method_environment? ) >
+
+<!ATTLIST method_context
+ working_directory CDATA ":default"
+ project CDATA ":default"
+ resource_pool CDATA ":default" >
+
+<!-- Restarter delegation, methods, and monitors -->
+
+<!--
+ exec_method
+
+ This element describes one of the methods used by the designated
+ restarter to act on the service instance. Its interpretation is
+ left to the restarter to which a particular service instance is
+ delegated. It contains a set of attributes, an optional method
+ context, and an optional stability element for the optional
+ properties that can be included.
+
+ Its attributes are
+
+ type The type of method, either "method" or "monitor".
+
+ name Name of this execution method. The method names are
+ usually a defined interface of the restarter to which an
+ instance of this service is delegated.
+
+ exec The string identifying the action to take. For
+ svc.startd(1M), this is a string suitable to pass to
+ exec(2).
+
+ timeout_seconds [integer] Duration, in seconds, to wait for this
+ method to complete. A '0' or '-1' denotes an infinite
+ timeout.
+
+ delete If in the repository, the property group for this method
+ should be removed.
+-->
+
+<!ELEMENT exec_method
+ ( method_context?, stability?, ( propval | property )* ) >
+
+<!ATTLIST exec_method
+ type ( method | monitor ) #REQUIRED
+ name CDATA #REQUIRED
+ exec CDATA #REQUIRED
+ timeout_seconds CDATA #REQUIRED
+ delete ( true | false ) "false" >
+
+<!--
+ restarter
+
+ A flag element identifying the restarter to which this service or
+ service instance is delegated. Contains the FMRI naming the
+ delegated restarter.
+
+ This element has no attributes.
+-->
+
+<!ELEMENT restarter
+ ( service_fmri ) >
+
+<!ATTLIST restarter>
+
+<!--
+ Templates
+-->
+
+<!--
+ doc_link
+
+ The doc_link relates a resource described by the given URI to the
+ service described by the containing template. The resource is
+ expected to be a documentation or elucidatory reference of some
+ kind.
+
+ Its attributes are
+
+ name A label for this resource.
+
+ uri A URI to the resource.
+-->
+
+<!ELEMENT doc_link EMPTY>
+
+<!ATTLIST doc_link
+ name CDATA #REQUIRED
+ uri CDATA #REQUIRED >
+
+<!--
+ manpage
+
+ The manpage element connects the reference manual page to the
+ template's service.
+
+ Its attributes are
+
+ title The manual page title.
+
+ section The manual page's section.
+
+ manpath The MANPATH environment variable, as described in man(1),
+ that is required to reach the named manual page.
+-->
+
+<!ELEMENT manpage EMPTY>
+
+<!ATTLIST manpage
+ title CDATA #REQUIRED
+ section CDATA #REQUIRED
+ manpath CDATA ":default" >
+
+<!--
+ documentation
+
+ The documentation element groups an arbitrary number of doc_link
+ and manpage references.
+
+ It has no attributes.
+-->
+
+<!ELEMENT documentation
+ ( doc_link | manpage )* >
+
+<!ATTLIST documentation>
+
+<!--
+ loctext
+
+ The loctext element is a container for localized text.
+
+ Its sole attribute is
+
+ xml:lang The name of the locale, in the form accepted by LC_ALL,
+ etc. See locale(5).
+-->
+<!ELEMENT loctext
+ (#PCDATA) >
+
+<!ATTLIST loctext
+ xml:lang CDATA #REQUIRED >
+
+<!--
+ description
+
+ The description holds a set of potentially longer, localized strings that
+ consist of a short description of the service.
+
+ The description has no attributes.
+-->
+<!ELEMENT description
+ ( loctext+ ) >
+
+<!ATTLIST description>
+
+<!--
+ common_name
+
+ The common_name holds a set of short, localized strings that
+ represent a well-known name for the service in the given locale.
+
+ The common_name has no attributes.
+-->
+<!ELEMENT common_name
+ ( loctext+ ) >
+
+<!ATTLIST common_name>
+
+<!--
+ template
+
+ The template contains a collection of metadata about the service.
+ It contains a localizable string that serves as a common,
+ human-readable name for the service. (This name should be less than
+ 60 characters in a single byte locale.) The template may optionally
+ contain a longer localizable description of the service and a
+ collection of links to documentation, either in the form of manual
+ pages or in the form of URI specifications to external documentation
+ sources (such as docs.sun.com).
+
+ The template has no attributes.
+-->
+<!ELEMENT template
+ ( common_name, description?, documentation?) >
+
+<!ATTLIST template>
+
+<!-- Services and instances -->
+
+<!--
+ create_default_instance
+
+ A flag element indicating that an otherwise empty default instance
+ of this service (named "default") should be created at install, with
+ its enabled property set as given.
+
+ Its attribute is
+
+ enabled [boolean] The initial value for the enabled state of
+ this instance.
+-->
+
+<!ELEMENT create_default_instance EMPTY >
+
+<!ATTLIST create_default_instance
+ enabled ( true | false ) #REQUIRED >
+
+<!--
+ single_instance
+
+ A flag element stating that this service can only have a single
+ instance on a particular system.
+-->
+
+<!ELEMENT single_instance EMPTY>
+
+<!ATTLIST single_instance>
+
+<!--
+ instance
+
+ The service instance is the object representing a software component
+ that will run on the system if enabled. It contains an enabled
+ element, a set of dependencies on other services, potentially
+ customized methods or configuration data, an optional method
+ context, and a pointer to its restarter. (If no restarter is
+ specified, the master restarter, svc.startd(1M), is assumed to be
+ responsible for the service.)
+
+ Its attributes are
+
+ name The canonical name for this instance of the service.
+
+ enabled [boolean] The initial value for the enabled state of
+ this instance.
+-->
+
+<!ELEMENT instance
+ ( restarter?, dependency*, dependent*, method_context?,
+ exec_method*, property_group*, template? ) >
+
+<!ATTLIST instance
+ name CDATA #REQUIRED
+ enabled ( true | false ) #REQUIRED >
+
+<!--
+ service
+
+ The service contains the set of instances defined by default for
+ this service, an optional method execution context, any default
+ methods, the template, and various restrictions or advice applicable
+ at installation. The method execution context and template elements
+ are required for service_bundle documents with type "manifest", but
+ are optional for "profile" or "archive" documents.
+
+ Its attributes are
+
+ name The canonical name for the service.
+
+ version [integer] The integer version for this service.
+
+ type Whether this service is a simple service, a delegated
+ restarter, or a milestone (a synthetic service that
+ collects a group of dependencies).
+-->
+
+<!ELEMENT service
+ ( create_default_instance?, single_instance?, restarter?,
+ dependency*, dependent*, method_context?, exec_method*,
+ property_group*, instance*, stability?, template? ) >
+
+<!ATTLIST service
+ name CDATA #REQUIRED
+ version CDATA #REQUIRED
+ type ( service | restarter | milestone ) #REQUIRED >
+
+<!--
+ service_bundle
+
+ The bundle possesses two attributes:
+
+ type How this file is to be understood by the framework (or
+ used in a non-framework compliant way). Standard types
+ are 'archive', 'manifest', and 'profile'.
+
+ name A name for the bundle. Manifests should be named after
+ the package which delivered them; profiles should be
+ named after the "feature set nickname" they intend to
+ enable.
+-->
+
+<!ELEMENT service_bundle
+ ( service_bundle* | service* | xi:include* )>
+
+<!ATTLIST service_bundle
+ type CDATA #REQUIRED
+ name CDATA #REQUIRED>
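
Taken together, these declarations describe the overall shape of an SMF
manifest. The following is a hand-written sketch, for illustration only, of a
minimal bundle this DTD would accept; the bundle name, service name,
dependency, and method commands are invented:

    <?xml version="1.0"?>
    <!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
    <service_bundle type='manifest' name='SUNWexample:example'>
        <service name='site/example' type='service' version='1'>
            <create_default_instance enabled='false' />
            <single_instance />
            <dependency name='fs-local' grouping='require_all'
                restart_on='none' type='service'>
                <service_fmri value='svc:/system/filesystem/local' />
            </dependency>
            <exec_method type='method' name='start'
                exec='/lib/svc/method/example start' timeout_seconds='60' />
            <exec_method type='method' name='stop'
                exec=':kill' timeout_seconds='60' />
            <property_group name='config' type='application'>
                <propval name='debug' type='boolean' value='false' />
            </property_group>
            <stability value='Unstable' />
            <template>
                <common_name>
                    <loctext xml:lang='C'>example service</loctext>
                </common_name>
            </template>
        </service>
    </service_bundle>
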
diff --git a/usr/src/cmd/svc/inc.flg b/usr/src/cmd/svc/inc.flg
new file mode 100644
index 0000000000..abf77ac7f3
--- /dev/null
+++ b/usr/src/cmd/svc/inc.flg
@@ -0,0 +1,30 @@
+#!/bin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+
+exec_file usr/src/cmd/svc/req.flg
diff --git a/usr/src/cmd/svc/lsvcrun/Makefile b/usr/src/cmd/svc/lsvcrun/Makefile
new file mode 100644
index 0000000000..545d70149f
--- /dev/null
+++ b/usr/src/cmd/svc/lsvcrun/Makefile
@@ -0,0 +1,59 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+
+PROG = lsvcrun
+OBJS = lsvcrun.o
+SRCS = $(OBJS:%.o=%.c)
+POFILES = $(OBJS:.o=.po)
+
+ROOTLIBSVCBINPROG = $(ROOT)/lib/svc/bin/$(PROG)
+
+include ../../Makefile.cmd
+
+LDLIBS += -lcontract -lscf -luutil
+
+lint := LINTFLAGS = -ux
+
+.KEEP_STATE:
+
+all: $(PROG)
+
+$(PROG): $(OBJS)
+ $(LINK.c) -o $@ $(OBJS) $(LDLIBS)
+ $(POST_PROCESS)
+
+install: all $(ROOTLIBSVCBINPROG)
+
+$(ROOTLIBSVCBIN)/%: %
+ $(INS.file)
+
+clean:
+ $(RM) $(OBJS)
+
+lint: lint_SRCS
+
+include ../../Makefile.targ
diff --git a/usr/src/cmd/svc/lsvcrun/lsvcrun.c b/usr/src/cmd/svc/lsvcrun/lsvcrun.c
new file mode 100644
index 0000000000..4b920fb68a
--- /dev/null
+++ b/usr/src/cmd/svc/lsvcrun/lsvcrun.c
@@ -0,0 +1,951 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * lsvcrun - run an rc?.d script, modifying appropriate data in the
+ * repository to reflect legacy behavior.
+ *
+ * We try to keep track of what we can for the legacy scripts via
+ * property groups under the smf/legacy_run service. Each property
+ * group identifies a service, named in the form 'rc2_d_S10foo'.
+ *
+ * Each group has the following properties: name, the script name
+ * displayed by svcs(1M); state_timestamp, the time the script was run;
+ * contract, the contract ID; inode, the inode of the script; and
+ * suffix, the suffix of the script name, e.g. 'foo'.
+ *
+ * When we run a K script, we try to identify and remove the
+ * property group by means of examining the inode and script
+ * suffix. The inode check means more than one script with the
+ * same suffix will still work as intended in the common case.
+ *
+ * If we cannot find a property group, or one already exists
+ * when we try to add one, then we print a suitable warning. These
+ * are warnings because there was no strict requirement that K
+ * and S scripts be matched up.
+ *
+ * In the face of these assumptions being proved wrong, we always
+ * make sure to execute the script anyway in an attempt to keep
+ * things working as they used to. If we can't execute the script,
+ * we try to leave the repository in the state it was before.
+ */
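+
+/*
+ * For illustration, the property group recorded for a hypothetical
+ * script /etc/rc2.d/S10foo could be examined with svcprop(1):
+ *
+ *      # svcprop -p rc2_d_S10foo smf/legacy_run
+ *
+ * (The group name here is an example only; real names are derived
+ * from the script path as described above.)
+ */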
+
+#include <sys/ctfs.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/stat.h>
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <fnmatch.h>
+#include <libcontract.h>
+#include <libcontract_priv.h>
+#include <libintl.h>
+#include <libscf.h>
+#include <libscf_priv.h>
+#include <libuutil.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <time.h>
+#include <unistd.h>
+#include <limits.h>
+
+
+/* Environment variables to pass on. See approved_env(). */
+static char *evars_to_pass[] = { "LANG", "LC_ALL", "LC_COLLATE", "LC_CTYPE",
+ "LC_MESSAGES", "LC_MONETARY", "LC_NUMERIC", "LC_TIME", "PATH", "TZ"
+};
+
+#define EVARS_TO_PASS_NUM \
+ (sizeof (evars_to_pass) / sizeof (*evars_to_pass))
+
+
+static void
+usage()
+{
+ (void) fprintf(stderr,
+ gettext("Usage: %s [-s] script {start | stop}\n"), uu_getpname());
+ exit(UU_EXIT_USAGE);
+}
+
+/*
+ * Pick out the script name and convert it for use as an SMF property
+ * group name.
+ */
+static char *
+start_pg_name(const char *path)
+{
+ char *out, *cp;
+
+ if (fnmatch("/etc/rc[0-6S].d/S*", path, FNM_PATHNAME) != 0) {
+ uu_warn(gettext("couldn't parse name %s.\n"), path);
+ return (NULL);
+ }
+
+ out = strdup(path + sizeof ("/etc/") - 1);
+
+ if (out == NULL) {
+ uu_warn(gettext("strdup() failed (%s).\n"), strerror(errno));
+ return (NULL);
+ }
+
+ /* Convert illegal characters to _. */
+ for (cp = out; *cp != '\0'; ++cp) {
+ /* locale problem? */
+ if (!isalnum(*cp) && *cp != '-')
+ *cp = '_';
+ }
+
+ return (out);
+}
+
+static char *
+script_suffix(const char *path)
+{
+ const char *cp;
+ char *out;
+
+ if (fnmatch("/etc/rc[0-6S].d/[SK]*", path, FNM_PATHNAME) != 0) {
+ uu_warn(gettext("couldn't parse name %s.\n"), path);
+ return (NULL);
+ }
+
+ cp = path + sizeof ("/etc/rc0.d/S") - 1;
+
+ while (isdigit(*cp))
+ cp++;
+
+ if (*cp == '\0') {
+ uu_warn(gettext("couldn't parse name %s.\n"), path);
+ return (NULL);
+ }
+
+ out = strdup(cp);
+ if (out == NULL)
+ uu_warn(gettext("strdup() failed (%s).\n"), strerror(errno));
+
+ return (out);
+}
+
+/*
+ * Convert a path to an acceptable SMF (service) name.
+ */
+static char *
+path_to_svc_name(const char *path)
+{
+ char *out, *cp;
+
+ out = strdup(path);
+ if (out == NULL) {
+ uu_warn(gettext("strdup() failed (%s).\n"), strerror(errno));
+ return (NULL);
+ }
+
+ /* Convert illegal characters to _. */
+ for (cp = out; *cp != '\0'; ++cp) {
+ /* locale problem? */
+ if (!isalnum(*cp) && *cp != '-' && *cp != '/')
+ *cp = '_';
+ }
+
+ /* If the first character is _, use a instead. */
+ if (*out == '_')
+ *out = 'a';
+
+ return (out);
+}
+
+static void
+scferr(const char *func)
+{
+ uu_warn(gettext("%s failed (%s). Repository will not be modified.\n"),
+ func, scf_strerror(scf_error()));
+}
+
+static scf_propertygroup_t *
+get_start_pg(const char *script, scf_handle_t *h, scf_service_t *svc,
+ boolean_t *ok)
+{
+ char *pg_name = NULL;
+ scf_propertygroup_t *pg = NULL;
+ scf_property_t *prop = NULL;
+
+ if ((pg_name = start_pg_name(script)) == NULL)
+ return (NULL);
+
+ if ((pg = scf_pg_create(h)) == NULL) {
+ scferr("scf_pg_create()");
+ goto out;
+ }
+
+add:
+ if (scf_service_add_pg(svc, pg_name, SCF_GROUP_FRAMEWORK,
+ SCF_PG_FLAG_NONPERSISTENT, pg) == 0) {
+ *ok = 1;
+ free(pg_name);
+ return (pg);
+ }
+
+ switch (scf_error()) {
+ case SCF_ERROR_INVALID_ARGUMENT:
+ assert(0);
+ abort();
+ /* NOTREACHED */
+
+ case SCF_ERROR_EXISTS:
+ break;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ uu_die(gettext(
+ "Insufficient privilege to add repository properties; "
+ "not launching \"%s\".\n"), script);
+ /* NOTREACHED */
+
+ default:
+ scferr("scf_service_add_pg()");
+ scf_pg_destroy(pg);
+ pg = NULL;
+ goto out;
+ }
+
+ if (scf_service_get_pg(svc, pg_name, pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_INVALID_ARGUMENT:
+ assert(0);
+ abort();
+ /* NOTREACHED */
+
+ case SCF_ERROR_NOT_FOUND:
+ goto add;
+
+ default:
+ scferr("scf_service_get_pg()");
+ scf_pg_destroy(pg);
+ pg = NULL;
+ goto out;
+ }
+ }
+
+ if ((prop = scf_property_create(h)) == NULL) {
+ scferr("scf_property_create()");
+ scf_pg_destroy(pg);
+ pg = NULL;
+ goto out;
+ }
+
+ /*
+ * See if the pg has the name property. If it has, that
+ * implies we successfully ran the same script before. We
+ * should re-run it anyway, but not modify the existing pg;
+ * this might lose contract-control but there's not much we
+ * can do.
+ *
+ * If there's no name property, then we probably couldn't
+ * remove the pg fully after a script failed to run.
+ */
+
+ if (scf_pg_get_property(pg, SCF_LEGACY_PROPERTY_NAME, prop) == 0) {
+ uu_warn(gettext("Service matching \"%s\" "
+ "seems to be running.\n"), script);
+ scf_pg_destroy(pg);
+ pg = NULL;
+ } else if (scf_error() != SCF_ERROR_NOT_FOUND) {
+ scferr("scf_pg_get_property()");
+ scf_pg_destroy(pg);
+ pg = NULL;
+ } else {
+ uu_warn(gettext("Service \"%s\" has an invalid property "
+ "group.\n"), script);
+ }
+
+out:
+ free(pg_name);
+ scf_property_destroy(prop);
+ return (pg);
+}
+
+static scf_propertygroup_t *
+pg_match(scf_handle_t *h, scf_service_t *svc, ino_t ino, const char *suffix)
+{
+ char buf[PATH_MAX];
+ scf_iter_t *iter = NULL;
+ scf_propertygroup_t *pg = NULL;
+ scf_property_t *prop = NULL;
+ scf_value_t *val = NULL;
+
+ if ((pg = scf_pg_create(h)) == NULL) {
+ scferr("scf_pg_create()");
+ goto err;
+ }
+
+ if ((iter = scf_iter_create(h)) == NULL) {
+ scferr("scf_iter_create()");
+ goto err;
+ }
+
+ if ((prop = scf_property_create(h)) == NULL) {
+ scferr("scf_property_create()");
+ goto err;
+ }
+
+ if ((val = scf_value_create(h)) == NULL) {
+ scferr("scf_value_create()");
+ goto err;
+ }
+
+ if (scf_iter_service_pgs_typed(iter, svc, SCF_GROUP_FRAMEWORK) !=
+ 0) {
+ scferr("scf_iter_service_pgs_typed()");
+ goto err;
+ }
+
+ while (scf_iter_next_pg(iter, pg) > 0) {
+ int match = 1;
+
+ if (suffix != NULL) {
+ ssize_t len;
+
+ if (scf_pg_get_property(pg, SCF_LEGACY_PROPERTY_SUFFIX,
+ prop) != 0)
+ continue;
+
+ if (scf_property_get_value(prop, val) != 0)
+ continue;
+
+ len = scf_value_get_astring(val, buf, sizeof (buf));
+ if (len < 0) {
+ scferr("scf_value_get_astring()");
+ goto err;
+ }
+ if (len >= sizeof (buf))
+ continue;
+
+ match = (strcmp(buf, suffix) == 0);
+ }
+
+ if (ino != 0) {
+ uint64_t pval;
+
+ if (scf_pg_get_property(pg, SCF_LEGACY_PROPERTY_INODE,
+ prop) != 0)
+ continue;
+
+ if (scf_property_get_value(prop, val) != 0)
+ continue;
+
+ if (scf_value_get_count(val, &pval) != 0)
+ continue;
+
+ match = (ino == pval) && match;
+ }
+
+ if (match)
+ goto out;
+ }
+
+err:
+ scf_pg_destroy(pg);
+ pg = NULL;
+
+out:
+ scf_value_destroy(val);
+ scf_iter_destroy(iter);
+ scf_property_destroy(prop);
+ return (pg);
+}
+
+/*
+ * Try and find the property group matching the service this script
+ * stops. First we look for a matching inode plus a matching suffix.
+ * This commonly succeeds, but if not, we just search for inode.
+ * Finally, we try for just the script suffix.
+ */
+static scf_propertygroup_t *
+get_stop_pg(const char *script, scf_handle_t *h, scf_service_t *svc,
+ boolean_t *ok)
+{
+ struct stat st;
+ char *suffix;
+ scf_propertygroup_t *pg;
+
+ if (stat(script, &st) != 0) {
+ uu_warn(gettext("Couldn't stat %s (%s).\n"), script,
+ strerror(errno));
+ return (NULL);
+ }
+
+ if ((suffix = script_suffix(script)) == NULL) {
+ pg = pg_match(h, svc, st.st_ino, NULL);
+ if (pg != NULL)
+ goto out;
+ return (NULL);
+ }
+
+ if ((pg = pg_match(h, svc, st.st_ino, suffix)) != NULL)
+ goto out;
+
+ if ((pg = pg_match(h, svc, st.st_ino, NULL)) != NULL)
+ goto out;
+
+ if ((pg = pg_match(h, svc, 0, suffix)) == NULL) {
+ uu_warn(gettext("Service matching \"%s\" "
+ "doesn't seem to be running.\n"), script);
+ free(suffix);
+ return (NULL);
+ }
+
+out:
+ *ok = 1;
+ free(suffix);
+ return (pg);
+}
+
+static scf_propertygroup_t *
+get_script_pg(const char *script, boolean_t start_flag, boolean_t *ok)
+{
+ scf_handle_t *h = NULL;
+ scf_scope_t *scope = NULL;
+ scf_service_t *svc = NULL;
+ scf_propertygroup_t *pg = NULL;
+
+ *ok = 0;
+
+ h = scf_handle_create(SCF_VERSION);
+ if (h == NULL) {
+ scferr("scf_handle_create()");
+ goto out;
+ }
+
+ if (scf_handle_bind(h) != 0) {
+ if (scf_error() != SCF_ERROR_NO_SERVER) {
+ scferr("scf_handle_bind()");
+ } else {
+ uu_warn(gettext(
+ "Could not connect to svc.configd.\n"));
+ }
+ goto out;
+ }
+
+ if ((scope = scf_scope_create(h)) == NULL) {
+ scferr("scf_scope_create()");
+ goto out;
+ }
+
+ if ((svc = scf_service_create(h)) == NULL) {
+ scferr("scf_service_create()");
+ goto out;
+ }
+
+ if (scf_handle_get_scope(h, SCF_SCOPE_LOCAL, scope) != 0) {
+ scferr("scf_handle_get_local_scope()");
+ goto out;
+ }
+
+ if (scf_scope_get_service(scope, SCF_LEGACY_SERVICE, svc) != 0) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND) {
+ scferr("scf_scope_get_service()");
+ goto out;
+ }
+
+ if (scf_scope_add_service(scope, SCF_LEGACY_SERVICE, svc) !=
+ 0) {
+ scferr("scf_scope_add_service()");
+ goto out;
+ }
+ }
+
+ if (start_flag)
+ pg = get_start_pg(script, h, svc, ok);
+ else
+ pg = get_stop_pg(script, h, svc, ok);
+
+out:
+ scf_service_destroy(svc);
+ scf_scope_destroy(scope);
+ return (pg);
+}
+
+static int
+prepare_contract()
+{
+ int fd;
+
+ do
+ fd = open64(CTFS_ROOT "/process/template", O_RDWR);
+ while (fd < 0 && errno == EINTR);
+ if (fd < 0) {
+ uu_warn(gettext("Can not create contract"));
+ return (-1);
+ }
+
+ /* Leave HWERR in fatal set. */
+
+ errno = ct_tmpl_activate(fd);
+ if (errno != 0) {
+ assert(errno == EPERM);
+ uu_warn(gettext("Can not activate contract template"));
+ (void) close(fd);
+ return (-1);
+ }
+
+ (void) close(fd);
+ return (0);
+}
+
+static void
+cleanup_pg(scf_propertygroup_t *pg)
+{
+ scf_error_t err;
+ char buf[80];
+
+ if (scf_pg_delete(pg) == 0)
+ return;
+
+ err = scf_error();
+
+ if (scf_pg_to_fmri(pg, buf, sizeof (buf)) != 0)
+ (void) strcpy(buf, "?");
+
+ uu_warn(gettext("Could not remove property group %s: %s.\n"), buf,
+ scf_strerror(err));
+}
+
+/*
+ * Create a duplicate environment which only contains approved
+ * variables---those in evars_to_pass and those beginning with "_INIT_".
+ */
+static char **
+approved_env(char **env)
+{
+ char **newenv;
+ int i, i_new, j;
+
+ for (i = 0; env[i] != NULL; ++i)
+ ;
+
+ newenv = malloc(sizeof (*newenv) * (i + 1));
+ if (newenv == NULL)
+ return (NULL);
+
+ i_new = 0;
+
+ for (i = 0; env[i] != NULL; ++i) {
+ if (strncmp(env[i], "_INIT_", sizeof ("_INIT_") - 1) == 0) {
+ newenv[i_new++] = env[i];
+ continue;
+ }
+
+ for (j = 0; j < EVARS_TO_PASS_NUM; ++j) {
+ size_t l = strlen(evars_to_pass[j]);
+
+ if (env[i][l] == '=' &&
+ strncmp(env[i], evars_to_pass[j], l) == 0)
+ newenv[i_new++] = env[i];
+ }
+ }
+
+ newenv[i_new] = NULL;
+
+ return (newenv);
+}
+
+/*
+ * Create a duplicate environment which does not contain any SMF_ variables.
+ */
+static char **
+env_without_smf(char **env)
+{
+ char **newenv;
+ int i, i_new;
+
+ for (i = 0; env[i] != NULL; ++i)
+ ;
+
+ newenv = malloc(sizeof (*newenv) * (i + 1));
+ if (newenv == NULL)
+ return (NULL);
+
+ i_new = 0;
+
+ for (i = 0; env[i] != NULL; ++i) {
+ if (strncmp(env[i], "SMF_", sizeof ("SMF_") - 1) == 0)
+ continue;
+
+ newenv[i_new++] = env[i];
+ }
+
+ newenv[i_new] = NULL;
+
+ return (newenv);
+}
+
+static int
+add_new_property(scf_handle_t *h, scf_transaction_t *tx, const char *name,
+ scf_type_t ty, const void *val)
+{
+ scf_transaction_entry_t *e;
+ scf_value_t *v;
+ const char *func;
+ const struct timeval *t;
+ int r;
+
+ if ((e = scf_entry_create(h)) == NULL) {
+ func = "scf_entry_create()";
+ goto err;
+ }
+
+ if ((v = scf_value_create(h)) == NULL) {
+ func = "scf_value_create()";
+ goto err;
+ }
+
+ r = scf_transaction_property_new(tx, e, name, ty);
+ if (r != 0) {
+ func = "scf_transaction_property_new()";
+ goto err;
+ }
+
+ switch (ty) {
+ case SCF_TYPE_COUNT:
+ scf_value_set_count(v, (uint64_t)val);
+ break;
+
+ case SCF_TYPE_TIME:
+ t = val;
+ r = scf_value_set_time(v, t->tv_sec, 1000 * t->tv_usec);
+ assert(r == 0);
+ break;
+
+ case SCF_TYPE_ASTRING:
+ r = scf_value_set_astring(v, val);
+ assert(r == 0);
+ break;
+
+ default:
+ assert(0);
+ abort();
+ }
+
+ if (scf_entry_add_value(e, v) == 0)
+ return (0);
+
+ func = "scf_entry_add_value()";
+
+err:
+ uu_warn(gettext("%s failed (%s).\n"), func, scf_strerror(scf_error()));
+ return (-1);
+}
+
+static void
+set_legacy_service(scf_propertygroup_t *pg, const char *script)
+{
+ scf_handle_t *h;
+ const char *func;
+ char *suffix;
+ scf_transaction_t *tx;
+ struct timeval tstamp;
+ struct stat st;
+ ctid_t ctid;
+ char *svc_name = NULL;
+ int ret;
+
+ h = scf_pg_handle(pg);
+ if (h == NULL) {
+ func = "scf_pg_handle()";
+ goto scferr;
+ }
+
+ ret = gettimeofday(&tstamp, NULL);
+ assert(ret == 0);
+
+ if (stat(script, &st) != 0) {
+ uu_warn(gettext("Couldn't stat %s (%s).\n"), script,
+ strerror(errno));
+ goto err;
+ }
+
+ if (errno = contract_latest(&ctid)) {
+ uu_warn(gettext("Could not get contract"));
+ goto err;
+ }
+
+ tx = scf_transaction_create(h);
+ if (tx == NULL) {
+ func = "scf_transaction_create()";
+ goto scferr;
+ }
+
+ if (scf_transaction_start(tx, pg) != 0) {
+ func = "scf_transaction_start()";
+ goto scferr;
+ }
+
+ /*
+ * We'd like to use the prettier svc_name, but if path_to_svc_name()
+ * fails, we can use the script name anyway.
+ */
+ svc_name = path_to_svc_name(script);
+
+ if (add_new_property(h, tx, SCF_LEGACY_PROPERTY_NAME, SCF_TYPE_ASTRING,
+ (void *)(svc_name ? svc_name : script)) != 0)
+ goto err;
+
+ if (add_new_property(h, tx, SCF_PROPERTY_STATE_TIMESTAMP,
+ SCF_TYPE_TIME, &tstamp) != 0)
+ goto err;
+
+ if (add_new_property(h, tx, SCF_LEGACY_PROPERTY_INODE,
+ SCF_TYPE_COUNT, (void *)st.st_ino) != 0)
+ goto err;
+
+ if ((suffix = script_suffix(script)) != NULL) {
+ if (add_new_property(h, tx, SCF_LEGACY_PROPERTY_SUFFIX,
+ SCF_TYPE_ASTRING, (void *)suffix) != 0)
+ goto err;
+
+ free(suffix);
+ }
+
+ if (add_new_property(h, tx, SCF_PROPERTY_CONTRACT, SCF_TYPE_COUNT,
+ (void *)ctid) != 0)
+ goto err;
+
+ for (;;) {
+ switch (scf_transaction_commit(tx)) {
+ case 1:
+ free(svc_name);
+ return;
+
+ case 0:
+ if (scf_pg_update(pg) == -1) {
+ func = "scf_pg_update()";
+ goto scferr;
+ }
+ continue;
+
+ case -1:
+ func = "scf_transaction_commit()";
+ goto scferr;
+
+ default:
+ assert(0);
+ abort();
+ }
+ }
+
+scferr:
+ uu_warn(gettext("%s failed (%s).\n"), func, scf_strerror(scf_error()));
+err:
+ uu_die(gettext("Could not commit property values to repository.\n"));
+}
+
+int
+main(int argc, char *argv[], char *envp[])
+{
+ const char *restarter, *script, *action;
+ boolean_t source = 0;
+ int o;
+ boolean_t start_flag;
+ char **newenv;
+ pid_t pid;
+ int pipefds[2];
+ char c;
+ int exitstatus;
+
+ scf_propertygroup_t *pg;
+ boolean_t pg_ok;
+
+ (void) uu_setpname(argv[0]);
+ uu_alt_exit(UU_PROFILE_LAUNCHER);
+
+ /* Make sure we were run by svc.startd. */
+ if ((restarter = getenv("SMF_RESTARTER")) == NULL ||
+ strcmp(restarter, SCF_SERVICE_STARTD) != 0)
+ uu_die(gettext("invocation outside smf(5) inappropriate\n"));
+
+ while ((o = getopt(argc, argv, "s")) != -1) {
+ switch (o) {
+ case 's':
+ source = 1;
+ break;
+
+ default:
+ usage();
+ }
+ }
+
+ if (argc - optind != 2)
+ usage();
+
+ script = argv[optind];
+ action = argv[optind + 1];
+
+ if (strcmp(action, "start") == 0)
+ start_flag = 1;
+ else if (strcmp(action, "stop") == 0)
+ start_flag = 0;
+ else
+ usage();
+
+ /*
+ * Look for the pg & exit if appropriate. Also, if we're starting,
+ * add the pg now so we can exit before launching the script if we
+ * have insufficient repository privilege.
+ *
+ * If any other problem occurs, we carry on anyway.
+ */
+ pg = get_script_pg(script, start_flag, &pg_ok);
+
+ /* Clean the environment. Now so we can fail early. */
+ if (!source)
+ newenv = approved_env(envp);
+ else
+ newenv = env_without_smf(envp);
+ if (newenv == NULL)
+ uu_die(gettext(
+ "Could not create new environment: out of memory.\n"));
+
+ if (prepare_contract() == -1) {
+ if (start_flag && pg != NULL)
+ cleanup_pg(pg);
+
+ exit(UU_EXIT_FATAL);
+ }
+
+ /* pipe to communicate exec success or failure */
+ if (pipe(pipefds) != 0) {
+ uu_warn(gettext("Could not create pipe"));
+
+ if (start_flag && pg != NULL)
+ cleanup_pg(pg);
+
+ exit(UU_EXIT_FATAL);
+ }
+
+ if (!pg_ok)
+ (void) printf(gettext("Executing legacy init script \"%s\" "
+ "despite previous errors.\n"), script);
+ else
+ (void) printf(gettext("Executing legacy init script \"%s\".\n"),
+ script);
+ (void) fflush(stdout);
+
+ pid = fork();
+ if (pid < 0) {
+ uu_warn(gettext("Could not fork"));
+
+ if (start_flag && pg != NULL)
+ cleanup_pg(pg);
+
+ exit(UU_EXIT_FATAL);
+ }
+
+ if (pid == 0) {
+ /* child */
+
+ const char *arg1, *arg2, *arg3;
+
+ (void) close(pipefds[0]);
+ (void) fcntl(pipefds[1], F_SETFD, FD_CLOEXEC);
+
+ if (!source) {
+ arg1 = "/bin/sh";
+ arg2 = script;
+ arg3 = action;
+ } else {
+ arg1 = "/bin/sh";
+ arg2 = "-c";
+ arg3 = script;
+ }
+
+ (void) execle(arg1, arg1, arg2, arg3, NULL, newenv);
+
+ uu_warn(gettext("Could not exec \"%s %s %s\""), arg1,
+ arg2, arg3);
+
+
+ /* Notify parent of the failure. */
+ while (write(pipefds[1], &c, 1) != 1) {
+ switch (errno) {
+ case EAGAIN:
+ (void) sleep(1);
+
+ /* FALLTHROUGH */
+
+ case EINTR:
+ continue;
+ }
+
+ uu_warn(gettext("Could not inform parent of error"));
+ break;
+ }
+
+ exit(UU_EXIT_FATAL);
+ }
+
+ (void) close(pipefds[1]);
+
+ if (read(pipefds[0], &c, sizeof (c)) > 0) {
+ if (!start_flag)
+ uu_die(gettext("exec() failed; leaving properties.\n"));
+ else {
+ uu_warn(gettext("exec() failed.\n"));
+ if (pg != NULL)
+ cleanup_pg(pg);
+ exit(UU_EXIT_FATAL);
+ }
+ }
+
+ while (waitpid(pid, &exitstatus, 0) == -1) {
+ assert(errno == EINTR);
+ }
+
+ if (WIFSIGNALED(exitstatus)) {
+ char buf[SIG2STR_MAX];
+ (void) sig2str(WTERMSIG(exitstatus), buf);
+ (void) printf(gettext("Legacy init script \"%s\" failed due "
+ "to signal %s.\n"), script, buf);
+ } else {
+ (void) printf(gettext("Legacy init script \"%s\" exited with "
+ "return code %d.\n"), script, WEXITSTATUS(exitstatus));
+ }
+
+ if (pg != NULL) {
+ if (start_flag)
+ set_legacy_service(pg, script);
+ else
+ cleanup_pg(pg);
+ scf_pg_destroy(pg);
+ }
+
+ return (UU_EXIT_OK);
+}
diff --git a/usr/src/cmd/svc/mfstscan/Makefile b/usr/src/cmd/svc/mfstscan/Makefile
new file mode 100644
index 0000000000..191b886228
--- /dev/null
+++ b/usr/src/cmd/svc/mfstscan/Makefile
@@ -0,0 +1,74 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+
+PROG = mfstscan
+
+OBJS = mfstscan.o \
+ manifest_hash.o
+
+SRCS = mfstscan.c \
+ ../common/manifest_hash.c
+
+POFILES = $(SRCS:.c=.po)
+
+ROOTLIBSVCBINPROG = $(ROOT)/lib/svc/bin/$(PROG)
+
+include ../../Makefile.cmd
+
+POFILE = $(PROG)_all.po
+CPPFLAGS += -I../common
+LDLIBS += -lscf -luutil -lmd5
+CLOBBERFILES += $(POFILES)
+
+lint := LINTFLAGS = -ux
+
+.KEEP_STATE:
+
+all: $(PROG)
+
+$(PROG): $(OBJS)
+ $(LINK.c) -o $@ $(OBJS) $(LDLIBS)
+ $(POST_PROCESS)
+
+$(POFILE): $(POFILES)
+ cat $(POFILES) > $(POFILE)
+
+install: all $(ROOTLIBSVCBINPROG)
+
+$(ROOTLIBSVCBIN)/%: %
+ $(INS.file)
+
+clean:
+ $(RM) $(OBJS)
+
+lint: lint_SRCS
+
+%.o: ../common/%.c
+ $(COMPILE.c) $(OUTPUT_OPTION) $< $(CTFCONVERT_HOOK)
+ $(POST_PROCESS_O)
+
+include ../../Makefile.targ
diff --git a/usr/src/cmd/svc/mfstscan/mfstscan.c b/usr/src/cmd/svc/mfstscan/mfstscan.c
new file mode 100644
index 0000000000..1ac8dfbfa8
--- /dev/null
+++ b/usr/src/cmd/svc/mfstscan/mfstscan.c
@@ -0,0 +1,146 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+
+#include <ftw.h>
+#include <libintl.h>
+#include <libscf.h>
+#include <libuutil.h>
+#include <locale.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "manifest_hash.h"
+
+#define MAX_DEPTH 24
+
+static scf_handle_t *hndl;
+static int tflag;
+
+/*
+ * mfstscan - service manifest change detection utility
+ *
+ * mfstscan walks the given filesystem hierarchies, and reports those manifests
+ * with changed or absent hash entries. Manifests are expected to end with a
+ * .xml suffix--other files will be ignored.
+ */
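+
+/*
+ * For illustration, a typical invocation over the installed manifest
+ * directory (the path is an example) would be
+ *
+ *      # /lib/svc/bin/mfstscan /var/svc/manifest
+ *
+ * while the -t option prints the hash property name derived from each
+ * path instead of scanning it:
+ *
+ *      # /lib/svc/bin/mfstscan -t /var/svc/manifest/milestone/single-user.xml
+ */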
+
+static void
+usage()
+{
+ (void) fprintf(stderr, gettext("Usage: %s [-t] path ...\n"),
+ uu_getpname());
+ exit(UU_EXIT_USAGE);
+}
+
+/*ARGSUSED*/
+static int
+process(const char *fn, const struct stat *sp, int ftw_type,
+ struct FTW *ftws)
+{
+ char *name;
+ char *suffix_match;
+
+ uchar_t hash[MHASH_SIZE];
+
+ if (ftw_type != FTW_F)
+ return (0);
+
+ suffix_match = strstr(fn, ".xml");
+ if (suffix_match == NULL || strcmp(suffix_match, ".xml") != 0)
+ return (0);
+
+ name = mhash_filename_to_propname(fn);
+
+ if (mhash_retrieve_entry(hndl, name, hash) == -1 ||
+ mhash_test_file(hndl, fn, 0, &name, hash) == 0)
+ (void) printf("%s\n", fn);
+
+ return (0);
+}
+
+int
+main(int argc, char *argv[])
+{
+ int i;
+ int paths_walked = 0;
+ struct stat sb;
+
+ (void) uu_setpname(argv[0]);
+
+ while ((i = getopt(argc, argv, "t")) != -1) {
+ switch (i) {
+ case 't':
+ tflag = 1;
+ paths_walked = 1;
+ break;
+ case '?':
+ default:
+ usage();
+ /*NOTREACHED*/
+ }
+ }
+
+ if (optind >= argc)
+ usage();
+
+ hndl = scf_handle_create(SCF_VERSION);
+
+ if (scf_handle_bind(hndl) != SCF_SUCCESS)
+ uu_die(gettext("cannot bind to repository: %s\n"),
+ scf_strerror(scf_error()));
+
+ for (i = optind; i < argc; i++) {
+ if (tflag) {
+ (void) puts(mhash_filename_to_propname(argv[i]));
+ continue;
+ }
+
+ if (stat(argv[i], &sb) == -1) {
+ uu_warn(gettext("cannot stat %s"), argv[i]);
+ continue;
+ }
+
+ if (nftw(argv[i], process, MAX_DEPTH, FTW_MOUNT) == -1)
+ uu_warn(gettext("file tree walk of %s encountered "
+ "error"), argv[i]);
+ else
+ paths_walked++;
+ }
+
+ (void) scf_handle_unbind(hndl);
+ (void) scf_handle_destroy(hndl);
+
+ if (!paths_walked)
+ uu_die(gettext("no paths walked\n"));
+
+ return (0);
+}
diff --git a/usr/src/cmd/svc/milestone/Makefile b/usr/src/cmd/svc/milestone/Makefile
new file mode 100644
index 0000000000..231a3a7d31
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/Makefile
@@ -0,0 +1,153 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+
+include ../../Makefile.cmd
+
+OWNER = root
+GROUP = sys
+FILEMODE = 0444
+
+BUILTXML= \
+ console-login.xml
+
+FSSVCS= \
+ boot-archive.xml \
+ local-fs.xml \
+ minimal-fs.xml \
+ root-fs.xml \
+ usr-fs.xml
+
+FSMANIFESTS= $(FSSVCS:%=$(ROOTSVCSYSTEMFILESYSTEM)/%)
+
+NETSVCS= \
+ datalink.xml \
+ datalink-init.xml \
+ aggregation.xml \
+ network-initial.xml \
+ network-loopback.xml \
+ network-physical.xml \
+ network-service.xml
+
+NETMANIFESTS= $(NETSVCS:%=$(ROOTSVCNETWORK)/%)
+
+MAINMILESTONES= \
+ multi-user-server.xml \
+ multi-user.xml \
+ name-services.xml \
+ network.xml \
+ single-user.xml \
+ sysconfig.xml
+
+MAINMANIFESTS= $(MAINMILESTONES:%=$(ROOTSVCMILESTONE)/%)
+
+SYSDEVSVCS= \
+ devices-local.xml
+
+SYSDEVMANIFESTS= $(SYSDEVSVCS:%=$(ROOTSVCSYSTEMDEVICE)/%)
+
+SYSTEMSVCS= \
+ console-login.xml \
+ identity.xml \
+ manifest-import.xml \
+ rmtmpfiles.xml
+
+SYSTEMMANIFESTS = $(SYSTEMSVCS:%=$(ROOTSVCSYSTEM)/%)
+
+SYSTEMSVCSVCS= \
+ restarter.xml
+
+SYSTEMSVCMANIFESTS= $(SYSTEMSVCSVCS:%=$(ROOTSVCSYSTEM)/svc/%)
+
+MISCFILES= \
+ README.share
+
+SYSTEMMISCFILES = $(MISCFILES:%.share=$(ROOT)/lib/svc/share/%)
+
+#
+# MANIFEST is used solely in the construction of the check target.
+#
+MANIFEST= $(FSSVCS) $(NETSVCS) $(MAINMILESTONES) $(SYSTEMSVCS) \
+ $(SYSDEVSVCS) $(SYSTEMSVCSVCS)
+
+SVCMETHOD=\
+ aggregation \
+ boot-archive \
+ console-login \
+ datalink \
+ datalink-init \
+ devices-local \
+ fs-local \
+ fs-minimal \
+ fs-root \
+ fs-usr \
+ identity-domain \
+ identity-node \
+ manifest-import \
+ net-loopback \
+ net-init \
+ net-physical \
+ net-svc \
+ rmtmpfiles
+
+$(ROOTSVCMETHOD) := FILEMODE = 0555
+
+all: $(BUILTXML)
+
+install: $(FSMANIFESTS) $(MAINMANIFESTS) $(NETMANIFESTS) $(SYSTEMMANIFESTS) \
+ $(ROOTSVCMETHOD) $(SYSDEVMANIFESTS) $(SYSTEMSVCMANIFESTS) \
+ $(SYSTEMMISCFILES)
+
+check: $(CHKMANIFEST)
+
+console-login.xml: make-console-login-xml
+ $(SH) ./make-console-login-xml
+
+clobber: clean
+ -$(RM) $(BUILTXML)
+
+$(ROOTSVCMILESTONE)/%: %
+ $(INS.file)
+
+$(ROOTSVCNETWORK)/%: %
+ $(INS.file)
+
+$(ROOTSVCSYSTEM)/%: %
+ $(INS.file)
+
+$(ROOTSVCSYSTEMDEVICE)/%: %
+ $(INS.file)
+
+$(ROOTSVCSYSTEMFILESYSTEM)/%: %
+ $(INS.file)
+
+$(ROOTSVCSYSTEM)/svc/%: %
+ $(INS.file)
+
+$(ROOT)/lib/svc/share/%: %.share
+ $(INS.rename)
+
+clean lint _msg:
diff --git a/usr/src/cmd/svc/milestone/README b/usr/src/cmd/svc/milestone/README
new file mode 100644
index 0000000000..42b8e17365
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/README
@@ -0,0 +1,36 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+ Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ident "%Z%%M% %I% %E% SMI"
+
+usr/src/cmd/svc/milestone/README
+
+This source directory contains service descriptions, in the form of XML
+manifests, for milestones and services that are required for the seed
+repository construction. This includes most services required for
+satisfying the single-user milestone. (In general, services associated
+with a specific command should locate their description in the directory
+that contains the source for that command.)
diff --git a/usr/src/cmd/svc/milestone/README.share b/usr/src/cmd/svc/milestone/README.share
new file mode 100644
index 0000000000..4751b3cefc
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/README.share
@@ -0,0 +1,148 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+Use is subject to license terms.
+
+ident "%Z%%M% %I% %E% SMI"
+
+/lib/svc/share/README
+
+smf(5): Notes on maintenance mode and recovery
+
+Failures that bring the system to maintenance mode may include hardware
+or critical software failures. The procedures below are given so that
+some software repairs can be made; the recommended exit approach once a
+repair has been made is to reboot the system. The system can be brought
+to maintenance mode deliberately via the '-s' option to boot(1M), or via
+the 's' option to init(1M).
+
+In failure scenarios, smf(5) may or may not be running, depending on
+which component has failed. If smf(5) is running, and the /usr
+filesystem is reachable, then the usual svcadm(1M) invocations to clear
+maintenance state and restart service instances can be used.
+Otherwise, the following instructions describe the direct execution of
+service methods, so that capabilities that svc.startd(1M) would normally
+start automatically can be started manually. In the case that the
+document recommends an invocation like
+
+# /lib/svc/method/example-method start
+
+you may also consider running these scripts with the shell displaying
+the commands from the service method as they are executed. For sh(1)
+based scripts, this would mean running the method as
+
+# /sbin/sh -x /lib/svc/method/example-method start
+
+Some methods may be written to instead use ksh(1), with invocation
+
+# /usr/bin/ksh -x /lib/svc/method/example-method start
+
+The first line of the service method script will generally specify its
+required interpreter using the standard #! notation. Method scripts may
+potentially require interpreters other than sh(1) or ksh(1).
+
+1. Boot archive failure
+
+The boot archive may become out of sync with the root filesystem on
+a reboot following an abnormal system shutdown. The recommended
+action is to reboot immediately and choose "Solaris failsafe" when
+the boot menu is displayed. Type 'i' to get an interactive recovery
+shell and follow the instructions to update the boot archive.
+
+If the stale files have not yet been loaded by the kernel, or are
+compatible, you may continue booting by clearing the boot-archive
+service state:
+
+# svcadm clear system/boot-archive
+
+2. Failure to mount filesystems.
+
+In cases where the system was unable to bring a combination of the
+system/filesystem/{root,usr,minimal} services online, it may be possible
+to directly execute the corresponding service methods
+
+# /lib/svc/method/fs-root
+# /lib/svc/method/fs-usr
+# /lib/svc/method/fs-minimal
+
+to mount the various filesystems. In the case that these methods fail,
+a direct invocation of mount(1M), and potentially fsck(1M), should be
+attempted for file systems required for recovery purposes.
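+
+For example, if the file system is listed in /etc/vfstab, giving
+mount(1M) only the mount point is sufficient:
+
+# /sbin/mount /usr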
+
+/lib/svc/method/fs-usr attempts to remount the root file system
+read-write, such that persistent changes can be made to the system's
+configuration. If this method is failing, one can directly remount
+using the mount(1M) command via
+
+# /sbin/mount -o rw,remount /
+
+/etc/svc/volatile is a temporary filesystem generally reserved for Sun
+private use. It may prove a useful location to create mount points if
+the root file system cannot be remounted read-write.
+
+3. Failure to run svc.configd(1M).
+
+svc.configd(1M) will give detailed instructions for recovery if
+corruption is detected in the repository. If svc.configd(1M) cannot be
+run because of missing or corrupt library components, then the affected
+components will need to be replaced. Components could be copied from a
+CD-ROM or DVD-ROM, or from another system.
+
+4. Failure to run svc.startd(1M).
+
+If the inittab(4) line to invoke svc.startd(1M) is missing or incorrect,
+it will need to be restored. A valid entry is
+
+smf::sysinit:/lib/svc/bin/svc.startd >/dev/msglog 2<>/dev/msglog </dev/console
+
+If svc.startd(1M) cannot be run because of missing or corrupt library
+components, then the affected components will need to be replaced, as
+for svc.configd(1M) above.
+
+5. Activating basic networking configuration.
+
+If svc.startd(1M) did not execute successfully, it may also be necessary
+to activate network interfaces manually, such that other hosts can be
+contacted. The service methods can be invoked directly as
+
+# /lib/svc/method/net-loopback
+# /lib/svc/method/net-physical
+
+If these methods fail, a direct invocation of ifconfig(1M) can be
+attempted.
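+
+For example, an individual interface could be plumbed and configured
+with invocations of the form
+
+# /sbin/ifconfig _interface_ plumb
+# /sbin/ifconfig _interface_ _IP_address_ netmask _netmask_ up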
+
+In some scenarios, one may be able to use routeadm(1M) to activate more
+dynamic route management functionality; restoring the default dynamic
+routing behaviour can be done using the '-u' option. (Invoking routeadm
+with no arguments will display which commands must be accessible for the
+current routing configuration to be invoked.) Otherwise, once
+interfaces are up, a default route can be manually added using the
+route(1M) command. On typical IPv4 networks, this invocation would be
+
+# /sbin/route add net default _gateway_IP_
+
+--
+
+(An extended version of this document is available at
+http://sun.com/msg/SMF-8000-QD. That version includes additional
+document references.)
diff --git a/usr/src/cmd/svc/milestone/aggregation b/usr/src/cmd/svc/milestone/aggregation
new file mode 100644
index 0000000000..88317277e3
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/aggregation
@@ -0,0 +1,40 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+
+if [ `/sbin/zonename` != "global" ]; then
+ exit 0
+fi
+
+case "$1" in
+'start')
+ /sbin/dladm up-aggr
+ ;;
+'stop')
+ /sbin/dladm down-aggr
+ ;;
+esac
diff --git a/usr/src/cmd/svc/milestone/aggregation.xml b/usr/src/cmd/svc/milestone/aggregation.xml
new file mode 100644
index 0000000000..d7f38fafed
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/aggregation.xml
@@ -0,0 +1,73 @@
+<?xml version="1.0"?>
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+-->
+
+<service_bundle type='manifest' name='SUNWcsr:net-aggregation'>
+
+<service
+ name='network/aggregation'
+ type='service'
+ version='1'>
+
+ <create_default_instance enabled='true' />
+
+ <single_instance/>
+
+ <exec_method
+ type='method'
+ name='start'
+ exec='/lib/svc/method/aggregation %m'
+ timeout_seconds='30' />
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec='/lib/svc/method/aggregation %m'
+ timeout_seconds='30' />
+
+ <property_group name='startd' type='framework'>
+ <propval name='duration' type='astring'
+ value='transient' />
+ </property_group>
+
+ <stability value='Evolving' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+ network aggregations
+ </loctext>
+ </common_name>
+ <documentation>
+ <manpage title='dladm' section='1M'
+ manpath='/usr/share/man' />
+ </documentation>
+ </template>
+</service>
+
+</service_bundle>
diff --git a/usr/src/cmd/svc/milestone/boot-archive b/usr/src/cmd/svc/milestone/boot-archive
new file mode 100644
index 0000000000..1a112563af
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/boot-archive
@@ -0,0 +1,79 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+
+. /lib/svc/share/smf_include.sh
+. /lib/svc/share/fs_include.sh
+
+#
+# no boot-archive on sparc...yet
+#
+if [ `uname -p` = "sparc" ]; then
+ exit $SMF_EXIT_OK
+fi
+
+#
+# Check the boot archive content against root filesystem.
+# Return failure if they differ.
+#
+if [ "${_INIT_ZONENAME:=`/sbin/zonename`}" != "global" ]; then
+ exit $SMF_EXIT_OK
+fi
+
+#
+# Make sure we return failure only once. If the user chooses to ignore
+# the error, we return success to permit the boot to continue. The boot
+# archive will be updated on the subsequent shutdown.
+#
+ERRORFILE=/etc/svc/volatile/boot_archive_error
+if [ -f "${ERRORFILE}" ]; then
+ rm ${ERRORFILE}
+ exit $SMF_EXIT_OK
+fi
+
+#
+# Now check the archive.
+#
+/sbin/bootadm -a update -vn 2> /dev/null
+if [ $? = 0 ]; then
+ exit $SMF_EXIT_OK
+fi
+
+touch ${ERRORFILE}
+cecho ""
+cecho "WARNING - The following files in / differ from the boot archive:"
+
+/sbin/bootadm -a update -vn > /dev/msglog 2> /dev/null
+
+cecho "The recommended action is to reboot and select the \"Solaris failsafe\""
+cecho "option from the boot menu. Then follow the prompts to update the"
+cecho "boot archive."
+cecho "To continue booting at your own risk, clear the service:"
+cecho " # svcadm clear system/boot-archive"
+cecho ""
+
+exit $SMF_EXIT_ERR_FATAL
diff --git a/usr/src/cmd/svc/milestone/boot-archive.xml b/usr/src/cmd/svc/milestone/boot-archive.xml
new file mode 100644
index 0000000000..ca8f947321
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/boot-archive.xml
@@ -0,0 +1,91 @@
+<?xml version="1.0"?>
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service manifest is not editable; its contents will
+ be overwritten by package or patch operations, including
+ operating system upgrade. Make customizations in a different
+ file.
+-->
+
+<service_bundle type='manifest' name='SUNWcsr:boot-archive'>
+
+<service
+ name='system/boot-archive'
+ type='service'
+ version='1'>
+
+ <create_default_instance enabled='true' />
+
+ <single_instance/>
+
+ <dependency
+ name='root'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/system/filesystem/root' />
+ </dependency>
+
+ <!--
+ Start method timeout is typically < 1 sec. We set it larger
+ to account for potential device timeouts.
+ -->
+ <exec_method
+ type='method'
+ name='start'
+ exec='/lib/svc/method/boot-archive'
+ timeout_seconds='60' />
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec=':true'
+ timeout_seconds='3' />
+
+ <property_group name='startd' type='framework'>
+ <propval name='duration' type='astring' value='transient' />
+ </property_group>
+
+ <stability value='Unstable' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+ check boot archive content
+ </loctext>
+ </common_name>
+ <description>
+ <loctext xml:lang='C'>
+ This service checks whether the boot archive is
+ in sync with the root filesystem.
+ </loctext>
+ </description>
+ </template>
+</service>
+
+</service_bundle>
diff --git a/usr/src/cmd/svc/milestone/console-login b/usr/src/cmd/svc/milestone/console-login
new file mode 100644
index 0000000000..256b651d34
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/console-login
@@ -0,0 +1,68 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+#
+# For modifying parameters passed to ttymon, do not edit
+# this script. Instead use svccfg(1m) to modify the SMF
+# repository. For example:
+#
+# # svccfg
+# svc:> select system/console-login
+# svc:/system/console-login> setprop ttymon/terminal_type = "xterm"
+# svc:/system/console-login> exit
+
+FMRI=svc:/system/console-login
+
+getproparg() {
+ val=`svcprop -p $2 $FMRI`
+ [ -n "$val" ] && echo $1 $val
+}
+
+args="-g"
+
+val=`svcprop -p ttymon/device $FMRI`
+# if this isn't set, recover a little
+[ -z "$val" ] && val=/dev/console
+args="$args -d $val"
+
+args="$args `getproparg -l ttymon/label`"
+args="$args `getproparg -T ttymon/terminal_type`"
+args="$args `getproparg -m ttymon/modules`"
+
+val=`svcprop -p ttymon/nohangup $FMRI`
+[ "$val" = "true" ] && args="$args -h"
+
+val=`svcprop -p ttymon/timeout $FMRI`
+[ -n "$val" -a "$val" != "0" ] && args="$args -t $val"
+
+val=`svcprop -p ttymon/prompt $FMRI`
+if [ -n "$val" ]; then
+ prompt=`eval echo $val`
+ exec /usr/lib/saf/ttymon $args -p "`eval echo $prompt` "
+else
+ exec /usr/lib/saf/ttymon $args
+fi
diff --git a/usr/src/cmd/svc/milestone/datalink b/usr/src/cmd/svc/milestone/datalink
new file mode 100644
index 0000000000..5bc22b5a06
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/datalink
@@ -0,0 +1,37 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+
+if [ `/sbin/zonename` != "global" ]; then
+ exit 0
+fi
+
+v1=`svcprop -p system/reconfigure system/svc/restarter:default`
+if [ "$v1" = "true" -o -f /etc/.UNCONFIGURED ]; then
+ /sbin/dladm init-link -t > /dev/msglog 2>&1
+fi
+/sbin/dladm up-link
diff --git a/usr/src/cmd/svc/milestone/datalink-init b/usr/src/cmd/svc/milestone/datalink-init
new file mode 100644
index 0000000000..6217e2003c
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/datalink-init
@@ -0,0 +1,44 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+
+#
+# Populate the /etc/links file during a reconfiguration reboot
+#
+
+if [ `/sbin/zonename` != "global" ]; then
+ exit 0
+fi
+
+svcprop -p system/reconfigure system/svc/restarter:default | (
+ read v1
+ if [ "$v1" = "true" ]; then
+ /sbin/dladm init-link > /dev/msglog 2>&1
+ fi
+)
+
+exit 0
diff --git a/usr/src/cmd/svc/milestone/datalink-init.xml b/usr/src/cmd/svc/milestone/datalink-init.xml
new file mode 100644
index 0000000000..2729eaddc2
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/datalink-init.xml
@@ -0,0 +1,83 @@
+<?xml version="1.0"?>
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+-->
+
+<service_bundle type='manifest' name='SUNWcsr:datalink-init'>
+
+<service
+ name='network/datalink-init'
+ type='service'
+ version='1'>
+
+ <create_default_instance enabled='true' />
+
+ <single_instance/>
+
+ <dependency
+ name='usr'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/system/filesystem/usr' />
+ </dependency>
+
+ <exec_method
+ type='method'
+ name='start'
+ exec='/lib/svc/method/datalink-init'
+ timeout_seconds='6000' />
+
+ <exec_method
+ type='method'
+ name='refresh'
+ exec='/lib/svc/method/datalink-init'
+ timeout_seconds='6000' />
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec=':true'
+ timeout_seconds='3' />
+
+ <property_group name='startd' type='framework'>
+ <propval name='duration' type='astring'
+ value='transient' />
+ </property_group>
+
+ <stability value='Evolving' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+ Solaris datalink initialization
+ </loctext>
+ </common_name>
+ </template>
+</service>
+
+</service_bundle>
diff --git a/usr/src/cmd/svc/milestone/datalink.xml b/usr/src/cmd/svc/milestone/datalink.xml
new file mode 100644
index 0000000000..37153eb52d
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/datalink.xml
@@ -0,0 +1,88 @@
+<?xml version="1.0"?>
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+-->
+
+<service_bundle type='manifest' name='SUNWcsr:datalink'>
+
+<service
+ name='network/datalink'
+ type='service'
+ version='1'>
+
+ <create_default_instance enabled='true' />
+
+ <single_instance/>
+
+ <dependency
+ name='aggregation'
+ grouping='optional_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/network/aggregation' />
+ </dependency>
+
+ <dependent
+ name='data_network'
+ grouping='optional_all'
+ restart_on='none'>
+ <service_fmri value='svc:/network/physical' />
+ </dependent>
+
+ <exec_method
+ type='method'
+ name='start'
+ exec='/lib/svc/method/datalink'
+ timeout_seconds='600' />
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec=':true'
+ timeout_seconds='3' />
+
+ <property_group name='startd' type='framework'>
+ <propval name='duration' type='astring'
+ value='transient' />
+ </property_group>
+
+ <stability value='Evolving' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+ network datalinks
+ </loctext>
+ </common_name>
+ <documentation>
+ <manpage title='dladm' section='1M'
+ manpath='/usr/share/man' />
+ </documentation>
+ </template>
+</service>
+
+</service_bundle>
diff --git a/usr/src/cmd/svc/milestone/devices-local b/usr/src/cmd/svc/milestone/devices-local
new file mode 100644
index 0000000000..e646594475
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/devices-local
@@ -0,0 +1,77 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T.
+# All rights reserved.
+#
+#
+# ident "%Z%%M% %I% %E% SMI"
+
+# GLXXX - The SysV copyright should be unnecessary now?
+
+# Initiate the device reconfiguration process in case we need some
+# device links established so that we can successfully perform our
+# remaining standard mounts.
+
+if [ `/sbin/zonename` != "global" ]; then
+ exit 0
+fi
+
+svcprop -q -p system/reconfigure system/svc/restarter:default
+if [ $? -eq 0 ]; then
+ echo 'Configuring devices.' > /dev/msglog 2>&1
+ /usr/sbin/devfsadm >/dev/msglog 2>&1
+ if [ -x /usr/ucb/ucblinks -a -f /usr/ucblib/ucblinks.awk ]; then
+ /usr/ucb/ucblinks >/dev/null 2>&1
+ fi
+
+ #
+ # Flush any existing socket mappings since the major numbers of
+ # the device files may have changed.
+ #
+ /usr/bin/awk '/^[^#]/ { print $1, $2, $3 }' /etc/sock2path | \
+ /sbin/soconfig -f /dev/fd/0 >/dev/null 2>&1
+ /sbin/soconfig -f /etc/sock2path >/dev/null 2>&1
+
+ #
+ # Update kernel driver.conf cache.
+ #
+ /usr/sbin/devfsadm -I
+fi
+
+# Establish the default framebuffer name.
+
+fbdev=`/usr/sbin/prtconf -F 2>/dev/null`
+
+if [ $? -eq 0 ]; then
+ set -- /devices$fbdev*
+ if [ -c $1 ]; then
+ rm -f /dev/fb
+ ln -s $1 /dev/fb
+ fi
+fi
+
+exit 0
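
The reconfigure test above leans on svcprop(1) exit status: with -q it prints nothing and simply returns 0 when the named property exists. A small sketch of the same pattern (not part of the script above), using a hypothetical service and property:

    # returns 0 only if config/debug exists on the (hypothetical) instance
    if /usr/bin/svcprop -q -p config/debug svc:/site/example:default; then
            echo "config/debug is set; doing extra work"
    else
            echo "config/debug absent; skipping"
    fi
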
diff --git a/usr/src/cmd/svc/milestone/devices-local.xml b/usr/src/cmd/svc/milestone/devices-local.xml
new file mode 100644
index 0000000000..f02a3bbb1c
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/devices-local.xml
@@ -0,0 +1,132 @@
+<?xml version="1.0"?>
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service manifest is not editable; its contents will
+ be overwritten by package or patch operations, including
+ operating system upgrade. Make customizations in a different
+ file.
+-->
+
+<service_bundle type='manifest' name='SUNWcsr:devices-system'>
+
+<service
+ name='system/device/local'
+ type='service'
+ version='1'>
+
+ <create_default_instance enabled='true' />
+
+ <single_instance/>
+
+ <dependency
+ name='usr'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/system/filesystem/usr' />
+ </dependency>
+
+ <!--
+ Start method timeout is long to account for devices which
+ take a long time to probe or enumerate.
+ -->
+ <exec_method
+ type='method'
+ name='start'
+ exec='/lib/svc/method/devices-local'
+ timeout_seconds='6000' />
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec=':true'
+ timeout_seconds='3' />
+
+ <property_group name='startd' type='framework'>
+ <propval name='duration' type='astring'
+ value='transient' />
+ </property_group>
+
+ <stability value='Evolving' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+ Standard Solaris device configuration.
+ </loctext>
+ </common_name>
+ </template>
+</service>
+
+<service
+ name='milestone/devices'
+ type='service'
+ version='1'>
+
+ <create_default_instance enabled='true' />
+
+ <single_instance/>
+
+ <dependency
+ name='usr'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/system/device/local' />
+ </dependency>
+
+
+ <exec_method
+ type='method'
+ name='start'
+ exec=':true'
+ timeout_seconds='3' />
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec=':true'
+ timeout_seconds='3' />
+
+	<property_group name='startd' type='framework'>
+		<propval name='duration' type='astring'
+			value='transient' />
+ </property_group>
+
+ <stability value='Evolving' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+ device configuration milestone
+ </loctext>
+ </common_name>
+ </template>
+</service>
+
+</service_bundle>
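
The two services above form a small chain: milestone/devices requires system/device/local, which in turn requires system/filesystem/usr. One way to see such a chain on a running system is svcs(1) with its dependency options; a sketch (not part of this manifest) using the FMRIs defined above:

    # services this milestone depends on (direct dependencies)
    /usr/bin/svcs -d svc:/milestone/devices:default

    # services that declare a dependency on the device configuration service
    /usr/bin/svcs -D svc:/system/device/local:default
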
diff --git a/usr/src/cmd/svc/milestone/fs-local b/usr/src/cmd/svc/milestone/fs-local
new file mode 100644
index 0000000000..8199f0f1be
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/fs-local
@@ -0,0 +1,85 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+
+. /lib/svc/share/smf_include.sh
+
+# Mount all local filesystems.
+
+cd /; /sbin/mountall -l >/dev/msglog
+rc=$?
+if [ $rc -ne 0 ]; then
+ msg="WARNING: /sbin/mountall -l failed: exit status $rc"
+ echo $msg
+ echo "$SMF_FMRI:" $msg >/dev/msglog
+ exit $SMF_EXIT_ERR_FATAL
+fi
+
+# get rid of transient reboot entry in GRUB menu
+if [ -f /stubboot/boot/grub/menu.lst ]; then
+ /sbin/bootadm -m update_temp -R /stubboot
+else
+ /sbin/bootadm -m update_temp
+fi
+
+#
+# If there are non-global UFS filesystems with quotas, check and enable them.
+#
+
+# vlist is the non-global filesystems in vfstab requesting quotas
+vlist=`/usr/bin/nawk '$1 !~ /^(#|-)/ && $4 == "ufs" {
+ if (match($7, "(^|,)(quota|rq)(,|$)") != 0 &&
+ match($7, "(^|,)global(,|$)") == 0) print $1; }' /etc/vfstab`
+
+if [ -n "$vlist" ]; then
+ # mlist is the filesystems in mnttab that are ufs, mounted rw,
+ # and without quotas turned on
+ mlist=`/usr/sbin/mount -p | /usr/bin/nawk '$4 == "ufs" {
+ if (match($7, "(^|,)ro(,|$)") == 0) print $1; }'`
+
+ # qlist is the intersection of vlist and mlist
+ qlist=`echo "$vlist\n-\n$mlist" | \
+ /usr/bin/nawk '{if ($1 == "-") { mlist = 1; }
+ else if (mlist == 0) { vlist[$1] = 1; }
+ else if (vlist[$1]) { print $1; } }'`
+
+ #
+ # Just check and enable the non-global UFS file systems with quotas
+ # enabled. Note that "quotacheck -a" and "quotaon -a" will try
+ # to process all UFS entries with quotas rather than excluding
+ # the entries with the global option (the global entries are handled
+ # later in another script if the cluster package is installed).
+ #
+ if [ -n "$qlist" ]; then
+ echo 'Checking UFS quotas: \c'
+ /usr/sbin/quotacheck -p $qlist
+ echo 'done.'
+ /usr/sbin/quotaon $qlist
+ fi
+fi
+
+exit $SMF_EXIT_OK
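
The qlist computation above intersects two newline-separated lists by feeding them to one nawk script with a lone "-" as a separator: lines before the marker are remembered, lines after it are printed only if previously seen. A standalone sketch of the same idiom (not part of the script), with made-up mount points:

    # vfstab side: /export and /home request quotas (sample data)
    # mnttab side: /home and /var are mounted read-write (sample data)
    /usr/bin/printf '%s\n' /export /home - /home /var |
        /usr/bin/nawk '{ if ($1 == "-") { mlist = 1 }
            else if (mlist == 0) { vlist[$1] = 1 }
            else if (vlist[$1]) { print $1 } }'
    # prints only the intersection: /home
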
diff --git a/usr/src/cmd/svc/milestone/fs-minimal b/usr/src/cmd/svc/milestone/fs-minimal
new file mode 100644
index 0000000000..d3b09824b1
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/fs-minimal
@@ -0,0 +1,55 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T.
+# All rights reserved.
+#
+#
+# ident "%Z%%M% %I% %E% SMI"
+
+. /lib/svc/share/smf_include.sh
+. /lib/svc/share/fs_include.sh
+
+# Mount other file systems to be available in single user mode.
+# Currently, these are /var, /var/adm, /var/run and /tmp. A change
+# here will require a modification to the following programs (and
+# documentation): /sbin/mountall, /sbin/umountall, and
+# /lib/svc/bin/svc.startd.
+
+for fs in /var /var/adm /tmp; do
+ readvfstab $fs < $vfstab
+ if [ -n "$mountp" ]; then
+ mounted $mountp $mntopts $fstype < /etc/mnttab && continue
+ checkfs $fsckdev $fstype $mountp || exit $SMF_EXIT_ERR_FATAL
+ mountfs -O $mountp $fstype $mntopts - ||
+ exit $SMF_EXIT_ERR_FATAL
+ fi
+done
+
+mounted /var/run - tmpfs < /etc/mnttab && exit $SMF_EXIT_OK
+mountfs -O /var/run tmpfs - swap || exit $SMF_EXIT_ERR_FATAL
+
+exit $SMF_EXIT_OK
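
mountfs and the other helpers used above come from /lib/svc/share/fs_include.sh; the final tmpfs mount is roughly equivalent to the plain mount(1M) invocation sketched below (not part of the script), assuming /var/run exists and nothing is mounted there yet:

    # mount a swap-backed tmpfs on /var/run by hand
    /usr/sbin/mount -F tmpfs swap /var/run
    # verify the mount is in place
    /usr/sbin/mount | /usr/bin/grep '/var/run'
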
diff --git a/usr/src/cmd/svc/milestone/fs-root b/usr/src/cmd/svc/milestone/fs-root
new file mode 100644
index 0000000000..85f709156f
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/fs-root
@@ -0,0 +1,181 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+# Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T.
+# All rights reserved.
+#
+
+# Make sure that the libraries essential to this stage of booting can be found.
+LD_LIBRARY_PATH=/lib; export LD_LIBRARY_PATH
+
+libc_mount() {
+ #
+ # If there is an optimized libc available in /usr that fits this
+ # processor, mount it on top of the base libc.
+ #
+ MOE=`/usr/bin/moe -32 '/usr/lib/libc/$HWCAP'`
+ if [ -n "$MOE" ]; then
+ /usr/sbin/mount | egrep -s "^/lib/libc.so.1 on "
+ if [ $? -ne 0 ]; then
+ /usr/sbin/mount -O -F lofs $MOE /lib/libc.so.1
+ fi
+ fi
+}
+
+libc_psr_mount() {
+ LIBC_MOE_32=`/usr/bin/moe -32 '/platform/sun4v/lib/libc_psr/$HWCAP'`
+ if [ -n "$LIBC_MOE_32" ]; then
+ /usr/sbin/mount | egrep -s "^/platform/sun4v/lib/libc_psr.so.1 on "
+ if [ $? -ne 0 ]; then
+ /usr/sbin/mount -O -F lofs $LIBC_MOE_32 /platform/sun4v/lib/libc_psr.so.1
+ fi
+ fi
+
+ LIBC_MOE_64=`/usr/bin/moe -64 '/platform/sun4v/lib/sparcv9/libc_psr/$HWCAP'`
+ if [ -n "$LIBC_MOE_64" ]; then
+ /usr/sbin/mount | egrep -s "^/platform/sun4v/lib/sparcv9/libc_psr.so.1 on "
+ if [ $? -ne 0 ]; then
+ /usr/sbin/mount -O -F lofs $LIBC_MOE_64 /platform/sun4v/lib/sparcv9/libc_psr.so.1
+ fi
+ fi
+}
+
+#
+# Most of the operations in this script are only necessary in the global
+# zone, but due to the way initialization scripts like this are packaged,
+# it currently needs to exist for all zones.
+#
+if [ "${_INIT_ZONENAME:=`/sbin/zonename`}" != "global" ]; then
+ libc_mount
+ exit 0
+fi
+
+. /lib/svc/share/smf_include.sh
+. /lib/svc/share/fs_include.sh
+
+#
+# Root is already mounted (by the kernel), but still needs to be
+# checked, possibly remounted and entered into mnttab. First
+# mount /usr read only if it is a separate file system. This must
+# be done first to allow utilities such as fsck and setmnt to
+# reside on /usr minimizing the space required by the root file
+# system.
+#
+readvfstab "/usr" < $vfstab
+if [ -n "$mountp" ]; then
+ if [ "$fstype" = cachefs ]; then
+ #
+ # Mount without the cache initially. We'll enable it
+ # later at remount time. This lets us avoid
+ # teaching the statically linked mount program about
+ # cachefs. Here we determine the backfstype.
+ # This is not pretty, but we have no tools for parsing
+ # the option string until we get /usr mounted...
+ #
+ case "$mntopts" in
+ *backfstype=nfs*)
+ cfsbacktype=nfs
+ ;;
+ *backfstype=hsfs*)
+ cfsbacktype=hsfs
+ ;;
+ *)
+ msg='invalid vfstab entry for /usr'
+ echo $msg
+ echo "$SMF_FMRI:" $msg >/dev/msglog
+ cfsbacktype=nfs
+ ;;
+ esac
+ mountfs - /usr $cfsbacktype ro $special ||
+ exit $SMF_EXIT_ERR_FATAL
+ else
+ #
+ # Must use -o largefiles here to ensure the
+ # read-only mount does not fail as a result of
+ # having a large file present on /usr. This gives
+ # fsck a chance to fix up the largefiles flag
+ # before we remount /usr read-write.
+ #
+ if [ "x$mntopts" = x- ]; then
+ mntopts='ro,largefiles'
+ else
+ checkopt largefiles $mntopts
+ if [ "x$option" != xlargefiles ]; then
+ mntopts="largefiles,$mntopts"
+ fi
+
+ checkopt ro $mntopts
+ if [ "x$option" != xro ]; then
+ mntopts="ro,$mntopts"
+ fi
+
+ #
+ # Requesting logging on a read-only mount
+ # causes errors to be displayed, so remove
+ # "logging" from the list of options for now.
+ # The read-write mount performed later will
+ # specify the logging option if appropriate.
+ #
+
+ checkopt logging $mntopts
+ if [ "x$option" = xlogging ]; then
+ mntopts="$otherops"
+ fi
+ fi
+
+ mountfs -O /usr $fstype $mntopts - || exit $SMF_EXIT_ERR_FATAL
+ fi
+fi
+
+#
+# Also mount /boot now so that things like keymap.sh can access
+# boot properties through eeprom. Readonly isn't required because
+# /boot (and other pcfs filesystems) aren't fsck'ed at boot yet.
+# Also, we don't account for caching /boot as it must be on a local
+# disk. So what's in vfstab is fine as it stands; just look to see
+# if it's there and avoid the mount if not.
+#
+readvfstab "/boot" < $vfstab
+
+if [ -n "$mountp" ]; then
+ mountfs - /boot $fstype $mntopts - || exit $SMF_EXIT_ERR_FATAL
+fi
+
+#
+# Update kernel driver.conf cache with any additional driver.conf
+# files found on /usr, and device permissions from /etc/minor_perm.
+#
+/usr/sbin/devfsadm -I -P
+
+[ -f /etc/.dynamic_routing ] && /usr/bin/rm -f /etc/.dynamic_routing
+
+libc_mount
+
+libc_psr_mount
+
+exit 0
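
libc_mount() and libc_psr_mount() above share one overlay idiom: if moe(1) reports a better-optimized object, it is lofs-mounted on top of the baseline file unless an overlay is already in place. A reduced sketch of that idiom (not part of the script), with a hypothetical optimized library path standing in for the moe output:

    # hypothetical result of: /usr/bin/moe -32 '/usr/lib/libc/$HWCAP'
    OPT=/usr/lib/libc/libc_hwcap1.so.1
    if [ -n "$OPT" ]; then
            # mount only if nothing is already overlaid on /lib/libc.so.1
            /usr/sbin/mount | egrep -s "^/lib/libc.so.1 on " ||
                /usr/sbin/mount -O -F lofs "$OPT" /lib/libc.so.1
    fi
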
diff --git a/usr/src/cmd/svc/milestone/fs-usr b/usr/src/cmd/svc/milestone/fs-usr
new file mode 100644
index 0000000000..b2c39bfed4
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/fs-usr
@@ -0,0 +1,124 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T.
+# All rights reserved.
+#
+#
+# ident "%Z%%M% %I% %E% SMI"
+
+. /lib/svc/share/smf_include.sh
+. /lib/svc/share/fs_include.sh
+
+#
+# Add physical swap.
+#
+/sbin/swapadd -1
+
+#
+# Check and remount the / (root) file system.
+# For NFS mounts, force the llock option on.
+#
+if [ "${_INIT_ZONENAME:=`/sbin/zonename`}" = "global" ]; then
+ readvfstab / < $vfstab
+ checkfs $fsckdev $fstype $mountp || exit $SMF_EXIT_ERR_FATAL
+ checkopt "llock" $mntopts
+ mntopts='remount'
+
+ [ -n "$otherops" ] && mntopts="${mntopts},${otherops}"
+ [ "$fstype" = nfs ] && mntopts="${mntopts},llock"
+
+ # if root dev is a read-only metadevice then fail
+ case $special in
+ /dev/md/dsk/*)
+ dd if=/dev/null of=$special count=0 >/dev/null 2>&1 ||
+ exit $SMF_EXIT_ERR_FATAL
+ ;;
+ esac
+
+ mountfs -m $mountp $fstype $mntopts - || exit $SMF_EXIT_ERR_FATAL
+fi
+
+#
+# Check and remount the /usr file system (formerly mounted read-only).
+#
+readvfstab /usr < $vfstab
+if [ "$mountp" ]; then
+ if [ "$fstype" = cachefs ]; then
+ mountfs -O $mountp cachefs $mntopts $special ||
+ exit $SMF_EXIT_ERR_FATAL
+ else
+ checkopt ro $mntopts
+ if [ "x$option" != xro ]; then
+ checkfs $fsckdev $fstype $mountp ||
+ exit $SMF_EXIT_ERR_FATAL
+ if [ "x$mntopts" != x- ]; then
+ mntopts="remount,$mntopts"
+ else
+ mntopts="remount"
+ fi
+
+ # if usr dev is a read-only metadevice then fail
+ case $special in
+ /dev/md/dsk/*)
+ dd if=/dev/null of=$special count=0 \
+ >/dev/null 2>&1 || exit $SMF_EXIT_ERR_FATAL
+ ;;
+ esac
+
+ mountfs - /usr $fstype $mntopts - ||
+ exit $SMF_EXIT_ERR_FATAL
+ fi
+ fi
+fi
+
+#
+# Check and mount the /usr/platform file system. This should only be
+# present when a SunOS 5.5 (Solaris 2.5) or greater client is being
+# administered by a SunOS 5.4 or less host.
+#
+readvfstab /usr/platform < $vfstab
+if [ "$mountp" ]; then
+ checkfs $fsckdev $fstype $mountp || exit $SMF_EXIT_ERR_FATAL
+ mountfs - $mountp $fstype $mntopts - || exit $SMF_EXIT_ERR_FATAL
+fi
+
+#
+# Mount the fd file systems if mount point exists.
+#
+readvfstab /dev/fd < $vfstab
+if [ "$mountp" -a -d /dev/fd ]; then
+ mountfs - /dev/fd - - - || exit $SMF_EXIT_ERR_FATAL
+fi
+
+# Clean up existing /etc/dfs/sharetab as there are no shared file systems
+# at this point. This also takes care of a corrupt sharetab.
+
+if [ -f /etc/dfs/sharetab ]; then
+ > /etc/dfs/sharetab
+fi
+
+exit $SMF_EXIT_OK
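
The zero-length dd writes above are a cheap writability probe: copying zero blocks touches no data, but the open for writing still fails on a read-only metadevice. The same probe in isolation (not part of the script), against a hypothetical metadevice:

    special=/dev/md/dsk/d10         # hypothetical root metadevice
    if dd if=/dev/null of=$special count=0 >/dev/null 2>&1; then
            echo "$special is writable; safe to remount read-write"
    else
            echo "$special is read-only; a remount would fail"
    fi
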
diff --git a/usr/src/cmd/svc/milestone/identity-domain b/usr/src/cmd/svc/milestone/identity-domain
new file mode 100644
index 0000000000..ea051f60d4
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/identity-domain
@@ -0,0 +1,47 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+
+. /lib/svc/share/smf_include.sh
+
+#
+# Set LDAP/NIS+/NIS domainname if locally configured.
+#
+if [ -f /etc/defaultdomain ]; then
+ mydomain=`cat /etc/defaultdomain`
+ /usr/bin/domainname $mydomain
+ name_service=""
+ [ -f /var/ldap/ldap_client_file ] && name_service="LDAP "
+ [ -f /var/nis/NIS_COLD_START ] && name_service="${name_service}NIS+ "
+ [ -d /var/yp/binding/$mydomain ] && name_service="${name_service}NIS "
+ [ -n "$name_service" ] && \
+ echo "${name_service}domain name is $mydomain" | smf_console
+else
+ echo "/etc/defaultdomain absent: no domain name set"
+fi
+
+exit $SMF_EXIT_OK
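
domainname(1M) sets the domain when given an argument and prints it when given none, so the method's effect can be reproduced and checked by hand. A sketch (not part of the method), using a placeholder domain:

    echo example.com > /etc/defaultdomain       # placeholder domain
    /usr/bin/domainname `cat /etc/defaultdomain`
    /usr/bin/domainname                         # should print: example.com
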
diff --git a/usr/src/cmd/svc/milestone/identity-node b/usr/src/cmd/svc/milestone/identity-node
new file mode 100644
index 0000000000..7f88286e57
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/identity-node
@@ -0,0 +1,96 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T.
+# All rights reserved.
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+
+. /lib/svc/share/smf_include.sh
+. /lib/svc/share/net_include.sh
+
+# Make sure that the libraries essential to this stage of booting can be found.
+LD_LIBRARY_PATH=/lib; export LD_LIBRARY_PATH
+
+#
+# If DHCP was used on a primary interface then set the hostname
+# that was returned. If no hostname was returned, set the name
+# to be "unknown". The hostname must be set to something, because
+# tooltalk will hang unless the name can be locally resolved.
+# Sendmail also requires the name to be resolvable locally.
+# Later, in inetsvc, we create a name "unknown" and create an entry
+# in the local /etc/inet/hosts file pairing "unknown" with the IP
+# address assigned by DHCP. The use of bootparams as a fallback
+# for all non-DHCP cases provides compatibility with the
+# behavior of the system before netstrategy was introduced.
+#
+# For non-global zones, fall back to the `uname -n` value provided by the
+# kernel if /etc/nodename does not exist, as is expected on an initial boot.
+#
+
+smf_netstrategy
+
+case "$_INIT_NET_STRATEGY" in
+ "dhcp") hostname=`/sbin/dhcpinfo Hostname` ;;
+ "rarp") hostname=`/sbin/hostconfig -h -p bootparams`
+ trap 'intr=1' 2 3
+ while [ -z "$hostname" -a ! -f /etc/.UNCONFIGURED -a \
+ -z "$intr" ]; do
+ echo "re-trying host configuration..."
+ # Restrict this to IPv4 interfaces.
+ /sbin/ifconfig -adD4 auto-revarp up
+ hostname=`/sbin/hostconfig -h -p bootparams`
+ done
+ trap 2 3 ;;
+ "none") hostname="`shcat /etc/nodename 2>/dev/null`"
+ if [ -z "$hostname" ]; then
+ if [ "${_INIT_ZONENAME:=`/sbin/zonename`}" = "global" ]
+ then
+ hostname=`/sbin/hostconfig -h -p bootparams`
+ else
+ hostname=`/sbin/uname -n`
+ fi
+ fi ;;
+esac
+
+#
+# If the netstrategy was unsuccessful and we haven't got a locally configured
+# name, default to "unknown"
+#
+if [ -z "$hostname" ]; then
+ hostname="`shcat /etc/nodename 2>/dev/null`"
+ if [ -z "$hostname" ]; then
+ hostname="unknown"
+ fi
+fi
+
+/sbin/uname -S $hostname
+
+echo "Hostname: `/sbin/uname -n`" > /dev/msglog
+
+# Reset the library path now that we are past the critical stage
+unset LD_LIBRARY_PATH
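
Whatever the chosen strategy yields, the node name is ultimately applied with uname -S and read back with uname -n. A minimal by-hand equivalent (not part of the method), using a placeholder host name:

    hostname=examplehost        # placeholder; normally from DHCP, bootparams
                                # or /etc/nodename
    /sbin/uname -S $hostname
    echo "Hostname: `/sbin/uname -n`"
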
diff --git a/usr/src/cmd/svc/milestone/identity.xml b/usr/src/cmd/svc/milestone/identity.xml
new file mode 100644
index 0000000000..3fdcf56496
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/identity.xml
@@ -0,0 +1,126 @@
+<?xml version="1.0"?>
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service manifest is not editable; its contents will
+ be overwritten by package or patch operations, including
+ operating system upgrade. Make customizations in a different
+ file.
+-->
+
+<service_bundle type='manifest' name='SUNWcsr:identity'>
+
+<service
+ name='system/identity'
+ type='service'
+ version='1'>
+
+ <dependency
+ name='loopback-network'
+ grouping='require_any'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/network/loopback' />
+ </dependency>
+
+ <dependency
+ name='physical-network'
+ grouping='optional_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/network/physical' />
+ </dependency>
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec=':true'
+ timeout_seconds='60' />
+
+ <property_group name='startd' type='framework'>
+ <propval name='duration' type='astring' value='transient' />
+ </property_group>
+
+ <instance name='node' enabled='true'>
+
+ <exec_method
+ type='method'
+ name='start'
+ exec='/lib/svc/method/identity-node'
+ timeout_seconds='60' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+ system identity (nodename)
+ </loctext>
+ </common_name>
+ <documentation>
+ <manpage title='nodename' section='4'
+ manpath='/usr/share/man' />
+ </documentation>
+ </template>
+
+ </instance>
+
+ <instance name='domain' enabled='false'>
+
+ <dependency
+ name='fs'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/system/filesystem/minimal' />
+ </dependency>
+
+ <exec_method
+ type='method'
+ name='start'
+ exec='/lib/svc/method/identity-domain'
+ timeout_seconds='60' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+ system identity (domainname)
+ </loctext>
+ </common_name>
+ <documentation>
+ <manpage title='domainname' section='1M'
+ manpath='/usr/share/man' />
+ <manpage title='defaultdomain' section='4'
+ manpath='/usr/share/man' />
+ </documentation>
+ </template>
+
+ </instance>
+
+ <stability value='Unstable' />
+
+</service>
+
+</service_bundle>
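
Note that the node instance above is delivered enabled while the domain instance is not; turning the latter on is an administrative step. A sketch of that step and of verifying the result (not part of this manifest):

    /usr/sbin/svcadm enable svc:/system/identity:domain
    /usr/bin/svcs -l svc:/system/identity:domain
    /usr/bin/domainname         # prints the domain once the method has run
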
diff --git a/usr/src/cmd/svc/milestone/local-fs.xml b/usr/src/cmd/svc/milestone/local-fs.xml
new file mode 100644
index 0000000000..a523a63b06
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/local-fs.xml
@@ -0,0 +1,93 @@
+<?xml version="1.0"?>
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service manifest is not editable; its contents will
+ be overwritten by package or patch operations, including
+ operating system upgrade. Make customizations in a different
+ file.
+-->
+
+<service_bundle type='manifest' name='SUNWcsr:filesystem-local'>
+
+<service
+ name='system/filesystem/local'
+ type='service'
+ version='1'>
+
+ <create_default_instance enabled='true' />
+
+ <single_instance/>
+
+ <dependency
+ name='single-user'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/milestone/single-user' />
+ </dependency>
+
+ <dependency
+ name='root'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/system/filesystem/minimal' />
+ </dependency>
+
+ <!--
+ Start method timeout is infinite to handle potentially unbounded
+ fsck times.
+ -->
+ <exec_method
+ type='method'
+ name='start'
+ exec='/lib/svc/method/fs-local'
+ timeout_seconds='0' />
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec=':true'
+ timeout_seconds='0' />
+
+ <property_group name='startd' type='framework'>
+ <propval name='duration' type='astring' value='transient' />
+ </property_group>
+
+ <stability value='Unstable' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+ local file system mounts
+ </loctext>
+ </common_name>
+ </template>
+</service>
+
+</service_bundle>
diff --git a/usr/src/cmd/svc/milestone/make-console-login-xml b/usr/src/cmd/svc/milestone/make-console-login-xml
new file mode 100644
index 0000000000..b6e3f75f7c
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/make-console-login-xml
@@ -0,0 +1,154 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+
+case "$MACH" in
+ sparc)
+ TTY_TYPE=sun
+ ;;
+ i386)
+ TTY_TYPE=sun-color
+ ;;
+ *)
+ echo "Unknown machine type $MACH" >&2
+ exit 1
+esac
+
+cat >console-login.xml <<EOF
+<?xml version="1.0"?>
+<!--
+ Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service manifest is not editable; its contents will
+ be overwritten by package or patch operations, including
+ operating system upgrade. Make customizations in a different
+ file.
+-->
+
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+
+<service_bundle type='manifest' name='SUNWcsr:console'>
+
+<service
+ name='system/console-login'
+ type='service'
+ version='1'>
+
+ <create_default_instance enabled='true' />
+
+ <single_instance/>
+
+ <dependency
+ name='fs'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/system/filesystem/minimal' />
+ </dependency>
+
+ <dependency
+ name='identity'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/system/identity:node' />
+ </dependency>
+
+ <dependency
+ name='utmpx'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/system/utmp:default' />
+ </dependency>
+
+ <!-- Note that console-login should be dependent on any services
+ that may need to use the console. This requirement can be met
+ by establishing a dependency on milestone/sysconfig which,
+ among other things, collects such dependencies.
+ -->
+ <dependency
+ name='sysconfig'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/milestone/sysconfig' />
+ </dependency>
+
+ <exec_method
+ type='method'
+ name='start'
+ exec='/lib/svc/method/console-login'
+ timeout_seconds='0' />
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec=':kill -9'
+ timeout_seconds='3' />
+
+ <property_group name='startd' type='framework'>
+ <propval name='duration' type='astring' value='child' />
+ <propval name='ignore_error' type='astring'
+ value='core,signal' />
+ <propval name='utmpx_prefix' type='astring' value='co' />
+ </property_group>
+
+ <!-- these are passed to ttymon in the method script -->
+ <property_group name='ttymon' type='application'>
+ <propval name='device' type='astring' value='/dev/console' />
+ <propval name='label' type='astring' value='console' />
+ <propval name='timeout' type='count' value='0' />
+ <propval name='nohangup' type='boolean' value='true' />
+ <propval name='modules' type='astring'
+ value='ldterm,ttcompat' />
+ <propval name='prompt' type='astring'
+ value='\`uname -n\` console login:' />
+ <propval name='terminal_type' type='astring'
+ value='$TTY_TYPE' />
+ </property_group>
+
+ <stability value='Evolving' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+Console login
+ </loctext>
+ </common_name>
+ <documentation>
+ <manpage title='ttymon' section='1M'
+ manpath='/usr/share/man' />
+ </documentation>
+ </template>
+</service>
+
+</service_bundle>
+EOF
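
The script writes console-login.xml into the current directory, choosing the ttymon terminal_type from $MACH, which is normally supplied by the build environment. A sketch of invoking it by hand for one architecture (the explicit MACH assignment is only for illustration):

    MACH=sparc /sbin/sh ./make-console-login-xml
    # the generated manifest should now carry the sparc terminal type
    /usr/bin/grep "value='sun'" console-login.xml
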
diff --git a/usr/src/cmd/svc/milestone/manifest-import b/usr/src/cmd/svc/milestone/manifest-import
new file mode 100644
index 0000000000..d5d974fac6
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/manifest-import
@@ -0,0 +1,530 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+
+# 0. Initialization.
+
+[ -f /lib/svc/share/smf_include.sh ] || exit 1
+
+activity=false
+
+X=
+while getopts n opt; do
+ case $opt in
+ n) X=echo;;
+ ?) echo "Usage: /lib/svc/method/manifest-import [-n]\n"
+ exit 2;;
+ esac
+done
+
+svccfg_apply () {
+ $X /usr/sbin/svccfg apply $1
+ if [ $? -ne 0 ]; then
+ echo "WARNING: svccfg apply $1 failed" | tee /dev/msglog
+ fi
+}
+
+svccfg_import () {
+ $X /usr/sbin/svccfg import $1 2>>/tmp/manifest_import.$$
+ if [ $? -ne 0 ]; then
+ echo > /dev/msglog
+ echo "WARNING: svccfg import $1 failed" | tee /dev/msglog
+ fi
+}
+
+prophist_upgrade () {
+ #
+ # A property has changed in the manifest that we wish to propagate into
+ # the repository during manifest import. We don't want to pollute
+ # manifests with overrides, so handle explicitly here.
+ #
+ fmri=$1
+ pgrp=$2
+ prop=$3
+ nval=$4
+ shift 4
+
+ /lib/svc/bin/prophist upgrade -e $fmri -g $pgrp -p $prop -n "$nval" \
+ "$@"
+ [ $? = 0 ] && instance_refresh $fmri
+}
+
+prophist_override () {
+ #
+ # A property has changed in the manifest that we wish to propagate
+ # into the repository during manifest import.
+ #
+ fmri=$1
+ pgrp=$2
+ prop=$3
+ nval=$4
+
+ /lib/svc/bin/prophist overwrite -e $fmri -g $pgrp -p $prop -n "$nval"
+ [ $? = 0 ] && instance_refresh $fmri
+}
+
+prophist_delete_svc_pg () {
+ #
+ # Certain property groups have migrated from the service level to the
+ # instance level. We don't care if they are at both, as the instance
+	# level will trump. But having neither could be bad. So check, and if
+ # the given pg exists at both levels, delete the service-level one only.
+ #
+ service=$1
+ instance=$2
+ property_group=$3
+
+ /usr/bin/svcprop -q -p $property_group $service
+ res1=$?
+ /usr/bin/svcprop -q -c -p $property_group $service:$instance
+ res2=$?
+ if [ $res1 -eq 0 -a $res2 -eq 0 ]; then
+ /lib/svc/bin/prophist delete -e $service -g $property_group
+ instance_refresh $service:$instance
+ fi
+}
+
+prophist_delete_dependency () {
+ #
+ # Some services have stale dependencies that need to be removed.
+ # This is done by removing the dependency property group.
+ #
+ fmri=$1
+ property_group=$2
+
+ /usr/bin/svcprop -q -c -p $property_group $fmri
+ if [ $? -eq 0 ]; then
+ /lib/svc/bin/prophist delete -e $fmri -g $property_group
+ else
+ [ -n "$_MFST_DEBUG" ] && \
+ echo "Dependency $property_group not defined on $fmri"
+ fi
+}
+
+prophist_delete_pg () {
+ # Delete obsolete property groups from old manifests. Instances
+ # should be refreshed for changes to take effect.
+ fmri=$1
+ pg=$2
+
+ /usr/bin/svcprop -Cqp $pg $fmri &&
+ /lib/svc/bin/prophist delete -e $fmri -g $pg
+}
+
+prophist_addprop () {
+ #
+ # If a property doesn't exist, create it. Instances should be
+ # refreshed for changes to take effect.
+ #
+ if [ $# -lt 6 ]; then
+ echo "prophist_addprop(): Insufficient arguments ($*)."
+ exit 1
+ fi
+
+ fmri=$1
+ /usr/bin/svcprop -q $fmri || return
+
+ pg=$2
+ pgtype=$3
+ prop=$4
+
+ /usr/bin/svcprop -Cqp $pg/$prop $fmri && return
+
+ shift 4
+
+ /usr/bin/svcprop -Cqp $pg $fmri || \
+ /usr/sbin/svccfg -s $fmri addpg $pg $pgtype
+ /usr/sbin/svccfg -s $fmri setprop $pg/$prop = $*
+}
+
+prophist_addmeth () {
+ #
+ # If a method doesn't exist, create it. Instances should be refreshed
+ # for changes to take effect.
+ #
+ if [ $# -ne 4 ]; then
+ echo "prophist_addmeth(): Insufficient arguments ($*)"
+ exit 1
+ fi
+
+ fmri=$1
+ /usr/bin/svcprop -q $fmri || return
+
+ name=$2
+ /usr/bin/svcprop -Cqp $name $fmri && return
+
+ exec=$3
+ to=$4
+
+ /usr/sbin/svccfg -s $fmri <<END
+ addpg $name method
+ setprop $name/type = astring: method
+ setprop $name/exec = astring: "$exec"
+ setprop $name/timeout_seconds = count: $to
+END
+}
+
+prophist_adddep () {
+ #
+ # If a dependency doesn't exist, create it. Instances should be
+ # refreshed for changes to take effect.
+ #
+ if [ $# -lt 6 ]; then
+ echo "prophist_adddep(): Insufficient arguments ($*)"
+ exit 1
+ fi
+
+ fmri=$1
+ /usr/bin/svcprop -q $fmri || return
+
+ name=$2
+ /usr/bin/svcprop -Cqp $name $fmri && return
+
+ type=$3
+ group=$4
+ ro=$5
+ shift 5
+
+ /usr/sbin/svccfg -s $fmri <<END
+ addpg $name dependency
+ setprop $name/type = astring: $type
+ setprop $name/grouping = astring: $group
+ setprop $name/restart_on = astring: $ro
+ setprop $name/entities = fmri: $*
+END
+}
+
+prophist_adddpt () {
+ #
+ # If a dependent doesn't exist, create it. Instances should be
+	# refreshed for changes to take effect.
+ #
+ if [ $# -ne 5 ]; then
+ echo "prophist_adddpt(): Incorrect arguments ($*).\n"
+ exit 1
+ fi
+
+ fmri=$1
+ /usr/bin/svcprop -q $fmri || return
+
+ name=$2
+ /usr/bin/svcprop -Cqp dependents/$name $fmri && return
+
+ group=$3
+ ro=$4
+ target=$5
+
+ prophist_addprop $fmri dependents framework $name fmri: $target
+ prophist_adddep $target $name service $group $ro $fmri
+}
+
+instance_refresh () {
+ echo $1 >> /etc/svc/volatile/refreshes
+}
+
+refresh_instances () {
+ [ -r /etc/svc/volatile/refreshes ] && {
+ sort -u /etc/svc/volatile/refreshes | xargs -l svcadm refresh
+ }
+}
+
+instance_clear () {
+ echo $1 >> /etc/svc/volatile/clears
+}
+
+clear_conditionally () {
+ [ "`/usr/bin/svcprop -p restarter/state $1`" = "maintenance" ] && \
+ /usr/sbin/svcadm clear $1
+}
+
+clear_instances () {
+ [ -r /etc/svc/volatile/clears ] && {
+ for inst in `/usr/bin/sort -u /etc/svc/volatile/clears`; do
+ clear_conditionally $inst
+ done
+ }
+}
+
+prepare_last_import () {
+ # Preserve the five hashes for the profiles: generic (two
+ # cases), platform (uname -i, uname -m outputs), and site.
+
+ gn="var_svc_profile_generic_open_xml"
+ gh=`/usr/bin/svcprop -p ${gn}/md5sum smf/manifest 2>/dev/null`
+ [ $? = 0 ] || gh=""
+
+ gln="var_svc_profile_generic_limited_net_xml"
+ glh=`/usr/bin/svcprop -p ${gln}/md5sum smf/manifest 2>/dev/null`
+ [ $? = 0 ] || glh=""
+
+ LC_ALL=C pl=`/usr/bin/uname -i | /usr/bin/tr , _`
+ pln="var_svc_profile_platform_${pl}_xml"
+ plh=`/usr/bin/svcprop -p ${pln}/md5sum smf/manifest 2>/dev/null`
+ [ $? = 0 ] || plh=""
+
+ LC_ALL=C plm=`/usr/bin/uname -m | /usr/bin/tr , _`
+ if [ $plm != $pl ]; then
+ plmn="var_svc_profile_platform_${plm}_xml"
+ plmh=`/usr/bin/svcprop -p ${plmn}/md5sum smf/manifest \
+ 2>/dev/null`
+ [ $? = 0 ] || plmh=""
+ else
+ plmh=""
+ fi
+
+ sn="var_svc_profile_site_xml"
+ sh=`/usr/bin/svcprop -p $sn/md5sum smf/manifest 2>/dev/null`
+ [ $? = 0 ] || sh=""
+
+ # Remove all manifest hashes.
+ /usr/sbin/svccfg delete smf/manifest
+
+ # Restore smf/manifest and hash values.
+ /usr/sbin/svccfg add smf/manifest
+ [ -n "$gh" ] && {
+ echo "Preserving generic hash ($gh)."
+ /usr/sbin/svccfg -s smf/manifest addpg ${gn} framework
+ /usr/sbin/svccfg -s smf/manifest setprop ${gn}/md5sum = \
+ opaque: $gh
+ }
+ [ -n "$glh" ] && {
+ echo "Preserving generic_limited hash ($glh)."
+ /usr/sbin/svccfg -s smf/manifest addpg ${gln} framework
+ /usr/sbin/svccfg -s smf/manifest setprop ${gln}/md5sum = \
+ opaque: $glh
+ }
+ [ -n "$plh" ] && {
+ echo "Preserving platform hash ($plh)."
+ /usr/sbin/svccfg -s smf/manifest addpg $pln framework
+ /usr/sbin/svccfg -s smf/manifest setprop $pln/md5sum = \
+ opaque: $plh
+ }
+ [ -n "$plmh" ] && {
+ echo "Preserving platform hash ($plmh)."
+ /usr/sbin/svccfg -s smf/manifest addpg $plmn framework
+ /usr/sbin/svccfg -s smf/manifest setprop $plmn/md5sum = \
+ opaque: $plmh
+ }
+ [ -n "$sh" ] && {
+ echo "Preserving site hash ($sh)."
+ /usr/sbin/svccfg -s smf/manifest addpg $sn framework
+ /usr/sbin/svccfg -s smf/manifest setprop $sn/md5sum = \
+ opaque: $sh
+ }
+}
+
+SVCCFG_CHECKHASH=1 export SVCCFG_CHECKHASH
+
+#
+# 0. Clean up repository
+#
+if [ -z "$X" ] && /usr/bin/svcprop smf/manifest 2>/dev/null |
+    /usr/bin/grep '^ar_svc[^/]*/md5sum opaque ' >/dev/null
+then
+ set -- `
+ /usr/bin/svcprop smf/manifest 2>/dev/null |
+ /usr/bin/grep '^ar_svc[^/]*/md5sum opaque ' |
+ /usr/bin/tr '/' ' ' |
+ while read pg prop type value; do
+ echo "$pg/$value"
+ done
+ `
+ backup=`echo "$#/$#" | sed 's/.//g'`
+ fwidth=`echo "$#\c" | wc -c`
+
+ echo "Converting obsolete repository entries: \c" > /dev/msglog
+ i=1; n=$#
+ while [ $# -gt 0 ]; do
+ printf "%${fwidth}s/%${fwidth}s" $i $n > /dev/msglog
+ echo $1 | sed 's:/: :' | (
+ read pg value
+
+ (echo "select /smf/manifest"; echo "delpg v$pg") |
+ /usr/sbin/svccfg 2>/dev/null >/dev/null
+ (echo "select /smf/manifest"; echo "delpg $pg") |
+ /usr/sbin/svccfg 2>/dev/null >/dev/null
+ (echo "select /smf/manifest";
+ echo "addpg v$pg framework") |
+ /usr/sbin/svccfg 2>/dev/null >/dev/null
+ (echo "select /smf/manifest";
+ echo "setprop v$pg/md5sum = opaque: $value") |
+ /usr/sbin/svccfg 2>/dev/null >/dev/null
+ )
+ i=`expr $i + 1`
+ shift
+ echo "$backup\c" > /dev/msglog
+ done
+ echo > /dev/msglog
+ echo "Converted $n obsolete repository entries"
+ activity=true
+fi
+
+#
+# If no last-import snapshots are present on critical services, then we are
+# creating the last-import snapshots for the first time post upgrade.
+#
+create_last_import=1
+for svc in single-user multi-user multi-user-server; do
+ if /usr/bin/svcprop -s last-import svc:/milestone/$svc:default \
+ >/dev/null 2>&1
+ then
+ create_last_import=
+ break
+ fi
+done
+
+if [ $create_last_import ]; then
+ echo "Last import snapshots absent; preparing for re-import"
+ prepare_last_import
+
+ #
+ # Apply property history files.
+ #
+ echo "Upgrade detected; applying property history"
+ for phist in /var/svc/profile/prophist.*; do
+ /lib/svc/bin/prophist hash $phist
+ if [ $? = 3 ]; then
+ echo "Sourcing $phist"
+ . $phist
+ fi
+ done
+
+ /usr/bin/rm -f /var/svc/profile/.upgrade_prophist
+fi
+
+#
+# 2. Manifest import. Application directories first, then
+# site-specific manifests.
+#
+nonsite_dirs=`/usr/bin/find /var/svc/manifest/* -name site -prune -o -type d \
+ -print -prune`
+
+nonsite_manifests=`/lib/svc/bin/mfstscan $nonsite_dirs`
+site_manifests=`/lib/svc/bin/mfstscan /var/svc/manifest/site`
+
+manifests="$nonsite_manifests $site_manifests"
+
+[ -n "$_MFST_DEBUG" ] && {
+ echo "Changed manifests to import:"
+ for m in $manifests; do echo " $m"; done
+}
+
+#
+# 2b. Import the manifests while giving a running display of imports on
+# console, and a final count in the logfile.
+#
+if [ -n "$nonsite_manifests" -o -n "$site_manifests" ]; then
+ rm -f /tmp/manifest_import.$$
+
+ set -- $manifests
+ backup=`echo "$#/$#" | sed 's/.//g'`
+ fwidth=`echo "$#\c" | wc -c`
+
+ echo "Loading smf(5) service descriptions: \c" > /dev/msglog
+
+ i=1; n=$#
+ while [ $# -gt 0 ]; do
+ printf "%${fwidth}s/%${fwidth}s" $i $n > /dev/msglog
+ svccfg_import $1
+ i=`expr $i + 1`
+ shift
+ echo "$backup\c" > /dev/msglog
+ done
+
+ echo > /dev/msglog
+ echo "Loaded $n smf(5) service descriptions"
+ activity=true
+
+ if [ -s /tmp/manifest_import.$$ ]; then
+ echo "svccfg warnings:"
+ cat /tmp/manifest_import.$$
+
+ msg="svccfg import warnings. See"
+ msg="$msg /var/svc/log/system-manifest-import:default.log ."
+ echo $msg > /dev/msglog
+ fi
+ rm -f /tmp/manifest_import.$$
+fi
+
+#
+# 3. Profile application. We must create the platform profile upon
+# first boot, as we may be a diskless client of a platform or
+# architecture distinct from our NFS server.
+#
+svccfg_apply /var/svc/profile/generic.xml
+
+if [ ! -f /var/svc/profile/platform.xml ]; then
+ this_karch=`uname -m`
+ this_plat=`uname -i`
+
+ if [ -f /var/svc/profile/platform_$this_plat.xml ]; then
+ platform_profile=platform_$this_plat.xml
+ elif [ -f /var/svc/profile/platform_$this_karch.xml ]; then
+ platform_profile=platform_$this_karch.xml
+ else
+ platform_profile=platform_none.xml
+ fi
+
+ ln -s $platform_profile /var/svc/profile/platform.xml
+fi
+
+svccfg_apply /var/svc/profile/platform.xml
+
+#
+# 4. Upgrade handling. The upgrade file generally consists of a series
+# of svcadm(1M) and svccfg(1M) commands.
+#
+(
+ unset SVCCFG_CHECKHASH
+
+ if [ -f /var/svc/profile/upgrade ]; then
+ . /var/svc/profile/upgrade
+
+ /usr/bin/mv /var/svc/profile/upgrade \
+ /var/svc/profile/upgrade.app.`date +\%Y\%m\%d\%H\%M\%S`
+ activity=true
+ fi
+)
+
+#
+# 5. Site profile is applied last to give administrator the final say.
+#
+if [ -f /var/svc/profile/site.xml ]; then
+ svccfg_apply /var/svc/profile/site.xml
+fi
+
+#
+# 6. Final actions.
+#
+refresh_instances
+clear_instances
+
+if $activity; then
+ svcadm _smf_backup "manifest_import" || true
+fi
+
+exit 0
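
Manifest and profile hashes live as property groups on the smf/manifest service, which is how this script decides what has already been imported. A sketch of inspecting them (not part of the script); the site-profile group name comes straight from prepare_last_import() above and only exists once a site profile has been hashed:

    # list every recorded hash
    /usr/bin/svcprop smf/manifest | /usr/bin/grep '/md5sum '

    # fetch a single hash, e.g. the site profile's (if present)
    /usr/bin/svcprop -p var_svc_profile_site_xml/md5sum smf/manifest
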
diff --git a/usr/src/cmd/svc/milestone/manifest-import.xml b/usr/src/cmd/svc/milestone/manifest-import.xml
new file mode 100644
index 0000000000..2ef9021401
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/manifest-import.xml
@@ -0,0 +1,90 @@
+<?xml version="1.0"?>
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service manifest is not editable; its contents will
+ be overwritten by package or patch operations, including
+ operating system upgrade. Make customizations in a different
+ file.
+-->
+
+<service_bundle type='manifest' name='SUNWcsr:import'>
+
+<service
+ name='system/manifest-import'
+ type='service'
+ version='1'>
+
+ <create_default_instance enabled='true' />
+
+ <single_instance/>
+
+ <dependency
+ name='fs'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/system/filesystem/minimal' />
+ </dependency>
+
+ <!--
+	Manifest import has been observed to take more than 10 minutes on
+	systems with slow disks.  A timeout value with a reasonable margin
+	of error over that observation is required.
+ -->
+ <exec_method
+ type='method'
+ name='start'
+ exec='/lib/svc/method/manifest-import'
+ timeout_seconds='1800' />
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec=':true'
+ timeout_seconds='3' />
+
+ <property_group name='startd' type='framework'>
+ <propval name='duration' type='astring' value='transient' />
+ </property_group>
+
+ <stability value='Unstable' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+ service manifest import
+ </loctext>
+ </common_name>
+ <documentation>
+ <manpage title='smf_bootstrap' section='5'
+ manpath='/usr/share/man' />
+ </documentation>
+ </template>
+</service>
+
+</service_bundle>
diff --git a/usr/src/cmd/svc/milestone/minimal-fs.xml b/usr/src/cmd/svc/milestone/minimal-fs.xml
new file mode 100644
index 0000000000..b7af22bfcd
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/minimal-fs.xml
@@ -0,0 +1,93 @@
+<?xml version="1.0"?>
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service manifest is not editable; its contents will
+ be overwritten by package or patch operations, including
+ operating system upgrade. Make customizations in a different
+ file.
+-->
+
+<service_bundle type='manifest' name='SUNWcsr:filesystem-minimal'>
+
+<service
+ name='system/filesystem/minimal'
+ type='service'
+ version='1'>
+
+ <create_default_instance enabled='true' />
+
+ <single_instance/>
+
+ <dependency
+ name='usr'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/system/filesystem/usr' />
+ </dependency>
+
+ <dependency
+ name='devices'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/system/device/local' />
+ </dependency>
+
+ <!--
+ Start method timeout is infinite to handle potentially unbounded
+ fsck times.
+ -->
+ <exec_method
+ type='method'
+ name='start'
+ exec='/lib/svc/method/fs-minimal'
+ timeout_seconds='0' />
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec=':true'
+ timeout_seconds='0' />
+
+ <property_group name='startd' type='framework'>
+ <propval name='duration' type='astring' value='transient' />
+ </property_group>
+
+ <stability value='Unstable' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+ minimal file system mounts
+ </loctext>
+ </common_name>
+ </template>
+</service>
+
+</service_bundle>
diff --git a/usr/src/cmd/svc/milestone/multi-user-server.xml b/usr/src/cmd/svc/milestone/multi-user-server.xml
new file mode 100644
index 0000000000..a4e48fd9ac
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/multi-user-server.xml
@@ -0,0 +1,112 @@
+<?xml version='1.0'?>
+<!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service manifest is not editable; its contents will
+ be overwritten by package or patch operations, including
+ operating system upgrade. Make customizations in a different
+ file.
+
+ Service manifest for the multi-user-server milestone.
+
+ milestone/multi-user-server corresponds to the set of services
+ required to commence run level 3, plus legacy processes started
+ in /etc/rc3.d.
+-->
+
+<service_bundle type='manifest' name='SUNWcsr:multi-user-server'>
+
+<service
+ name='milestone/multi-user-server'
+ type='service'
+ version='1'>
+
+ <create_default_instance enabled='true' />
+
+ <single_instance/>
+
+ <!-- milestones required -->
+
+ <dependency
+ name='multi-user'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/milestone/multi-user' />
+ </dependency>
+
+ <!-- services required -->
+
+ <!--
+ We can't know how long legacy init scripts will take to run. Set
+ the timeout value high enough to allow them to take their time
+ to start.
+ -->
+ <exec_method
+ type='method'
+ name='start'
+ exec='/sbin/rc3'
+ timeout_seconds='1800'>
+ <propval
+ name='restart_on'
+ type='astring'
+ value='external_fault' />
+ </exec_method>
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec=':true'
+ timeout_seconds='0' />
+
+ <!--
+ The init scripts should never automatically be run twice.
+ duration=transient tells svc.startd not to restart if no
+ processes are left running, and timeout_retry=false tells
+ svc.startd not to retry the start method if it times out.
+ -->
+ <property_group name='startd' type='framework'>
+ <propval name='duration' type='astring' value='transient' />
+ <propval name='timeout_retry' type='boolean' value='false' />
+ </property_group>
+
+ <stability value='Evolving' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+multi-user plus exports milestone
+ </loctext>
+ </common_name>
+ <documentation>
+ <manpage title='init' section='1M'
+ manpath='/usr/share/man' />
+ </documentation>
+ </template>
+</service>
+
+</service_bundle>
diff --git a/usr/src/cmd/svc/milestone/multi-user.xml b/usr/src/cmd/svc/milestone/multi-user.xml
new file mode 100644
index 0000000000..a5b6698e47
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/multi-user.xml
@@ -0,0 +1,144 @@
+<?xml version='1.0'?>
+<!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service manifest is not editable; its contents will
+ be overwritten by package or patch operations, including
+ operating system upgrade. Make customizations in a different
+ file.
+
+ Service manifest for the multi-user milestone.
+
+ milestone/multi-user corresponds to the set of services required
+ to commence run level 2, plus legacy processes started in
+ /etc/rc2.d.
+-->
+
+<service_bundle type='manifest' name='SUNWcsr:multi-user'>
+
+<service
+ name='milestone/multi-user'
+ type='service'
+ version='1'>
+
+ <create_default_instance enabled='true' />
+
+ <single_instance/>
+
+ <dependency
+ name='milestones'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/milestone/single-user' />
+ <service_fmri value='svc:/milestone/sysconfig' />
+ <service_fmri value='svc:/milestone/name-services' />
+ </dependency>
+
+ <dependency
+ name='fs'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/system/filesystem/local' />
+ </dependency>
+
+ <!--
+ We should let kdmconfig give up the console before graphical
+ logins start out of multi-user.
+ -->
+ <dependency
+ name='kdmconfig'
+ grouping='optional_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/platform/i86pc/kdmconfig:default' />
+ </dependency>
+
+ <dependency
+ name='rpcbind'
+ grouping='optional_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/network/rpc/bind' />
+ </dependency>
+
+ <dependency
+ name='syslog'
+ grouping='optional_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/system/system-log' />
+ </dependency>
+
+ <!--
+ We can't know how long legacy init scripts will take to run. Set
+ the timeout value high enough to allow them to take their time
+ to start.
+ -->
+ <exec_method
+ type='method'
+ name='start'
+ exec='/sbin/rc2 start'
+ timeout_seconds='1800'>
+ <propval name='restart_on'
+ type='astring' value='external_fault' />
+ </exec_method>
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec=':true'
+ timeout_seconds='0' />
+
+ <!--
+ The init scripts should never automatically be run twice.
+ duration=transient tells svc.startd not to restart if no
+ processes are left running, and timeout_retry=false tells
+ svc.startd not to retry the start method if it times out.
+ -->
+ <property_group name='startd' type='framework'>
+ <propval name='duration' type='astring' value='transient' />
+ <propval name='timeout_retry' type='boolean' value='false' />
+ </property_group>
+
+ <stability value='Evolving' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+multi-user milestone
+ </loctext>
+ </common_name>
+ <documentation>
+ <manpage title='init' section='1M'
+ manpath='/usr/share/man' />
+ </documentation>
+ </template>
+</service>
+
+</service_bundle>
diff --git a/usr/src/cmd/svc/milestone/name-services.xml b/usr/src/cmd/svc/milestone/name-services.xml
new file mode 100644
index 0000000000..25b2512d87
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/name-services.xml
@@ -0,0 +1,106 @@
+<?xml version="1.0"?>
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service manifest is not editable; its contents will
+ be overwritten by package or patch operations, including
+ operating system upgrade. Make customizations in a different
+ file.
+-->
+
+<service_bundle type='manifest' name='SUNWcsr:name-services'>
+
+<service
+ name='milestone/name-services'
+ type='service'
+ version='1'>
+
+ <create_default_instance enabled='true' />
+
+ <single_instance />
+
+ <dependency
+ name='dns'
+ grouping='optional_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/network/dns/client' />
+ </dependency>
+
+ <dependency
+ name='ldap'
+ grouping='optional_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/network/ldap/client' />
+ </dependency>
+
+ <dependency
+ name='nis_client'
+ grouping='optional_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/network/nis/client' />
+ </dependency>
+
+ <dependency
+ name='nisplus'
+ grouping='optional_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/network/rpc/nisplus' />
+ </dependency>
+
+ <exec_method
+ type='method'
+ name='start'
+ exec=':true'
+ timeout_seconds='0' />
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec=':true'
+ timeout_seconds='0' />
+
+ <property_group name='general' type='framework'>
+ <propval name='startd_duration' type='astring'
+ value='transient' />
+ </property_group>
+
+ <stability value='Evolving' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+ name services milestone
+ </loctext>
+ </common_name>
+ </template>
+</service>
+
+</service_bundle>
diff --git a/usr/src/cmd/svc/milestone/net-init b/usr/src/cmd/svc/milestone/net-init
new file mode 100644
index 0000000000..de4e4fb4f2
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/net-init
@@ -0,0 +1,362 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+
+#
+# This is the second phase of TCP/IP configuration. The first part is
+# run by the /lib/svc/method/net-physical script (the svc:/network/physical
+# service) and includes configuring the interfaces and setting the machine's
+# hostname. This script (the svc:/network/initial service), does all
+# configuration that can be done before name services are started. This
+# includes configuring IP routing, and setting any tunable parameters.
+# The third part, run by the /lib/svc/method/net-svc script (the
+# svc:/network/service service), does all configuration that may require
+# name services. This includes a final re-configuration of the interfaces.
+#
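+#
+# As an illustration only, each of these phases corresponds to an SMF
+# service (the default instances created by the manifests delivered
+# alongside this script), so their state and dependencies can be
+# inspected with svcs(1):
+#
+#	svcs -l svc:/network/physical:default	# phase one
+#	svcs -l svc:/network/initial:default	# this script, phase two
+#	svcs -l svc:/network/service:default	# phase three
+#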
+
+. /lib/svc/share/smf_include.sh
+
+case "$1" in
+'start')
+ #
+ # In a zone we need this service to be up, but all of the work
+ # it tries to do is irrelevant (and will actually lead to the service
+ # failing if we try to do it), so just bail out.
+ #
+ if [ `/sbin/zonename` != "global" ]; then
+ exit 0
+ fi
+ ;; # Fall through -- rest of script is the initialization code
+
+'stop')
+ if [ `/sbin/zonename` != "global" ]; then
+ exit 0
+ fi
+ #
+ # If we were routing dynamically, we will note this with
+ # the .dynamic_routing file, so that we can leave the routes
+ # in place without thinking they're static route entries
+ # when we come back into states 2 or 3.
+ #
+ if /usr/bin/pgrep -x -u 0 'in.routed|in.rdisc' >/dev/null 2>&1; then
+ /usr/bin/pkill -z `/sbin/zonename` -x -u 0 'in.routed|in.rdisc'
+ > /etc/.dynamic_routing
+ fi
+ /usr/bin/pkill -z `/sbin/zonename` -x -u 0 'in.ndpd|in.ripngd'
+ exit 0
+ ;;
+
+*)
+ echo "Usage: $0 { start | stop }"
+ exit 1
+ ;;
+esac
+
+# Configure IPv6 Default Address Selection.
+if [ -f /etc/inet/ipaddrsel.conf ]; then
+ /usr/sbin/ipaddrsel -f /etc/inet/ipaddrsel.conf
+fi
+
+/usr/sbin/ifconfig -a6u >/etc/svc/volatile/ifconfig.$$
+numv6ifs=`/usr/bin/grep -c inet6 /etc/svc/volatile/ifconfig.$$`
+if [ $numv6ifs -gt 1 ]; then
+ #
+ # Add a static route for multicast packets out of a link-local
+	# interface, although we would like to specify the multicast
+	# interface using an interface name!
+ #
+ set -- `/usr/bin/awk '
+ /inet6 fe80:/ {
+ print substr($2, 1, index($2, "/") - 1)
+ }' /etc/svc/volatile/ifconfig.$$`
+
+ if [ -n "$1" ]; then
+ echo "Setting default IPv6 interface for multicast:" \
+ "add net ff00::/8: gateway $1"
+ /usr/sbin/route -n add -interface -inet6 "ff00::/8" "$1" \
+ >/dev/null
+ fi
+fi
+/usr/bin/rm -f /etc/svc/volatile/ifconfig.$$
+
+#
+# Now that /usr is mounted, see if in.mpathd needs to be started by firing it
+# up in "adopt" mode; if there are no interfaces it needs to manage, it will
+# automatically exit. Note that it may already be running if we're not
+# executing as part of system boot.
+#
+/usr/bin/pgrep -x -u 0 in.mpathd >/dev/null 2>&1 || /usr/lib/inet/in.mpathd -a
+
+#
+# Pass to the kernel the list of supported IPsec protocols and algorithms.
+# This will not cause IPsec to be loaded.
+#
+/usr/sbin/ipsecalgs -s
+
+#
+# Initialize IPsec only if ipsecinit.conf exists. Otherwise, save the
+# kernel memory that'll be consumed if IPsec is loaded. See below for more
+# IPsec-related commands.
+#
+if [ -f /etc/inet/ipsecinit.conf ] ; then
+ /usr/sbin/ipsecconf -qa /etc/inet/ipsecinit.conf
+fi
+
+#
+# Set the RFC 1948 entropy, regardless of whether it will be used.  If present,
+# use the encrypted root password as a source of entropy. Otherwise,
+# just use the pre-set (and hopefully difficult to guess) entropy that
+# tcp used when it loaded.
+#
+encr=`/usr/bin/awk -F: '/^root:/ {print $2}' /etc/shadow`
+[ -z "$encr" ] || /usr/sbin/ndd -set /dev/tcp tcp_1948_phrase $encr
+unset encr
+
+#
+# Get values for TCP_STRONG_ISS, ACCEPT6TO4RELAY and RELAY6TO4ADDR.
+#
+[ -f /etc/default/inetinit ] && . /etc/default/inetinit
+
+#
+# Set TCP ISS generation. By default the ISS generation is
+# time + random()-delta. This might not be strong enough for some users.
+# See /etc/default/inetinit for settings and further info on TCP_STRONG_ISS.
+# If not set, use TCP's internal default setting.
+#
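+# As an illustration only, a line such as the following in
+# /etc/default/inetinit (the value 2 selects RFC 1948 sequence number
+# generation) would be picked up by the test below:
+#
+#	TCP_STRONG_ISS=2
+#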
+if [ $TCP_STRONG_ISS ]; then
+ /usr/sbin/ndd -set /dev/tcp tcp_strong_iss $TCP_STRONG_ISS
+fi
+
+#
+# Configure default IPv4 routers using the local "/etc/defaultrouter"
+# configuration file. The file can contain the hostnames or IP
+# addresses of one or more default routers. If hostnames are used,
+# each hostname must also be listed in the local "/etc/hosts" file
+# because NIS and NIS+ are not running at the time that this script is
+# run. Each router name or address is listed on a single line by
+# itself in the file. Anything else on that line after the router's
+# name or address is ignored. Lines that begin with "#" are
+# considered comments and ignored.
+#
+# The default routes listed in the "/etc/defaultrouter" file will
+# replace those added by the kernel during diskless booting. An
+# empty "/etc/defaultrouter" file will cause the default route
+# added by the kernel to be deleted.
+#
+# Note that the default router file is ignored if we received routes
+# from a DHCP server. Our policy is to always trust DHCP over local
+# administration.
+#
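+# As an illustration only, an /etc/defaultrouter file following the rules
+# above might contain (addresses and names are examples):
+#
+#	# primary default router
+#	192.0.2.1
+#	backup-router		# must also appear in /etc/hosts
+#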
+smf_netstrategy
+
+if [ "$_INIT_NET_STRATEGY" = "dhcp" ] && [ -n "`/sbin/dhcpinfo Router`" ]; then
+ defrouters=`/sbin/dhcpinfo Router`
+elif [ -f /etc/defaultrouter ]; then
+ defrouters=`/usr/bin/grep -v \^\# /etc/defaultrouter | \
+ /usr/bin/awk '{print $1}'`
+ if [ -n "$defrouters" ]; then
+ #
+ # We want the default router(s) listed in /etc/defaultrouter
+ # to replace the one added from the BOOTPARAMS WHOAMI response
+ # but we must avoid flushing the last route between the running
+ # system and its /usr file system.
+ #
+
+ # First, remember the original route.
+ shift $#
+ set -- `/usr/bin/netstat -rn -f inet | /usr/bin/grep '^default'`
+ route_IP="$2"
+
+ #
+ # Next, add those from /etc/defaultrouter. While doing this,
+ # if one of the routes we add is for the route previously
+ # added as a result of the BOOTPARAMS response, we will see
+ # a message of the form:
+ # "add net default: gateway a.b.c.d: entry exists"
+ #
+ do_delete=yes
+ for router in $defrouters; do
+ set -- `/usr/sbin/route -n add default -gateway $router`
+ [ $? -ne 0 -a "x$5" = "x$route_IP:" ] && do_delete=no
+ done
+
+ #
+ # Finally, delete the original default route unless it was
+ # also listed in the defaultrouter file.
+ #
+ if [ -n "$route_IP" -a $do_delete = yes ]; then
+ /usr/sbin/route -n delete default -gateway $route_IP \
+ >/dev/null
+ fi
+ else
+ /usr/sbin/route -fn > /dev/null
+ fi
+else
+ defrouters=
+fi
+
+#
+# Use routeadm(1M) to configure forwarding and launch routing daemons for
+# IPv4 and IPv6 based on preset values. These settings only apply to the
+# global zone. For IPv4 dynamic routing, the system will default to
+# disabled if a default route was previously added via BOOTP, DHCP, or
+# the /etc/defaultrouter file. routeadm also starts in.ndpd.
+#
+if [ ! -f /etc/.dynamic_routing ] && [ -z "$defrouters" ]; then
+ #
+	# No default routes were set up by the "route" command above.
+ # Check the kernel routing table for any other default
+ # routes.
+ #
+ /usr/bin/netstat -rn -f inet | \
+ /usr/bin/grep default >/dev/null 2>&1 && defrouters=yes
+fi
+[ -f /etc/.dynamic_routing ] && /usr/bin/rm -f /etc/.dynamic_routing
+if [ -z "$defrouters" ]; then
+ routeadmstr="-e ipv4-routing"
+else
+ routeadmstr="-d ipv4-routing"
+fi
+#
+# The -b option used here tells routeadm that the ipv4-routing
+# option in $routeadmstr is the boot-time default. The
+# boot-time default is used if the administrator has not
+# explicitly enabled or disabled ipv4-routing using the -e or
+# -d routeadm option.
+#
+/usr/sbin/routeadm -u -b $routeadmstr
+
+#
+# In spite of global policy, there may be a need for IPsec because of
+# per-socket policy or tunnelled policy. With that in mind, check for manual
+# keys in /etc/inet/secret/ipseckeys, or check for IKE configuration in
+# /etc/inet/ike/config. Either of these will also load and initialize IPsec,
+# thereby consuming kernel memory.
+#
+
+if [ -f /etc/inet/secret/ipseckeys ] ; then
+ /usr/sbin/ipseckey -f /etc/inet/secret/ipseckeys
+fi
+
+if [ -f /etc/inet/ike/config ] ; then
+ /usr/lib/inet/in.iked
+fi
+
+#
+# Configure tunnels which were deferred by /lib/svc/method/net-physical
+# (the svc:/network/physical service) since it depends on the tunnel endpoints
+# being reachable, i.e. routing must be running.
+#
+# WARNING: you may wish to turn OFF forwarding if you haven't already, because
+# of various possible security vulnerabilities when configuring tunnels for
+# Virtual Private Network (VPN) construction.
+#
+# Also, if names are used in the /etc/hostname.ip.tun* file, those names
+# have to be either in DNS (and DNS must be in use) or in /etc/hosts, because this
+# file is executed before NIS or NIS+ is started.
+#
+
+#
+# IPv4 tunnels
+# The second component of the name must be either "ip" or "ip6".
+#
+interface_names="`/usr/bin/ls /etc/hostname.ip*.*[0-9] 2>/dev/null | \
+ /usr/bin/grep '/etc/hostname\.ip6\{0,1\}\.'`"
+if [ -n "$interface_names" ]; then
+ (
+ echo "configuring IPv4 tunnels:\c"
+ # Extract the part after the first '.'
+ set -- `for intr in $interface_names; do \
+ /usr/bin/expr //$intr : '[^.]*\.\(.*\)$'; done`
+ while [ $# -ge 1 ]; do
+ # Skip empty files
+ if [ ! -s /etc/hostname\.$1 ]; then
+ shift
+ continue
+ fi
+ /usr/sbin/ifconfig $1 plumb
+ while read ifcmds; do
+ if [ -n "$ifcmds" ]; then
+ /usr/sbin/ifconfig $1 inet $ifcmds
+ fi
+ done </etc/hostname\.$1 >/dev/null
+ echo " $1\c"
+ shift
+ done
+ echo "."
+ )
+fi
+
+#
+# IPv6 Tunnels
+# The second component of the name must be either "ip" or "ip6".
+#
+interface_names="`/usr/bin/ls /etc/hostname6.ip*.*[0-9] 2>/dev/null | \
+ /usr/bin/grep '/etc/hostname6\.ip6\{0,1\}\.'`"
+if [ -n "$interface_names" ]; then
+ (
+ echo "configuring IPv6 tunnels:\c"
+ # Extract the part after the first '.'
+ set -- `for intr in $interface_names; do \
+ /usr/bin/expr //$intr : '[^.]*\.\(.*\)$'; done`
+ while [ $# -ge 1 ]; do
+ # Skip empty files
+ if [ ! -s /etc/hostname6\.$1 ]; then
+ shift
+ continue
+ fi
+ /usr/sbin/ifconfig $1 inet6 plumb
+ while read ifcmds; do
+ if [ -n "$ifcmds" ]; then
+ /usr/sbin/ifconfig $1 inet6 $ifcmds
+ fi
+ done </etc/hostname6\.$1 > /dev/null
+ echo " $1\c"
+ shift
+ done
+ echo "."
+ )
+fi
+
+#
+# Set 6to4 Relay Router communication support policy and, if applicable,
+# the destination Relay Router IPv4 address. See /etc/default/inetinit for
+# setting and further info on ACCEPT6TO4RELAY and RELAY6TO4ADDR.
+# If ACCEPT6TO4RELAY=NO, the default value in the kernel will
+# be used.
+#
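+# As an illustration only, /etc/default/inetinit could contain
+# (192.88.99.1 is the well-known 6to4 relay anycast address):
+#
+#	ACCEPT6TO4RELAY=YES
+#	RELAY6TO4ADDR=192.88.99.1
+#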
+ACCEPT6TO4RELAY=`echo "$ACCEPT6TO4RELAY" | /usr/bin/tr '[A-Z]' '[a-z]'`
+if [ "$ACCEPT6TO4RELAY" = yes ]; then
+ if [ "$RELAY6TO4ADDR" ]; then
+ /usr/sbin/6to4relay -e -a $RELAY6TO4ADDR
+ else
+ /usr/sbin/6to4relay -e
+ fi
+fi
+
+# Clear exit status.
+exit 0
diff --git a/usr/src/cmd/svc/milestone/net-loopback b/usr/src/cmd/svc/milestone/net-loopback
new file mode 100644
index 0000000000..15f2e038f8
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/net-loopback
@@ -0,0 +1,87 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+
+. /lib/svc/share/smf_include.sh
+
+#
+# In a zone we need this service to be up, but all of the work
+# it tries to do is irrelevant (and will actually lead to the service
+# failing if we try to do it), so just bail out.
+#
+if [ `/sbin/zonename` != "global" ]; then
+ exit 0
+fi
+
+#
+# Cause ifconfig to not automatically start in.mpathd when IPMP groups are
+# configured. This is not strictly necessary but makes it so that in.mpathd
+# will always be started explicitly from /lib/svc/method/net-init (the
+# svc:/network/initial service), when we're sure that /usr is mounted.
+#
+SUNW_NO_MPATHD=; export SUNW_NO_MPATHD
+
+#
+# Before any interfaces are configured, we need to set the system
+# default IP forwarding behavior. This will be the setting for
+# interfaces that don't modify the per-interface setting with the
+# router or -router ifconfig command in their /etc/hostname.<intf>
+# files. The -F option tells routeadm to only update the forwarding
+# configuration for the system, and not dynamic routing. We don't
+# want routing daemons started until later in the boot process (see
+# the net-init method for that).
+#
+/sbin/routeadm -u -F
+
+# IPv4 loopback
+/sbin/ifconfig lo0 plumb 127.0.0.1 up
+
+# Configure the v6 loopback if any IPv6 interfaces are configured.
+interface_names="`echo /etc/hostname6.*[0-9] 2>/dev/null`"
+if [ "$interface_names" != "/etc/hostname6.*[0-9]" ]; then
+ ORIGIFS="$IFS"
+ IFS="$IFS."
+ set -- $interface_names
+ IFS="$ORIGIFS"
+ while [ $# -ge 2 ]; do
+ shift
+ if [ $# -gt 1 -a "$2" != "/etc/hostname6" ]; then
+ while [ $# -gt 1 -a "$1" != "/etc/hostname6" ]; do
+ shift
+ done
+ else
+ inet6_list="$inet6_list $1"
+ shift
+ fi
+ done
+fi
+
+if [ -n "$inet6_list" ]; then
+ /sbin/ifconfig lo0 inet6 plumb ::1 up
+else
+ exit 0
+fi
diff --git a/usr/src/cmd/svc/milestone/net-physical b/usr/src/cmd/svc/milestone/net-physical
new file mode 100644
index 0000000000..4498762949
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/net-physical
@@ -0,0 +1,347 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T.
+# All rights reserved.
+#
+#
+# ident "%Z%%M% %I% %E% SMI"
+
+#
+# In a zone we need this service to be up, but all of the work
+# it tries to do is irrelevant (and will actually lead to the service
+# failing if we try to do it), so just bail out.
+#
+if [ `/sbin/zonename` != "global" ]; then
+ exit 0
+fi
+
+. /lib/svc/share/smf_include.sh
+. /lib/svc/share/net_include.sh
+
+# Print warnings to console
+warn_failed_ifs() {
+ echo "Failed to $1 interface(s): $2" >/dev/msglog
+}
+
+# Make sure that the libraries essential to this stage of booting can be found.
+LD_LIBRARY_PATH=/lib; export LD_LIBRARY_PATH
+
+#
+# Cause ifconfig to not automatically start in.mpathd when IPMP groups are
+# configured. This is not strictly necessary but makes it so that in.mpathd
+# will always be started explicitly from /etc/init.d/inetinit, when we're
+# sure that /usr is mounted.
+#
+SUNW_NO_MPATHD=; export SUNW_NO_MPATHD
+
+smf_netstrategy
+
+#
+# If the system was net booted by DHCP, hand DHCP management off to the
+# DHCP agent (ifconfig communicates to the DHCP agent through the
+# loopback interface).
+#
+if [ -n "$_INIT_NET_IF" -a "$_INIT_NET_STRATEGY" = "dhcp" ]; then
+ /sbin/dhcpagent -a
+fi
+
+#
+# The network initialization is done early to support diskless and
+# dataless configurations. For IPv4 interfaces that were configured by
+# the kernel (e.g. those on diskless machines) and not configured by
+# DHCP, reset the netmask using the local "/etc/netmasks" file if one
+# exists, and then reset the broadcast address based on the netmask.
+#
+/sbin/ifconfig -auD4 netmask + broadcast +
+
+#
+# All the IPv4 and IPv6 interfaces are plumbed before doing any
+# interface configuration. This prevents errors from plumb failures
+# getting mixed in with the configured interface lists that the script
+# outputs.
+#
+
+#
+# Get the list of IPv4 interfaces to configure by breaking
+# /etc/hostname.* into separate args by using "." as a shell separator
+# character.
+#
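+# As an illustration only (interface names are examples): if
+# /etc/hostname.hme0 and /etc/hostname.qfe1 exist, the loop below yields
+# inet_list="hme0 qfe1".  Names with additional dots, such as
+# /etc/hostname.ip.tun0, are skipped here and configured later as tunnels
+# by the svc:/network/initial method.
+#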
+interface_names="`echo /etc/hostname.*[0-9] 2>/dev/null`"
+if [ "$interface_names" != "/etc/hostname.*[0-9]" ]; then
+ ORIGIFS="$IFS"
+ IFS="$IFS."
+ set -- $interface_names
+ IFS="$ORIGIFS"
+ while [ $# -ge 2 ]; do
+ shift
+ if [ "$1" = "xx0" ]; then
+ #
+ # For some unknown historical reason the xx0
+ # ifname is ignored.
+ #
+ shift
+ continue
+ fi
+ if [ $# -gt 1 -a "$2" != "/etc/hostname" ]; then
+ while [ $# -gt 1 -a "$1" != "/etc/hostname" ]; do
+ shift
+ done
+ else
+ inet_list="$inet_list $1"
+ shift
+ fi
+ done
+fi
+
+#
+# Get the list of IPv6 interfaces to configure by breaking
+# /etc/hostname6.* into separate args by using "." as a shell separator
+# character.
+#
+interface_names="`echo /etc/hostname6.*[0-9] 2>/dev/null`"
+if [ "$interface_names" != "/etc/hostname6.*[0-9]" ]; then
+ ORIGIFS="$IFS"
+ IFS="$IFS."
+ set -- $interface_names
+ IFS="$ORIGIFS"
+ while [ $# -ge 2 ]; do
+ shift
+ if [ $# -gt 1 -a "$2" != "/etc/hostname6" ]; then
+ while [ $# -gt 1 -a "$1" != "/etc/hostname6" ]; do
+ shift
+ done
+ else
+ inet6_list="$inet6_list $1"
+ shift
+ fi
+ done
+fi
+
+
+#
+# Step through the IPv4 interface list and try to plumb every interface.
+# Generate list of plumbed and failed IPv4 interfaces.
+#
+if [ -n "$inet_list" ]; then
+ set -- $inet_list
+ while [ $# -gt 0 ]; do
+ /sbin/ifconfig $1 plumb
+ if /sbin/ifconfig $1 inet >/dev/null 2>&1; then
+ inet_plumbed="$inet_plumbed $1"
+ else
+ inet_failed="$inet_failed $1"
+ fi
+ shift
+ done
+ [ -n "$inet_failed" ] && warn_failed_ifs "plumb IPv4" $inet_failed
+fi
+
+#
+# Step through the IPv6 interface list and plumb every interface.
+# Generate list of plumbed and failed IPv6 interfaces. Each plumbed
+# interface will be brought up later, after processing any contents of
+# the /etc/hostname6.* file.
+#
+if [ -n "$inet6_list" ]; then
+ set -- $inet6_list
+ while [ $# -gt 0 ]; do
+ /sbin/ifconfig $1 inet6 plumb
+ if /sbin/ifconfig $1 inet6 >/dev/null 2>&1; then
+ inet6_plumbed="$inet6_plumbed $1"
+ else
+ inet6_failed="$inet6_failed $1"
+ fi
+ shift
+ done
+ [ -n "$inet6_failed" ] && warn_failed_ifs "plumb IPv6" $inet6_failed
+fi
+
+#
+# Process the /etc/hostname.* files of plumbed IPv4 interfaces. If an
+# /etc/hostname file is not present or is empty, the ifconfig auto-dhcp
+# / auto-revarp command will attempt to set the address later.
+#
+# If /etc/hostname.lo0 exists the loop below will do additional
+# configuration of lo0.
+#
+if [ -n "$inet_plumbed" ]; then
+ i4s_fail=
+ echo "configuring IPv4 interfaces:\c"
+ set -- $inet_plumbed
+ while [ $# -gt 0 ]; do
+ inet_process_hostname /sbin/ifconfig $1 inet \
+ </etc/hostname.$1 >/dev/null
+ [ $? != 0 ] && i4s_fail="$i4s_fail $1"
+ echo " $1\c"
+ shift
+ done
+ echo "."
+ [ -n "$i4s_fail" ] && warn_failed_ifs "configure IPv4" $i4s_fail
+fi
+
+#
+# Process the /etc/hostname6.* files of plumbed IPv6 interfaces. After
+# processing the hostname6 file, bring the interface up. If
+# /etc/hostname6.lo0 exists the loop below will do additional
+# configuration of lo0.
+#
+if [ -n "$inet6_plumbed" ]; then
+ i6_fail=
+ echo "configuring IPv6 interfaces:\c"
+ set -- $inet6_plumbed
+ while [ $# -gt 0 ]; do
+ inet6_process_hostname /sbin/ifconfig $1 inet6 \
+ </etc/hostname6.$1 >/dev/null &&
+ /sbin/ifconfig $1 inet6 up
+ [ $? != 0 ] && i6_fail="$i6_fail $1"
+ echo " $1\c"
+ shift
+ done
+ echo "."
+ [ -n "$i6_fail" ] && warn_failed_ifs "configure IPv6" $i6_fail
+fi
+
+# Run DHCP if requested. Skip boot-configured interface.
+interface_names="`echo /etc/dhcp.*[0-9] 2>/dev/null`"
+if [ "$interface_names" != '/etc/dhcp.*[0-9]' ]; then
+ #
+ # First find the primary interface. Default to the first
+ # interface if not specified. First primary interface found
+ # "wins". Use care not to "reconfigure" a net-booted interface
+ # configured using DHCP. Run through the list of interfaces
+ # again, this time trying DHCP.
+ #
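+	# As an illustration only (interface name is an example): an
+	# /etc/dhcp.hme0 file containing just the word "primary" marks
+	# hme0 as the primary DHCP interface, while an empty /etc/dhcp.hme0
+	# simply requests DHCP on hme0 with no extra parameters.
+	#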
+ i4d_fail=
+ firstif=
+ primary=
+ ORIGIFS="$IFS"
+ IFS="${IFS}."
+ set -- $interface_names
+
+ while [ $# -ge 2 ]; do
+ shift
+ [ -z "$firstif" ] && firstif=$1
+
+ for i in `shcat /etc/dhcp\.$1`; do
+ if [ "$i" = primary ]; then
+ primary=$1
+ break
+ fi
+ done
+
+ [ -n "$primary" ] && break
+ shift
+ done
+
+ [ -z "$primary" ] && primary="$firstif"
+ cmdline=`shcat /etc/dhcp\.${primary}`
+
+ if [ "$_INIT_NET_IF" != "$primary" ]; then
+ echo "starting DHCP on primary interface $primary"
+ /sbin/ifconfig $primary auto-dhcp primary $cmdline
+ # Exit code 4 means ifconfig timed out waiting for dhcpagent
+ [ $? != 0 ] && [ $? != 4 ] && i4d_fail="$i4d_fail $primary"
+ fi
+
+ set -- $interface_names
+
+ while [ $# -ge 2 ]; do
+ shift
+ cmdline=`shcat /etc/dhcp\.$1`
+ if [ "$1" != "$primary" -a \
+ "$1" != "$_INIT_NET_IF" ]; then
+ echo "starting DHCP on interface $1"
+ /sbin/ifconfig $1 dhcp start wait 0 $cmdline
+ # Exit code can't be timeout when wait is 0
+ [ $? != 0 ] && i4d_fail="$i4d_fail $1"
+ fi
+ shift
+ done
+ IFS="$ORIGIFS"
+ unset ORIGIFS
+ [ -n "$i4d_fail" ] && warn_failed_ifs "configure IPv4 DHCP" $i4d_fail
+fi
+
+# Configure the rest of the IPv4 interfaces automatically, quietly.
+/sbin/ifconfig -adD4 auto-revarp netmask + broadcast + up
+
+#
+# Process IPv4 and IPv6 interfaces that failed to plumb. Find an
+# alternative interface to host the addresses.
+#
+[ -n "$inet_failed" ] && move_addresses inet
+
+[ -n "$inet6_failed" ] && move_addresses inet6
+
+#
+# If the /etc/defaultrouter file exists, process it now so that the next
+# stage of booting will have access to NFS.
+#
+if [ -f /etc/defaultrouter ]; then
+ while read router rubbish; do
+ case "$router" in
+ '#'* | '') ;; # Ignore comments, empty lines
+ *) /sbin/route -n add default -gateway $router ;;
+ esac
+ done </etc/defaultrouter
+fi
+
+#
+# We tell smf this service is online if any of the following is true:
+# - no interfaces were configured for plumbing and no DHCP failures
+# - any non-loopback IPv4 interfaces are up and have a non-zero address
+# - there are any DHCP interfaces started
+# - any non-loopback IPv6 interfaces are up
+#
+# If we weren't asked to configure any interfaces, exit
+if [ -z "$inet_list" ] && [ -z "$inet6_list" ]; then
+ # Config error if DHCP was attempted without plumbed interfaces
+ [ -n "$i4d_fail" ] && exit $SMF_EXIT_ERR_CONFIG
+ exit $SMF_EXIT_OK
+fi
+
+# Any non-loopback IPv4 interfaces with usable addresses up?
+if [ -n "`/sbin/ifconfig -a4u`" ]; then
+ /sbin/ifconfig -a4u | while read intf addr rest; do
+ [ $intf = inet ] && [ $addr != 127.0.0.1 ] &&
+ [ $addr != 0.0.0.0 ] && exit 0
+ done && exit $SMF_EXIT_OK
+fi
+
+# Any DHCP interfaces started?
+[ -n "`/sbin/ifconfig -a4 dhcp status 2>/dev/null`" ] && exit $SMF_EXIT_OK
+
+# Any non-loopback IPv6 interfaces up?
+if [ -n "`/sbin/ifconfig -au6`" ]; then
+ /sbin/ifconfig -au6 | while read intf addr rest; do
+ [ $intf = inet6 ] && [ $addr != ::1/128 ] && exit 0
+ done && exit $SMF_EXIT_OK
+fi
+
+# This service was supposed to configure something yet didn't. Exit
+# with config error.
+exit $SMF_EXIT_ERR_CONFIG
diff --git a/usr/src/cmd/svc/milestone/net-svc b/usr/src/cmd/svc/milestone/net-svc
new file mode 100644
index 0000000000..82a6d14a72
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/net-svc
@@ -0,0 +1,340 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+
+#
+# This is third phase of TCP/IP startup/configuration. This script
+# runs after the NIS/NIS+ startup script. We run things here that may
+# depend on NIS/NIS+ maps.
+#
+
+case "$1" in
+'start')
+ #
+ # In a zone we need this service to be up, but all of the work
+ # it tries to do is irrelevant (and will actually lead to the service
+ # failing if we try to do it), so just bail out.
+ #
+ if [ `/sbin/zonename` != "global" ]; then
+ exit 0
+ fi
+ ;; # Fall through -- rest of script is the initialization code
+
+'stop')
+ exit 0
+ ;;
+
+*)
+ echo "Usage: $0 { start | stop }"
+ exit 1
+ ;;
+esac
+
+. /lib/svc/share/smf_include.sh
+
+# If boot variables are not set, set variables we use
+[ -z "$_INIT_UTS_NODENAME" ] && _INIT_UTS_NODENAME=`/usr/bin/uname -n`
+
+#
+# wait_nis
+# Wait up to 5 seconds for ypbind to obtain a binding.
+#
+wait_nis ()
+{
+ for i in 1 2 3 4 5; do
+ server=`/usr/bin/ypwhich 2>/dev/null`
+ [ $? -eq 0 -a -n "$server" ] && return 0 || sleep 1
+ done
+ return 1
+}
+
+#
+# This function takes two file names and the file mode as input. The two
+# files are compared for differences (using cmp(1)) and if different, the
+# second file is overwritten with the first. A chmod is done with the file
+# mode passed in. If the files are equal, the first file passed
+# in (the /tmp file) is deleted.
+#
+mv_file ()
+{
+ /usr/bin/cmp -s $1 $2
+ if [ $? -eq 1 ]; then
+ /usr/bin/mv $1 $2
+ #
+ # The umask during boot is configurable, which requires
+ # explicit setting of file permission modes when we
+ # create files.
+ #
+ /usr/bin/chmod $3 $2
+ else
+ /usr/bin/rm $1
+ fi
+}
+
+#
+# update_nss
+# This routine takes as a parameter the name of the respective policy
+# to change in the nsswitch.conf (hosts or ipnodes) to update with dns.
+#
+update_nss ()
+{
+ policy=$1;
+ # Add dns to the nsswitch file, if it isn't already there.
+ /usr/bin/awk ' $1 ~ /^'${policy}':/ {
+ n = split($0, a);
+ newl = a[1];
+ if ($0 !~ /dns/) {
+ printf("#%s # Commented out by DHCP\n", $0);
+ updated = 0;
+ for (i = 2; i <= n; i++) {
+ if (updated == 0 && index(a[i], "[") == 1) {
+ newl = newl" dns";
+ updated++;
+ }
+ newl = newl" "a[i];
+ }
+ if (updated == 0) {
+ newl = newl" dns";
+ updated++;
+ }
+ if (updated != 0)
+ newl = newl" # Added by DHCP";
+ else
+ newl = $0;
+ printf("%s\n", newl);
+ } else
+ printf("%s\n", $0);
+ } $1 !~ /^'${policy}':/ { printf("%s\n", $0); }' /etc/nsswitch.conf \
+ >/tmp/nsswitch.conf.$$
+
+ mv_file /tmp/nsswitch.conf.$$ /etc/nsswitch.conf 644
+}
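+
+# As an illustration only, given an /etc/nsswitch.conf line such as
+#
+#	hosts: files nis [NOTFOUND=return]
+#
+# "update_nss hosts" rewrites it as
+#
+#	#hosts: files nis [NOTFOUND=return] # Commented out by DHCP
+#	hosts: files nis dns [NOTFOUND=return] # Added by DHCP
+#
+# inserting dns just ahead of the first bracketed criterion.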
+
+#
+# update_files
+# This routine takes as a parameter the name of the respective file
+# (hosts or ipnodes) to update with the new host name and IP address.
+#
+update_files ()
+{
+ filename=$1;
+ # Delete any old lines added by dhcp.
+ /usr/bin/sed -e '/# Added by DHCP$/d' /etc/inet/${filename} \
+ > /tmp/${filename}_clear.$$
+
+ shift $# # Clear $0-9 first in case grep fails
+ set -- `/usr/bin/grep "^[ ]*$ipaddr[ ]" \
+ /tmp/${filename}_clear.$$ 2>/dev/null`
+
+ if [ $# -gt 0 ]; then
+ #
+ # IP address is already in the file. Ensure the
+ # associated hostname is the same as the Hostname
+ # property returned by the DHCP server.
+ #
+ /usr/bin/sed -e "/^[ ]*${ipaddr}[ ]/d" \
+ /tmp/${filename}_clear.$$ >/tmp/${filename}.$$
+ echo "${ipaddr}\t${hostname}\t# Added by DHCP" \
+ >>/tmp/${filename}.$$
+ else
+ #
+ # IP address is missing from the respective file. Now check
+ # to see if the hostname is present with a different IP.
+ #
+ shift $# # Clear $0-9 in case grep fails
+ set -- `/usr/bin/grep -s -v '^#' /tmp/${filename}_clear.$$ | \
+ /usr/bin/egrep "[ ]${hostname}([ ]|$)"`
+
+ if [ $# -gt 0 ]; then
+ #
+ # Hostname is present in the file. Rewrite this line
+ # to have the new IP address and the DHCP comment.
+ #
+ /usr/bin/sed -e "/^[ ]*${1}[ ]/d" \
+ /tmp/${filename}_clear.$$ >/tmp/${filename}.$$
+
+ shift # Shift off $1 (the old IP)
+
+ echo "$ipaddr $*\c" | /usr/bin/tr ' ' '\t' \
+ >>/tmp/${filename}.$$
+
+ echo "\t# Added by DHCP" >>/tmp/${filename}.$$
+ else
+ #
+ # Hostname is not present in the named file.
+ # Add a new line for the host at the end of
+ # the new respective file.
+ #
+ /usr/bin/mv /tmp/${filename}_clear.$$ \
+ /tmp/${filename}.$$
+ echo "${ipaddr}\t${hostname}\t# Added by DHCP" \
+ >>/tmp/${filename}.$$
+ fi
+ fi
+
+ /usr/bin/rm -f /tmp/${filename}_clear.$$
+ mv_file /tmp/${filename}.$$ /etc/inet/${filename} 444
+}
+
+#
+# We now need to reset the netmask and broadcast address for our network
+# interfaces. Since this may result in a name service lookup, we want to
+# now wait for NIS to come up if we previously started it.
+#
+domain=`/usr/bin/domainname 2>/dev/null`
+
+[ -z "$domain" ] || [ ! -d /var/yp/binding/$domain ] || wait_nis || \
+ echo "WARNING: Timed out waiting for NIS to come up" >& 2
+
+#
+# Re-set the netmask and broadcast addr for all IP interfaces. This ifconfig
+# is run here, after waiting for name services, so that "netmask +" will find
+# the netmask if it lives in a NIS map. The 'D' in -auD tells ifconfig NOT to
+# mess with the interface if it is under DHCP control
+#
+/usr/sbin/ifconfig -auD4 netmask + broadcast +
+
+# Uncomment these lines to print complete network interface configuration
+# echo "network interface configuration:"
+# /usr/sbin/ifconfig -a
+
+smf_netstrategy
+
+if [ "$_INIT_NET_STRATEGY" = "dhcp" ]; then
+ dnsservers=`/sbin/dhcpinfo DNSserv`
+else
+ dnsservers=""
+fi
+
+if [ -n "$dnsservers" ]; then
+ #
+ # Go through /etc/resolv.conf and replace any existing
+ # domain or nameserver entries with new ones derived
+ # from DHCP. Note that it is important to preserve
+ # order of domain entries vs. search entries; the search
+ # entries are reserved for administrator customization
+ # and if placed after the domain entry will override it.
+ # See resolv.conf(4).
+ #
+ if [ ! -f /etc/resolv.conf ]; then
+ /usr/bin/touch /etc/resolv.conf
+ fi
+ dnsdomain=`/sbin/dhcpinfo DNSdmain`
+ export dnsservers dnsdomain
+ /usr/bin/nawk </etc/resolv.conf >/tmp/resolv.conf.$$ '
+ function writedomain() {
+ if (updated == 0) {
+ # Use only first domain, not a search list
+ split(ENVIRON["dnsdomain"], d)
+ if(length(d[1]) != 0)
+ printf("domain %s\n", d[1])
+ }
+ ++updated
+ }
+ $1 == "domain" { writedomain(); next }
+ $1 != "nameserver" { print $0 }
+ END {
+ writedomain()
+ n = split(ENVIRON["dnsservers"], s)
+ for (i = 1; i <= n; ++i)
+ printf("nameserver %s\n", s[i])
+ }'
+ unset dnsservers dnsdomain
+ mv_file /tmp/resolv.conf.$$ /etc/resolv.conf 644
+ #
+ # Add dns to the nsswitch file, if it isn't already there.
+ #
+ update_nss hosts
+ update_nss ipnodes
+
+elif /usr/bin/grep '# Added by DHCP$' /etc/nsswitch.conf >/dev/null 2>&1; then
+
+ # If we added DNS to the hosts and ipnodes policy in the nsswitch,
+ # remove it.
+ /usr/bin/sed \
+ -e '/# Added by DHCP$/d' \
+ -e 's/^\(#hosts:\)\(.*[^#]\)\(#.*\)$/hosts: \2/' \
+ -e 's/^\(#ipnodes:\)\(.*[^#]\)\(#.*\)$/ipnodes: \2/' \
+ /etc/nsswitch.conf >/tmp/nsswitch.conf.$$
+
+ mv_file /tmp/nsswitch.conf.$$ /etc/nsswitch.conf 644
+fi
+
+if [ "$_INIT_NET_STRATEGY" = "dhcp" ]; then
+
+ hostname=`/usr/bin/uname -n`
+ ipaddr=`/sbin/dhcpinfo Yiaddr`
+ update_files hosts
+ update_files ipnodes
+
+else
+ # We're not using a dhcp strategy, so host entries added by
+ # DHCP should be removed from /etc/inet/hosts and /etc/inet/ipnodes.
+
+ if /usr/bin/grep '# Added by DHCP$' /etc/inet/hosts >/dev/null 2>&1;
+ then
+ /usr/bin/sed -e '/# Added by DHCP$/d' \
+ /etc/inet/hosts > /tmp/hosts.$$
+ mv_file /tmp/hosts.$$ /etc/inet/hosts 444
+ fi
+
+ if /usr/bin/grep '# Added by DHCP$' /etc/inet/ipnodes >/dev/null 2>&1;
+ then
+ /usr/bin/sed -e '/# Added by DHCP$/d' \
+ /etc/inet/ipnodes > /tmp/ipnodes.$$
+ mv_file /tmp/ipnodes.$$ /etc/inet/ipnodes 444
+ fi
+fi
+
+#
+# Load the IPQoS configuration.
+# This is backgrounded so that any remote hostname lookups it performs
+# don't unduly delay startup. Any messages go via syslog.
+#
+
+if [ -f /usr/sbin/ipqosconf -a -f /etc/inet/ipqosinit.conf ]; then
+ /usr/sbin/ipqosconf -s -a /etc/inet/ipqosinit.conf &
+fi
+
+#
+# Add a static route for multicast packets out our default interface.
+# The default interface is the interface that corresponds to the node name.
+# Run in background subshell to avoid waiting for name service.
+#
+
+(
+if [ "$_INIT_NET_STRATEGY" = "dhcp" ]; then
+ mcastif=`/sbin/dhcpinfo Yiaddr` || mcastif=$_INIT_UTS_NODENAME
+else
+ mcastif=$_INIT_UTS_NODENAME
+fi
+
+echo "Setting default IPv4 interface for multicast:" \
+ "add net 224.0/4: gateway $mcastif"
+
+/usr/sbin/route -n add -interface 224.0/4 -gateway "$mcastif" >/dev/null
+) &
diff --git a/usr/src/cmd/svc/milestone/network-initial.xml b/usr/src/cmd/svc/milestone/network-initial.xml
new file mode 100644
index 0000000000..5f106155f3
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/network-initial.xml
@@ -0,0 +1,119 @@
+<?xml version="1.0"?>
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service manifest is not editable; its contents will
+ be overwritten by package or patch operations, including
+ operating system upgrade. Make customizations in a different
+ file.
+-->
+
+<service_bundle type='manifest' name='SUNWcsr:network-initial'>
+
+<service
+ name='network/initial'
+ type='service'
+ version='1'>
+
+ <create_default_instance enabled='true' />
+
+ <dependency
+ name='network'
+ grouping='optional_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/milestone/network' />
+ </dependency>
+
+ <dependency
+ name='filesystem'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/system/filesystem/usr' />
+ </dependency>
+
+ <!--
+ This dependency was added to make sure soconfig runs in
+ devices-local method, before routeadm invocation in net-init.
+ This is because routeadm commands depend on sockets.
+ -->
+ <dependency
+ name='devices'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/milestone/devices' />
+ </dependency>
+
+ <dependency
+ name='cryptoframework'
+ grouping='optional_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/system/cryptosvc' />
+ </dependency>
+
+ <exec_method
+ type='method'
+ name='start'
+ exec='/lib/svc/method/net-init start'
+ timeout_seconds='600' />
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec='/lib/svc/method/net-init stop'
+ timeout_seconds='3' />
+
+ <property_group name='startd' type='framework'>
+ <propval name='duration' type='astring' value='transient' />
+ </property_group>
+
+ <stability value='Unstable' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+initial network services
+ </loctext>
+ </common_name>
+ <description>
+ <loctext xml:lang='C'>
+ Initial network services includes
+ configuring IP routing and setting any
+ tunable parameters.
+ </loctext>
+ </description>
+ <documentation>
+ <manpage title='ifconfig' section='1M'
+ manpath='/usr/share/man' />
+ </documentation>
+ </template>
+</service>
+
+</service_bundle>
diff --git a/usr/src/cmd/svc/milestone/network-loopback.xml b/usr/src/cmd/svc/milestone/network-loopback.xml
new file mode 100644
index 0000000000..bd51e960cd
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/network-loopback.xml
@@ -0,0 +1,77 @@
+<?xml version="1.0"?>
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service manifest is not editable; its contents will
+ be overwritten by package or patch operations, including
+ operating system upgrade. Make customizations in a different
+ file.
+-->
+
+<service_bundle type='manifest' name='SUNWcsr:network-loopback'>
+
+<service
+ name='network/loopback'
+ type='service'
+ version='1'>
+
+ <instance name='default' enabled='true'>
+
+ <exec_method
+ type='method'
+ name='start'
+ exec='/lib/svc/method/net-loopback'
+ timeout_seconds='60' />
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec=':true'
+ timeout_seconds='3' />
+
+ <property_group name='startd' type='framework'>
+ <propval name='duration' type='astring' value='transient' />
+ </property_group>
+
+ </instance>
+
+ <stability value='Evolving' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+ loopback network interface
+ </loctext>
+ </common_name>
+ <documentation>
+ <manpage title='ifconfig' section='1M'
+ manpath='/usr/share/man' />
+ </documentation>
+ </template>
+</service>
+
+</service_bundle>
diff --git a/usr/src/cmd/svc/milestone/network-physical.xml b/usr/src/cmd/svc/milestone/network-physical.xml
new file mode 100644
index 0000000000..e0a8eda24b
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/network-physical.xml
@@ -0,0 +1,77 @@
+<?xml version="1.0"?>
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service manifest is not editable; its contents will
+ be overwritten by package or patch operations, including
+ operating system upgrade. Make customizations in a different
+ file.
+-->
+
+<service_bundle type='manifest' name='SUNWcsr:network-physical'>
+
+<service
+ name='network/physical'
+ type='service'
+ version='1'>
+
+ <instance name='default' enabled='true'>
+
+ <exec_method
+ type='method'
+ name='start'
+ exec='/lib/svc/method/net-physical'
+ timeout_seconds='600' />
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec=':true'
+ timeout_seconds='3' />
+
+ <property_group name='startd' type='framework'>
+ <propval name='duration' type='astring' value='transient' />
+ </property_group>
+
+ </instance>
+
+ <stability value='Unstable' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+ physical network interfaces
+ </loctext>
+ </common_name>
+ <documentation>
+ <manpage title='ifconfig' section='1M'
+ manpath='/usr/share/man' />
+ </documentation>
+ </template>
+</service>
+
+</service_bundle>
diff --git a/usr/src/cmd/svc/milestone/network-service.xml b/usr/src/cmd/svc/milestone/network-service.xml
new file mode 100644
index 0000000000..48e9d03f59
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/network-service.xml
@@ -0,0 +1,125 @@
+<?xml version="1.0"?>
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service manifest is not editable; its contents will
+ be overwritten by package or patch operations, including
+ operating system upgrade. Make customizations in a different
+ file.
+-->
+
+<service_bundle type='manifest' name='SUNWcsr:network-service'>
+
+<!--
+ network/service is the fourth service containing aggregated TCP/IP
+ service initialization. It will decompose into its constituent
+ services over time.
+-->
+
+<service
+ name='network/service'
+ type='service'
+ version='1'>
+
+ <create_default_instance enabled='true' />
+
+ <dependency
+ name='init'
+ grouping='optional_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/network/initial' />
+ </dependency>
+
+ <dependency
+ name='nisplus'
+ grouping='optional_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/network/rpc/nisplus' />
+ </dependency>
+
+ <dependency
+ name='nis_server'
+ grouping='optional_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/network/nis/server' />
+ </dependency>
+
+ <dependency
+ name='nis_client'
+ grouping='optional_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/network/nis/client' />
+ </dependency>
+
+ <!--
+ DNS is potentially configured by the DHCP actions in
+ network/service, and cannot presently be used to store data
+ used by this service.
+ -->
+
+ <exec_method
+ type='method'
+ name='start'
+ exec='/lib/svc/method/net-svc start'
+ timeout_seconds='600' />
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec=':true'
+ timeout_seconds='3' />
+
+ <property_group name='startd' type='framework'>
+ <propval name='duration' type='astring' value='transient' />
+ </property_group>
+
+ <stability value='Unstable' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+ layered network services
+ </loctext>
+ </common_name>
+ <description>
+ <loctext xml:lang='C'>
+ Network infrastructure services
+ requiring name service availability.
+ </loctext>
+ </description>
+ <documentation>
+ <manpage title='ifconfig' section='1M'
+ manpath='/usr/share/man' />
+ </documentation>
+ </template>
+</service>
+
+</service_bundle>
diff --git a/usr/src/cmd/svc/milestone/network.xml b/usr/src/cmd/svc/milestone/network.xml
new file mode 100644
index 0000000000..75b5578f44
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/network.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0"?>
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident	"%Z%%M% %I% %E% SMI"
+-->
+
+<service_bundle type='manifest' name='SUNWcsr:network'>
+
+<service
+ name='milestone/network'
+ type='service'
+ version='1'>
+
+ <create_default_instance enabled='true' />
+
+ <single_instance />
+
+ <dependency
+ name='loopback'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/network/loopback' />
+ </dependency>
+
+ <dependency
+ name='physical'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/network/physical' />
+ </dependency>
+
+ <exec_method
+ type='method'
+ name='start'
+ exec=':true'
+ timeout_seconds='3' />
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec=':true'
+ timeout_seconds='3' />
+
+ <property_group name='general' type='framework'>
+ <propval name='startd_duration' type='astring'
+ value='transient' />
+ </property_group>
+
+ <stability value='Evolving' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+ Network milestone
+ </loctext>
+ </common_name>
+ <description>
+ <loctext xml:lang='C'>
+ Basic network APIs are functional and it
+ is safe to establish listening sockets
+ without security vulnerabilities.
+ </loctext>
+ </description>
+ </template>
+</service>
+
+</service_bundle>
diff --git a/usr/src/cmd/svc/milestone/restarter.xml b/usr/src/cmd/svc/milestone/restarter.xml
new file mode 100644
index 0000000000..6bd796dead
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/restarter.xml
@@ -0,0 +1,67 @@
+<?xml version="1.0"?>
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service manifest is not editable; its contents will
+ be overwritten by package or patch operations, including
+ operating system upgrade. Make customizations in a different
+ file.
+-->
+
+<service_bundle type='manifest' name='SUNWcsr:restarter'>
+
+<service
+ name='system/svc/restarter'
+ type='service'
+ version='1'>
+
+ <!--
+ svc.startd manages itself. However, this manifest allows
+ us to set non-persistent properties before filesystems
+ have been mounted r/w.
+ -->
+
+ <create_default_instance enabled='true' />
+
+ <single_instance/>
+
+ <stability value='Unstable' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+ master restarter
+ </loctext>
+ </common_name>
+ <documentation>
+ <manpage title='svc.startd' section='1M'
+ manpath='/usr/share/man' />
+ </documentation>
+ </template>
+</service>
+
+</service_bundle>
diff --git a/usr/src/cmd/svc/milestone/rmtmpfiles b/usr/src/cmd/svc/milestone/rmtmpfiles
new file mode 100644
index 0000000000..2f2dca4c70
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/rmtmpfiles
@@ -0,0 +1,77 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+
+# Traditional SunOS 4.x behavior has been to not remove directories in
+# the /tmp directory; only simple files were removed. This led to an
+# inconsistency when the tmpfs file system was used (which isn't persistent
+# across boots). The following adopts the traditional System V behavior
+# of removing everything in /tmp, unless /tmp or any of its subdirectories
+# are mount points for another filesystem.
+
+/sbin/mount | /usr/bin/egrep '^/tmp(/| )' >/dev/null 2>&1 || {
+ if [ -h /tmp ]; then
+ # Just remove files under directory if symbolic link
+ /usr/bin/rm -rf /tmp/*
+ else
+ /usr/bin/rm -rf /tmp
+ /usr/bin/mkdir -m 1777 /tmp
+ /usr/bin/chown root:sys /tmp
+ fi
+}
+
+# Clean up /etc directory
+
+for file in /etc/rem_name_to_major /etc/nologin; do
+ [ -f $file ] && /usr/bin/rm -f $file
+done
+
+# Traditional SunOS 4.x behavior has been to not alter the contents of
+# /var/tmp (/usr/tmp) at boot time. This behavior is maintained as the
+# current default behavior. If the traditional System V behavior of
+# removing everything in /var/tmp is desired, remove the following 'exit'.
+
+exit 0
+
+# Clean up /var/tmp, unless /var/tmp or any of its subdirectories are
+# mount points for another filesystem.
+
+/sbin/mount | /usr/bin/egrep '^/var/tmp(/| )' >/dev/null 2>&1 || {
+ cd /var/tmp || exit 0
+
+ # We carefully remove all files except the Ex* files (editor
+ # temporary files), which expreserve will process later (in
+ # S80PRESERVE). Of course, it would be simpler to just run
+ # expreserve before this script, but that doesn't work --
+ # expreserve requires the name service, which is not available
+ # until much later.
+
+ /usr/bin/ls -a | /usr/bin/egrep -v '^(Ex.*|\.|\.\.)$' |
+ /usr/bin/xargs /usr/bin/rm -rf -- 2>/dev/null
+}
+
+exit 0
diff --git a/usr/src/cmd/svc/milestone/rmtmpfiles.xml b/usr/src/cmd/svc/milestone/rmtmpfiles.xml
new file mode 100644
index 0000000000..7909021711
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/rmtmpfiles.xml
@@ -0,0 +1,90 @@
+<?xml version='1.0'?>
+<!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service manifest is not editable; its contents will
+ be overwritten by package or patch operations, including
+ operating system upgrade. Make customizations in a different
+ file.
+
+ Service manifest for rmtmpfiles.
+-->
+
+<service_bundle type='manifest' name='SUNWcsr:rmtmpfiles'>
+
+<service
+ name='system/rmtmpfiles'
+ type='service'
+ version='1'>
+
+ <create_default_instance enabled='true' />
+
+ <single_instance/>
+
+ <dependency
+ name='tmp'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/system/filesystem/minimal' />
+ </dependency>
+
+ <dependent
+ name='rmtmpfiles_multi-user'
+ grouping='require_all'
+ restart_on='none'>
+ <service_fmri value='svc:/milestone/multi-user' />
+ </dependent>
+
+ <exec_method
+ type='method'
+ name='start'
+ exec='/lib/svc/method/rmtmpfiles'
+ timeout_seconds='30' />
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec=':true'
+ timeout_seconds='1' />
+
+ <property_group name='startd' type='framework'>
+ <propval name='duration' type='astring' value='transient' />
+ </property_group>
+
+ <stability value='Evolving' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+remove temporary files
+ </loctext>
+ </common_name>
+ </template>
+</service>
+
+</service_bundle>
diff --git a/usr/src/cmd/svc/milestone/root-fs.xml b/usr/src/cmd/svc/milestone/root-fs.xml
new file mode 100644
index 0000000000..15452bd8d2
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/root-fs.xml
@@ -0,0 +1,73 @@
+<?xml version="1.0"?>
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service manifest is not editable; its contents will
+ be overwritten by package or patch operations, including
+ operating system upgrade. Make customizations in a different
+ file.
+-->
+
+<service_bundle type='manifest' name='SUNWcsr:filesystem-root'>
+
+<service
+ name='system/filesystem/root'
+ type='service'
+ version='1'>
+
+ <single_instance/>
+
+ <exec_method
+ type='method'
+ name='start'
+ exec='/lib/svc/method/fs-root'
+ timeout_seconds='300' />
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec=':true'
+ timeout_seconds='0' />
+
+ <property_group name='startd' type='framework'>
+ <propval name='duration' type='astring' value='transient' />
+ </property_group>
+
+ <instance name='default' enabled='true' />
+
+ <stability value='Unstable' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+ root file system mount
+ </loctext>
+ </common_name>
+ </template>
+</service>
+
+</service_bundle>
diff --git a/usr/src/cmd/svc/milestone/single-user.xml b/usr/src/cmd/svc/milestone/single-user.xml
new file mode 100644
index 0000000000..cbb93fd3b3
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/single-user.xml
@@ -0,0 +1,164 @@
+<?xml version="1.0"?>
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service manifest is not editable; its contents will
+ be overwritten by package or patch operations, including
+ operating system upgrade. Make customizations in a different
+ file.
+-->
+
+<service_bundle type='manifest' name='SUNWcsr:single-user'>
+
+<service
+ name='milestone/single-user'
+ type='service'
+ version='1'>
+
+ <create_default_instance enabled='true' />
+
+ <single_instance />
+
+ <!--
+ Single-user's dependency on sysidtool is obsolete, but instead of
+ removing it from this manifest, retain it here with its delete
+ attribute set to true. This is to try and prevent a dependency
+ cycle with the new sysidtool which declares a dependency on
+ single-user. This will force the deletion of single-user's
+ sysidtool dependency as soon as this manifest is imported
+ (instead of waiting for upgrade to delete it).
+
+ Note that this does not guarantee the prevention of a dependency
+ cycle (if the new sysidtool manifest is imported before
+ single-user's) - if this does occur, the code in upgrade will
+ catch it - it deletes single-user's dependency and "svcadm
+ clear"s sysidtool.
+ -->
+
+ <dependency
+ name='sysidtool'
+ grouping='require_all'
+ restart_on='none'
+ type='service'
+ delete='true'>
+ <service_fmri value='svc:/system/sysidtool:net' />
+ <service_fmri value='svc:/system/sysidtool:system' />
+ </dependency>
+
+ <dependency
+ name='nodename'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/system/identity:node' />
+ </dependency>
+
+ <dependency
+ name='filesystem-minimal'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/system/filesystem/minimal' />
+ </dependency>
+
+ <dependency
+ name='milestone-devices'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/milestone/devices' />
+ </dependency>
+
+ <dependency
+ name='manifests'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/system/manifest-import' />
+ </dependency>
+
+ <dependency
+ name='loopback-network'
+ grouping='require_any'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/network/loopback' />
+ </dependency>
+
+ <dependency
+ name='network'
+ grouping='optional_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/milestone/network' />
+ </dependency>
+
+ <!--
+ We can't know how long legacy init scripts will take to run. Set
+ the timeout value high enough to allow them to take their time
+ to start.
+ -->
+
+ <exec_method
+ type='method'
+ name='start'
+ exec='/sbin/rcS start'
+ timeout_seconds='1800' />
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec=':true'
+ timeout_seconds='0' />
+
+ <!--
+ The init scripts should never automatically be run twice.
+ duration=transient tells svc.startd not to restart if no
+ processes are left running, and timeout_retry=false tells
+ svc.startd not to retry the start method if it times out.
+ -->
+ <property_group name='startd' type='framework'>
+ <propval name='duration' type='astring' value='transient' />
+ <propval name='timeout_retry' type='boolean' value='false' />
+ </property_group>
+
+ <stability value='Evolving' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+ single-user milestone
+ </loctext>
+ </common_name>
+ <documentation>
+ <manpage title='init' section='1M'
+ manpath='/usr/share/man' />
+ </documentation>
+ </template>
+</service>
+
+</service_bundle>
diff --git a/usr/src/cmd/svc/milestone/sysconfig.xml b/usr/src/cmd/svc/milestone/sysconfig.xml
new file mode 100644
index 0000000000..807990a3ed
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/sysconfig.xml
@@ -0,0 +1,82 @@
+<?xml version="1.0"?>
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ The sysconfig milestone represents the completion of system
+ configuration, such as system identity, and initial network and
+ filesystem configuration. It collects dependencies on related
+ services which form a logical grouping of services that establish
+ the initial system configuration.
+-->
+
+<service_bundle type='manifest' name='SUNWcsr:sysconfig'>
+
+<service
+ name='milestone/sysconfig'
+ type='service'
+ version='1'>
+
+ <create_default_instance enabled='true' />
+
+ <single_instance />
+
+ <dependency
+ name='milestone'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/milestone/single-user' />
+ </dependency>
+
+ <exec_method
+ type='method'
+ name='start'
+ exec=':true'
+ timeout_seconds='0' />
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec=':true'
+ timeout_seconds='0' />
+
+ <property_group name='startd' type='framework'>
+ <propval name='duration' type='astring' value='transient' />
+ </property_group>
+
+ <stability value='Evolving' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+ Basic system configuration milestone
+ </loctext>
+ </common_name>
+ </template>
+</service>
+
+</service_bundle>
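The manifest above only expresses the milestone's own dependency on
single-user. One way a configuration service can add itself to the
grouping described in the header is with a dependent element in its own
manifest, the same mechanism rmtmpfiles.xml uses for the multi-user
milestone earlier in this commit. A sketch, with a hypothetical service
and dependent name, of what such an entry could look like:

    <dependent
        name='example-config_sysconfig'
        grouping='require_all'
        restart_on='none'>
        <service_fmri value='svc:/milestone/sysconfig' />
    </dependent>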
diff --git a/usr/src/cmd/svc/milestone/usr-fs.xml b/usr/src/cmd/svc/milestone/usr-fs.xml
new file mode 100644
index 0000000000..f6b767d173
--- /dev/null
+++ b/usr/src/cmd/svc/milestone/usr-fs.xml
@@ -0,0 +1,91 @@
+<?xml version="1.0"?>
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service manifest is not editable; its contents will
+ be overwritten by package or patch operations, including
+ operating system upgrade. Make customizations in a different
+ file.
+-->
+
+<service_bundle type='manifest' name='SUNWcsr:filesystem-usr'>
+
+<service
+ name='system/filesystem/usr'
+ type='service'
+ version='1'>
+
+ <create_default_instance enabled='true' />
+
+ <single_instance/>
+
+ <dependency
+ name='boot-archive'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/system/boot-archive' />
+ </dependency>
+
+ <!--
+ Start method timeout is infinite to handle potentially unbounded
+ fsck times.
+ -->
+ <exec_method
+ type='method'
+ name='start'
+ exec='/lib/svc/method/fs-usr'
+ timeout_seconds='0' />
+
+ <exec_method
+ type='method'
+ name='stop'
+ exec=':true'
+ timeout_seconds='3' />
+
+ <property_group name='startd' type='framework'>
+ <propval name='duration' type='astring' value='transient' />
+ </property_group>
+
+ <stability value='Unstable' />
+
+ <template>
+ <common_name>
+ <loctext xml:lang='C'>
+ read/write root file systems mounts
+ </loctext>
+ </common_name>
+ <description>
+ <loctext xml:lang='C'>
+ This service remounts both / and /usr
+ read-write.
+ </loctext>
+ </description>
+ </template>
+</service>
+
+</service_bundle>
diff --git a/usr/src/cmd/svc/profile/Makefile b/usr/src/cmd/svc/profile/Makefile
new file mode 100644
index 0000000000..ad7e537edc
--- /dev/null
+++ b/usr/src/cmd/svc/profile/Makefile
@@ -0,0 +1,76 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+
+include ../../Makefile.cmd
+
+OWNER = root
+GROUP = sys
+FILEMODE = 0444
+
+ROOTPROFILE = $(ROOT)/var/svc/profile
+
+PROFILESRCS = \
+ generic_open.xml \
+ generic_limited_net.xml \
+ inetd_generic.xml \
+ inetd_upgrade.xml \
+ ns_dns.xml \
+ ns_files.xml \
+ ns_ldap.xml \
+ ns_nis.xml \
+ ns_nisplus.xml \
+ ns_none.xml \
+ platform_SUNW,Sun-Fire-15000.xml \
+ platform_SUNW,Sun-Fire-880.xml \
+ platform_SUNW,Sun-Fire.xml \
+ platform_SUNW,Ultra-Enterprise-10000.xml \
+ platform_SUNW,UltraSPARC-IIi-Netract.xml \
+ platform_i86pc.xml \
+ platform_none.xml
+
+PROFILES = $(PROFILESRCS:%=$(ROOTPROFILE)/%)
+
+install: $(PROFILES)
+ $(RM) $(ROOTPROFILE)/generic.xml
+ $(LN) -s generic_open.xml $(ROOTPROFILE)/generic.xml
+ $(RM) $(ROOTPROFILE)/platform.xml
+ # SUNW,Sun-Fire-V890
+ $(RM) $(ROOTPROFILE)/platform_SUNW,Sun-Fire-V890.xml
+ $(LN) $(ROOTPROFILE)/platform_SUNW,Sun-Fire-880.xml \
+ $(ROOTPROFILE)/platform_SUNW,Sun-Fire-V890.xml
+ # SUNW,UltraSPARC-IIe-NetraCT-[46]0
+ $(RM) $(ROOTPROFILE)/platform_SUNW,UltraSPARC-IIe-NetraCT-40.xml
+ $(RM) $(ROOTPROFILE)/platform_SUNW,UltraSPARC-IIe-NetraCT-60.xml
+ $(LN) $(ROOTPROFILE)/platform_SUNW,UltraSPARC-IIi-Netract.xml \
+ $(ROOTPROFILE)/platform_SUNW,UltraSPARC-IIe-NetraCT-40.xml
+ $(LN) $(ROOTPROFILE)/platform_SUNW,UltraSPARC-IIi-Netract.xml \
+ $(ROOTPROFILE)/platform_SUNW,UltraSPARC-IIe-NetraCT-60.xml
+
+$(ROOTPROFILE)/%: %
+ $(INS.file)
+
+all lint clobber clean _msg:
diff --git a/usr/src/cmd/svc/profile/README b/usr/src/cmd/svc/profile/README
new file mode 100644
index 0000000000..6e93615b5f
--- /dev/null
+++ b/usr/src/cmd/svc/profile/README
@@ -0,0 +1,45 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+ Notes Regarding Modification of generic_open.xml
+
+Any changes made to generic_open.xml will need to be considered for
+inclusion in generic_limited_net.xml, the "Secure By Default" (see
+http://solsec.eng.sun.com/sbd/) profile. The details are discussed
+in PSARC/2004/781:
+
+ ...
+ The generic_limited_net profile explicitly disables all
+ smf(5) converted inetd services that are not required to
+ run the window system, SVM, or vold. It retains ssh and
+ X remote login as the remote login methods available.
+ ...
+
+In general, _any_ service that allows inbound net access should be
+added to generic_limited_net and disabled, unless its activation
+has been approved by SBD.
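Concretely, adding such a service to generic_limited_net.xml means
giving it an explicitly disabled instance entry in the same form that
profile already uses; a sketch with a hypothetical service name:

    <service name='network/example-inbound' version='1' type='service'>
        <instance name='default' enabled='false'/>
    </service>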
diff --git a/usr/src/cmd/svc/profile/generic_limited_net.xml b/usr/src/cmd/svc/profile/generic_limited_net.xml
new file mode 100644
index 0000000000..c2dd9b6bed
--- /dev/null
+++ b/usr/src/cmd/svc/profile/generic_limited_net.xml
@@ -0,0 +1,269 @@
+<?xml version='1.0'?>
+<!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ The purpose of the limited_net profile is to provide a set of active
+ services that allow one to connect to the machine via ssh (requires
+ sshd), to be authenticated (requires rpc), and to access network
+ filesystems (requires nfs). The services which are deactivated here
+ are those that are at odds with this goal. Those which are activated
+ are explicit requirements for the goal's satisfaction.
+
+ NOTE: Service profiles delivered by this package are not editable,
+ and their contents will be overwritten by package or patch
+ operations, including operating system upgrade. Make customizations
+ in a distinct file. The path, /var/svc/profile/site.xml, is a
+ distinguished location for a site-specific service profile, treated
+ otherwise equivalently to this file.
+-->
+<service_bundle type='profile' name='generic_limited_net'
+ xmlns:xi='http://www.w3.org/2003/XInclude' >
+ <!--
+ Include name service profile, as set by system id tools.
+ -->
+ <xi:include href='file:/var/svc/profile/name_service.xml' />
+
+ <!--
+ svc.startd(1M) services
+ -->
+ <service name='system/coreadm' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='system/cron' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='system/cryptosvc' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='system/identity' version='1' type='service'>
+ <instance name='domain' enabled='true'/>
+ </service>
+ <service name='system/keymap' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='system/picl' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='system/sac' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='system/system-log' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='system/utmp' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='system/zones' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/rpc/bind' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='system/name-service-cache' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/nfs/status' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/nfs/nlockmgr' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/nfs/client' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/nfs/server' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/nfs/rquota' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/ssh' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/smtp' version='1' type='service'>
+ <instance name='sendmail' enabled='true'/>
+ </service>
+ <service name='network/inetd' version='1' type='restarter'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='system/filesystem/autofs' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='system/power' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='application/print/cleanup' version='1' type='service'>
+ <instance name='default' enabled='true' />
+ </service>
+ <service name='network/pfil' version='1' type='service'>
+ <instance name='default' enabled='true' />
+ </service>
+
+ <!--
+ non-default svc.startd(1M) services disabled
+ -->
+ <service name='network/dhcp-server' version='1' type='service'>
+ <instance name='default' enabled='false' />
+ </service>
+ <service name='network/ntp' version='1' type='service'>
+ <instance name='default' enabled='false' />
+ </service>
+ <service name='network/rarp' version='1' type='service'>
+ <instance name='default' enabled='false' />
+ </service>
+ <service name='network/slp' version='1' type='service'>
+ <instance name='default' enabled='false' />
+ </service>
+ <service name='network/security/kadmin' version='1' type='service'>
+ <instance name='default' enabled='false' />
+ </service>
+ <service name='network/security/krb5_prop' version='1' type='service'>
+ <instance name='default' enabled='false' />
+ </service>
+ <service name='network/security/krb5kdc' version='1' type='service'>
+ <instance name='default' enabled='false' />
+ </service>
+
+ <!--
+ default inetd(1M) services disabled
+ -->
+ <service name='network/finger' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ </service>
+ <service name='network/ftp' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ </service>
+ <service name='network/login' version='1' type='service'>
+ <instance name='rlogin' enabled='false'/>
+ <!--
+ non-default inetd(1M) instances disabled
+ -->
+ <instance name='klogin' enabled='false'/>
+ <instance name='eklogin' enabled='false'/>
+ </service>
+ <service name='network/shell' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ <!--
+ non-default inetd(1M) instance disabled
+ -->
+ <instance name='kshell' enabled='false'/>
+ </service>
+ <service name='network/telnet' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ </service>
+
+ <!--
+ non-default inetd(1M) services disabled
+ -->
+ <service name='network/tname' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ </service>
+ <service name='network/uucp' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ </service>
+ <service name='network/chargen' version='1' type='service'>
+ <instance name='stream' enabled='false'/>
+ <instance name='dgram' enabled='false'/>
+ </service>
+ <service name='network/daytime' version='1' type='service'>
+ <instance name='stream' enabled='false'/>
+ <instance name='dgram' enabled='false'/>
+ </service>
+ <service name='network/discard' version='1' type='service'>
+ <instance name='stream' enabled='false'/>
+ <instance name='dgram' enabled='false'/>
+ </service>
+ <service name='network/echo' version='1' type='service'>
+ <instance name='stream' enabled='false'/>
+ <instance name='dgram' enabled='false'/>
+ </service>
+ <service name='network/time' version='1' type='service'>
+ <instance name='stream' enabled='false'/>
+ <instance name='dgram' enabled='false'/>
+ </service>
+ <service name='network/comsat' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ </service>
+ <service name='network/rexec' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ </service>
+ <service name='network/talk' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ </service>
+
+ <!--
+ default inetd(1M) RPC services enabled
+ -->
+ <service name='network/rpc/gss' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/rpc/mdcomm' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/rpc/meta' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/rpc/metamed' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/rpc/metamh' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/rpc/smserver' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/security/ktkt_warn' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+
+ <!--
+ default inetd(1M) RPC services disabled
+ -->
+ <service name='network/rpc/rstat' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ </service>
+ <service name='network/rpc/rusers' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ </service>
+
+ <!--
+ non-default inetd(1M) RPC services disabled
+ -->
+ <service name='network/rpc/ocfserv' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ </service>
+ <service name='network/rpc/rex' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ </service>
+ <service name='network/rpc/spray' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ </service>
+ <service name='network/rpc/wall' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ </service>
+
+</service_bundle>
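As the header of this profile notes, local changes belong in
/var/svc/profile/site.xml rather than in this delivered file. A minimal
sketch of what such a site profile could look like if, for example, a
site chose to re-enable ftp on top of this profile (the bundle name and
choice of service are illustrative only):

    <?xml version='1.0'?>
    <!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
    <service_bundle type='profile' name='site'>
        <service name='network/ftp' version='1' type='service'>
            <instance name='default' enabled='true'/>
        </service>
    </service_bundle>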
diff --git a/usr/src/cmd/svc/profile/generic_open.xml b/usr/src/cmd/svc/profile/generic_open.xml
new file mode 100644
index 0000000000..ccab05f9b9
--- /dev/null
+++ b/usr/src/cmd/svc/profile/generic_open.xml
@@ -0,0 +1,120 @@
+<?xml version='1.0'?>
+<!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ Default service profile, containing a typical set of active service
+ instances.
+
+ NOTE: Service profiles delivered by this package are not editable,
+ and their contents will be overwritten by package or patch
+ operations, including operating system upgrade. Make customizations
+ in a different file. The path, /var/svc/profile/site.xml, is a
+ distinguished location for a site-specific service profile, treated
+ otherwise equivalently to this file.
+-->
+<service_bundle type='profile' name='generic_open'
+ xmlns:xi='http://www.w3.org/2003/XInclude' >
+ <!--
+ Include name service profile, as set by system id tools.
+ -->
+ <xi:include href='file:/var/svc/profile/name_service.xml' />
+
+ <!--
+ svc.startd(1M) services
+ -->
+ <service name='system/coreadm' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='system/cron' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='system/cryptosvc' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='system/identity' version='1' type='service'>
+ <instance name='domain' enabled='true'/>
+ </service>
+ <service name='system/keymap' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='system/picl' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='system/sac' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='system/system-log' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='system/utmp' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='system/zones' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/rpc/bind' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='system/name-service-cache' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/nfs/status' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/nfs/nlockmgr' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/nfs/client' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/nfs/server' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/ssh' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/smtp' version='1' type='service'>
+ <instance name='sendmail' enabled='true'/>
+ </service>
+ <service name='network/inetd' version='1' type='restarter'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='system/filesystem/autofs' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='system/power' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='application/print/cleanup' version='1' type='service'>
+ <instance name='default' enabled='true' />
+ </service>
+
+ <!--
+ Include inetd(1M) services profile.
+ -->
+ <xi:include href='file:/var/svc/profile/inetd_services.xml' />
+</service_bundle>
diff --git a/usr/src/cmd/svc/profile/inetd_generic.xml b/usr/src/cmd/svc/profile/inetd_generic.xml
new file mode 100644
index 0000000000..cb963c52d5
--- /dev/null
+++ b/usr/src/cmd/svc/profile/inetd_generic.xml
@@ -0,0 +1,89 @@
+<?xml version='1.0'?>
+<!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ Service profile to enable default inetd services.
+
+ NOTE: Service profiles delivered by this package are not editable,
+ and their contents will be overwritten by package or patch
+ operations, including operating system upgrade. Make customizations
+ in a distinct file.
+-->
+<service_bundle type='profile' name='default'>
+ <!--
+ inetd(1M) services
+ -->
+ <service name='network/finger' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/ftp' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/login' version='1' type='service'>
+ <instance name='rlogin' enabled='true'/>
+ </service>
+ <service name='network/shell' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/telnet' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+
+ <!--
+ inetd(1M) RPC services
+ -->
+ <service name='network/nfs/rquota' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/rpc/gss' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/rpc/mdcomm' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/rpc/meta' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/rpc/metamed' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/rpc/metamh' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/rpc/rstat' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/rpc/rusers' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/rpc/smserver' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/security/ktkt_warn' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+</service_bundle>
diff --git a/usr/src/cmd/svc/profile/inetd_upgrade.xml b/usr/src/cmd/svc/profile/inetd_upgrade.xml
new file mode 100644
index 0000000000..e8f46260e0
--- /dev/null
+++ b/usr/src/cmd/svc/profile/inetd_upgrade.xml
@@ -0,0 +1,40 @@
+<?xml version='1.0'?>
+<!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ Service profile to enable inetd services following upgrade.
+
+ No services are listed because the inetd-upgrade service takes care
+ of any required enables on upgrade.
+
+ NOTE: Service profiles delivered by this package are not editable,
+ and their contents will be overwritten by package or patch
+ operations, including operating system upgrade. Make customizations
+ in a distinct file.
+-->
+<service_bundle type='profile' name='default'>
+</service_bundle>
diff --git a/usr/src/cmd/svc/profile/ns_dns.xml b/usr/src/cmd/svc/profile/ns_dns.xml
new file mode 100644
index 0000000000..2de280b7a0
--- /dev/null
+++ b/usr/src/cmd/svc/profile/ns_dns.xml
@@ -0,0 +1,42 @@
+<?xml version='1.0'?>
+<!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service profile is not editable; its contents will be
+ overwritten by package or patch operations, including operating
+ system upgrade.
+
+ Service profile to activate DNS.
+-->
+<service_bundle type='profile' name='default'>
+ <service name='network/service' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/dns/client' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+</service_bundle>
diff --git a/usr/src/cmd/svc/profile/ns_files.xml b/usr/src/cmd/svc/profile/ns_files.xml
new file mode 100644
index 0000000000..aeedfc4519
--- /dev/null
+++ b/usr/src/cmd/svc/profile/ns_files.xml
@@ -0,0 +1,36 @@
+<?xml version='1.0'?>
+<!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service profile is not editable; its contents will be
+ overwritten by package or patch operations, including operating
+ system upgrade.
+
+ Service profile to activate local file-based name services.
+-->
+<service_bundle type='profile' name='default'>
+</service_bundle>
diff --git a/usr/src/cmd/svc/profile/ns_ldap.xml b/usr/src/cmd/svc/profile/ns_ldap.xml
new file mode 100644
index 0000000000..41bc40dc82
--- /dev/null
+++ b/usr/src/cmd/svc/profile/ns_ldap.xml
@@ -0,0 +1,39 @@
+<?xml version='1.0'?>
+<!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service profile is not editable; its contents will be
+ overwritten by package or patch operations, including operating
+ system upgrade.
+
+ Service profile to activate LDAP client name service.
+-->
+<service_bundle type='profile' name='default'>
+ <service name='network/ldap/client' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+</service_bundle>
diff --git a/usr/src/cmd/svc/profile/ns_nis.xml b/usr/src/cmd/svc/profile/ns_nis.xml
new file mode 100644
index 0000000000..ac7df8fe65
--- /dev/null
+++ b/usr/src/cmd/svc/profile/ns_nis.xml
@@ -0,0 +1,42 @@
+<?xml version='1.0'?>
+<!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service profile is not editable; its contents will be
+ overwritten by package or patch operations, including operating
+ system upgrade.
+
+ Service profile to activate NIS client name service.
+-->
+<service_bundle type='profile' name='default'>
+ <service name='network/rpc/keyserv' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/nis/client' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+</service_bundle>
diff --git a/usr/src/cmd/svc/profile/ns_nisplus.xml b/usr/src/cmd/svc/profile/ns_nisplus.xml
new file mode 100644
index 0000000000..05b57e0d37
--- /dev/null
+++ b/usr/src/cmd/svc/profile/ns_nisplus.xml
@@ -0,0 +1,42 @@
+<?xml version='1.0'?>
+<!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service profile is not editable; its contents will be
+ overwritten by package or patch operations, including operating
+ system upgrade.
+
+ Service profile to activate NIS+ client name service.
+-->
+<service_bundle type='profile' name='default'>
+ <service name='network/rpc/keyserv' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='network/rpc/nisplus' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+</service_bundle>
diff --git a/usr/src/cmd/svc/profile/ns_none.xml b/usr/src/cmd/svc/profile/ns_none.xml
new file mode 100644
index 0000000000..f1293e7429
--- /dev/null
+++ b/usr/src/cmd/svc/profile/ns_none.xml
@@ -0,0 +1,68 @@
+<?xml version='1.0'?>
+<!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service profile is not editable; its contents will be
+ overwritten by package or patch operations, including operating
+ system upgrade.
+
+ Service profile to deactivate all network-dependent name services.
+-->
+<service_bundle type='profile' name='default'>
+ <!-- DNS -->
+ <service name='network/dns/client' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ </service>
+ <!-- LDAP -->
+ <service name='network/ldap/client' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ </service>
+ <!-- NIS, client and server -->
+ <service name='network/nis/client' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ </service>
+ <service name='network/nis/server' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ </service>
+ <service name='network/nis/passwd' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ </service>
+ <service name='network/nis/update' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ </service>
+ <service name='network/nis/xfr' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ </service>
+ <!-- NIS+ -->
+ <service name='network/rpc/nisplus' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ </service>
+ <!-- supporting services for NIS/NIS+ -->
+ <service name='network/rpc/keyserv' version='1' type='service'>
+ <instance name='default' enabled='false'/>
+ </service>
+</service_bundle>
diff --git a/usr/src/cmd/svc/profile/platform_SUNW,Sun-Fire-15000.xml b/usr/src/cmd/svc/profile/platform_SUNW,Sun-Fire-15000.xml
new file mode 100644
index 0000000000..31a5e66a52
--- /dev/null
+++ b/usr/src/cmd/svc/profile/platform_SUNW,Sun-Fire-15000.xml
@@ -0,0 +1,46 @@
+<?xml version='1.0'?>
+<!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service profile is not editable; its contents will be
+ overwritten by package or patch operations, including operating
+ system upgrade.
+
+ SunFire 15000 platform service profile.
+-->
+<service_bundle type='profile' name='default'>
+ <service name='system/cvc' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='platform/sun4u/dcs' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='platform/sun4u/efdaemon' version='1'
+ type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+</service_bundle>
diff --git a/usr/src/cmd/svc/profile/platform_SUNW,Sun-Fire-880.xml b/usr/src/cmd/svc/profile/platform_SUNW,Sun-Fire-880.xml
new file mode 100644
index 0000000000..8173cdd232
--- /dev/null
+++ b/usr/src/cmd/svc/profile/platform_SUNW,Sun-Fire-880.xml
@@ -0,0 +1,44 @@
+<?xml version='1.0'?>
+<!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service profile is not editable; its contents will be
+ overwritten by package or patch operations, including operating
+ system upgrade.
+
+ Profile for SunFire 880 and related platforms.
+-->
+<service_bundle type='profile' name='default'>
+ <service name='platform/sun4u/sf880drd' version='1'
+ type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='platform/sun4u/efdaemon' version='1'
+ type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+</service_bundle>
diff --git a/usr/src/cmd/svc/profile/platform_SUNW,Sun-Fire.xml b/usr/src/cmd/svc/profile/platform_SUNW,Sun-Fire.xml
new file mode 100644
index 0000000000..050a633a42
--- /dev/null
+++ b/usr/src/cmd/svc/profile/platform_SUNW,Sun-Fire.xml
@@ -0,0 +1,40 @@
+<?xml version='1.0'?>
+<!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service profile is not editable; its contents will be
+ overwritten by package or patch operations, including operating
+ system upgrade.
+
+ Profile for SunFire and related platforms.
+-->
+<service_bundle type='profile' name='default'>
+ <service name='platform/sun4u/efdaemon' version='1'
+ type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+</service_bundle>
diff --git a/usr/src/cmd/svc/profile/platform_SUNW,Ultra-Enterprise-10000.xml b/usr/src/cmd/svc/profile/platform_SUNW,Ultra-Enterprise-10000.xml
new file mode 100644
index 0000000000..fbb6c3d3a8
--- /dev/null
+++ b/usr/src/cmd/svc/profile/platform_SUNW,Ultra-Enterprise-10000.xml
@@ -0,0 +1,42 @@
+<?xml version='1.0'?>
+<!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service profile is not editable; its contents will be
+ overwritten by package or patch operations, including operating
+ system upgrade.
+
+ Ultra Enterprise 10000 platform service profile.
+-->
+<service_bundle type='profile' name='default'>
+ <service name='system/cvc' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+ <service name='platform/sun4u/dcs' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+</service_bundle>
diff --git a/usr/src/cmd/svc/profile/platform_SUNW,UltraSPARC-IIi-Netract.xml b/usr/src/cmd/svc/profile/platform_SUNW,UltraSPARC-IIi-Netract.xml
new file mode 100644
index 0000000000..50ae263b90
--- /dev/null
+++ b/usr/src/cmd/svc/profile/platform_SUNW,UltraSPARC-IIi-Netract.xml
@@ -0,0 +1,40 @@
+<?xml version='1.0'?>
+<!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service profile is not editable; its contents will be
+ overwritten by package or patch operations, including operating
+ system upgrade.
+
+ Profile for NetraCT and related platforms.
+-->
+<service_bundle type='profile' name='default'>
+ <service name='platform/sun4u/efdaemon' version='1'
+ type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+</service_bundle>
diff --git a/usr/src/cmd/svc/profile/platform_i86pc.xml b/usr/src/cmd/svc/profile/platform_i86pc.xml
new file mode 100644
index 0000000000..3c196487c1
--- /dev/null
+++ b/usr/src/cmd/svc/profile/platform_i86pc.xml
@@ -0,0 +1,39 @@
+<?xml version='1.0'?>
+<!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service profile is not editable; its contents will be
+ overwritten by package or patch operations, including operating
+ system upgrade.
+
+ Platform service profile for the i86pc platform.
+-->
+<service_bundle type='profile' name='i86pc'>
+ <service name='platform/i86pc/eeprom' version='1' type='service'>
+ <instance name='default' enabled='true'/>
+ </service>
+</service_bundle>
diff --git a/usr/src/cmd/svc/profile/platform_none.xml b/usr/src/cmd/svc/profile/platform_none.xml
new file mode 100644
index 0000000000..855ca94c34
--- /dev/null
+++ b/usr/src/cmd/svc/profile/platform_none.xml
@@ -0,0 +1,36 @@
+<?xml version='1.0'?>
+<!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
+<!--
+ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ Use is subject to license terms.
+
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License, Version 1.0 only
+ (the "License"). You may not use this file except in compliance
+ with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ ident "%Z%%M% %I% %E% SMI"
+
+ NOTE: This service profile is not editable; its contents will be
+ overwritten by package or patch operations, including operating
+ system upgrade.
+
+ Default service profile, containing non-specific platform services.
+-->
+<service_bundle type='profile' name='default'>
+</service_bundle>
diff --git a/usr/src/cmd/svc/prophist/Makefile b/usr/src/cmd/svc/prophist/Makefile
new file mode 100644
index 0000000000..93289e9904
--- /dev/null
+++ b/usr/src/cmd/svc/prophist/Makefile
@@ -0,0 +1,80 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+
+PROG = prophist
+OBJS = prophist.o \
+ manifest_hash.o
+LNTS = $(OBJS:%.o=%.ln)
+POFILES = $(OBJS:%.o=%.po)
+
+SRCS = prophist.c \
+ ../common/manifest_hash.c
+
+POFILES = $(OBJS:.o=.po)
+
+include ../../Makefile.cmd
+include ../Makefile.ctf
+
+ROOTCMDDIR= $(ROOT)/lib/svc/bin
+
+ROOTPROPHIST= $(ROOT)/var/svc/profile/prophist.SUNWcsr
+
+$(ROOTPROPHIST) := FILEMODE = 0444
+
+CPPFLAGS += -I../common
+LDLIBS += -lscf -luutil -lmd5
+CLOBBERFILES += $(POFILES)
+
+lint := LINTFLAGS = -mux
+
+.KEEP_STATE:
+
+all: $(PROG)
+
+$(PROG): $(OBJS)
+ $(LINK.c) -o $@ $(OBJS) $(LDLIBS) $(CTFMERGE_HOOK)
+ $(POST_PROCESS)
+
+install: all $(ROOTCMD) $(ROOTPROPHIST)
+
+clean:
+ $(RM) $(OBJS) $(POFILES) $(LNTS)
+
+lint: $(LNTS)
+ $(LINT.c) $(LINTFLAGS) $(LNTS) $(LDLIBS)
+
+%.o: ../common/%.c
+ $(COMPILE.c) $(OUTPUT_OPTION) $< $(CTFCONVERT_HOOK)
+ $(POST_PROCESS_O)
+
+%.ln: ../common/%.c
+ $(LINT.c) $(OUTPUT_OPTION) -c $<
+
+$(ROOT)/var/svc/profile/%: %
+ $(INS.file)
+
+include ../../Makefile.targ
diff --git a/usr/src/cmd/svc/prophist/prophist.SUNWcsr b/usr/src/cmd/svc/prophist/prophist.SUNWcsr
new file mode 100644
index 0000000000..44978057e8
--- /dev/null
+++ b/usr/src/cmd/svc/prophist/prophist.SUNWcsr
@@ -0,0 +1,700 @@
+#!/bin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+
+# prophist.SUNWcsr - historical property corrections for ON
+#
+# For builds prior to S10 final product release, certain manifests were
+# delivered with incorrect property values or dependencies. This file
+# contains corrected values and, optionally for each property, a series
+# of previous default values which should be corrected.
+#
+# With the arrival of manifest merging support, this file's contents
+# should be treated as fixed.
+#
+# NB: prophist_upgrade calls that have a hyphen-prefixed prior value
+# must use -- at the head of the prior value sequence. Value arguments
+# with spaces require doubly-nested quoting.
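+#
+# For illustration only (hypothetical FMRIs and values, not corrections
+# applied by this file), the two conventions above look like:
+#
+#	prophist_upgrade foo/bar stop timeout_seconds 60 -- -1
+#	prophist_adddep svc:/foo/bar deps service require_all none \
+#	    '("svc:/baz/one" "svc:/baz/two")'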
+
+# milestone/
+
+prophist_upgrade milestone/single-user start timeout_seconds 1800 30
+prophist_delete_dependency milestone/single-user physical-network
+prophist_adddep svc:/milestone/single-user network service optional_all none \
+ svc:/milestone/network
+prophist_addprop svc:/milestone/single-user startd framework timeout_retry \
+ boolean: false
+prophist_delete_dependency milestone/single-user sysidtool
+prophist_adddep svc:/milestone/single-user milestone-devices \
+ service require_all none svc:/milestone/devices
+instance_refresh milestone/single-user:default
+instance_clear milestone/single-user:default
+instance_clear system/sysidtool:net
+instance_clear system/sysidtool:system
+
+prophist_upgrade milestone/multi-user start timeout_seconds 1800 3
+prophist_adddep svc:/milestone/multi-user kdmconfig service optional_all none \
+ svc:/platform/i86pc/kdmconfig:default
+prophist_addprop svc:/milestone/multi-user startd framework timeout_retry \
+ boolean: false
+/usr/sbin/svccfg -s milestone/multi-user addpropvalue milestones/entities \
+ svc:/milestone/sysconfig
+instance_refresh milestone/multi-user:default
+
+prophist_upgrade milestone/multi-user-server start timeout_seconds 1800 3
+prophist_upgrade milestone/multi-user-server multi-user restart_on none refresh
+prophist_addprop svc:/milestone/multi-user-server startd framework \
+ timeout_retry boolean: false
+instance_refresh milestone/multi-user-server
+
+prophist_delete_dependency milestone/name-services nis_server
+instance_refresh milestone/name-services
+
+# system/
+
+prophist_upgrade system/consadm start timeout_seconds 60 2
+
+prophist_upgrade system/console-login start timeout_seconds 3 0
+if /usr/bin/svcprop -Cqp ttymon svc:/system/console-login; then :; else
+ /usr/sbin/svccfg -s svc:/system/console-login <<\END
+ addpg ttymon application
+ setprop ttymon/device = astring: /dev/console
+ setprop ttymon/label = astring: console
+ setprop ttymon/timeout = count: 0
+ setprop ttymon/nohangup = boolean: true
+ setprop ttymon/modules = astring: ldterm,ttcompat
+ setprop ttymon/prompt = astring: "`uname -n` console login:"
+END
+
+ if [ "`/usr/bin/uname -p`" = "i386" ]; then
+ /usr/sbin/svccfg -s svc:/system/console-login \
+ setprop ttymon/terminal_type = astring: sun-color
+ else
+ /usr/sbin/svccfg -s svc:/system/console-login \
+ setprop ttymon/terminal_type = astring: sun
+ fi
+fi
+prophist_delete_dependency system/console-login sysidtool
+prophist_adddep svc:/system/console-login sysconfig service require_all none \
+ svc:/milestone/sysconfig
+instance_refresh system/console-login
+
+prophist_upgrade system/coreadm start timeout_seconds 60 3
+prophist_upgrade system/coreadm stop timeout_seconds 60 0
+
+prophist_upgrade system/cron start timeout_seconds 60 6
+prophist_upgrade system/cron stop timeout_seconds 60 3
+prophist_adddpt svc:/system/cron cron_multi-user optional_all none \
+ svc:/milestone/multi-user
+prophist_addprop svc:/system/cron general framework action_authorization \
+ astring: solaris.smf.manage.cron
+instance_refresh svc:/milestone/multi-user:default
+instance_refresh svc:/system/cron:default
+
+prophist_upgrade system/cryptosvc start exec "/usr/sbin/cryptoadm %m" \
+ /lib/svc/method/crypto
+prophist_upgrade system/cryptosvc stop exec "/usr/sbin/cryptoadm %m" :kill
+prophist_addmeth svc:/system/cryptosvc refresh "/usr/sbin/cryptoadm %m" 60
+prophist_adddpt svc:/system/cryptosvc cryptosvc_single optional_all none \
+ svc:/milestone/single-user
+instance_refresh svc:/milestone/single-user:default
+instance_refresh system/cryptosvc:default
+
+prophist_upgrade system/device/local start timeout_seconds 6000 600
+
+prophist_upgrade system/filesystem/autofs start timeout_seconds 60 6
+prophist_upgrade system/filesystem/autofs stop timeout_seconds 60 15
+prophist_adddpt svc:/system/filesystem/autofs autofs_multi-user \
+ optional_all none svc:/milestone/multi-user
+prophist_addprop svc:/system/filesystem/autofs application framework \
+ stability astring: Evolving
+prophist_addprop svc:/system/filesystem/autofs application framework \
+ auto_enable boolean: true
+prophist_addprop svc:/system/filesystem/autofs general framework \
+ action_authorization astring: solaris.smf.manage.autofs
+prophist_upgrade system/filesystem/autofs stop exec \
+ "/lib/svc/method/svc-autofs %m %{restarter/contract}" \
+ "/lib/svc/method/svc-autofs %m"
+instance_refresh svc:/system/filesystem/autofs:default
+instance_refresh svc:/milestone/multi-user:default
+
+prophist_upgrade system/filesystem/minimal start timeout_seconds 0 30 3
+prophist_upgrade system/filesystem/local start timeout_seconds 0 30
+prophist_upgrade system/filesystem/usr start timeout_seconds 0 3
+prophist_upgrade system/filesystem/root start timeout_seconds 300 30
+
+prophist_delete_dependency system/fmd SUNfmd
+prophist_adddep svc:/system/fmd SUNWfmd path require_all none \
+ file://localhost/usr/lib/fm/fmd/fmd
+prophist_delete_dependency system/fmd startup
+prophist_adddep svc:/system/fmd startup_req service require_all none \
+ '("svc:/system/sysevent" "svc:/system/filesystem/minimal"' \
+ '"svc:/system/dumpadm")'
+prophist_adddep svc:/system/fmd startup_opt service optional_all none \
+ svc:/network/rpc/bind
+instance_refresh system/fmd:default
+
+prophist_delete_svc_pg system/identity domain tm_common_name
+prophist_delete_svc_pg system/identity domain tm_man_defaultdomain
+prophist_delete_svc_pg system/identity domain tm_man_domainname
+prophist_delete_svc_pg system/identity node tm_man_nodename
+
+prophist_upgrade system/mdmonitor start timeout_seconds 60 2
+prophist_upgrade system/mdmonitor stop timeout_seconds 60 2
+instance_refresh svc:/system/mdmonitor:default
+
+prophist_delete_dependency svc:/system/metainit usr
+prophist_adddpt svc:/system/metainit metainit-root optional_all none \
+ svc:/system/filesystem/root
+prophist_adddep svc:/system/metainit identity service require_all none \
+ svc:/system/identity:node
+prophist_upgrade system/metainit start timeout_seconds 180 10
+instance_refresh svc:/system/metainit:default
+
+prophist_addmeth svc:/system/manifest-import stop :true 3
+prophist_upgrade system/manifest-import start timeout_seconds 1800 3
+prophist_upgrade system/manifest-import stop timeout_seconds 3 -- -1
+instance_refresh svc:/system/manifest-import:default
+
+prophist_adddep svc:/system/name-service-cache filesystem \
+ service require_all none svc:/system/filesystem/minimal
+prophist_adddpt svc:/system/name-service-cache name-service-cache_multi-user \
+ optional_all none svc:/milestone/multi-user
+prophist_addprop svc:/system/name-service-cache general framework \
+ action_authorization astring: solaris.smf.manage.name-service-cache
+instance_refresh svc:/system/name-service-cache:default
+instance_refresh svc:/milestone/multi-user:default
+
+prophist_upgrade system/picl start timeout_seconds 60 30
+prophist_upgrade system/picl stop timeout_seconds 60 30
+
+prophist_upgrade system/power start timeout_seconds 60 6
+prophist_upgrade system/power stop timeout_seconds 60 6
+prophist_adddpt svc:/system/power power_multi-user optional_all none \
+ svc:/milestone/multi-user
+prophist_addprop svc:/system/power general framework action_authorization \
+ astring: solaris.smf.manage.power
+instance_refresh svc:/system/power:default
+instance_refresh svc:/milestone/multi-user:default
+
+prophist_upgrade system/rcap start timeout_seconds 60 3
+prophist_upgrade system/rcap refresh timeout_seconds 60 3
+prophist_upgrade system/rcap stop timeout_seconds 60 3
+prophist_adddpt svc:/system/rcap rcap_multi-user optional_all none \
+ svc:/milestone/multi-user
+instance_refresh svc:/system/rcap:default
+instance_refresh svc:/milestone/multi-user:default
+
+prophist_upgrade system/rmtmpfiles start timeout_seconds 30 3
+
+prophist_delete_dependency system/sac single-user
+prophist_adddep svc:/system/sac sysconfig service require_all none \
+ svc:/milestone/sysconfig
+instance_refresh svc:/system/sac:default
+
+prophist_upgrade system/sysevent start timeout_seconds 60 2
+prophist_upgrade system/sysevent stop timeout_seconds 60 2
+prophist_upgrade system/sysevent stop exec \
+ "/lib/svc/method/svc-syseventd %m %{restarter/contract}" \
+ "/lib/svc/method/svc-syseventd %m"
+instance_refresh svc:/system/sysevent:default
+instance_clear svc:/system/sysevent:default
+
+prophist_addmeth svc:/system/system-log refresh ":kill -HUP" 60
+prophist_adddep svc:/system/system-log filesystem service require_all none \
+ svc:/system/filesystem/local
+prophist_upgrade system/system-log start timeout_seconds 600 3
+prophist_upgrade system/system-log stop timeout_seconds 60 3
+prophist_upgrade system/system-log refresh timeout_seconds 60 3
+prophist_addprop svc:/system/system-log general framework action_authorization \
+ astring: solaris.smf.manage.system-log
+prophist_upgrade system/system-log milestone entities \
+ svc:/milestone/sysconfig svc:/milestone/single-user
+prophist_adddep svc:/system/system-log autofs service optional_all none \
+ svc:/system/filesystem/autofs
+prophist_adddep svc:/system/system-log name-services service require_all none \
+ svc:/milestone/name-services
+instance_refresh svc:/system/system-log:default
+
+prophist_upgrade system/utmp milestone entities \
+ svc:/milestone/sysconfig svc:/milestone/single-user
+instance_refresh system/utmp
+
+# network/
+
+prophist_adddep svc:/network/initial devices service require_all none \
+ svc:/milestone/devices
+prophist_adddep svc:/network/initial filesystem service require_all none \
+ svc:/system/filesystem/usr
+prophist_upgrade network/initial start timeout_seconds 600 3
+prophist_delete_dependency network/initial loopback
+prophist_delete_dependency network/initial physical
+prophist_adddep svc:/network/initial network service optional_all none \
+ svc:/milestone/network
+instance_refresh network/initial:default
+
+prophist_upgrade network/loopback:default start timeout_seconds 60 3
+prophist_upgrade network/physical:default start timeout_seconds 600 3
+prophist_upgrade network/service start timeout_seconds 600 3
+
+prophist_adddep svc:/network/inetd filesystem service require_all error \
+ svc:/system/filesystem/local
+prophist_adddep svc:/network/inetd upgrade service optional_all none \
+ svc:/network/inetd-upgrade
+prophist_adddpt svc:/network/inetd inetd_multi-user optional_all none \
+ svc:/milestone/multi-user
+prophist_delete_dependency network/inetd physical
+prophist_adddep svc:/network/inetd network service optional_all error \
+ svc:/milestone/network
+/usr/sbin/svccfg -s network/inetd delpropvalue milestones/entities \
+ svc:/milestone/single-user
+/usr/sbin/svccfg -s network/inetd addpropvalue milestones/entities \
+ svc:/milestone/sysconfig
+instance_refresh network/inetd:default
+instance_refresh svc:/milestone/multi-user:default
+
+prophist_delete_dependency svc:/network/inetd-upgrade network
+prophist_adddep svc:/network/inetd-upgrade filesystem \
+ service require_all error svc:/system/filesystem/local
+instance_refresh svc:/network/inetd-upgrade:default
+instance_clear svc:/network/inetd:default
+
+prophist_adddep svc:/network/ipfilter filesystem service require_all none \
+ svc:/system/filesystem/usr
+instance_refresh svc:/network/ipfilter:default
+
+prophist_delete_dependency network/dhcp-server milestone
+prophist_delete_dependency network/dhcp-server:default milestone
+prophist_adddep svc:/network/dhcp-server multi-user service require_all \
+ refresh svc:/milestone/multi-user
+prophist_adddpt svc:/network/dhcp-server dhcp_multi-user-server \
+ optional_all none svc:/milestone/multi-user-server
+instance_refresh network/dhcp-server:default
+instance_refresh svc:/milestone/multi-user-server:default
+instance_clear network/dhcp-server:default
+instance_clear milestone/multi-user:default
+instance_clear milestone/multi-user-server:default
+
+prophist_delete_dependency network/dns/client physical
+prophist_adddep svc:/network/dns/client network service optional_all error \
+ svc:/milestone/network
+instance_refresh network/dns/client:default
+
+prophist_upgrade network/ldap/client start timeout_seconds 120 60 30
+prophist_upgrade network/ldap/client stop timeout_seconds 60 10
+
+prophist_adddep svc:/network/nfs/cbd filesystem-minimal \
+ service require_all error svc:/system/filesystem/minimal
+prophist_addprop svc:/network/nfs/cbd application framework stability \
+ astring: Evolving
+prophist_addprop svc:/network/nfs/cbd application framework auto_enable \
+ boolean: true
+prophist_override network/nfs/cbd network entities "svc:/milestone/network"
+instance_refresh svc:/network/nfs/cbd
+
+prophist_adddep svc:/network/nfs/client nlockmgr service require_all error \
+ svc:/network/nfs/nlockmgr
+prophist_adddep svc:/network/nfs/client cbd service optional_all error \
+ svc:/network/nfs/cbd
+prophist_adddep svc:/network/nfs/client mapid service optional_all error \
+ svc:/network/nfs/mapid
+prophist_upgrade network/nfs/client start timeout_seconds 3600 60
+prophist_adddpt svc:/network/nfs/client nfs-client_multi-user \
+ optional_all none svc:/milestone/multi-user
+prophist_override network/nfs/client network entities "svc:/milestone/network"
+prophist_upgrade network/nfs/client stop timeout_seconds 60 600
+instance_refresh svc:/network/nfs/client:default
+instance_refresh svc:/milestone/multi-user:default
+
+prophist_adddep svc:/network/nfs/mapid filesystem-minimal \
+ service require_all error svc:/system/filesystem/minimal
+prophist_addprop svc:/network/nfs/mapid application framework stability \
+ astring: Evolving
+prophist_addprop svc:/network/nfs/mapid application framework auto_enable \
+ boolean: true
+prophist_override network/nfs/mapid network entities "svc:/milestone/network"
+instance_refresh svc:/network/nfs/mapid:default
+
+prophist_adddep svc:/network/nfs/nlockmgr filesystem-minimal \
+ service require_all error svc:/system/filesystem/minimal
+prophist_addprop svc:/network/nfs/nlockmgr application framework stability \
+ astring: Evolving
+prophist_addprop svc:/network/nfs/nlockmgr application framework auto_enable \
+ boolean: true
+prophist_override network/nfs/nlockmgr network entities "svc:/milestone/network"
+instance_refresh svc:/network/nfs/nlockmgr:default
+
+prophist_addprop svc:/network/nfs/rquota inetd framework proto \
+ astring: datagram_v
+prophist_addprop svc:/network/nfs/rquota application framework stability \
+ astring: Evolving
+prophist_addprop svc:/network/nfs/rquota application framework auto_enable \
+ boolean: true
+instance_refresh svc:/network/nfs/rquota:default
+
+prophist_adddep svc:/network/nfs/server nlockmgr service require_all error \
+ svc:/network/nfs/nlockmgr
+prophist_adddep svc:/network/nfs/server mapid service optional_all error \
+ svc:/network/nfs/mapid
+prophist_upgrade network/nfs/server start timeout_seconds 3600 60
+prophist_upgrade network/nfs/server stop timeout_seconds 3600 60
+prophist_adddpt svc:/network/nfs/server nfs-server_multi-user-server \
+ optional_all none svc:/milestone/multi-user-server
+prophist_addprop svc:/network/nfs/server application framework stability \
+ astring: Evolving
+prophist_addprop svc:/network/nfs/server application framework auto_enable \
+ boolean: true
+prophist_override network/nfs/server network entities "svc:/milestone/network"
+prophist_upgrade network/nfs/server stop exec \
+ "/lib/svc/method/nfs-server %m %{restarter/contract}" \
+ "/lib/svc/method/nfs-server %m"
+instance_refresh svc:/network/nfs/server:default
+instance_refresh svc:/milestone/multi-user-server:default
+
+prophist_adddep svc:/network/nfs/status filesystem-local service require_all \
+ error svc:/system/filesystem/local
+prophist_addprop svc:/network/nfs/status application framework stability \
+ astring: Evolving
+prophist_addprop svc:/network/nfs/status application framework auto_enable \
+ boolean: true
+prophist_override network/nfs/status network entities "svc:/milestone/network"
+instance_refresh svc:/network/nfs/status:default
+
+prophist_upgrade network/nis/client start timeout_seconds 300 30 3
+prophist_upgrade network/nis/client stop timeout_seconds 60 30 3
+prophist_upgrade network/nis/client start exec \
+ /lib/svc/method/yp "/usr/lib/netsvc/yp/ypstart client"
+prophist_upgrade network/nis/client yp_server grouping \
+ optional_all exclude_all
+instance_refresh svc:/network/nis/client:default
+instance_clear svc:/network/nis/client:default
+
+prophist_upgrade svc:/network/nis/server start exec /lib/svc/method/yp \
+ "/usr/lib/netsvc/yp/ypstart server"
+prophist_upgrade network/nis/server start timeout_seconds 300 30 3
+prophist_upgrade network/nis/server stop timeout_seconds 60 30 3
+instance_refresh svc:/network/nis/server:default
+
+prophist_upgrade network/ntp start timeout_seconds 1800 6
+prophist_upgrade network/ntp stop timeout_seconds 60 3
+prophist_upgrade network/ntp start exec "/lib/svc/method/xntp" \
+ "/lib/svc/method/xntp start"
+prophist_upgrade network/ntp stop exec ":kill" \
+ "/lib/svc/method/xntp stop"
+/usr/sbin/svccfg -s network/ntp delpropvalue paths/entities \
+ "file://localhost/etc/inet/ntp.conf"
+prophist_adddpt svc:/network/ntp ntp_multi-user optional_all none \
+ svc:/milestone/multi-user
+instance_refresh svc:/network/ntp:default
+instance_refresh svc:/milestone/multi-user:default
+
+prophist_upgrade network/pfil start exec \
+ "/lib/svc/method/pfil start" "/sbin/autopush -f /etc/ipf/pfil.ap"
+
+prophist_upgrade network/rarp start timeout_seconds 60 3
+prophist_upgrade network/rarp stop timeout_seconds 60 3
+prophist_adddpt svc:/network/rarp rarp_multi-user-server optional_all none \
+ svc:/milestone/multi-user-server
+prophist_delete_dependency network/rarp physical
+prophist_adddep svc:/network/rarp network service optional_all error \
+ svc:/milestone/network
+instance_refresh network/rarp:default
+instance_refresh svc:/milestone/multi-user-server:default
+
+prophist_delete_dependency svc:/network/rpc/bind refresh
+prophist_upgrade network/rpc/bind stop exec \
+ "/lib/svc/method/rpc-bind %m %{restarter/contract}" \
+ "/lib/svc/method/rpc-bind %m"
+instance_refresh network/rpc/bind:default
+
+prophist_upgrade network/rpc/bootparams start timeout_seconds 60 3
+prophist_upgrade network/rpc/bootparams stop timeout_seconds 60 3
+prophist_upgrade network/rpc/bootparams rpcbind restart_on restart error
+prophist_delete_dependency network/rpc/bootparams physical
+prophist_adddpt svc:/network/rpc/bootparams rpc-bootparams_multi-user-server \
+ optional_all none svc:/milestone/multi-user-server
+prophist_adddep svc:/network/rpc/bootparams network service require_all none \
+ svc:/milestone/network
+instance_refresh network/rpc/bootparams:default
+instance_refresh svc:/milestone/multi-user-server:default
+
+/usr/sbin/svcadm disable network/rpc/gss:ticotsord
+/usr/sbin/svccfg delete network/rpc/gss:ticotsord
+prophist_override network/rpc/gss inetd_start privileges \
+ "basic,!file_link_any,!proc_info,!proc_session,net_privaddr,file_chown,file_dac_read,file_dac_write"
+instance_refresh svc:/network/rpc/gss:default
+
+/usr/sbin/svcadm disable network/rpc/mdcomm:tcp6
+/usr/sbin/svcadm disable network/rpc/mdcomm:tcp
+/usr/sbin/svccfg delete network/rpc/mdcomm:tcp6
+/usr/sbin/svccfg delete network/rpc/mdcomm:tcp
+prophist_addprop svc:/network/rpc/mdcomm inetd framework proto astring: tcp
+prophist_override network/rpc/mdcomm inetd proto tcp
+instance_refresh svc:/network/rpc/mdcomm:default
+
+/usr/sbin/svcadm disable network/rpc/meta:tcp6
+/usr/sbin/svcadm disable network/rpc/meta:tcp
+/usr/sbin/svccfg delete network/rpc/meta:tcp6
+/usr/sbin/svccfg delete network/rpc/meta:tcp
+prophist_addprop svc:/network/rpc/meta inetd framework proto astring: tcp
+prophist_override network/rpc/meta inetd proto tcp
+instance_refresh svc:/network/rpc/meta:default
+
+/usr/sbin/svcadm disable network/rpc/metamed:tcp6
+/usr/sbin/svcadm disable network/rpc/metamed:tcp
+/usr/sbin/svccfg delete network/rpc/metamed:tcp6
+/usr/sbin/svccfg delete network/rpc/metamed:tcp
+prophist_addprop svc:/network/rpc/metamed inetd framework proto astring: tcp
+prophist_override network/rpc/metamed inetd proto tcp
+instance_refresh svc:/network/rpc/metamed:default
+
+/usr/sbin/svcadm disable network/rpc/metamh:tcp6
+/usr/sbin/svcadm disable network/rpc/metamh:tcp
+/usr/sbin/svccfg delete network/rpc/metamh:tcp6
+/usr/sbin/svccfg delete network/rpc/metamh:tcp
+prophist_addprop svc:/network/rpc/metamh inetd framework proto astring: tcp
+prophist_override network/rpc/metamh inetd proto tcp
+instance_refresh svc:/network/rpc/metamh:default
+
+prophist_addprop svc:/network/rpc/nisplus:default application application \
+ stability astring: Unstable
+prophist_addprop svc:/network/rpc/nisplus:default application application \
+ emulate_yp boolean: false
+instance_refresh svc:/network/rpc/nisplus:default
+
+prophist_upgrade network/security/kadmin start timeout_seconds 60 30
+prophist_upgrade network/security/kadmin stop timeout_seconds 60 30
+if /usr/bin/svcprop -Cqp start/use_profile svc:/network/security/kadmin; then
+ prophist_override network/security/kadmin start privileges \
+ "basic,!file_link_any,!proc_info,!proc_session,net_privaddr,proc_audit,file_dac_write"
+else
+ /usr/sbin/svccfg -s svc:/network/security/kadmin <<END
+ setprop start/working_directory = astring: :default
+ setprop start/project = astring: :default
+ setprop start/resource_pool = astring: :default
+ setprop start/use_profile = boolean: false
+ setprop start/user = astring: root
+ setprop start/group = astring: root
+ setprop start/supp_groups = astring: :default
+ setprop start/privileges = astring: \
+ basic,!file_link_any,!proc_info,!proc_session,net_privaddr,proc_audit,file_dac_write
+ setprop start/limit_privileges = astring: :default
+END
+fi
+instance_refresh svc:/network/security/kadmin:default
+
+prophist_upgrade network/security/krb5kdc start timeout_seconds 60 30
+prophist_upgrade network/security/krb5kdc stop timeout_seconds 60 30
+if /usr/bin/svcprop -Cqp start/use_profile svc:/network/security/krb5kdc; then
+ prophist_override network/security/krb5kdc start privileges \
+ "basic,!file_link_any,!proc_info,!proc_session,net_privaddr,proc_audit"
+else
+ /usr/sbin/svccfg -s svc:/network/security/krb5kdc <<END
+ setprop start/working_directory = astring: :default
+ setprop start/project = astring: :default
+ setprop start/resource_pool = astring: :default
+ setprop start/use_profile = boolean: false
+ setprop start/user = astring: root
+ setprop start/group = astring: root
+ setprop start/supp_groups = astring: :default
+ setprop start/privileges = astring: \
+ basic,!file_link_any,!proc_info,!proc_session,net_privaddr,proc_audit
+ setprop start/limit_privileges = astring: :default
+END
+fi
+instance_refresh svc:/network/security/krb5kdc:default
+
+if svcprop -q svc:/network/security/krb5_prop:tcp; then
+ /usr/sbin/svcadm disable svc:/network/security/krb5_prop:tcp
+ sleep 1
+ /usr/sbin/svccfg delete -f svc:/network/security/krb5_prop:tcp
+fi
+prophist_override network/security/krb5_prop inetd_start privileges \
+ "basic,!file_link_any,!proc_info,!proc_session"
+prophist_addprop svc:/network/security/krb5_prop inetd framework proto \
+ astring: tcp
+instance_refresh svc:/network/security/krb5_prop:default
+
+if svcprop -q svc:/network/security/ktkt_warn:ticotsord; then
+ /usr/sbin/svcadm disable svc:/network/security/ktkt_warn:ticotsord
+ sleep 1
+ /usr/sbin/svccfg delete -f svc:/network/security/ktkt_warn:ticotsord
+fi
+prophist_override network/security/ktkt_warn inetd_start privileges \
+ "basic,!file_link_any,!proc_info,!proc_session,proc_setid"
+prophist_addprop svc:/network/security/ktkt_warn inetd framework proto \
+ astring: ticotsord
+instance_refresh svc:/network/security/ktkt_warn:default
+
+prophist_addprop svc:/network/shell inetd framework proto \
+ astring: '("tcp" "tcp6only")'
+prophist_delete_dependency network/shell physical
+prophist_adddep svc:/network/shell network service optional_all error \
+ svc:/milestone/network
+instance_refresh network/shell:default
+instance_refresh network/shell:kshell
+
+prophist_upgrade network/slp start timeout_seconds 60 6
+prophist_upgrade network/slp stop timeout_seconds 60 3
+prophist_upgrade network/slp stop exec \
+ "/lib/svc/method/slp stop %{restarter/contract}" \
+ "/lib/svc/method/slp stop"
+prophist_delete_dependency network/slp physical
+prophist_adddep svc:/network/slp network service optional_all error \
+ svc:/milestone/network
+prophist_upgrade network/slp milestone entities \
+ svc:/milestone/sysconfig svc:/milestone/single-user
+instance_refresh network/slp:default
+
+prophist_upgrade network/smtp:sendmail start timeout_seconds 120 60 30
+prophist_upgrade network/smtp:sendmail stop timeout_seconds 60 30
+prophist_upgrade network/smtp:sendmail stop exec \
+ "/lib/svc/method/smtp-sendmail stop %{restarter/contract}" \
+ "/lib/svc/method/smtp-sendmail stop"
+prophist_upgrade network/smtp:sendmail refresh timeout_seconds 60 10
+prophist_upgrade network/smtp identity grouping optional_all require_all
+prophist_delete_pg svc:/network/smtp config-file
+prophist_adddep svc:/network/smtp:sendmail config-file \
+ path require_all refresh file://localhost/etc/mail/sendmail.cf
+prophist_delete_pg svc:/network/smtp nsswitch
+prophist_adddep svc:/network/smtp:sendmail nsswitch \
+ path require_all refresh file://localhost/etc/nsswitch.conf
+prophist_delete_pg svc:/network/smtp autofs
+prophist_adddep svc:/network/smtp:sendmail autofs service optional_all none \
+ svc:/system/filesystem/autofs
+prophist_delete_pg svc:/network/smtp start
+prophist_addmeth svc:/network/smtp:sendmail start \
+ "/lib/svc/method/smtp-sendmail start" 120
+prophist_delete_pg svc:/network/smtp stop
+prophist_addmeth svc:/network/smtp:sendmail stop \
+ "/lib/svc/method/smtp-sendmail stop" 60
+prophist_delete_pg svc:/network/smtp refresh
+prophist_addmeth svc:/network/smtp:sendmail refresh \
+ "/lib/svc/method/smtp-sendmail refresh" 60
+prophist_delete_svc_pg network/smtp sendmail tm_common_name
+prophist_delete_svc_pg network/smtp sendmail tm_man_sendmail
+prophist_adddpt svc:/network/smtp:sendmail smtp-sendmail_multi-user \
+ optional_all none svc:/milestone/multi-user
+prophist_addprop svc:/network/smtp:sendmail startd framework ignore_error \
+ astring: core,signal
+prophist_addprop svc:/network/smtp:sendmail general framework \
+ action_authorization astring: solaris.smf.manage.sendmail
+instance_refresh network/smtp:sendmail
+instance_refresh svc:/milestone/multi-user:default
+
+prophist_upgrade network/ssh start timeout_seconds 60 30
+prophist_upgrade network/ssh stop timeout_seconds 60 30
+prophist_upgrade network/ssh refresh timeout_seconds 60 30
+prophist_adddpt svc:/network/ssh ssh_multi-user-server optional_all none \
+ svc:/milestone/multi-user-server
+prophist_adddep svc:/network/ssh fs-local service require_all none \
+ svc:/system/filesystem/local
+prophist_adddep svc:/network/ssh fs-autofs service optional_all none \
+ svc:/system/filesystem/autofs
+prophist_adddep svc:/network/ssh net-loopback service require_all none \
+ svc:/network/loopback
+prophist_adddep svc:/network/ssh net-physical service require_all none \
+ svc:/network/physical
+prophist_adddep svc:/network/ssh utmp service require_all none \
+ svc:/system/utmp
+prophist_addprop svc:/network/ssh general framework action_authorization \
+ astring: solaris.smf.manage.ssh
+instance_refresh svc:/network/ssh:default
+instance_refresh svc:/milestone/multi-user-server:default
+
+# Add inetd_offline methods for some services
+for svc in chargen comsat daytime discard echo nfs/rquota rpc/gss rpc/rex \
+ rpc/ocfserv rpc/rstat rpc/rusers rpc/smserver rpc/spray rpc/wall \
+ security/ktkt_warn talk time tname; do
+ prophist_addmeth svc:/network/$svc inetd_offline :kill_process 0
+ instance_refresh svc:/network/$svc:default
+done
+
+# application/
+
+prophist_adddep svc:/application/print/cleanup filesystem \
+ service require_all none svc:/system/filesystem/minimal
+prophist_upgrade application/print/cleanup start timeout_seconds 60 10
+prophist_upgrade application/print/cleanup stop timeout_seconds 0 5 -- -1
+instance_refresh svc:/application/print/cleanup:default
+
+prophist_adddep svc:/application/print/server filesystem \
+ service require_all none svc:/system/filesystem/usr
+if /usr/bin/svcprop -Cqp lpsched svc:/application/print/server; then :; else
+ /usr/sbin/svccfg -s svc:/application/print/server <<END
+ addpg lpsched framework
+ setprop lpsched/num_notifiers = count: 0
+ setprop lpsched/num_filters = count: 0
+ setprop lpsched/fd_limit = count: 0
+ setprop lpsched/reserved_fds = count: 0
+END
+fi
+prophist_upgrade application/print/server start timeout_seconds 60 10
+prophist_upgrade application/print/server stop timeout_seconds 60 5
+prophist_adddpt svc:/application/print/server print-server_multi-user \
+ optional_all none svc:/milestone/multi-user
+prophist_addprop svc:/application/print/server general framework \
+ single_instance boolean: true
+prophist_adddep svc:/application/print/server fs-local service require_all \
+ none svc:/system/filesystem/local
+prophist_adddep svc:/application/print/server identity service require_all \
+ refresh svc:/system/identity:domain
+prophist_adddep svc:/application/print/server system-log service optional_all \
+ none svc:/system/system-log
+instance_refresh svc:/application/print/server:default
+
+# platform/
+
+if [ "`/usr/bin/uname -p`" = "i386" ]; then
+ # /i86pc/
+ prophist_upgrade platform/i86pc/eeprom start timeout_seconds 60 6
+
+ prophist_delete_dependency platform/i86pc/kdmconfig kdmconfig_console
+ prophist_delete_dependency system/console-login kdmconfig_console
+ instance_refresh platform/i86pc/kdmconfig
+else
+ # ! /i86pc/
+ prophist_upgrade system/cvc start timeout_seconds 60 5
+ prophist_upgrade system/cvc stop timeout_seconds 60 0
+
+ prophist_addprop svc:/platform/sun4u/dcs inetd framework proto \
+ astring: '("tcp" "tcp6only")'
+ prophist_addmeth svc:/platform/sun4u/dcs inetd_offline :kill_process 0
+ instance_refresh svc:/platform/sun4u/dcs:default
+
+ prophist_adddep svc:/platform/sun4u/mpxio-upgrade metainit \
+ service optional_all none svc:/system/metainit
+ instance_refresh svc:/platform/sun4u/mpxio-upgrade:default
+
+ prophist_upgrade platform/sun4u/sf880drd start timeout_seconds \
+ 60 3
+ prophist_upgrade platform/sun4u/sf880drd stop timeout_seconds \
+ 60 3
+ prophist_upgrade platform/sun4u/sf880drd milestone entities \
+ svc:/milestone/sysconfig svc:/milestone/single-user
+ instance_refresh platform/sun4u/sf880drd
+fi
diff --git a/usr/src/cmd/svc/prophist/prophist.c b/usr/src/cmd/svc/prophist/prophist.c
new file mode 100644
index 0000000000..8e115fb3e7
--- /dev/null
+++ b/usr/src/cmd/svc/prophist/prophist.c
@@ -0,0 +1,539 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * prophist - property history utility
+ *
+ * 1. Description
+ *
+ * During the development of smf(5), a set of service manifests was delivered
+ * that required subsequent changes. The bulk of these changes are in ON,
+ * although additional consolidations may possess one or two manifests that are
+ * affected. These incorrect values need to be smoothed into a correct
+ * configuration surface so that subsequent automatic merge technology can be
+ * introduced safely. The mechanism is the combination of this utility with a
+ * set of "property history" files.
+ *
+ * /var/svc/profile/prophist.SUNWcsr is delivered as an immutable file by the
+ * SUNWcsr packages. prophist.SUNWcsr covers the entire ON consolidation, for
+ * the purposes of collecting in one place what is essentially a temporary
+ * construct. Other consolidations should deliver /var/svc/profile/prophist.*
+ * files.
+ *
+ * The processing of the property history files occurs in
+ * svc:/system/manifest-import:default. Each prophist.* file is checked against
+ * its hashed value in smf/manifest using the "hash" subcommand. If a change is
+ * detected, the prophist.* file is sourced. These operations are carried out
+ * prior to any manifest being imported.
+ *
+ * 2. Interface
+ *
+ * prophist presents a subcommand style interface, with various suboptions to
+ * each subcommand:
+ *
+ * prophist delete -e FMRI -g pg [-p prop]
+ * prophist upgrade -e FMRI -g pg -p prop -n newval oldval ...
+ * prophist overwrite -e FMRI -g pg -p prop -n newval
+ * prophist hash file
+ *
+ * The hash subcommand uses an exit status of 3 to signal that a file requires
+ * processing. Otherwise, exit statuses of 0, 1, and 2 have their conventional
+ * meaning.
+ *
+ * 3. Limitations
+ *
+ * The present implementation has no support for multiply-valued properties.
+ * Manipulation of such properties should be done using a svccfg(1M) invocation
+ * in the appropriate prophist.* file.
+ */
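+
+/*
+ * Illustrative sketch (an assumption about how a caller such as the
+ * manifest-import method script consumes the hash subcommand; not code from
+ * that script): a prophist.* file is checked, and sourced only when an exit
+ * status of 3 reports that its stored hash has changed:
+ *
+ *	/lib/svc/bin/prophist hash /var/svc/profile/prophist.SUNWcsr
+ *	[ $? -eq 3 ] && . /var/svc/profile/prophist.SUNWcsr
+ */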
+
+#include <sys/types.h>
+
+#include <assert.h>
+#include <libintl.h>
+#include <libscf.h>
+#include <libscf_priv.h>
+#include <libuutil.h>
+#include <locale.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+
+#include <manifest_hash.h>
+
+#define OPTIONS_STR "e:g:n:p:"
+
+static int o_delete;
+static int o_hash;
+static int o_overwrite;
+
+static char *entity;
+static char *pgrp_name;
+static char *prop_name;
+static char *new_value;
+
+static scf_handle_t *hndl;
+static scf_service_t *svc;
+static scf_instance_t *inst;
+static scf_snapshot_t *snap;
+static scf_snaplevel_t *level;
+static scf_propertygroup_t *pg;
+static scf_property_t *prop;
+static scf_value_t *value;
+static scf_iter_t *iter;
+static scf_transaction_t *tx;
+static scf_transaction_entry_t *entry;
+
+static scf_type_t ptype;
+
+static char *valbuf;
+static ssize_t valbuf_sz;
+
+#define LG_BUFSIZ 1024 /* larger than a property name */
+static char namebuf[LG_BUFSIZ];
+
+static void
+usage()
+{
+ (void) fprintf(stderr, gettext(
+ "Usage:"
+ "\tprophist hash file\n"
+ "\tprophist delete -e FMRI -g pg [-p prop]\n"
+ "\tprophist overwrite -e FMRI -g pg -p prop -n newval\n"
+ "\tprophist upgrade -e FMRI -g pg -p prop -n newval oldval "
+ "...\n"));
+ exit(UU_EXIT_USAGE);
+}
+
+static void
+ready_scf_objects()
+{
+ if ((hndl = scf_handle_create(SCF_VERSION)) == NULL)
+ uu_die(gettext("handle creation failed: %s\n"),
+ scf_strerror(scf_error()));
+
+ if (scf_handle_bind(hndl) != 0)
+ uu_die(gettext("handle bind failed: %s\n"),
+ scf_strerror(scf_error()));
+
+ svc = scf_service_create(hndl);
+ inst = scf_instance_create(hndl);
+ snap = scf_snapshot_create(hndl);
+ level = scf_snaplevel_create(hndl);
+ pg = scf_pg_create(hndl);
+ prop = scf_property_create(hndl);
+ value = scf_value_create(hndl);
+ iter = scf_iter_create(hndl);
+ tx = scf_transaction_create(hndl);
+ entry = scf_entry_create(hndl);
+
+ if (svc == NULL ||
+ inst == NULL ||
+ snap == NULL ||
+ level == NULL ||
+ pg == NULL ||
+ prop == NULL ||
+ value == NULL ||
+ iter == NULL ||
+ tx == NULL ||
+ entry == NULL)
+ uu_die(gettext("object creation failed: %s\n"),
+ scf_strerror(scf_error()));
+
+ valbuf_sz = 4096;
+ valbuf = malloc(valbuf_sz);
+ if (valbuf == NULL)
+ uu_die(gettext("value buffer allocation failed"));
+}
+
+static int
+hash(char *arg)
+{
+ char *pname;
+ char *errstr;
+ int ret;
+ uchar_t hash[16];
+
+ ready_scf_objects();
+
+ switch (ret = mhash_test_file(hndl, arg, 0, &pname, hash)) {
+ case 1:
+ /* Equivalent hash already stored. */
+ return (0);
+ case 0:
+ /* Hash differs. */
+ break;
+ case -1:
+ uu_die(gettext("mhash_test_file() failed"));
+ default:
+ uu_die(gettext("unknown return value (%d) from "
+ "mhash_test_file()"), ret);
+ }
+
+ if (mhash_store_entry(hndl, pname, hash, &errstr)) {
+ if (errstr)
+ uu_die(errstr);
+ else
+ uu_die(gettext("Unknown error from "
+ "mhash_store_entry()\n"));
+ }
+
+ return (3);
+}
+
+static int
+delete_prop(scf_propertygroup_t *pg, char *prop_name)
+{
+ if (scf_transaction_start(tx, pg) != 0)
+ uu_die(gettext("transaction start failed: %s\n"),
+ scf_strerror(scf_error()));
+ if (scf_transaction_property_delete(tx, entry, prop_name) != 0)
+ uu_die(gettext("transaction property delete failed: %s\n"),
+ scf_strerror(scf_error()));
+ if (scf_transaction_commit(tx) != 1)
+ return (1);
+
+ return (0);
+}
+
+/*
+ * Returns 1 if target property group or property not found.
+ */
+static int
+delete_pg_or_prop(scf_iter_t *pg_iter, char *pgrp_name, char *prop_name)
+{
+ while (scf_iter_next_pg(pg_iter, pg) > 0) {
+ if (scf_pg_get_name(pg, namebuf, LG_BUFSIZ) == -1)
+ continue;
+
+ if (strcmp(namebuf, pgrp_name) != 0)
+ continue;
+
+ if (prop_name != NULL)
+ return (delete_prop(pg, prop_name));
+
+ if (scf_pg_delete(pg) != 0)
+ uu_die(gettext("property group delete failed: %s\n"),
+ scf_strerror(scf_error()));
+
+ return (0);
+ }
+
+ return (1);
+}
+
+/*
+ * Remove property group or property from both service and instance.
+ */
+static int
+delete(char *entity, char *pgrp_name, char *prop_name)
+{
+ ready_scf_objects();
+
+ if (scf_handle_decode_fmri(hndl, entity, NULL, svc, inst, NULL, NULL,
+ SCF_DECODE_FMRI_EXACT) == 0) {
+ (void) scf_iter_instance_pgs(iter, inst);
+ return (delete_pg_or_prop(iter, pgrp_name, prop_name));
+ }
+
+ if (scf_handle_decode_fmri(hndl, entity, NULL, svc, NULL, NULL,
+ NULL, SCF_DECODE_FMRI_EXACT) == 0) {
+ (void) scf_iter_service_pgs(iter, svc);
+ return (delete_pg_or_prop(iter, pgrp_name, prop_name));
+ }
+
+ uu_die(gettext("%s not decoded: %s\n"), entity,
+ scf_strerror(scf_error()));
+
+ /*NOTREACHED*/
+}
+
+static void
+replace_value(scf_propertygroup_t *pg, char *prop_name, char *new_value)
+{
+ int result;
+ int ret;
+
+ do {
+ if (scf_pg_update(pg) == -1)
+ uu_die(gettext("property group update failed: %s\n"),
+ scf_strerror(scf_error()));
+ if (scf_transaction_start(tx, pg) != SCF_SUCCESS) {
+ if (scf_error() == SCF_ERROR_PERMISSION_DENIED)
+ uu_die(gettext("permission denied\n"));
+
+ uu_die(gettext("transaction start failed: %s\n"),
+ scf_strerror(scf_error()));
+ }
+
+ ret = scf_pg_get_property(pg, prop_name, prop);
+ if (ret == SCF_SUCCESS) {
+ if (scf_property_type(prop, &ptype) != SCF_SUCCESS)
+ uu_die(gettext("couldn't get property type\n"));
+ if (scf_transaction_property_change_type(tx, entry,
+ prop_name, ptype) == -1)
+ uu_die(gettext("couldn't change entry\n"));
+ } else if (scf_error() == SCF_ERROR_INVALID_ARGUMENT) {
+ uu_die(gettext("illegal property name\n"));
+ } else {
+ uu_die(gettext("property fetch failed\n"));
+ }
+
+ if (scf_value_set_from_string(value, ptype,
+ (const char *)new_value) != 0) {
+ assert(scf_error() == SCF_ERROR_INVALID_ARGUMENT);
+ uu_die(gettext("Invalid \"%s\" value \"%s\".\n"),
+ scf_type_to_string(ptype), new_value);
+ }
+
+ ret = scf_entry_add_value(entry, value);
+ if (ret != SCF_SUCCESS)
+ uu_die(gettext("scf_entry_add_value failed: %s\n"),
+ scf_strerror(scf_error()));
+
+ assert(ret == SCF_SUCCESS);
+
+ result = scf_transaction_commit(tx);
+
+ scf_transaction_reset(tx);
+ scf_entry_destroy_children(entry);
+ } while (result == 0);
+
+ if (result < 0) {
+ if (scf_error() != SCF_ERROR_PERMISSION_DENIED)
+ uu_die(gettext("transaction commit failed: %s\n"),
+ scf_strerror(scf_error()));
+
+ uu_die(gettext("permission denied\n"));
+ }
+}
+
+static scf_propertygroup_t *
+get_pg(char *entity, char *pgrp_name, char *prop_name)
+{
+ scf_propertygroup_t *targetpg;
+
+ ready_scf_objects();
+
+ if (scf_handle_decode_fmri(hndl, entity, NULL, svc, inst, NULL, NULL,
+ SCF_DECODE_FMRI_EXACT) == 0) {
+ /*
+ * 1. Working at the instance level. The instance level
+ * contains one special case: general/enabled is active in the
+ * current version, and its value in snapshots is not relevant.
+ * Otherwise, pull from running snapshot.
+ */
+ if (strcmp(pgrp_name, "general") == 0 &&
+ strcmp(prop_name, "enabled") == 0) {
+ if (scf_instance_get_pg(inst, pgrp_name, pg) == 0)
+ return (pg);
+
+ uu_die(gettext("property group %s not available: %s\n"),
+ pgrp_name, scf_strerror(scf_error()));
+ }
+
+ if (scf_instance_get_snapshot(inst, "running", snap) == -1) {
+ if (scf_instance_get_pg(inst, pgrp_name, pg) == 0)
+ return (pg);
+
+ uu_die(gettext("property group %s not available: %s\n"),
+ pgrp_name, scf_strerror(scf_error()));
+ }
+
+ if (scf_snapshot_get_base_snaplevel(snap, level) != 0)
+ uu_die(gettext("base snaplevel not available: %s\n"),
+ scf_strerror(scf_error()));
+
+ if (scf_snaplevel_get_pg(level, pgrp_name, pg) == -1)
+ uu_die(gettext("property group %s not available: %s\n"),
+ pgrp_name, scf_strerror(scf_error()));
+
+ targetpg = scf_pg_create(hndl);
+ if (scf_instance_get_pg(inst, pgrp_name, targetpg) == -1)
+ uu_die(gettext("property group %s not available: %s\n"),
+ pgrp_name, scf_strerror(scf_error()));
+
+ return (targetpg);
+ }
+
+ if (scf_handle_decode_fmri(hndl, entity, NULL, svc, NULL, NULL,
+ NULL, SCF_DECODE_FMRI_EXACT) == 0) {
+ /*
+ * 2. Working at the service level.
+ */
+ if (scf_service_get_pg(svc, pgrp_name, pg) == 0)
+ return (pg);
+
+ uu_die(gettext("property group %s not available: %s\n"),
+ pgrp_name, scf_strerror(scf_error()));
+ }
+
+ /*
+ * 3. Cannot decode either instance or service exactly.
+ */
+ uu_die(gettext("%s not decoded: %s\n"), entity,
+ scf_strerror(scf_error()));
+
+ /*NOTREACHED*/
+}
+
+static int
+upgrade(char *entity, char *pgrp_name, char *prop_name, char *new_value,
+ int argc, char *argv[], int optind)
+{
+ int replace = 0;
+ int vals = 0;
+ scf_propertygroup_t *targetpg;
+
+ targetpg = get_pg(entity, pgrp_name, prop_name);
+
+ if (scf_pg_get_property(targetpg, prop_name, prop) != 0)
+ uu_die(gettext("property %s/%s not available: %s\n"), pgrp_name,
+ prop_name, scf_strerror(scf_error()));
+
+ if (scf_iter_property_values(iter, prop) != 0)
+ uu_die(gettext("could not establish value iterator: %s\n"),
+ scf_strerror(scf_error()));
+
+ while (scf_iter_next_value(iter, value) == 1) {
+ if (scf_value_get_as_string(value, valbuf, valbuf_sz) < 0)
+ uu_die(gettext("string value get failed: %s\n"),
+ scf_strerror(scf_error()));
+
+ for (; optind < argc; optind++)
+ if (strcmp(valbuf, argv[optind]) == 0) {
+ replace = 1;
+ break;
+ }
+
+ vals++;
+ if (vals > 1)
+ uu_die(gettext("too many values to upgrade\n"));
+ }
+
+ if (replace)
+ replace_value(targetpg, prop_name, new_value);
+
+ return (0);
+}
+
+static int
+overwrite(char *entity, char *pgrp_name, char *prop_name, char *new_value)
+{
+ scf_propertygroup_t *targetpg;
+
+ targetpg = get_pg(entity, pgrp_name, prop_name);
+
+ if (scf_pg_get_property(targetpg, prop_name, prop) != 0)
+ uu_die(gettext("property %s/%s not available: %s\n"), pgrp_name,
+ prop_name, scf_strerror(scf_error()));
+
+ replace_value(targetpg, prop_name, new_value);
+
+ return (0);
+}
+
+int
+main(int argc, char *argv[])
+{
+ int c;
+
+ if (argc < 2)
+ usage();
+
+ if (strcmp(argv[1], "hash") == 0)
+ o_hash = 1;
+ else if (strcmp(argv[1], "delete") == 0)
+ o_delete = 1;
+ else if (strcmp(argv[1], "overwrite") == 0)
+ o_overwrite = 1;
+ else if (strcmp(argv[1], "upgrade") != 0)
+ usage();
+
+ (void) uu_setpname(argv[0]);
+
+ argv++;
+ argc--;
+
+ while ((c = getopt(argc, argv, OPTIONS_STR)) != EOF) {
+ switch (c) {
+ case 'e':
+ entity = optarg;
+ break;
+ case 'g':
+ pgrp_name = optarg;
+ break;
+ case 'n':
+ new_value = optarg;
+ break;
+ case 'p':
+ prop_name = optarg;
+ break;
+ case '?':
+ default:
+ usage();
+ break;
+ }
+ }
+
+ if (o_hash) {
+ if (entity != NULL ||
+ pgrp_name != NULL ||
+ prop_name != NULL ||
+ new_value != NULL)
+ usage();
+
+ return (hash(argv[optind]));
+ }
+
+ if (entity == NULL)
+ usage();
+
+ if (o_delete) {
+ if (pgrp_name == NULL ||
+ new_value != NULL ||
+ optind < argc)
+ usage();
+
+ return (delete(entity, pgrp_name, prop_name));
+ }
+
+ if (pgrp_name == NULL || prop_name == NULL || new_value == NULL)
+ usage();
+
+ if (o_overwrite)
+ return (overwrite(entity, pgrp_name, prop_name, new_value));
+
+ if (optind >= argc)
+ usage();
+
+ return (upgrade(entity, pgrp_name, prop_name, new_value, argc, argv,
+ optind));
+}
diff --git a/usr/src/cmd/svc/req.flg b/usr/src/cmd/svc/req.flg
new file mode 100644
index 0000000000..ef651ef257
--- /dev/null
+++ b/usr/src/cmd/svc/req.flg
@@ -0,0 +1,41 @@
+#!/bin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+
+echo_file usr/src/cmd/svc/Makefile.ctf
+find_files "s.*" \
+ usr/src/cmd/svc/common \
+ usr/src/common/svc \
+ usr/src/lib/libscf \
+ usr/src/lib/libuutil
+
+exec_file usr/src/lib/libscf/inc.flg
+exec_file usr/src/lib/libuutil/inc.flg
+
+echo_file usr/src/lib/req.flg
+exec_file usr/src/lib/req.flg
diff --git a/usr/src/cmd/svc/seed/Makefile b/usr/src/cmd/svc/seed/Makefile
new file mode 100644
index 0000000000..784255d7f9
--- /dev/null
+++ b/usr/src/cmd/svc/seed/Makefile
@@ -0,0 +1,153 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+
+include ../../Makefile.cmd
+
+ETCSVC = $(ROOTETC)/svc
+LIBSVCSEED = $(ROOT)/lib/svc/seed
+
+#
+# Because seed repository construction requires a functioning repository, a
+# working svccfg(1) binary, and XML support, the following libraries must exist
+# on the build system or in the proto area: libscf, libuutil, and libxml2.
+#
+
+#
+# GLOBAL_ZONE_DESCRIPTIONS and NONGLOBAL_ZONE_DESCRIPTIONS contain the
+# services used to define a 'seed repository' for a standalone Solaris
+# instance or for a zone, respectively. A service needed for either one of
+# these seeds must be added to the appropriate macro. The definition of a seed
+# repository is a self-consistent set of services that can boot.
+#
+GLOBAL_ZONE_DESCRIPTIONS = \
+ ../milestone/boot-archive.xml \
+ ../milestone/console-login.xml \
+ ../milestone/datalink.xml \
+ ../milestone/datalink-init.xml \
+ ../milestone/devices-local.xml \
+ ../milestone/identity.xml \
+ ../milestone/local-fs.xml \
+ ../milestone/manifest-import.xml \
+ ../milestone/minimal-fs.xml \
+ ../milestone/multi-user-server.xml \
+ ../milestone/multi-user.xml \
+ ../milestone/name-services.xml \
+ ../milestone/aggregation.xml \
+ ../milestone/network-initial.xml \
+ ../milestone/network-loopback.xml \
+ ../milestone/network-physical.xml \
+ ../milestone/restarter.xml \
+ ../milestone/root-fs.xml \
+ ../milestone/single-user.xml \
+ ../milestone/usr-fs.xml \
+ ../../rpcbind/bind.xml \
+ ../../cmd-inet/usr.lib/inetd/inetd-upgrade.xml \
+ ../../utmpd/utmp.xml \
+ ../../lvm/util/metainit.xml \
+ ../../ipf/svc/pfil.xml
+
+NONGLOBAL_ZONE_DESCRIPTIONS = \
+ ../milestone/boot-archive.xml \
+ ../milestone/console-login.xml \
+ ../milestone/datalink.xml \
+ ../milestone/devices-local.xml \
+ ../milestone/identity.xml \
+ ../milestone/local-fs.xml \
+ ../milestone/manifest-import.xml \
+ ../milestone/minimal-fs.xml \
+ ../milestone/multi-user-server.xml \
+ ../milestone/multi-user.xml \
+ ../milestone/name-services.xml \
+ ../milestone/aggregation.xml \
+ ../milestone/network-initial.xml \
+ ../milestone/network-loopback.xml \
+ ../milestone/network-physical.xml \
+ ../milestone/restarter.xml \
+ ../milestone/root-fs.xml \
+ ../milestone/single-user.xml \
+ ../milestone/usr-fs.xml \
+ ../../rpcbind/bind.xml \
+ ../../utmpd/utmp.xml
+
+OWNER = root
+GROUP = sys
+FILEMODE = 0600
+SEEDFILEMODE = 0444 # seeds are not intended for editing, but may
+ # be copied
+
+CONFIGD = ../configd/svc.configd-native
+SVCCFG = ../svccfg/svccfg-native
+
+.KEEP_STATE:
+
+all: global.db nonglobal.db
+
+$(CONFIGD): FRC
+ @cd ../configd; pwd; $(MAKE) $(MFLAGS) native
+
+$(SVCCFG): FRC
+ @cd ../svccfg; pwd; $(MAKE) $(MFLAGS) native
+
+../milestone/console-login.xml:
+ @cd ../milestone; pwd; $(MAKE) $(MFLAGS) console-login.xml
+
+global.db: $(GLOBAL_ZONE_DESCRIPTIONS) $(CONFIGD) $(SVCCFG)
+ $(RM) -f global.db global.db-journal
+ for m in $(GLOBAL_ZONE_DESCRIPTIONS); do \
+ echo $$m; \
+ SVCCFG_DTD=../dtd/service_bundle.dtd.1 \
+ SVCCFG_REPOSITORY=$(SRC)/cmd/svc/seed/global.db \
+ SVCCFG_CONFIGD_PATH=$(CONFIGD) \
+ $(SVCCFG) import $$m; \
+ done
+
+nonglobal.db: $(NONGLOBAL_ZONE_DESCRIPTIONS) $(CONFIGD) $(SVCCFG)
+	$(RM) -f nonglobal.db nonglobal.db-journal
+ for m in $(NONGLOBAL_ZONE_DESCRIPTIONS); do \
+ echo $$m; \
+ SVCCFG_DTD=../dtd/service_bundle.dtd.1 \
+ SVCCFG_REPOSITORY=$(SRC)/cmd/svc/seed/nonglobal.db \
+ SVCCFG_CONFIGD_PATH=$(CONFIGD) \
+ $(SVCCFG) import $$m; \
+ done
+
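+# The two rules above can be reproduced by hand when experimenting with seed
+# construction; this is an illustrative sketch only, and the repository path
+# below is a hypothetical example:
+#
+#	SVCCFG_DTD=../dtd/service_bundle.dtd.1 \
+#	SVCCFG_REPOSITORY=/tmp/seed-test.db \
+#	SVCCFG_CONFIGD_PATH=../configd/svc.configd-native \
+#	../svccfg/svccfg-native import ../milestone/single-user.xml
+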
+install: install_global install_nonglobal
+
+install_global: global.db
+ $(RM) $(LIBSVCSEED)/global.db
+ $(INS) -f $(LIBSVCSEED) -m $(SEEDFILEMODE) -s global.db
+
+install_nonglobal: nonglobal.db
+ $(RM) $(LIBSVCSEED)/nonglobal.db
+ $(INS) -f $(LIBSVCSEED) -m $(SEEDFILEMODE) -s nonglobal.db
+
+clean lint:
+
+clobber:
+ $(RM) global.db nonglobal.db
+
+FRC:
diff --git a/usr/src/cmd/svc/seed/inc.flg b/usr/src/cmd/svc/seed/inc.flg
new file mode 100644
index 0000000000..c8adc2a41f
--- /dev/null
+++ b/usr/src/cmd/svc/seed/inc.flg
@@ -0,0 +1,36 @@
+#!/bin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+
+echo_file usr/src/cmd/cmd-inet/usr.lib/inetd/inetd-upgrade.xml
+echo_file usr/src/cmd/rpcbind/bind.xml
+echo_file usr/src/cmd/utmpd/utmp.xml
+echo_file usr/src/cmd/lvm/util/metainit.xml
+echo_file usr/src/cmd/lvm/md_monitord/mdmonitor.xml
+echo_file usr/src/cmd/ipf/svc/pfil.xml
+find_files "s.*.xml" usr/src/cmd/svc/milestone
diff --git a/usr/src/cmd/svc/shell/Makefile b/usr/src/cmd/svc/shell/Makefile
new file mode 100644
index 0000000000..b127962cf5
--- /dev/null
+++ b/usr/src/cmd/svc/shell/Makefile
@@ -0,0 +1,47 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+
+include ../../Makefile.cmd
+
+OWNER = root
+GROUP = bin
+FILEMODE = 0444
+
+SRCS = \
+ fs_include.sh \
+ krb_include.sh \
+ net_include.sh \
+ smf_include.sh
+
+SCRIPTS = $(SRCS:%=$(ROOT)/lib/svc/share/%)
+
+install: $(SCRIPTS)
+
+$(ROOT)/lib/svc/share/%: %
+ $(INS.file)
+
+all lint clobber clean _msg:
diff --git a/usr/src/cmd/svc/shell/fs_include.sh b/usr/src/cmd/svc/shell/fs_include.sh
new file mode 100644
index 0000000000..9d0b6d3512
--- /dev/null
+++ b/usr/src/cmd/svc/shell/fs_include.sh
@@ -0,0 +1,309 @@
+#!/bin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T.
+# All rights reserved.
+#
+#
+#ident "%Z%%M% %I% %E% SMI"
+
+vfstab=${vfstab:=/etc/vfstab}
+
+#
+# readvfstab mount_point
+# -> (special, fsckdev, mountp, fstype, fsckpass, automnt, mntopts)
+#
+# A vfstab-like input stream is scanned for the mount point specified
+# as $1. Returns the fields of vfstab in the following shell
+# variables:
+#
+# special block device
+# fsckdev raw device
+# mountp mount point (must match $1, if found)
+# fstype file system type
+# fsckpass fsck(1M) pass number
+# automnt automount flag (yes or no)
+# mntopts file system-specific mount options.
+#
+# If the mount point cannot be found in the standard input stream,
+# then all fields are set to empty values. This function assumes that
+# stdin is already set to /etc/vfstab (or another appropriate input
+# stream).
+#
+readvfstab() {
+ while read special fsckdev mountp fstype fsckpass automnt mntopts; do
+ case "$special" in
+ '' ) # Ignore empty lines.
+ continue
+ ;;
+
+ '#'* ) # Ignore comment lines.
+ continue
+ ;;
+
+ '-') # Ignore "no-action" lines.
+ continue
+ ;;
+ esac
+
+ [ "x$mountp" = "x$1" ] && break
+ done
+}
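+
+# Example (a sketch; assumes the /usr entry exists in $vfstab):
+#
+#	readvfstab /usr < $vfstab
+#	[ -n "$mountp" ] && echo "/usr is $special, type $fstype"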
+
+cecho() {
+ echo $*
+ echo $* >/dev/msglog
+}
+
+#
+# checkmessage raw_device fstype mountpoint
+# checkmessage2 raw_device fstype mountpoint
+#
+# Two simple auxiliary routines for the shell function checkfs. Both
+# display instructions for a manual file system check.
+#
+checkmessage() {
+ cecho ""
+ cecho "WARNING - Unable to repair the $3 filesystem. Run fsck"
+ cecho "manually (fsck -F $2 $1)."
+ cecho ""
+}
+
+checkmessage2() {
+ cecho ""
+ cecho "WARNING - fatal error from fsck - error $4"
+ cecho "Unable to repair the $3 filesystem. Run fsck manually"
+ cecho "(fsck -F $2 $1)."
+ cecho ""
+}
+
+#
+# checkfs raw_device fstype mountpoint
+#
+# Check the file system specified. The return codes from fsck have the
+# following meanings.
+#
+# 0 file system is unmounted and okay
+# 32 file system is unmounted and needs checking (fsck -m only)
+# 33 file system is already mounted
+# 34 cannot stat device
+# 36 uncorrectable errors detected - terminate normally (4.1 code 8)
+# 37 a signal was caught during processing (4.1 exit 12)
+#	39	uncorrectable errors detected - terminate right away (4.1 code 8)
+# 40 for root, same as 0 (used here to remount root)
+#
+checkfs() {
+ # skip checking if the fsckdev is "-"
+ [ "x$1" = x- ] && return
+
+ # if fsck isn't present, it is probably because either the mount of
+	# /usr failed or the /usr filesystem is badly damaged. In either
+ # case, there is not much to be done automatically. Fail with
+ # a message to the user.
+
+ if [ ! -x /usr/sbin/fsck ]; then
+ cecho ""
+ cecho "WARNING - /usr/sbin/fsck not found. Most likely the"
+ cecho "mount of /usr failed or the /usr filesystem is badly"
+ cecho "damaged."
+ cecho ""
+ return 1
+ fi
+
+ /usr/sbin/fsck -F $2 -m $1 >/dev/null 2>&1
+
+ if [ $? -ne 0 ]; then
+ # Determine fsck options by file system type
+ case $2 in
+ ufs) foptions="-o p"
+ ;;
+ *) foptions="-y"
+ ;;
+ esac
+
+ cecho "The $3 file system ($1) is being checked."
+ /usr/sbin/fsck -F $2 $foptions $1
+
+ case $? in
+ 0|40) # File system OK
+ ;;
+
+ 1|34|36|37|39) # couldn't fix the file system - fail
+ checkmessage "$1" "$2" "$3"
+ return 1
+ ;;
+
+ *) # fsck child process killed (+ error code 35)
+ checkmessage2 "$1" "$2" "$3" "$?"
+ return 1
+ ;;
+ esac
+ fi
+
+ return 0
+}
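+
+# Example (the device path is a hypothetical illustration):
+#
+#	checkfs /dev/rdsk/c0t0d0s6 ufs /usr || exit 1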
+
+#
+# checkopt option option-string
+# -> ($option, $otherops)
+#
+# Check to see if a given mount option is present in the comma
+# separated list gotten from vfstab.
+#
+# Returns:
+#	${option} : the option if found, the empty string if not found
+# ${otherops} : the option string with the found option deleted
+#
+checkopt() {
+ option=
+ otherops=
+
+ [ "x$2" = x- ] && return
+
+ searchop="$1"
+ set -- `IFS=, ; echo $2`
+
+ while [ $# -gt 0 ]; do
+ if [ "x$1" = "x$searchop" ]; then
+ option="$1"
+ else
+ if [ -z "$otherops" ]; then
+ otherops="$1"
+ else
+ otherops="${otherops},$1"
+ fi
+ fi
+ shift
+ done
+}
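+
+# Example: pull "ro" out of a vfstab option list.
+#
+#	checkopt ro "ro,intr,largefiles"
+#	# -> option="ro", otherops="intr,largefiles"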
+
+#
+# hasopts $opts $allopts
+#
+# Check if all options from the list $opts are present in $allopts.
+# Both $opts and $allopts should be in comma separated format.
+#
+# Return 0 on success, and 1 otherwise.
+#
+hasopts() {
+ opts="$1"
+ allopts="$2"
+
+ set -- `IFS=, ; echo $opts`
+ while [ $# -gt 0 ]; do
+ if [ "$1" != "remount" ]; then
+ checkopt $1 $allopts
+ #
+ # Don't report errors if the filesystem is already
+ # read-write when mounting it as read-only.
+ #
+ [ -z "$option" ] && [ "$1" = "ro" ] && \
+ checkopt rw $allopts
+ [ -z "$option" ] && return 1
+ fi
+ shift
+ done
+ return 0
+}
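+
+# Example: a filesystem mounted "rw,intr,largefiles" satisfies a request
+# for "rw,intr".
+#
+#	hasopts rw,intr rw,intr,largefiles && echo "all requested options present"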
+
+#
+# mounted $path $fsopts $fstype
+#
+# Check whether the specified file system of the given type is currently
+# mounted with all required filesystem options by going through /etc/mnttab
+# in our standard input.
+#
+# Return values:
+# 0 Success.
+# 1 The filesystem is not currently mounted, or mounted without required
+# options, or a filesystem of a different type is mounted instead.
+#
+mounted() {
+ path="$1"
+ fsopts="$2"
+ fstype="$3"
+
+ while read mntspec mntpath mnttype mntopts on; do
+ [ "$mntpath" = "$path" ] || continue
+ [ "$fstype" != "-" ] && [ "$mnttype" != "$fstype" ] && return 1
+ [ "$fsopts" = "-" ] && return 0
+ hasopts $fsopts $mntopts && return 0
+ done
+ return 1
+}
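+
+# Example (a sketch): is /usr already mounted read-write as ufs?
+#
+#	mounted /usr rw ufs < /etc/mnttab && echo "/usr already mounted"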
+
+#
+# mountfs $opts $path $type $fsopts $special
+#
+# Try to mount a filesystem. If failed, display our standard error
+# message on the console and print more details about what happened
+# to our service log.
+#
+# Arguments:
+# $opts - options for mount(1M) [optional]
+# $path - mount point
+# $type - file system type [optional]
+# $fsopts - file system specific options (-o) [optional]
+# $special - device on which the file system resides [optional]
+#
+# Return codes:
+# 0 - success.
+# otherwise - error code returned by mount(1M).
+#
+mountfs() {
+ opts="$1"
+ path="$2"
+ special="$5"
+
+ #
+ # Take care of optional arguments
+ #
+ [ "$opts" = "-" ] && opts=""
+ [ "$special" = "-" ] && special=""
+ [ "$3" = "-" ] && type=""
+ [ "$3" != "-" ] && type="-F $3"
+ [ "$4" = "-" ] && fsopts=""
+ [ "$4" != "-" ] && fsopts="-o $4"
+
+ cmd="/sbin/mount $opts $type $fsopts $special $path"
+ msg=`$cmd 2>&1`
+ err=$?
+
+ [ $err = 0 ] && return 0
+
+ #
+ # If the specified file system is already mounted with all
+ # required options, and has the same filesystem type
+ # then ignore errors and return success
+ #
+ mounted $path $4 $3 < /etc/mnttab && return 0
+
+ echo "ERROR: $SMF_FMRI failed to mount $path "\
+ "(see 'svcs -x' for details)" > /dev/msglog
+ echo "ERROR: $cmd failed, err=$err"
+ echo $msg
+ return $err
+}
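+
+# Example (the device below is a hypothetical illustration; "-" stands for
+# an omitted optional argument):
+#
+#	mountfs - /usr ufs - /dev/dsk/c0t0d0s6 || exit $?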
diff --git a/usr/src/cmd/svc/shell/krb_include.sh b/usr/src/cmd/svc/shell/krb_include.sh
new file mode 100644
index 0000000000..4f16c23db3
--- /dev/null
+++ b/usr/src/cmd/svc/shell/krb_include.sh
@@ -0,0 +1,105 @@
+#!/bin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#ident "%Z%%M% %I% %E% SMI"
+
+BINDIR=/usr/lib/krb5
+KDC_CONF_DIR=/etc/krb5
+
+# default kadm5.acl file path
+KADM5ACL=/etc/krb5/kadm5.acl
+
+# krb5kdc's default db path
+PRINCIPALDB=/var/krb5/principal
+
+# syslog facility.level
+OKFACLEV=daemon.notice
+ERRFACLEV=daemon.crit
+
+if [ ! -d $BINDIR ]; then # /usr not mounted
+ exit 1
+fi
+
+# Return success (0) if an acl_file specified in kdc.conf (or, if none is
+# specified, the default one) is configured, i.e. "_default_realm_" has
+# been replaced with the local realm name; otherwise return failure (1).
+kadm5_acl_configed() {
+
+ #check acl_file relation values in the kdc.conf
+ ACLFILES=`awk -F= '/^[ ]*acl_file/ \
+ { printf("%s", $2) }' $KDC_CONF_DIR/kdc.conf`
+
+ if [ -z "$ACLFILES" ]; then
+ ACLFILES=$KADM5ACL
+ fi
+
+ for FILE in $ACLFILES; do
+ if [ -s $FILE ]; then
+ egrep -v '^[ ]*#' $FILE | \
+ egrep '_default_realm_' > /dev/null 2>&1
+ if [ $? -gt 0 ]; then
+ return 0
+ fi
+ fi
+ done
+
+ return 1
+}
+
+# Return success (0) if a db exists at a path specified in kdc.conf (or,
+# if none is specified, at the default path); otherwise return failure (1).
+db_exists() {
+
+ #check db names specified in the kdc.conf
+ DBNAMES=`awk -F= '/^[ ]*database_name/ \
+ { printf("%s", $2) }' $KDC_CONF_DIR/kdc.conf`
+
+ if [ -z "$DBNAMES" ]; then
+ #check default db path
+ DBNAMES=$PRINCIPALDB
+ fi
+
+ for DB in $DBNAMES; do
+ if [ -s $DB ]; then
+ return 0
+ elif [ -s ${DB}.db ]; then
+ #db suffix no longer needed
+ if mv ${DB}.db $DB; then
+ logger -p $OKFACLEV \
+ "$0; renamed ${DB}.db to $DB"
+ return 0
+ else
+ logger -p $ERRFACLEV \
+ "$0: rename of ${DB}.db to $DB FAILED"
+ return 1
+ fi
+ fi
+ done
+
+ return 1
+}
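+
+# Example use from a Kerberos service method script (a sketch; the exit code
+# comes from smf_include.sh):
+#
+#	kadm5_acl_configed || exit $SMF_EXIT_ERR_CONFIG
+#	db_exists || exit $SMF_EXIT_ERR_CONFIG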
diff --git a/usr/src/cmd/svc/shell/net_include.sh b/usr/src/cmd/svc/shell/net_include.sh
new file mode 100644
index 0000000000..d39e610345
--- /dev/null
+++ b/usr/src/cmd/svc/shell/net_include.sh
@@ -0,0 +1,583 @@
+#!/bin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+#
+# Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T.
+# All rights reserved.
+#
+
+#
+# shcat file
+# Simulates cat in sh so it doesn't need to be on the root filesystem.
+#
+shcat() {
+ while [ $# -ge 1 ]; do
+ while read i; do
+ echo "$i"
+ done < $1
+ shift
+ done
+}
+
+#
+# Inet_list, list of IPv4 interfaces.
+# Inet_plumbed, list of plumbed IPv4 interfaces.
+# Inet_failed, list of IPv4 interfaces that failed to plumb.
+# Inet6_list, list of IPv6 interfaces.
+# Inet6_plumbed, list of plumbed IPv6 interfaces.
+# Inet6_failed, list of IPv6 interfaces that failed to plumb.
+#
+unset inet_list inet_plumbed inet_failed \
+ inet6_list inet6_plumbed inet6_failed
+#
+# get_physical interface
+#
+# Return physical interface corresponding to the given logical
+# interface.
+#
+get_physical()
+{
+ ORIGIFS="$IFS"
+ IFS="${IFS}:"
+ set -- $1
+ IFS="$ORIGIFS"
+
+ echo $1
+}
+
+#
+# get_logical interface
+#
+# Return logical interface number. Zero will be returned
+# if there is no explicit logical device number.
+#
+get_logical()
+{
+ ORIGIFS="$IFS"
+ IFS="${IFS}:"
+ set -- $1
+ IFS="$ORIGIFS"
+
+ if [ -z "$2" ]; then
+ echo 0
+ else
+ echo $2
+ fi
+}
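+
+# Examples (hme0:2 is a hypothetical logical interface name):
+#
+#	get_physical hme0:2	-> hme0
+#	get_logical hme0:2	-> 2
+#	get_logical hme0	-> 0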
+
+#
+# if_comp if1 if2
+#
+# Compare interfaces. Do the physical interface names and logical interface
+# numbers match?
+#
+if_comp()
+{
+ [ "`get_physical $1`" = "`get_physical $2`" ] && \
+ [ `get_logical $1` -eq `get_logical $2` ]
+}
+
+#
+# physical_comp if1 if2
+#
+# Do the two devices share a physical interface?
+#
+physical_comp()
+{
+ [ "`get_physical $1`" = "`get_physical $2`" ]
+}
+
+#
+# in_list op item list
+#
+# Is "item" in the given list? Use "op" to do the test, applying it to
+# "item" and each member of the list in turn until it returns success.
+#
+in_list()
+{
+ op=$1
+ item=$2
+ shift 2
+
+ while [ $# -gt 0 ]; do
+ $op $item $1 && return 0
+ shift
+ done
+
+ return 1
+}
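+
+# Example: is hme0:1 among the plumbed IPv4 interfaces?
+#
+#	in_list if_comp hme0:1 $inet_plumbed && echo "already plumbed"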
+
+#
+# get_group_from_hostname interface type
+#
+# Return all group settings from hostname file for a given interface.
+#
+# Example:
+# get_group_from_hostname hme0 inet
+#
+get_group_from_hostname()
+{
+ case "$2" in
+ inet) file=/etc/hostname.$1
+ ;;
+ inet6) file=/etc/hostname6.$1
+ ;;
+ *)
+ return
+ ;;
+ esac
+
+ [ -r "$file" ] || return
+
+ #
+ # Read through the hostname file looking for group settings
+ # There may be several group settings in the file. It is up
+ # to the caller to pick the right one (i.e. the last one).
+ #
+ while read line; do
+ [ -z "$line" ] && continue
+ /sbin/ifparse -s "$2" $line
+ done < "$file" | while read one two three; do
+ [ "$one" = "group" ] && echo "$two"
+ done
+}
+
+#
+# get_group_for_type interface type list
+#
+# Look through the set of hostname files associated with the same physical
+# interface as "interface", and determine which group they would configure.
+# Only hostname files associated with the physical interface or logical
+# interface zero are allowed to set the group.
+#
+get_group_for_type()
+{
+ physical=`get_physical $1`
+
+ type=$2
+ group=""
+
+ #
+ # The last setting of the group is the one that counts, which is
+ # the reason for the second while loop.
+ #
+ shift 2
+ while [ $# -gt 0 ]; do
+ if if_comp "$physical" $1; then
+ get_group_from_hostname $1 $type
+ fi
+ shift
+ done | while :; do
+ read next || {
+ echo "$group"
+ break
+ }
+ group="$next"
+ done
+}
+
+#
+# get_group interface [ configured | failed ]
+#
+# If there is both an inet and inet6 version of an interface, the group
+# could be set in either set of hostname files.
+#
+# Inet6 is configured after inet, so if the group is set in both
+# sets of hostname files, the inet6 file wins.
+#
+# The "configured" argument should be used to get the group for
+# an interface that has been plumbed into the stack and configured. Use
+# the "failed" argument to get the group for an interface that failed to
+# plumb.
+#
+get_group()
+{
+ group=""
+
+ case "$2" in
+ configured)
+ group=`get_group_for_type $1 inet6 $inet6_plumbed`
+ ;;
+ failed)
+ group=`get_group_for_type $1 inet6 $inet6_list`
+ ;;
+ *)
+ return
+ ;;
+ esac
+
+ if [ -z "$group" ]; then
+ if [ "$2" = configured ]; then
+ group=`get_group_for_type $1 inet $inet_plumbed`
+ else
+ group=`get_group_for_type $1 inet $inet_list`
+ fi
+ fi
+
+ echo $group
+}
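+
+# Example: find the group a configured interface belongs to.
+#
+#	group=`get_group hme0 configured`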
+
+#
+# get_standby_from_hostname interface type
+#
+# Return any "standby" or "-standby" flags in the hostname file.
+#
+# Example:
+# get_standby_from_hostname hme0 inet6
+#
+#
+get_standby_from_hostname()
+{
+ case "$2" in
+ inet) file=/etc/hostname.$1
+ ;;
+ inet6) file=/etc/hostname6.$1
+ ;;
+ *)
+ return
+ ;;
+ esac
+
+ [ -r "$file" ] || return
+
+ #
+ # There may be several instances of the "standby" and
+ # "-standby" flags in the hostname file. It is up to
+ # the caller to pick the correct one.
+ #
+ while read line; do
+ [ -z "$line" ] && continue
+ /sbin/ifparse -s "$2" $line
+ done < "$file" | while read one two; do
+ [ "$one" = "standby" ] || [ "$one" = "-standby" ] \
+ && echo "$one"
+ done
+}
+
+#
+# get_standby_for_type interface type plumbed_list
+#
+# Look through the set of hostname files associated with the same physical
+# interface as "interface", and determine whether they would configure
+# the interface as a standby interface.
+#
+get_standby_for_type()
+{
+
+ physical=`get_physical $1`
+ type=$2
+
+ final=""
+
+ #
+ # The last "standby" or "-standby" flag is the one that counts,
+ # which is the reason for the second while loop.
+ #
+ shift 2
+ while [ $# -gt 0 ]; do
+ if [ "`get_physical $1`" = "$physical" ]; then
+ get_standby_from_hostname $1 $type
+ fi
+ shift
+ done | while :; do
+ read next || {
+ echo "$final"
+ break
+ }
+ final="$next"
+ done
+}
+
+#
+# is_standby interface
+#
+# Determine whether a configured interface is a standby interface.
+#
+# Both the inet and inet6 hostname file sets must be checked.
+# If "standby" or "-standby" is set in the inet6 hostname file set,
+# don't bother looking at the inet set.
+#
+is_standby()
+{
+ standby=`get_standby_for_type $1 inet6 $inet6_plumbed`
+
+ if [ -z "$standby" ]; then
+ standby=`get_standby_for_type $1 inet $inet_plumbed`
+ fi
+
+ # The return value is the value of the following test.
+ [ "$standby" = "standby" ]
+}
+
+#
+# get_alternate interface plumbed_list
+#
+# Look for a plumbed interface in the same group as "interface".
+# A standby interface is preferred over a non-standby interface.
+#
+# Example:
+# get_alternate hme0 $inet_plumbed
+#
+get_alternate()
+{
+ mygroup=`get_group $1 failed`
+ [ -z "$mygroup" ] && return
+
+ maybe=""
+
+ shift
+ while [ $# -gt 0 ]; do
+ group=`get_group $1 configured`
+ if [ "$group" = "$mygroup" ]; then
+ if is_standby $1; then
+ get_physical $1
+ return
+ else
+ [ -z "$maybe" ] && maybe=$1
+ fi
+ fi
+ shift
+ done
+
+ get_physical $maybe
+}
+
+#
+# doDHCPhostname interface
+# Pass to this function the name of an interface. It will return
+# true if one should enable the use of DHCP client-side host name
+# requests on the interface, and false otherwise.
+#
+doDHCPhostname()
+{
+ if [ -f /etc/dhcp.$1 ] && [ -f /etc/hostname.$1 ]; then
+ set -- `shcat /etc/hostname.$1`
+ [ $# -eq 2 -a "$1" = "inet" ]
+ return $?
+ fi
+ return 1
+}
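+
+# Example:
+#	doDHCPhostname hme0 && echo "requesting host name via DHCP on hme0"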
+
+#
+# inet_process_hostname processor [ args ]
+#
+# Process an inet hostname file. The contents of the file
+# are taken from standard input. Each line is passed
+# on the command line to the "processor" command.
+# Command line arguments can be passed to the processor.
+#
+# Examples:
+# inet_process_hostname /sbin/ifconfig hme0 < /etc/hostname.hme0
+#
+# inet_process_hostname /sbin/ifparse -f < /etc/hostname.hme0
+#
+# If there is only one line in a hostname file, we assume it contains
+# the old-style address, which results in the interface being brought up
+# and the netmask and broadcast address being set.
+#
+# If there are multiple lines we assume the file contains a list of
+# commands to the processor with neither the implied bringing up of the
+# interface nor the setting of the default netmask and broadcast address.
+#
+# Return non-zero if any command fails so that the caller may alert
+# users to errors in the configuration.
+#
+inet_process_hostname()
+{
+ if doDHCPhostname $2; then
+ :
+ else
+ #
+ # Redirecting input from a file results in a sub-shell being
+ # used, hence this outer loop surrounding the "multiple_lines"
+ # and "ifcmds" variables.
+ #
+ while :; do
+ multiple_lines=false
+ ifcmds=""
+ retval=0
+
+ while read line; do
+ if [ -n "$ifcmds" ]; then
+ #
+ # This handles the first N-1
+				# lines of an N-line hostname file.
+ #
+ $* $ifcmds || retval=$?
+ multiple_lines=true
+ fi
+ ifcmds="$line"
+ done
+
+ #
+ # If the hostname file is empty or consists of only
+ # blank lines, break out of the outer loop without
+ # configuring the newly plumbed interface.
+ #
+ [ -z "$ifcmds" ] && return $retval
+ if [ $multiple_lines = false ]; then
+ # The traditional single-line hostname file.
+ ifcmds="$ifcmds netmask + broadcast + up"
+ fi
+
+ #
+ # This handles either the single-line case or
+ # the last line of the N-line case.
+ #
+ $* $ifcmds || return $?
+ return $retval
+ done
+ fi
+}
+
+#
+# inet6_process_hostname processor [ args ]
+#
+# Process an inet6 hostname file. The contents of the file
+# are taken from standard input. Each line is passed
+# on the command line to the "processor" command.
+# Command line arguments can be passed to the processor.
+#
+# Examples:
+# inet6_process_hostname /sbin/ifconfig hme0 inet6 < /etc/hostname6.hme0
+#
+# inet6_process_hostname /sbin/ifparse -f inet6 < /etc/hostname6.hme0
+#
+# Return non-zero if any of the commands fail so that the caller may alert
+# users to errors in the configuration.
+#
+inet6_process_hostname()
+{
+ retval=0
+ while read ifcmds; do
+ if [ -n "$ifcmds" ]; then
+ $* $ifcmds || retval=$?
+ fi
+ done
+ return $retval
+}
+
+#
+# Process interfaces that failed to plumb. Find an alternative
+# interface to host the addresses. For IPv6, only static addresses
+# defined in hostname6 files are moved, autoconfigured addresses are
+# not moved.
+#
+# Example:
+# move_addresses inet6
+#
+move_addresses()
+{
+ type="$1"
+ eval "failed=\"\$${type}_failed\""
+ eval "plumbed=\"\$${type}_plumbed\""
+ eval "list=\"\$${type}_list\""
+ process_hostname="${type}_process_hostname"
+ processed=""
+
+ if [ "$type" = inet ]; then
+ echo "moving addresses from failed IPv4 interfaces:\c"
+ zaddr="0.0.0.0"
+ hostpfx="/etc/hostname"
+ else
+ echo "moving addresses from failed IPv6 interfaces:\c"
+ zaddr="::"
+ hostpfx="/etc/hostname6"
+ fi
+
+ set -- $failed
+ while [ $# -gt 0 ]; do
+ in_list if_comp $1 $processed && { shift; continue; }
+
+ alternate="`get_alternate $1 $plumbed`"
+ if [ -z "$alternate" ]; then
+ in_list physical_comp $1 $processed || {
+ echo " $1 (couldn't move, no" \
+ "alternative interface)\c"
+ processed="$processed $1"
+ }
+ shift
+ continue
+ fi
+ #
+ # The hostname files are processed twice. In the first
+ # pass, we are looking for all commands that apply
+ # to the non-additional interface address. These may be
+ # scattered over several files. We won't know
+ # whether the address represents a failover address
+ # or not until we've read all the files associated with the
+ # interface.
+
+ # In the first pass through the hostname files, all
+ # additional logical interface commands are removed.
+ # The remaining commands are concatenated together and
+ # passed to ifparse to determine whether the
+ # non-additional logical interface address is a failover
+	# address. If it is a failover address, the
+ # address may not be the first item on the line,
+ # so we can't just substitute "addif" for "set".
+ # We prepend an "addif $zaddr" command, and let
+ # the embedded "set" command set the address later.
+ #
+ /sbin/ifparse -f $type `
+ for item in $list; do
+ if_comp $1 $item && \
+ $process_hostname /sbin/ifparse \
+ $type < $hostpfx.$item
+ done | while read three four; do
+ [ "$three" != addif ] && \
+ echo "$three $four \c"
+ done` | while read one two; do
+ [ -z "$one" ] && continue
+ line="addif $zaddr $one $two"
+ /sbin/ifconfig $alternate $type \
+ -standby $line >/dev/null
+ done
+
+ #
+	# In the second pass, look for the "addif" commands
+ # that configure additional failover addresses. Addif
+ # commands are not valid in logical interface hostname
+ # files.
+ #
+ if [ "$1" = "`get_physical $1`" ]; then
+ $process_hostname /sbin/ifparse -f $type \
+ <$hostpfx.$1 | while read one two; do
+ [ "$one" = addif ] && \
+ /sbin/ifconfig $alternate $type -standby \
+ addif $two >/dev/null
+ done
+ fi
+
+ in_list physical_comp $1 $processed || {
+ echo " $1 (moved to $alternate)\c"
+ processed="$processed $1"
+ }
+ shift
+ done
+ echo "."
+}
diff --git a/usr/src/cmd/svc/shell/smf_include.sh b/usr/src/cmd/svc/shell/smf_include.sh
new file mode 100644
index 0000000000..a61bae426e
--- /dev/null
+++ b/usr/src/cmd/svc/shell/smf_include.sh
@@ -0,0 +1,159 @@
+#!/bin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+
+smf_present () {
+ [ -r /etc/svc/volatile/repository_door ] && \
+ [ ! -f /etc/svc/volatile/repository_door ]
+}
+
+smf_clear_env () {
+ unset \
+ SMF_FMRI \
+ SMF_METHOD \
+ SMF_RESTARTER
+}
+
+# smf_console
+#
+# Use as "echo message 2>&1 | smf_console". If SMF_MSGLOG_REDIRECT is
+# unset, the message will be displayed on the console.  SMF_MSGLOG_REDIRECT is
+# reserved for future use.
+#
+smf_console () {
+ /usr/bin/tee ${SMF_MSGLOG_REDIRECT:-/dev/msglog}
+}
+
+#
+# smf_netstrategy
+# -> (_INIT_NET_IF, _INIT_NET_STRATEGY)
+#
+# Sets _INIT_NET_IF to the name for the network-booted
+# interface if we are booting from the network. _INIT_NET_STRATEGY is
+# assigned the value of the current network configuration strategy.
+# Valid values for _INIT_NET_STRATEGY are "none", "dhcp", and "rarp".
+#
+# The network boot strategy for a zone is always "none".
+#
+smf_netstrategy () {
+ if [ "${_INIT_ZONENAME:=`/sbin/zonename`}" != "global" ]; then
+ _INIT_NET_STRATEGY="none" export _INIT_NET_STRATEGY
+ return 0
+ fi
+
+ set -- `/sbin/netstrategy`
+ if [ $? -eq 0 ]; then
+ [ "$1" = "nfs" -o "$1" = "cachefs" ] && \
+ _INIT_NET_IF="$2" export _INIT_NET_IF
+ _INIT_NET_STRATEGY="$3" export _INIT_NET_STRATEGY
+ else
+ return 1
+ fi
+}
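+
+# Example use from a method script (a sketch):
+#
+#	smf_netstrategy
+#	[ "$_INIT_NET_STRATEGY" = "none" ] || \
+#	    echo "network boot, strategy $_INIT_NET_STRATEGY"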
+
+#
+# smf_kill_contract CONTRACT SIGNAL WAIT TIMEOUT
+#
+# To be called from stop methods of non-transient services.
+# Sends SIGNAL to the service contract CONTRACT. If the
+# WAIT argument is non-zero, smf_kill_contract will wait
+# until the contract is empty before returning, or until
+# TIMEOUT expires.
+#
+# Example, send SIGTERM to contract 200:
+#
+# smf_kill_contract 200 TERM
+#
+# Since killing a contract with pkill(1) is not atomic,
+# smf_kill_contract will continue to send SIGNAL to CONTRACT
+# every five seconds until the contract is empty. This will catch
+# races between fork(2) and pkill(1).
+#
+# Returns 1 if the contract is invalid.
+# Returns 2 if WAIT is "1", TIMEOUT is > 0, and TIMEOUT expires.
+# Returns 0 on success.
+#
+smf_kill_contract() {
+
+ time_waited=0
+ time_to_wait=$4
+
+ [ -z "$time_to_wait" ] && time_to_wait=0
+
+ # Verify contract id is valid using pgrep
+ /usr/bin/pgrep -c $1 > /dev/null 2>&1
+ ret=$?
+ if [ $ret -gt 1 ] ; then
+ echo "Error, invalid contract \"$1\"" >&2
+ return 1
+ fi
+
+ # Return if contract is already empty.
+ [ $ret -eq 1 ] && return 0
+
+ # Kill contract.
+ /usr/bin/pkill -$2 -c $1
+ if [ $? -gt 1 ] ; then
+ echo "Error, could not kill contract \"$1\"" >&2
+ return 1
+ fi
+
+ # Return if WAIT is not set or is "0"
+ [ -z "$3" ] && return 0
+ [ "$3" -eq 0 ] && return 0
+
+	# If the contract is not yet empty, keep killing it to catch any
+	# child processes that were missed because they were forking.
+ /usr/bin/sleep 5
+ /usr/bin/pgrep -c $1 > /dev/null 2>&1
+ while [ $? -eq 0 ] ; do
+
+ time_waited=`/usr/bin/expr $time_waited + 5`
+
+ # Return if TIMEOUT was passed, and it has expired
+ [ "$time_to_wait" -gt 0 -a $time_waited -ge $time_to_wait ] && \
+ return 2
+ /usr/bin/pkill -$2 -c $1
+ /usr/bin/sleep 5
+ /usr/bin/pgrep -c $1 > /dev/null 2>&1
+ done
+
+ return 0
+}
+
+#
+# smf(5) method and monitor exit status definitions
+# SMF_EXIT_ERR_OTHER, although not defined, encompasses all non-zero
+# exit status values.
+#
+SMF_EXIT_OK=0
+SMF_EXIT_ERR_FATAL=95
+SMF_EXIT_ERR_CONFIG=96
+SMF_EXIT_MON_DEGRADE=97
+SMF_EXIT_MON_OFFLINE=98
+SMF_EXIT_ERR_NOSMF=99
+SMF_EXIT_ERR_PERM=100
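+
+# A minimal method-script sketch using the definitions above (the daemon
+# path below is a hypothetical example):
+#
+#	. /lib/svc/share/smf_include.sh
+#
+#	smf_present || exit $SMF_EXIT_ERR_NOSMF
+#	/usr/lib/mydaemon || exit $SMF_EXIT_ERR_FATAL
+#	exit $SMF_EXIT_OK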
diff --git a/usr/src/cmd/svc/startd/Makefile b/usr/src/cmd/svc/startd/Makefile
new file mode 100644
index 0000000000..3c9ad6e1e9
--- /dev/null
+++ b/usr/src/cmd/svc/startd/Makefile
@@ -0,0 +1,109 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+#
+
+PROG = svc.startd
+OBJS = \
+ contract.o \
+ dict.o \
+ env.o \
+ expand.o \
+ file.o \
+ fork.o \
+ graph.o \
+ libscf.o \
+ log.o \
+ method.o \
+ misc.o \
+ protocol.o \
+ specials.o \
+ startd.o \
+ restarter.o \
+ wait.o \
+ utmpx.o
+
+ALLOBJS = $(OBJS) proc.o
+
+SRCS = $(OBJS:%.o=%.c)
+
+POFILES = $(ALLOBJS:%.o=%.po)
+
+include ../../Makefile.cmd
+include ../Makefile.ctf
+
+ROOTCMDDIR= $(ROOT)/lib/svc/bin
+
+CFLAGS += -v
+
+$(OBJS) := CPPFLAGS += \
+ -I. -I../common -D_REENTRANT -D_FILE_OFFSET_BITS=64
+
+$(POFILE) := CPPFLAGS += -I. -I../common
+
+proc.o := CPPFLAGS += -I. -I../common -D_REENTRANT
+
+LDLIBS += \
+ -lcontract \
+ -lkstat \
+ -lnvpair \
+ -lrestart \
+ -lscf \
+ -lsysevent \
+ -lumem \
+ -luutil
+
+FILEMODE = 0555
+OWNER = root
+GROUP = sys
+
+# lint doesn't like the unused _umem_*_init()
+# And lint thinks uadmin() is undefined.
+lint_SRCS := LINTFLAGS += -U_FILE_OFFSET_BITS -xerroff=E_NAME_DEF_NOT_USED2 -u
+lint_SRCS := CPPFLAGS += \
+ -I. -I../common -D_REENTRANT -D_FILE_OFFSET_BITS=64
+
+.KEEP_STATE:
+
+.PARALLEL: $(ALLOBJS)
+
+all: $(PROG)
+
+$(PROG): $(ALLOBJS)
+ $(LINK.c) -o $@ $(ALLOBJS) $(LDLIBS) $(CTFMERGE_HOOK)
+ $(POST_PROCESS)
+
+$(POFILE): $(POFILES)
+ cat $(POFILES) > $(POFILE)
+
+install: all $(ROOTCMD)
+
+clean:
+ $(RM) $(ALLOBJS)
+
+lint: lint_SRCS
+
+include ../../Makefile.targ
diff --git a/usr/src/cmd/svc/startd/contract.c b/usr/src/cmd/svc/startd/contract.c
new file mode 100644
index 0000000000..d9eca7631a
--- /dev/null
+++ b/usr/src/cmd/svc/startd/contract.c
@@ -0,0 +1,373 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef _FILE_OFFSET_BITS
+#undef _FILE_OFFSET_BITS
+#endif /* _FILE_OFFSET_BITS */
+
+#include <sys/contract/process.h>
+#include <sys/ctfs.h>
+#include <sys/types.h>
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <libcontract.h>
+#include <libcontract_priv.h>
+#include <libuutil.h>
+#include <limits.h>
+#include <procfs.h>
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "startd.h"
+
+void
+contract_abandon(ctid_t ctid)
+{
+ int err;
+
+ assert(ctid != 0);
+
+ err = contract_abandon_id(ctid);
+
+ if (err)
+ log_framework(LOG_NOTICE,
+ "failed to abandon contract %ld: %s\n", ctid,
+ strerror(err));
+}
+
+int
+contract_kill(ctid_t ctid, int sig, const char *fmri)
+{
+ if (sigsend(P_CTID, ctid, sig) == -1 && errno != ESRCH) {
+ log_error(LOG_WARNING,
+ "%s: Could not signal all contract members: %s\n", fmri,
+ strerror(errno));
+ return (-1);
+ }
+
+ return (0);
+}
+
+ctid_t
+contract_init()
+{
+ int psfd, csfd;
+ ctid_t ctid, configd_ctid = -1;
+ psinfo_t psi;
+ ct_stathdl_t s;
+ ctid_t *ctids;
+ uint_t nctids;
+ uint_t n;
+ int err;
+
+ /*
+ * 2. Acquire any contracts we should have inherited. First, find the
+ * contract we belong to, then get its status.
+ */
+ if ((psfd = open("/proc/self/psinfo", O_RDONLY)) < 0) {
+ log_error(LOG_WARNING, "Can not open /proc/self/psinfo; unable "
+		    "to check for contracts to adopt: %s\n", strerror(errno));
+ return (-1);
+ }
+
+ if (read(psfd, &psi, sizeof (psinfo_t)) != sizeof (psinfo_t)) {
+ log_error(LOG_WARNING, "Can not read from /proc/self/psinfo; "
+ "unable to adopt contracts: %s\n",
+ strerror(errno));
+ startd_close(psfd);
+ return (-1);
+ }
+
+ ctid = psi.pr_contract;
+
+ startd_close(psfd);
+
+ if ((csfd = contract_open(ctid, "process", "status", O_RDONLY)) < 0) {
+ log_error(LOG_WARNING, "Can not open containing contract "
+ "status; unable to adopt contracts: %s\n", strerror(errno));
+ return (-1);
+ }
+
+ /* 3. Go about adopting our member list. */
+
+ err = ct_status_read(csfd, CTD_ALL, &s);
+ startd_close(csfd);
+ if (err) {
+ log_error(LOG_WARNING, "Can not read containing contract "
+ "status; unable to adopt: %s\n", strerror(err));
+ return (-1);
+ }
+
+ if (err = ct_pr_status_get_contracts(s, &ctids, &nctids)) {
+ log_error(LOG_WARNING, "Can not get my inherited contracts; "
+ "unable to adopt: %s\n", strerror(err));
+ ct_status_free(s);
+ return (-1);
+ }
+
+ if (nctids == 0) {
+ /*
+		 * We're booting: a svc.startd that managed to fork a
+		 * child will always have a svc.configd contract to adopt.
+ */
+ st->st_initial = 1;
+ ct_status_free(s);
+ return (-1);
+ }
+
+ /*
+ * We're restarting after an interruption of some kind.
+ */
+ log_framework(LOG_NOTICE, "restarting after interruption\n");
+ st->st_initial = 0;
+
+ /*
+ * 3'. Loop through the array, adopting them all where possible, and
+	 * noting which one contains svc.configd (via a cookie value of
+ * CONFIGD_COOKIE).
+ */
+ for (n = 0; n < nctids; n++) {
+ int ccfd;
+ ct_stathdl_t cs;
+
+ if ((ccfd = contract_open(ctids[n], "process", "ctl",
+ O_WRONLY)) < 0) {
+ log_error(LOG_WARNING, "Can not open contract %ld ctl "
+ "for adoption: %s\n", ctids[n], strerror(err));
+
+ continue;
+ }
+
+ if ((csfd = contract_open(ctids[n], "process", "status",
+ O_RDONLY)) < 0) {
+ log_error(LOG_WARNING, "Can not open contract %ld "
+ "status for cookie: %s\n", ctids[n], strerror(err));
+ startd_close(ccfd);
+
+ continue;
+ }
+
+ if (err = ct_ctl_adopt(ccfd)) {
+ log_error(LOG_WARNING, "Can not adopt contract %ld: "
+ "%s\n", ctids[n], strerror(err));
+ startd_close(ccfd);
+ startd_close(csfd);
+
+ continue;
+ }
+
+ startd_close(ccfd);
+
+ if (err = ct_status_read(csfd, CTD_COMMON, &cs)) {
+			log_error(LOG_WARNING, "Can not read contract %ld "
+ "status; unable to fetch cookie: %s\n", ctids[n],
+ strerror(err));
+
+ ct_status_free(cs);
+ startd_close(csfd);
+
+ continue;
+ }
+
+ if (ct_status_get_cookie(cs) == CONFIGD_COOKIE)
+ configd_ctid = ctids[n];
+
+ ct_status_free(cs);
+
+ startd_close(csfd);
+ }
+
+ ct_status_free(s);
+
+ return (configd_ctid);
+}
+
+int
+contract_is_empty(ctid_t ctid)
+{
+ int fd;
+ ct_stathdl_t ctstat;
+ pid_t *members;
+ uint_t num;
+ int ret;
+
+ fd = contract_open(ctid, "process", "status", O_RDONLY);
+ if (fd < 0)
+ return (1);
+
+ ret = ct_status_read(fd, CTD_ALL, &ctstat);
+ (void) close(fd);
+ if (ret != 0)
+ return (1);
+
+ ret = ct_pr_status_get_members(ctstat, &members, &num);
+ ct_status_free(ctstat);
+ if (ret != 0)
+ return (1);
+
+ if (num == 0)
+ return (1);
+ else
+ return (0);
+}
+
+typedef struct contract_bucket {
+ pthread_mutex_t cb_lock;
+ uu_list_t *cb_list;
+} contract_bucket_t;
+
+#define CI_HASH_SIZE 64
+#define	CI_HASH_MASK	(CI_HASH_SIZE - 1)
+
+/*
+ * contract_hash is a hash table of contract ids to restarter instance
+ * IDs. It can be used for quick lookups when processing contract events,
+ * because the restarter instance lock doesn't need to be held to access
+ * its entries.
+ */
+static contract_bucket_t contract_hash[CI_HASH_SIZE];
+
+static contract_bucket_t *
+contract_hold_bucket(ctid_t ctid)
+{
+ contract_bucket_t *bp;
+ int hash;
+
+ hash = ctid & CI_HASH_MASK;
+
+ bp = &contract_hash[hash];
+ MUTEX_LOCK(&bp->cb_lock);
+ return (bp);
+}
+
+static void
+contract_release_bucket(contract_bucket_t *bp)
+{
+ assert(PTHREAD_MUTEX_HELD(&bp->cb_lock));
+ MUTEX_UNLOCK(&bp->cb_lock);
+}
+
+static contract_entry_t *
+contract_lookup(contract_bucket_t *bp, ctid_t ctid)
+{
+ contract_entry_t *ce;
+
+ assert(PTHREAD_MUTEX_HELD(&bp->cb_lock));
+
+ if (bp->cb_list == NULL)
+ return (NULL);
+
+ for (ce = uu_list_first(bp->cb_list); ce != NULL;
+ ce = uu_list_next(bp->cb_list, ce)) {
+ if (ce->ce_ctid == ctid)
+ return (ce);
+ }
+
+ return (NULL);
+}
+
+static void
+contract_insert(contract_bucket_t *bp, contract_entry_t *ce)
+{
+ int r;
+
+ if (bp->cb_list == NULL)
+ bp->cb_list = startd_list_create(contract_list_pool, bp, 0);
+
+ uu_list_node_init(ce, &ce->ce_link, contract_list_pool);
+ r = uu_list_insert_before(bp->cb_list, NULL, ce);
+ assert(r == 0);
+}
+
+void
+contract_hash_init()
+{
+ int i;
+
+ for (i = 0; i < CI_HASH_SIZE; i++)
+ (void) pthread_mutex_init(&contract_hash[i].cb_lock,
+ &mutex_attrs);
+}
+
+void
+contract_hash_store(ctid_t ctid, int instid)
+{
+ contract_bucket_t *bp;
+ contract_entry_t *ce;
+
+ bp = contract_hold_bucket(ctid);
+ assert(contract_lookup(bp, ctid) == NULL);
+ ce = startd_alloc(sizeof (contract_entry_t));
+ ce->ce_ctid = ctid;
+ ce->ce_instid = instid;
+
+ contract_insert(bp, ce);
+
+ contract_release_bucket(bp);
+}
+
+void
+contract_hash_remove(ctid_t ctid)
+{
+ contract_bucket_t *bp;
+ contract_entry_t *ce;
+
+ bp = contract_hold_bucket(ctid);
+
+ ce = contract_lookup(bp, ctid);
+ if (ce != NULL) {
+ uu_list_remove(bp->cb_list, ce);
+ startd_free(ce, sizeof (contract_entry_t));
+ }
+
+ contract_release_bucket(bp);
+}
+
+/*
+ * int lookup_inst_by_contract()
+ * Lookup the instance id in the hash table by the contract id.
+ *   Look up the instance id in the hash table by the contract id.
+ * instance, so a check for continued existence is required.
+ */
+int
+lookup_inst_by_contract(ctid_t ctid)
+{
+ contract_bucket_t *bp;
+ contract_entry_t *ce;
+ int id = -1;
+
+ bp = contract_hold_bucket(ctid);
+ ce = contract_lookup(bp, ctid);
+ if (ce != NULL)
+ id = ce->ce_instid;
+ contract_release_bucket(bp);
+
+ return (id);
+}
diff --git a/usr/src/cmd/svc/startd/dict.c b/usr/src/cmd/svc/startd/dict.c
new file mode 100644
index 0000000000..2084d940ea
--- /dev/null
+++ b/usr/src/cmd/svc/startd/dict.c
@@ -0,0 +1,145 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * dict.c - simple dictionary facility
+ *
+ * We maintain a dictionary, sorted by name to facilitate rapid id lookup by
+ * name. It is used by both the restarter and graph code.
+ *
+ * Right now, the dictionary is implemented as a sorted linked list which maps
+ * instance names to graph vertex ids. It should eventually be converted to a
+ * better representation for quick lookups.
+ *
+ * For now, FMRIs are never deleted from the dictionary. A service deletion
+ * and insertion of the same instance FMRI will result in reuse of the same
+ * id. To implement dictionary entry delete, the locking strategy for graph
+ * vertex dependency linking must be checked for accuracy, as assumptions may
+ * exist that FMRI to id mapping is retained even after an instance is deleted.
+ */
+
+#include <sys/time.h>
+
+#include <assert.h>
+#include <libuutil.h>
+#include <string.h>
+
+#include "startd.h"
+
+static uu_list_pool_t *dict_pool;
+dictionary_t *dictionary;
+
+static u_longlong_t dictionary_lookups; /* number of lookups */
+static u_longlong_t dictionary_ns_total; /* nanoseconds spent */
+
+/*ARGSUSED*/
+static int
+dict_compare(const void *lc_arg, const void *rc_arg, void *private)
+{
+ const char *lc_name = ((const dict_entry_t *)lc_arg)->de_name;
+ const char *rc_name = ((const dict_entry_t *)rc_arg)->de_name;
+
+ return (strcmp(lc_name, rc_name));
+}
+
+int
+dict_lookup_byname(const char *name)
+{
+ int id;
+ dict_entry_t *entry, tmp;
+ hrtime_t now = gethrtime();
+
+ tmp.de_name = name;
+
+ (void) pthread_mutex_lock(&dictionary->dict_lock);
+ if ((entry = uu_list_find(dictionary->dict_list, &tmp, NULL,
+ NULL)) == NULL)
+ id = -1;
+ else
+ id = entry->de_id;
+
+ (void) pthread_mutex_unlock(&dictionary->dict_lock);
+
+ dictionary_lookups++;
+ dictionary_ns_total += gethrtime() - now;
+
+ return (id);
+}
+
+/*
+ * int dict_insert(char *)
+ * Returns the ID for name.
+ */
+int
+dict_insert(const char *name)
+{
+ dict_entry_t *entry, tmp;
+ uu_list_index_t idx;
+
+ assert(name != NULL);
+
+ tmp.de_name = name;
+
+ (void) pthread_mutex_lock(&dictionary->dict_lock);
+
+ if ((entry = uu_list_find(dictionary->dict_list, &tmp, NULL,
+ &idx)) != NULL) {
+ (void) pthread_mutex_unlock(&dictionary->dict_lock);
+ return (entry->de_id);
+ }
+
+ entry = startd_alloc(sizeof (dict_entry_t));
+
+ entry->de_id = dictionary->dict_new_id++;
+ entry->de_name = startd_alloc(strlen(name) + 1);
+ (void) strcpy((char *)entry->de_name, name);
+
+ uu_list_node_init(entry, &entry->de_link, dict_pool);
+
+ uu_list_insert(dictionary->dict_list, entry, idx);
+ (void) pthread_mutex_unlock(&dictionary->dict_lock);
+
+ return (entry->de_id);
+}
+
+void
+dict_init()
+{
+ dictionary = startd_zalloc(sizeof (dictionary_t));
+
+ (void) pthread_mutex_init(&dictionary->dict_lock, NULL);
+
+ dict_pool = startd_list_pool_create("dict", sizeof (dict_entry_t),
+ offsetof(dict_entry_t, de_link), dict_compare, UU_LIST_POOL_DEBUG);
+ assert(dict_pool != NULL);
+
+ dictionary->dict_new_id = 0;
+ dictionary->dict_list = startd_list_create(dict_pool, dictionary,
+ UU_LIST_SORTED);
+ assert(dictionary->dict_list != NULL);
+}
diff --git a/usr/src/cmd/svc/startd/env.c b/usr/src/cmd/svc/startd/env.c
new file mode 100644
index 0000000000..a6612f209a
--- /dev/null
+++ b/usr/src/cmd/svc/startd/env.c
@@ -0,0 +1,324 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <assert.h>
+#include <libuutil.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include "startd.h"
+
+/*
+ * This file contains functions for setting the environment for
+ * processes started by svc.startd.
+ */
+
+#define MAXCMDL 512
+#define DEF_PATH "PATH=/usr/sbin:/usr/bin"
+
+static char *ENVFILE = "/etc/default/init"; /* Default env. */
+
+static char **glob_envp; /* Array of environment strings */
+static int glob_env_n; /* Number of environment slots allocated. */
+
+/*
+ * init_env()
+ *   A clone of the environment setup done by init(1M), to provide as much
+ *   compatibility for startup scripts as possible.
+ */
+void
+init_env()
+{
+ int i;
+ char line[MAXCMDL];
+ FILE *fp;
+ int inquotes, length, wslength;
+ char *tokp, *cp1, *cp2;
+ char **newp;
+
+ glob_env_n = 16;
+ glob_envp = startd_alloc(sizeof (*glob_envp) * glob_env_n);
+
+ glob_envp[0] = startd_alloc((unsigned)(strlen(DEF_PATH)+2));
+ (void) strcpy(glob_envp[0], DEF_PATH);
+
+ if ((fp = fopen(ENVFILE, "r")) == NULL) {
+ uu_warn("Cannot open %s. Environment not initialized.\n",
+ ENVFILE);
+
+ glob_envp[1] = NULL;
+ return;
+ }
+
+ i = 1;
+
+ while (fgets(line, MAXCMDL - 1, fp) != NULL) {
+ /*
+ * Toss newline
+ */
+ length = strlen(line);
+ if (line[length - 1] == '\n')
+ line[length - 1] = '\0';
+
+ /*
+ * Ignore blank or comment lines.
+ */
+ if (line[0] == '#' || line[0] == '\0' ||
+ (wslength = strspn(line, " \t\n")) == strlen(line) ||
+ strchr(line, '#') == line + wslength)
+ continue;
+
+ /*
+ * First make a pass through the line and change
+ * any non-quoted semi-colons to blanks so they
+ * will be treated as token separators below.
+ */
+ inquotes = 0;
+ for (cp1 = line; *cp1 != '\0'; cp1++) {
+ if (*cp1 == '"') {
+ if (inquotes == 0)
+ inquotes = 1;
+ else
+ inquotes = 0;
+ } else if (*cp1 == ';') {
+ if (inquotes == 0)
+ *cp1 = ' ';
+ }
+ }
+
+ /*
+ * Tokens within the line are separated by blanks
+ * and tabs. For each token in the line which
+ * contains a '=' we strip out any quotes and then
+ * stick the token in the environment array.
+ */
+ if ((tokp = strtok(line, " \t")) == NULL)
+ continue;
+
+ do {
+ cp1 = strchr(tokp, '=');
+ if (cp1 == NULL || cp1 == tokp)
+ continue;
+ length = strlen(tokp);
+ while ((cp1 = strpbrk(tokp, "\"\'")) != NULL) {
+ for (cp2 = cp1; cp2 < &tokp[length]; cp2++)
+ *cp2 = *(cp2 + 1);
+ length--;
+ }
+
+			/*
+			 * init(1M) already started us with the CMASK umask,
+			 * which was handled in startd.c, and SMF_ variables
+			 * are reserved, so skip both.
+			 */
+ if (strncmp(tokp, "CMASK=", 6) == 0 ||
+ strncmp(tokp, "SMF_", 4) == 0)
+ continue;
+
+ glob_envp[i] = startd_alloc((unsigned)(length + 1));
+ (void) strcpy(glob_envp[i], tokp);
+
+ /*
+ * Double the environment size whenever it is
+ * full.
+ */
+ if (++i == glob_env_n) {
+ glob_env_n *= 2;
+ newp = startd_alloc(sizeof (*glob_envp) *
+ glob_env_n);
+ (void) memcpy(newp, glob_envp,
+ sizeof (*glob_envp) * glob_env_n / 2);
+ startd_free(glob_envp,
+ sizeof (*glob_envp) * glob_env_n / 2);
+ glob_envp = newp;
+ }
+ } while ((tokp = strtok(NULL, " \t")) != NULL);
+ }
+
+ startd_fclose(fp);
+
+ /* Append a null pointer to the environment array to mark its end. */
+ glob_envp[i] = NULL;
+}
+
+static int
+valid_env_var(const char *var, const restarter_inst_t *inst, const char *path)
+{
+ char *cp = strchr(var, '=');
+
+ if (cp == NULL || cp == var) {
+ if (inst != NULL)
+ log_instance(inst, B_FALSE, "Invalid environment "
+ "variable \"%s\".", var);
+ return (0);
+ } else if (strncmp(var, "SMF_", 4) == 0) {
+ if (inst != NULL)
+ log_instance(inst, B_FALSE, "Invalid environment "
+ "variable \"%s\"; \"SMF_\" prefix is reserved.",
+ var);
+ return (0);
+ } else if (path != NULL && strncmp(var, "PATH=", 5) == 0) {
+ return (0);
+ }
+
+ return (1);
+}
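+
+/*
+ * Illustrative sketch (editorial addition, not part of the original commit):
+ * how valid_env_var() classifies candidate entries.  With a non-NULL path
+ * argument, user-supplied PATH entries are rejected so that the method's
+ * PATH takes precedence.  Compiled out by default.
+ */
+#ifdef VALID_ENV_VAR_EXAMPLE
+static void
+valid_env_var_example(void)
+{
+	assert(valid_env_var("FOO=bar", NULL, NULL) == 1);	/* well-formed */
+	assert(valid_env_var("FOO", NULL, NULL) == 0);		/* no '=' */
+	assert(valid_env_var("SMF_FMRI=x", NULL, NULL) == 0);	/* reserved */
+	assert(valid_env_var("PATH=/bin", NULL, "PATH=/sbin") == 0);
+}
+#endif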
+
+static char **
+find_dup(const char *var, char **env, const restarter_inst_t *inst)
+{
+ char **p;
+ char *tmp;
+
+ for (p = env; *p != NULL; p++) {
+		tmp = strchr(*p, '=');
+		assert(tmp != NULL);
+ tmp++;
+ if (strncmp(*p, var, tmp - *p) == 0)
+ break;
+ }
+
+ if (*p == NULL)
+ return (NULL);
+
+ if (inst != NULL)
+ log_instance(inst, B_FALSE, "Ignoring duplicate "
+ "environment variable \"%s\".", *p);
+ return (p);
+}
+
+/*
+ * Create an environment which is appropriate for spawning an SMF
+ * aware process. The new environment will consist of the values from
+ * the global environment as modified by the supplied (local) environment.
+ *
+ * In order to preserve the correctness of the new environment,
+ * various checks are performed on the local environment (init_env()
+ * is relied upon to ensure the global environment is correct):
+ *
+ * - All SMF_ entries are ignored, since all SMF_ entries should be
+ *   provided by this function.
+ * - Duplicate entries are eliminated.
+ * - Malformed entries are eliminated.
+ *
+ * Detected errors are logged as warnings to the appropriate instance
+ * logfile, since a single bad entry should not be enough to prevent
+ * a functional SMF environment from being created. The faulty entry
+ * is then ignored when building the environment.
+ *
+ * If env is NULL, the returned environment contains only the default
+ * (global plus SMF_) values.
+ *
+ * If "path" is non-NULL, it silently overrides any previous PATH
+ * environment variable.
+ *
+ * NB: The returned env and strings are allocated using startd_alloc().
+ */
+char **
+set_smf_env(char **env, size_t env_sz, const char *path,
+ const restarter_inst_t *inst, const char *method)
+{
+ char **nenv;
+ char **p, **np;
+ size_t nenv_size;
+ size_t sz;
+
+	/*
+	 * Maximum size: all of glob_env, all of env, three SMF_ variables,
+	 * PATH, and the terminating NULL.
+	 */
+ nenv_size = glob_env_n + env_sz + 3 + 1 + 1;
+
+ nenv = startd_zalloc(sizeof (char *) * nenv_size);
+
+ np = nenv;
+
+ if (path != NULL) {
+ sz = strlen(path) + 1;
+ *np = startd_alloc(sz);
+ (void) strlcpy(*np, path, sz);
+ np++;
+ }
+
+
+ if (inst) {
+ sz = sizeof ("SMF_FMRI=") + strlen(inst->ri_i.i_fmri);
+ *np = startd_alloc(sz);
+ (void) strlcpy(*np, "SMF_FMRI=", sz);
+ (void) strlcat(*np, inst->ri_i.i_fmri, sz);
+ np++;
+ }
+
+ if (method) {
+ sz = sizeof ("SMF_METHOD=") + strlen(method);
+ *np = startd_alloc(sz);
+ (void) strlcpy(*np, "SMF_METHOD=", sz);
+ (void) strlcat(*np, method, sz);
+ np++;
+ }
+
+ sz = sizeof ("SMF_RESTARTER=") + strlen(SCF_SERVICE_STARTD);
+ *np = startd_alloc(sz);
+ (void) strlcpy(*np, "SMF_RESTARTER=", sz);
+ (void) strlcat(*np, SCF_SERVICE_STARTD, sz);
+ np++;
+
+ for (p = glob_envp; *p != NULL; p++) {
+ if (valid_env_var(*p, inst, path)) {
+ sz = strlen(*p) + 1;
+ *np = startd_alloc(sz);
+ (void) strlcpy(*np, *p, sz);
+ np++;
+ }
+ }
+
+ if (env) {
+ for (p = env; *p != NULL; p++) {
+ char **dup_pos;
+
+ if (!valid_env_var(*p, inst, path))
+ continue;
+
+ if ((dup_pos = find_dup(*p, nenv, inst)) != NULL) {
+ startd_free(*dup_pos, strlen(*dup_pos) + 1);
+ sz = strlen(*p) + 1;
+ *dup_pos = startd_alloc(sz);
+ (void) strlcpy(*dup_pos, *p, sz);
+ } else {
+ sz = strlen(*p) + 1;
+ *np = startd_alloc(sz);
+ (void) strlcpy(*np, *p, sz);
+ np++;
+ }
+ }
+ }
+ *np = NULL;
+
+ return (nenv);
+}
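+
+/*
+ * Illustrative sketch (editorial addition, not part of the original commit):
+ * assembling a method environment with set_smf_env() and handing it to
+ * exec(2).  The local environment, method name, and command below are
+ * hypothetical; inst is assumed to be a configured instance.  Compiled out
+ * by default.
+ */
+#ifdef SET_SMF_ENV_EXAMPLE
+static void
+set_smf_env_example(const restarter_inst_t *inst)
+{
+	char *local_env[] = { "EXAMPLE_OPT=1", NULL };	/* hypothetical */
+	char **env;
+
+	/* One local entry; PATH comes from the global environment. */
+	env = set_smf_env(local_env, 1, NULL, inst, "start");
+
+	(void) execle("/bin/true", "true", (char *)NULL, env);
+	/* Only reached if exec(2) fails; error handling omitted here. */
+}
+#endif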
diff --git a/usr/src/cmd/svc/startd/expand.c b/usr/src/cmd/svc/startd/expand.c
new file mode 100644
index 0000000000..a919d42755
--- /dev/null
+++ b/usr/src/cmd/svc/startd/expand.c
@@ -0,0 +1,646 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <assert.h>
+#include <libscf.h>
+#include <libscf_priv.h>
+#include <libuutil.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <errno.h>
+
+#include "startd.h"
+
+/*
+ * Return an allocated copy of str, with the Bourne shell's metacharacters
+ * escaped by '\'. Returns NULL on (allocation) failure.
+ */
+static char *
+quote_for_shell(const char *str)
+{
+ const char *sp;
+ char *dst, *dp;
+ size_t dst_len;
+
+ const char * const metachars = ";&()|^<>\n \t\\\"\'`";
+
+ dst_len = 0;
+ for (sp = str; *sp != '\0'; ++sp) {
+ ++dst_len;
+
+ if (strchr(metachars, *sp) != NULL)
+ ++dst_len;
+ }
+
+ if (sp - str == dst_len)
+ return (safe_strdup(str));
+
+ dst = malloc(dst_len + 1);
+ if (dst == NULL)
+ return (NULL);
+
+ for (dp = dst, sp = str; *sp != '\0'; ++dp, ++sp) {
+ if (strchr(metachars, *sp) != NULL)
+ *dp++ = '\\';
+
+ *dp = *sp;
+ }
+ *dp = '\0';
+
+ return (dst);
+}
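+
+/*
+ * Illustrative sketch (editorial addition, not part of the original commit):
+ * quote_for_shell() prefixes each shell metacharacter with a backslash, so
+ * "a b;c" becomes "a\ b\;c", while strings with no metacharacters are
+ * returned as plain copies.  Compiled out by default.
+ */
+#ifdef QUOTE_FOR_SHELL_EXAMPLE
+static void
+quote_for_shell_example(void)
+{
+	char *q = quote_for_shell("a b;c");	/* hypothetical input */
+
+	if (q != NULL) {
+		assert(strcmp(q, "a\\ b\\;c") == 0);
+		free(q);
+	}
+}
+#endif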
+
+/*
+ * Return an allocated string representation of the value v.
+ * Return NULL on error.
+ */
+static char *
+val_to_str(scf_value_t *v)
+{
+ char *buf;
+ ssize_t buflen, ret;
+
+ buflen = scf_value_get_as_string(v, NULL, 0);
+ assert(buflen >= 0);
+
+ buf = malloc(buflen + 1);
+ if (buf == NULL)
+ return (NULL);
+
+ ret = scf_value_get_as_string(v, buf, buflen + 1);
+ assert(ret == buflen);
+
+ return (buf);
+}
+
+/*
+ * Look up a property in the given snapshot, falling back to the editing
+ * snapshot if the property group or property is not found there. Returns
+ * 0 on success and scf_error() on failure.
+ */
+static int
+get_prop(const scf_instance_t *inst, scf_snapshot_t *snap,
+ const char *pgn, const char *pn, scf_propertygroup_t *pg,
+ scf_property_t *prop)
+{
+ int ret;
+
+ ret = scf_instance_get_pg_composed(inst, snap, pgn, pg);
+ if (ret != 0) {
+ snap = NULL;
+ if (scf_error() == SCF_ERROR_NOT_FOUND)
+ ret = scf_instance_get_pg_composed(inst, snap, pgn, pg);
+ if (ret != 0)
+ return (scf_error());
+ }
+
+ if (scf_pg_get_property(pg, pn, prop) == 0)
+ return (0);
+
+ if (snap == NULL)
+ return (scf_error());
+
+ ret = scf_instance_get_pg_composed(inst, NULL, pgn, pg);
+ if (ret != 0)
+ return (scf_error());
+
+ if (scf_pg_get_property(pg, pn, prop) == 0)
+ return (0);
+
+ return (scf_error());
+}
+
+/*
+ * Get an allocated string representation of the values of the property
+ * specified by inst & prop_spec and store it in *retstr. prop_spec may
+ * be a full property FMRI, or a "property-group/property" pair relative
+ * to inst, or the name of a property in inst's "application" property
+ * group. In the latter two cases, the property is looked up in inst's
+ * snap snapshot. In the first case, the target instance's running
+ * snapshot will be used. In any case, if the property or its group
+ * can't be found, the "editing" snapshot will be checked. Multiple
+ * values will be separated by sep.
+ *
+ * On error, non-zero is returned, and *retstr is set to an error
+ * string.
+ *
+ * *retstr should always be freed by the caller.
+ */
+static int
+get_prop_val_str(const scf_instance_t *inst, scf_snapshot_t *snap,
+ const char *prop_spec, char sep, char **retstr)
+{
+ scf_handle_t *h = scf_instance_handle(inst);
+ scf_scope_t *scope = NULL;
+ scf_service_t *svc = NULL;
+ scf_instance_t *tmpinst = NULL;
+ scf_snapshot_t *tmpsnap = NULL;
+ scf_propertygroup_t *pg = NULL;
+ scf_iter_t *iter = NULL;
+ scf_property_t *prop = NULL;
+ scf_value_t *val = NULL;
+ char *spec;
+ char *str, *qstr;
+ size_t strl;
+ int ret;
+
+ spec = safe_strdup(prop_spec);
+
+ if (strstr(spec, ":properties") != NULL) {
+ const char *scn, *sn, *in, *pgn, *pn;
+
+ if (scf_parse_svc_fmri(spec, &scn, &sn, &in, &pgn,
+ &pn) != 0)
+ goto scferr;
+
+ if (sn == NULL || pgn == NULL || pn == NULL) {
+ free(spec);
+ *retstr = safe_strdup("parse error");
+ return (-1);
+ }
+
+ if ((scope = scf_scope_create(h)) == NULL ||
+ (svc = scf_service_create(h)) == NULL ||
+ (pg = scf_pg_create(h)) == NULL ||
+ (prop = scf_property_create(h)) == NULL)
+ goto scferr;
+
+ if (scf_handle_get_scope(h, scn == NULL ? SCF_SCOPE_LOCAL : scn,
+ scope) != 0)
+ goto properr;
+
+ if (scf_scope_get_service(scope, sn, svc) != 0)
+ goto properr;
+
+ if (in == NULL) {
+ if (scf_service_get_pg(svc, pgn, pg) != 0)
+ goto properr;
+ if (scf_pg_get_property(pg, pn, prop) != 0)
+ goto properr;
+ } else {
+ if ((tmpinst = scf_instance_create(h)) == NULL)
+ goto scferr;
+ if (scf_service_get_instance(svc, in, tmpinst) != 0)
+ goto properr;
+
+ tmpsnap = libscf_get_running_snapshot(tmpinst);
+ if (tmpsnap == NULL)
+ goto scferr;
+
+ if (get_prop(tmpinst, tmpsnap, pgn, pn, pg, prop) != 0)
+ goto properr;
+ }
+ } else {
+ char *slash, *pgn, *pn;
+
+ /* Try prop or pg/prop in inst. */
+
+ prop = scf_property_create(h);
+ if (prop == NULL)
+ goto scferr;
+
+ pg = scf_pg_create(h);
+ if (pg == NULL)
+ goto scferr;
+
+ slash = strchr(spec, '/');
+ if (slash == NULL) {
+ pgn = "application";
+ pn = spec;
+ } else {
+ *slash = '\0';
+ pgn = spec;
+ pn = slash + 1;
+ }
+
+ if (get_prop(inst, snap, pgn, pn, pg, prop) != 0)
+ goto properr;
+ }
+
+ iter = scf_iter_create(h);
+ if (iter == NULL)
+ goto scferr;
+
+
+ if (scf_iter_property_values(iter, prop) == -1)
+ goto scferr;
+
+ val = scf_value_create(h);
+ if (val == NULL)
+ goto scferr;
+
+ ret = scf_iter_next_value(iter, val);
+ if (ret == 0) {
+ *retstr = safe_strdup("");
+ goto out;
+ } else if (ret == -1) {
+ goto scferr;
+ }
+
+ str = val_to_str(val);
+ if (str == NULL)
+ goto err;
+
+ qstr = quote_for_shell(str);
+ free(str);
+ str = qstr;
+ if (qstr == NULL)
+ goto err;
+
+ strl = strlen(str);
+
+ while ((ret = scf_iter_next_value(iter, val)) == 1) {
+ char *nv, *qnv;
+ size_t nl;
+ void *p;
+
+ /* Append sep & val_to_str(val) to str. */
+
+ nv = val_to_str(val);
+ if (nv == NULL) {
+ free(str);
+ goto err;
+ }
+ qnv = quote_for_shell(nv);
+ free(nv);
+ if (qnv == NULL) {
+ free(str);
+ goto err;
+ }
+ nv = qnv;
+
+ nl = strl + 1 + strlen(nv);
+ p = realloc(str, nl + 1);
+ if (p == NULL) {
+ free(str);
+ free(nv);
+ goto err;
+ }
+ str = p;
+
+ str[strl] = sep;
+ (void) strcpy(&str[strl + 1], nv);
+
+ free(nv);
+
+ strl = nl;
+ }
+ if (ret == -1) {
+ free(str);
+ goto scferr;
+ }
+
+ *retstr = str;
+
+out:
+ scf_value_destroy(val);
+ scf_iter_destroy(iter);
+ scf_property_destroy(prop);
+ scf_pg_destroy(pg);
+ scf_instance_destroy(tmpinst);
+ scf_snapshot_destroy(tmpsnap);
+ scf_service_destroy(svc);
+ scf_scope_destroy(scope);
+ free(spec);
+ return (ret);
+scferr:
+ *retstr = safe_strdup(scf_strerror(scf_error()));
+ ret = -1;
+ goto out;
+properr:
+ ret = -1;
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ goto scferr;
+ *retstr = uu_msprintf("property \"%s\" not found", prop_spec);
+ if (*retstr != NULL)
+ goto out;
+err:
+ *retstr = safe_strdup(strerror(errno));
+ ret = -1;
+ goto out;
+}
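+
+/*
+ * Illustrative sketch (editorial addition, not part of the original commit):
+ * the three prop_spec forms accepted by get_prop_val_str().  The property
+ * and service names are hypothetical; on failure *val holds an error string
+ * rather than a value list, so it is freed either way.  Compiled out by
+ * default.
+ */
+#ifdef PROP_SPEC_EXAMPLE
+static void
+prop_spec_example(scf_instance_t *inst, scf_snapshot_t *snap)
+{
+	char *val;
+
+	/* Bare name: looked up in inst's "application" property group. */
+	(void) get_prop_val_str(inst, snap, "domain", ' ', &val);
+	free(val);
+
+	/* "pg/prop" relative to inst, with values joined by ','. */
+	(void) get_prop_val_str(inst, snap, "config/interfaces", ',', &val);
+	free(val);
+
+	/* Full property FMRI: uses the named instance's running snapshot. */
+	(void) get_prop_val_str(inst, snap,
+	    "svc:/network/example:default/:properties/config/domain", ' ',
+	    &val);
+	free(val);
+}
+#endif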
+
+/*
+ * Interpret the token at the beginning of str (which should be just
+ * after the escape character), and set *retstr to its expansion. Returns
+ * the number of characters swallowed. On error, this returns -1, and
+ * *retstr is set to an error string.
+ *
+ * *retstr should always be freed by the caller.
+ */
+static int
+expand_token(const char *str, scf_instance_t *inst, scf_snapshot_t *snap,
+ int method_type, char **retstr)
+{
+ scf_handle_t *h = scf_instance_handle(inst);
+
+ switch (str[0]) {
+ case 's': { /* service */
+ scf_service_t *svc;
+ char *sname;
+ ssize_t sname_len, szret;
+ int ret;
+
+ svc = scf_service_create(h);
+ if (svc == NULL) {
+ *retstr = safe_strdup(strerror(scf_error()));
+ return (-1);
+ }
+
+ ret = scf_instance_get_parent(inst, svc);
+ if (ret != 0) {
+ int err = scf_error();
+ scf_service_destroy(svc);
+ *retstr = safe_strdup(scf_strerror(err));
+ return (-1);
+ }
+
+ sname_len = scf_service_get_name(svc, NULL, 0);
+ if (sname_len < 0) {
+ int err = scf_error();
+ scf_service_destroy(svc);
+ *retstr = safe_strdup(scf_strerror(err));
+ return (-1);
+ }
+
+		sname = malloc(sname_len + 1);
+		if (sname == NULL) {
+			scf_service_destroy(svc);
+			*retstr = safe_strdup(strerror(errno));
+			return (-1);
+		}
+
+ szret = scf_service_get_name(svc, sname, sname_len + 1);
+
+ if (szret < 0) {
+ int err = scf_error();
+ free(sname);
+ scf_service_destroy(svc);
+ *retstr = safe_strdup(scf_strerror(err));
+ return (-1);
+ }
+
+ scf_service_destroy(svc);
+ *retstr = sname;
+ return (1);
+ }
+
+ case 'i': { /* instance */
+ char *iname;
+ ssize_t iname_len, szret;
+
+ iname_len = scf_instance_get_name(inst, NULL, 0);
+ if (iname_len < 0) {
+ *retstr = safe_strdup(scf_strerror(scf_error()));
+ return (-1);
+ }
+
+ iname = malloc(iname_len + 1);
+ if (iname == NULL) {
+ *retstr = safe_strdup(strerror(errno));
+ return (-1);
+ }
+
+ szret = scf_instance_get_name(inst, iname, iname_len + 1);
+ if (szret < 0) {
+ free(iname);
+ *retstr = safe_strdup(scf_strerror(scf_error()));
+ return (-1);
+ }
+
+ *retstr = iname;
+ return (1);
+ }
+
+ case 'f': { /* fmri */
+ char *fmri;
+ ssize_t fmri_len;
+ int ret;
+
+ fmri_len = scf_limit(SCF_LIMIT_MAX_FMRI_LENGTH);
+ if (fmri_len == -1) {
+ *retstr = safe_strdup(scf_strerror(scf_error()));
+ return (-1);
+ }
+
+ fmri = malloc(fmri_len + 1);
+ if (fmri == NULL) {
+ *retstr = safe_strdup(strerror(errno));
+ return (-1);
+ }
+
+ ret = scf_instance_to_fmri(inst, fmri, fmri_len + 1);
+ if (ret == -1) {
+ free(fmri);
+ *retstr = safe_strdup(scf_strerror(scf_error()));
+ return (-1);
+ }
+
+ *retstr = fmri;
+ return (1);
+ }
+
+ case 'm': { /* method */
+		const char *mname = NULL;
+		switch (method_type) {
+		case METHOD_START:
+			mname = "start";
+			break;
+		case METHOD_STOP:
+			mname = "stop";
+			break;
+		case METHOD_REFRESH:
+			mname = "refresh";
+			break;
+		default:
+			assert(0);
+			return (-1);
+		}
+		*retstr = safe_strdup(mname);
+ return (1);
+ }
+
+ case 'r': /* restarter */
+ *retstr = safe_strdup("svc.startd");
+ return (1);
+
+ case '{': {
+ /* prop_spec[,:]? See get_prop_val_str() for prop_spec. */
+
+ char *close;
+ size_t len;
+ char *buf;
+ char sep;
+ int ret;
+ int skip;
+
+ close = strchr(str + 1, '}');
+ if (close == NULL) {
+ *retstr = safe_strdup("parse error");
+ return (-1);
+ }
+
+ len = close - (str + 1); /* between the {}'s */
+ skip = len + 2; /* including the {}'s */
+
+ /*
+ * If the last character is , or :, use it as the separator.
+ * Otherwise default to space.
+ */
+ sep = *(close - 1);
+ if (sep == ',' || sep == ':')
+ --len;
+ else
+ sep = ' ';
+
+ buf = malloc(len + 1);
+ if (buf == NULL) {
+ *retstr = safe_strdup(strerror(errno));
+ return (-1);
+ }
+
+ (void) strlcpy(buf, str + 1, len + 1);
+
+ ret = get_prop_val_str(inst, snap, buf, sep, retstr);
+
+ if (ret != 0) {
+ free(buf);
+ return (-1);
+ }
+
+ free(buf);
+ return (skip);
+ }
+
+ default:
+ *retstr = safe_strdup("unknown method token");
+ return (-1);
+ }
+}
+
+/*
+ * Expand method tokens in the given string, and place the result in
+ * *retstr. Tokens begin with the ESCAPE character. Returns 0 on
+ * success. On failure, returns -1 and an error string is placed in
+ * *retstr. Caller should free *retstr.
+ */
+#define ESCAPE '%'
+
+int
+expand_method_tokens(const char *str, scf_instance_t *inst,
+ scf_snapshot_t *snap, int method_type, char **retstr)
+{
+ char *expanded;
+ size_t exp_sz;
+ const char *sp;
+ int ei;
+
+ if (scf_instance_handle(inst) == NULL) {
+ *retstr = safe_strdup(scf_strerror(scf_error()));
+ return (-1);
+ }
+
+ exp_sz = strlen(str) + 1;
+ expanded = malloc(exp_sz);
+ if (expanded == NULL) {
+ *retstr = safe_strdup(strerror(errno));
+ return (-1);
+ }
+
+ /*
+ * Copy str into expanded, expanding %-tokens & realloc()ing as we go.
+ */
+
+ sp = str;
+ ei = 0;
+
+ for (;;) {
+ char *esc;
+ size_t len;
+
+ esc = strchr(sp, ESCAPE);
+ if (esc == NULL) {
+ (void) strcpy(expanded + ei, sp);
+ *retstr = expanded;
+ return (0);
+ }
+
+ /* Copy up to the escape character. */
+ len = esc - sp;
+
+ (void) strncpy(expanded + ei, sp, len);
+
+ sp += len;
+ ei += len;
+
+ if (sp[1] == '\0') {
+ expanded[ei] = '\0';
+ *retstr = expanded;
+ return (0);
+ }
+
+ if (sp[1] == ESCAPE) {
+ expanded[ei] = ESCAPE;
+
+ sp += 2;
+ ei++;
+ } else {
+ char *tokval;
+ int skip;
+ char *p;
+
+ skip = expand_token(sp + 1, inst, snap,
+ method_type, &tokval);
+ if (skip == -1) {
+ free(expanded);
+ *retstr = tokval;
+ return (-1);
+ }
+
+ len = strlen(tokval);
+ exp_sz += len;
+ p = realloc(expanded, exp_sz);
+ if (p == NULL) {
+ *retstr = safe_strdup(strerror(errno));
+ free(expanded);
+ free(tokval);
+ return (-1);
+ }
+ expanded = p;
+
+ (void) strcpy(expanded + ei, tokval);
+ sp += 1 + skip;
+ ei += len;
+
+ free(tokval);
+ }
+ }
+
+ /* NOTREACHED */
+}
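+
+/*
+ * Illustrative sketch (editorial addition, not part of the original commit):
+ * expanding the tokens in a hypothetical exec string.  With an instance such
+ * as svc:/network/example:default and a start method, the string below would
+ * expand to something like "/lib/svc/method/example start".  Compiled out by
+ * default.
+ */
+#ifdef EXPAND_EXAMPLE
+static int
+expand_example(scf_instance_t *inst, scf_snapshot_t *snap, char **cmdp)
+{
+	const char *exec = "/lib/svc/method/example %m";	/* hypothetical */
+
+	if (expand_method_tokens(exec, inst, snap, METHOD_START, cmdp) != 0) {
+		/* *cmdp holds an error string; the caller frees it. */
+		return (-1);
+	}
+
+	/* *cmdp now holds the expanded command line; the caller frees it. */
+	return (0);
+}
+#endif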
diff --git a/usr/src/cmd/svc/startd/file.c b/usr/src/cmd/svc/startd/file.c
new file mode 100644
index 0000000000..0a70932dc0
--- /dev/null
+++ b/usr/src/cmd/svc/startd/file.c
@@ -0,0 +1,86 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * file.c - file dependency vertex code
+ *
+ * In principle, file dependencies should be retested on mount/unmount
+ * events, and dependency error flow used to determine whether a lost file
+ * affects the dependent service. If mount/unmount events are not available,
+ * the kstat facility (which registers or deregisters a statistic at
+ * mount/umount) could be used as an indirect filesystem event detector.
+ *
+ * In practice, file dependencies are checked only for existence at start
+ * time.
+ */
+
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <strings.h>
+
+#include <startd.h>
+
+int
+file_ready(graph_vertex_t *v)
+{
+ char *fn;
+ struct stat sbuf;
+ int r;
+ char *file_fmri = v->gv_name;
+
+ /*
+ * Advance through file: FMRI until we have an absolute file path.
+ */
+ if (strncmp(file_fmri, "file:///", sizeof ("file:///") - 1) == 0) {
+ fn = file_fmri + sizeof ("file://") - 1;
+ } else if (strncmp(file_fmri, "file://localhost/",
+ sizeof ("file://localhost/") - 1) == 0) {
+ fn = file_fmri + sizeof ("file://localhost") - 1;
+ } else if (strncmp(file_fmri, "file://", sizeof ("file://") - 1)
+ == 0) {
+ fn = file_fmri + sizeof ("file://") - 1;
+
+		/*
+		 * Skip the authority component; the path begins at the
+		 * next '/'.
+		 */
+		if ((fn = strchr(fn, '/')) == NULL)
+			return (0);
+	} else {
+		/* Not a file: FMRI; treat the dependency as unsatisfied. */
+		return (0);
+	}
+
+ /*
+ * If stat(2) succeeds for that path, then the dependency is satisfied.
+ */
+ do {
+ r = stat(fn, &sbuf);
+ } while (r == -1 && errno == EINTR);
+
+ return (r == -1 ? 0 : 1);
+}
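+
+/*
+ * Illustrative sketch (editorial addition, not part of the original commit):
+ * a file dependency vertex whose name is a file: FMRI, checked for existence
+ * with file_ready().  The FMRI below is hypothetical; the file:///path,
+ * file://localhost/path, and file://host/path forms all resolve to the same
+ * absolute path.  Compiled out by default.
+ */
+#ifdef FILE_READY_EXAMPLE
+static int
+file_ready_example(void)
+{
+	graph_vertex_t v;
+
+	v.gv_name = "file://localhost/etc/example.conf";	/* hypothetical */
+	return (file_ready(&v));	/* 1 if /etc/example.conf exists */
+}
+#endif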
diff --git a/usr/src/cmd/svc/startd/fork.c b/usr/src/cmd/svc/startd/fork.c
new file mode 100644
index 0000000000..8968095008
--- /dev/null
+++ b/usr/src/cmd/svc/startd/fork.c
@@ -0,0 +1,647 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * fork.c - safe forking for svc.startd
+ *
+ * fork_configd() and fork_sulogin() are related, special cases that handle the
+ * spawning of specific client processes for svc.startd.
+ */
+
+#include <sys/contract/process.h>
+#include <sys/corectl.h>
+#include <sys/ctfs.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/uio.h>
+#include <sys/wait.h>
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <libcontract.h>
+#include <libcontract_priv.h>
+#include <limits.h>
+#include <port.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "configd_exit.h"
+#include "protocol.h"
+#include "startd.h"
+
+pid_t
+startd_fork1(int *forkerr)
+{
+ pid_t p;
+
+ /*
+ * prefork stack
+ */
+ wait_prefork();
+
+ p = fork1();
+
+ if (p == -1 && forkerr != NULL)
+ *forkerr = errno;
+
+ /*
+ * postfork stack
+ */
+ wait_postfork(p);
+
+ return (p);
+}
+
+/*
+ * int fork_mount(char *path, char *opts)
+ *   Run mount(1M) with the given mount point and options. (mount(1M) has much
+ *   hidden knowledge; it would be less correct to reimplement that logic here
+ *   merely to save a fork(2)/exec(2) invocation.) Returns 0 on success, -1 on
+ *   failure.
+ */
+int
+fork_mount(char *path, char *opts)
+{
+ pid_t pid;
+ uint_t tries = 0;
+ int status;
+
+ for (pid = fork1(); pid == -1; pid = fork1()) {
+ if (++tries > MAX_MOUNT_RETRIES)
+ return (-1);
+
+ (void) sleep(tries);
+ }
+
+ if (pid != 0) {
+ (void) waitpid(pid, &status, 0);
+
+ /*
+		 * If our mount(1M) invocation terminated abnormally or exited
+		 * with a non-zero status, the mount most likely failed.
+ */
+ if (!WIFEXITED(status) ||
+ WEXITSTATUS(status) != 0)
+ return (-1);
+
+ return (0);
+ }
+
+ (void) execl("/sbin/mount", "mount", "-o", opts, path, NULL);
+
+ return (-1);
+}
+
+/*
+ * pid_t fork_common(...)
+ * Common routine used by fork_sulogin and fork_configd to fork a
+ * process in a contract with the provided terms. Invokes
+ *   fork_sulogin (with its immediate argument set) on errors.
+ */
+static pid_t
+fork_common(const char *name, int retries, ctid_t *ctidp,
+ uint_t inf, uint_t crit, uint_t fatal, uint_t param, uint64_t cookie)
+{
+ uint_t tries = 0;
+ int ctfd, err;
+ pid_t pid;
+
+ /*
+ * Establish process contract terms.
+ */
+ if ((ctfd = open64(CTFS_ROOT "/process/template", O_RDWR)) == -1) {
+ fork_sulogin(B_TRUE, "Could not open process contract template "
+ "for %s: %s\n", name, strerror(errno));
+ /* NOTREACHED */
+ }
+
+ err = ct_tmpl_set_critical(ctfd, crit);
+ err |= ct_pr_tmpl_set_fatal(ctfd, fatal);
+ err |= ct_tmpl_set_informative(ctfd, inf);
+ err |= ct_pr_tmpl_set_param(ctfd, param);
+ err |= ct_tmpl_set_cookie(ctfd, cookie);
+ if (err) {
+ (void) close(ctfd);
+ fork_sulogin(B_TRUE, "Could not set %s process contract "
+ "terms\n", name);
+ /* NOTREACHED */
+ }
+
+ if (err = ct_tmpl_activate(ctfd)) {
+ (void) close(ctfd);
+ fork_sulogin(B_TRUE, "Could not activate %s process contract "
+ "template: %s\n", name, strerror(err));
+ /* NOTREACHED */
+ }
+
+ /*
+ * Attempt to fork "retries" times.
+ */
+ for (pid = fork1(); pid == -1; pid = fork1()) {
+ if (++tries > retries) {
+ /*
+ * When we exit the sulogin session, init(1M)
+ * will restart svc.startd(1M).
+ */
+ err = errno;
+ (void) ct_tmpl_clear(ctfd);
+ (void) close(ctfd);
+ fork_sulogin(B_TRUE, "Could not fork to start %s: %s\n",
+ name, strerror(err));
+ /* NOTREACHED */
+ }
+ (void) sleep(tries);
+ }
+
+ /*
+ * Clean up, return pid and ctid.
+ */
+ if (pid != 0 && (errno = contract_latest(ctidp)) != 0)
+ uu_die("Could not get new contract id for %s\n", name);
+ (void) ct_tmpl_clear(ctfd);
+ (void) close(ctfd);
+
+ return (pid);
+}
+
+/*
+ * void fork_sulogin(boolean_t, const char *, ...)
+ * When we are invoked with the -s flag from boot (or run into an unfixable
+ * situation), we run a private copy of sulogin. When the sulogin session
+ * is ended, we continue. This is the last fallback action for system
+ * maintenance.
+ *
+ * If immediate is true, fork_sulogin() executes sulogin(1M) directly, without
+ * forking.
+ *
+ * Because fork_sulogin() is needed potentially before we daemonize, we leave
+ * it outside the wait_register() framework.
+ */
+/*PRINTFLIKE2*/
+void
+fork_sulogin(boolean_t immediate, const char *format, ...)
+{
+ va_list args;
+ int i, fd_console;
+
+ (void) printf("Requesting System Maintenance Mode\n");
+
+ if (!booting_to_single_user)
+ (void) printf("(See /lib/svc/share/README for more "
+ "information.)\n");
+
+ va_start(args, format);
+ (void) vprintf(format, args);
+ va_end(args);
+
+ if (!immediate) {
+ ctid_t ctid;
+ pid_t pid;
+
+ pid = fork_common("sulogin", MAX_SULOGIN_RETRIES, &ctid,
+ CT_PR_EV_HWERR, 0, CT_PR_EV_HWERR, CT_PR_PGRPONLY,
+ SULOGIN_COOKIE);
+
+ if (pid != 0) {
+ (void) waitpid(pid, NULL, 0);
+ contract_abandon(ctid);
+ return;
+ }
+ /* close all inherited fds */
+ closefrom(0);
+ } else {
+ (void) printf("Directly executing sulogin.\n");
+ /*
+ * Can't call closefrom() in this MT section
+ * so safely close a minimum set of fds.
+ */
+ for (i = 0; i < 3; i++)
+ (void) close(i);
+ }
+
+ (void) setpgrp();
+
+ /* open the console for sulogin */
+ if ((fd_console = open("/dev/console", O_RDWR)) >= 0) {
+ if (fd_console != STDIN_FILENO)
+ while (dup2(fd_console, STDIN_FILENO) < 0 &&
+ errno == EINTR)
+ ;
+ if (fd_console != STDOUT_FILENO)
+ while (dup2(fd_console, STDOUT_FILENO) < 0 &&
+ errno == EINTR)
+ ;
+ if (fd_console != STDERR_FILENO)
+ while (dup2(fd_console, STDERR_FILENO) < 0 &&
+ errno == EINTR)
+ ;
+ if (fd_console > 2)
+ (void) close(fd_console);
+ }
+
+ (void) execl("/sbin/sulogin", "sulogin", NULL);
+
+ uu_warn("Could not exec() sulogin");
+
+ exit(1);
+}
+
+#define CONFIGD_PATH "/lib/svc/bin/svc.configd"
+
+/*
+ * void fork_configd(int exitstatus)
+ * We are interested in exit events (since the parent's exiting means configd
+ * is ready to run and since the child's exiting indicates an error case) and
+ * in empty events. This means we have a unique template for initiating
+ * configd.
+ */
+/*ARGSUSED*/
+void
+fork_configd(int exitstatus)
+{
+ pid_t pid;
+ ctid_t ctid = -1;
+ int err;
+ char path[PATH_MAX];
+
+retry:
+ log_framework(LOG_DEBUG, "fork_configd trying to start svc.configd\n");
+
+ /*
+ * If we're retrying, we will have an old contract lying around
+ * from the failure. Since we're going to be creating a new
+ * contract shortly, we abandon the old one now.
+ */
+ if (ctid != -1)
+ contract_abandon(ctid);
+ ctid = -1;
+
+ pid = fork_common("svc.configd", MAX_CONFIGD_RETRIES, &ctid,
+ 0, CT_PR_EV_EXIT, 0, CT_PR_INHERIT | CT_PR_REGENT, CONFIGD_COOKIE);
+
+ if (pid != 0) {
+ int exitstatus;
+
+ st->st_configd_pid = pid;
+
+ if (waitpid(pid, &exitstatus, 0) == -1) {
+ fork_sulogin(B_FALSE, "waitpid on svc.configd "
+ "failed: %s\n", strerror(errno));
+ } else if (WIFEXITED(exitstatus)) {
+ char *errstr;
+
+ /*
+ * Examine exitstatus. This will eventually get more
+ * complicated, as we will want to teach startd how to
+ * invoke configd with alternate repositories, etc.
+ *
+ * Note that exec(2) failure results in an exit status
+ * of 1, resulting in the default clause below.
+ */
+
+ /*
+ * Assign readable strings to cases we don't handle, or
+ * have error outcomes that cannot be eliminated.
+ */
+ switch (WEXITSTATUS(exitstatus)) {
+ case CONFIGD_EXIT_BAD_ARGS:
+ errstr = "bad arguments";
+ break;
+
+ case CONFIGD_EXIT_DATABASE_BAD:
+ errstr = "database corrupt";
+ break;
+
+ case CONFIGD_EXIT_DATABASE_LOCKED:
+ errstr = "database locked";
+ break;
+ case CONFIGD_EXIT_INIT_FAILED:
+ errstr = "initialization failure";
+ break;
+ case CONFIGD_EXIT_DOOR_INIT_FAILED:
+ errstr = "door initialization failure";
+ break;
+ case CONFIGD_EXIT_DATABASE_INIT_FAILED:
+ errstr = "database initialization failure";
+ break;
+ case CONFIGD_EXIT_NO_THREADS:
+ errstr = "no threads available";
+ break;
+ case CONFIGD_EXIT_LOST_MAIN_DOOR:
+ errstr = "lost door server attachment";
+ break;
+ case 1:
+ errstr = "execution failure";
+ break;
+ default:
+ errstr = "unknown error";
+ break;
+ }
+
+ /*
+ * Remedial actions for various configd failures.
+ */
+ switch (WEXITSTATUS(exitstatus)) {
+ case CONFIGD_EXIT_OKAY:
+ break;
+
+ case CONFIGD_EXIT_DATABASE_LOCKED:
+ /* attempt remount of / read-write */
+ if (fs_is_read_only("/", NULL) == 1) {
+ if (fs_remount("/") == -1)
+ fork_sulogin(B_FALSE,
+ "remount of root "
+ "filesystem failed\n");
+
+ goto retry;
+ }
+ break;
+
+ default:
+ fork_sulogin(B_FALSE, "svc.configd exited "
+ "with status %d (%s)\n",
+ WEXITSTATUS(exitstatus), errstr);
+ goto retry;
+ }
+ } else if (WIFSIGNALED(exitstatus)) {
+ char signame[SIG2STR_MAX];
+
+ if (sig2str(WTERMSIG(exitstatus), signame))
+ (void) snprintf(signame, SIG2STR_MAX,
+ "signum %d", WTERMSIG(exitstatus));
+
+ fork_sulogin(B_FALSE, "svc.configd signalled:"
+ " %s\n", signame);
+
+ goto retry;
+ } else {
+ fork_sulogin(B_FALSE, "svc.configd non-exit "
+ "condition: 0x%x\n", exitstatus);
+
+ goto retry;
+ }
+
+ /*
+ * Announce that we have a valid svc.configd status.
+ */
+ MUTEX_LOCK(&st->st_configd_live_lock);
+ st->st_configd_lives = 1;
+ err = pthread_cond_broadcast(&st->st_configd_live_cv);
+ assert(err == 0);
+ MUTEX_UNLOCK(&st->st_configd_live_lock);
+
+ log_framework(LOG_DEBUG, "fork_configd broadcasts configd is "
+ "live\n");
+ return;
+ }
+
+ /*
+ * Set our per-process core file path to leave core files in
+ * /etc/svc/volatile directory, named after the PID to aid in debugging.
+ */
+ (void) snprintf(path, sizeof (path),
+ "/etc/svc/volatile/core.configd.%%p");
+
+ (void) core_set_process_path(path, strlen(path) + 1, getpid());
+
+ log_framework(LOG_DEBUG, "executing svc.configd\n");
+
+ (void) execl(CONFIGD_PATH, CONFIGD_PATH, NULL);
+
+ /*
+ * Status code is used above to identify configd exec failure.
+ */
+ exit(1);
+}
+
+void *
+fork_configd_thread(void *vctid)
+{
+ int fd, err;
+ ctid_t configd_ctid = (ctid_t)vctid;
+
+ if (configd_ctid == -1) {
+ log_framework(LOG_DEBUG,
+ "fork_configd_thread starting svc.configd\n");
+ fork_configd(0);
+ } else {
+ /*
+		 * configd_ctid is known: broadcast that configd is live and
+		 * continue. (Should we verify that the contract is in an
+		 * appropriate state, e.g. that it holds at least one process?)
+ */
+ log_framework(LOG_DEBUG,
+ "fork_configd_thread accepting svc.configd with CTID %ld\n",
+ configd_ctid);
+ MUTEX_LOCK(&st->st_configd_live_lock);
+ st->st_configd_lives = 1;
+ (void) pthread_cond_broadcast(&st->st_configd_live_cv);
+ MUTEX_UNLOCK(&st->st_configd_live_lock);
+ }
+
+ fd = open64(CTFS_ROOT "/process/pbundle", O_RDONLY);
+ if (fd == -1)
+ uu_die("process bundle open failed");
+
+ /*
+ * Make sure we get all events (including those generated by configd
+ * before this thread was started).
+ */
+ err = ct_event_reset(fd);
+ assert(err == 0);
+
+ for (;;) {
+ int efd, sfd;
+ ct_evthdl_t ev;
+ uint32_t type;
+ ctevid_t evid;
+ ct_stathdl_t status;
+ ctid_t ctid;
+ uint64_t cookie;
+ pid_t pid;
+
+ if (err = ct_event_read_critical(fd, &ev)) {
+ assert(err != EINVAL && err != EAGAIN);
+ log_error(LOG_WARNING,
+ "Error reading next contract event: %s",
+ strerror(err));
+ continue;
+ }
+
+ evid = ct_event_get_evid(ev);
+ ctid = ct_event_get_ctid(ev);
+ type = ct_event_get_type(ev);
+
+ /* Fetch cookie. */
+ sfd = contract_open(ctid, "process", "status", O_RDONLY);
+ if (sfd < 0) {
+ ct_event_free(ev);
+ continue;
+ }
+
+ if (err = ct_status_read(sfd, CTD_COMMON, &status)) {
+ log_framework(LOG_WARNING, "Could not get status for "
+ "contract %ld: %s\n", ctid, strerror(err));
+
+ ct_event_free(ev);
+ startd_close(sfd);
+ continue;
+ }
+
+ cookie = ct_status_get_cookie(status);
+
+ ct_status_free(status);
+
+ startd_close(sfd);
+
+ /*
+ * Don't process events from contracts we aren't interested in.
+ */
+ if (cookie != CONFIGD_COOKIE) {
+ ct_event_free(ev);
+ continue;
+ }
+
+ if (type == CT_PR_EV_EXIT) {
+ int exitstatus;
+
+ (void) ct_pr_event_get_pid(ev, &pid);
+ (void) ct_pr_event_get_exitstatus(ev,
+ &exitstatus);
+
+ if (st->st_configd_pid != pid) {
+ /*
+ * This is the child exiting, so we
+ * abandon the contract and restart
+ * configd.
+ */
+ contract_abandon(ctid);
+ fork_configd(exitstatus);
+ }
+ }
+
+ efd = contract_open(ctid, "process", "ctl", O_WRONLY);
+ if (efd != -1) {
+ (void) ct_ctl_ack(efd, evid);
+ startd_close(efd);
+ }
+
+ ct_event_free(ev);
+
+ }
+
+ /*NOTREACHED*/
+ return (NULL);
+}
+
+void
+fork_rc_script(char rl, const char *arg, boolean_t wait)
+{
+ pid_t pid;
+ int tmpl, err, stat;
+ char path[20] = "/sbin/rc.", log[20] = "rc..log", timebuf[20];
+ time_t now;
+ struct tm ltime;
+ size_t sz;
+ char *pathenv;
+ char **nenv;
+
+ path[8] = rl;
+
+ tmpl = open64(CTFS_ROOT "/process/template", O_RDWR);
+ if (tmpl >= 0) {
+ err = ct_tmpl_set_critical(tmpl, 0);
+ assert(err == 0);
+
+ err = ct_tmpl_set_informative(tmpl, 0);
+ assert(err == 0);
+
+ err = ct_pr_tmpl_set_fatal(tmpl, 0);
+ assert(err == 0);
+
+ err = ct_tmpl_activate(tmpl);
+ assert(err == 0);
+
+ err = close(tmpl);
+ assert(err == 0);
+ } else {
+ uu_warn("Could not create contract template for %s.\n", path);
+ }
+
+ pid = startd_fork1(NULL);
+ if (pid < 0) {
+ return;
+ } else if (pid != 0) {
+ /* parent */
+ if (wait) {
+			do {
+				err = waitpid(pid, &stat, 0);
+			} while (err == -1 && errno == EINTR);
+
+ if (!WIFEXITED(stat)) {
+ log_framework(LOG_INFO,
+ "%s terminated with waitpid() status %d.\n",
+ path, stat);
+ } else if (WEXITSTATUS(stat) != 0) {
+ log_framework(LOG_INFO,
+ "%s failed with status %d.\n", path,
+ WEXITSTATUS(stat));
+ }
+ }
+
+ return;
+ }
+
+ /* child */
+
+ log[2] = rl;
+
+ setlog(log);
+
+ now = time(NULL);
+ sz = strftime(timebuf, sizeof (timebuf), "%b %e %T",
+ localtime_r(&now, &ltime));
+ assert(sz != 0);
+
+ (void) fprintf(stderr, "%s Executing %s %s\n", timebuf, path, arg);
+
+ if (rl == 'S')
+ pathenv = "PATH=/sbin:/usr/sbin:/usr/bin";
+ else
+ pathenv = "PATH=/usr/sbin:/usr/bin";
+
+ nenv = set_smf_env(NULL, 0, pathenv, NULL, NULL);
+
+	(void) execle(path, path, arg, (char *)NULL, nenv);
+
+ perror("exec");
+ exit(0);
+}
diff --git a/usr/src/cmd/svc/startd/graph.c b/usr/src/cmd/svc/startd/graph.c
new file mode 100644
index 0000000000..49c265e1aa
--- /dev/null
+++ b/usr/src/cmd/svc/startd/graph.c
@@ -0,0 +1,5925 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * graph.c - master restarter graph engine
+ *
+ * The graph engine keeps a dependency graph of all service instances on the
+ * system, as recorded in the repository. It decides when services should
+ * be brought up or down based on service states and dependencies and sends
+ * commands to restarters to effect any changes. It also executes
+ * administrator commands sent by svcadm via the repository.
+ *
+ * The graph is stored in uu_list_t *dgraph and its vertices are
+ * graph_vertex_t's, each of which has a name and an integer id unique to
+ * its name (see dict.c). A vertex's type attribute designates the type
+ * of object it represents: GVT_INST for service instances, GVT_SVC for
+ * service objects (since service instances may depend on another service,
+ * rather than service instance), GVT_FILE for files (which services may
+ * service objects (since a service instance may depend on another service
+ * rather than on a particular instance of it), GVT_FILE for files (which
+ * services may depend on), and GVT_GROUP for dependencies on multiple
+ * objects. GVT_GROUP
+ * event-propagation characteristics.
+ *
+ * The initial graph is built by libscf_populate_graph() invoking
+ * dgraph_add_instance() for each instance in the repository. The function
+ * adds a GVT_SVC vertex for the service if one does not already exist, adds
+ * a GVT_INST vertex named by the FMRI of the instance, and sets up the edges.
+ * The resulting web of vertices & edges associated with an instance's vertex
+ * includes
+ *
+ * - an edge from the GVT_SVC vertex for the instance's service
+ *
+ *   - an edge to the GVT_INST vertex of the instance's restarter, if its
+ * restarter is not svc.startd
+ *
+ * - edges from other GVT_INST vertices if the instance is a restarter
+ *
+ * - for each dependency property group in the instance's "running"
+ * snapshot, an edge to a GVT_GROUP vertex named by the FMRI of the
+ * instance and the name of the property group
+ *
+ * - for each value of the "entities" property in each dependency property
+ * group, an edge from the corresponding GVT_GROUP vertex to a
+ * GVT_INST, GVT_SVC, or GVT_FILE vertex
+ *
+ * - edges from GVT_GROUP vertices for each dependent instance
+ *
+ * After the edges are set up the vertex's GV_CONFIGURED flag is set. If
+ * there are problems, or if a service is mentioned in a dependency but does
+ * not exist in the repository, the GV_CONFIGURED flag will be clear.
+ *
+ * The graph and all of its vertices are protected by the dgraph_lock mutex.
+ * See restarter.c for more information.
+ *
+ * The properties of an instance fall into two classes: immediate and
+ * snapshotted. Immediate properties should have an immediate effect when
+ * changed. Snapshotted properties should be read from a snapshot, so they
+ * only change when the snapshot changes. The immediate properties used by
+ * the graph engine are general/enabled, general/restarter, and the properties
+ * in the restarter_actions property group. Since they are immediate, they
+ * are not read out of a snapshot. The snapshotted properties used by the
+ * graph engine are those in the property groups with type "dependency" and
+ * are read out of the "running" snapshot. The "running" snapshot is created
+ * by the graph engine as soon as possible, and it is updated, along with
+ * in-core copies of the data (dependency information for the graph engine) on
+ * receipt of the refresh command from svcadm. In addition, the graph engine
+ * updates the "start" snapshot from the "running" snapshot whenever a service
+ * comes online.
+ */
+
+#include <sys/uadmin.h>
+#include <sys/wait.h>
+
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <libscf.h>
+#include <libscf_priv.h>
+#include <libuutil.h>
+#include <locale.h>
+#include <poll.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <sys/statvfs.h>
+#include <sys/uadmin.h>
+#include <zone.h>
+
+#include "startd.h"
+#include "protocol.h"
+
+
+#define MILESTONE_NONE ((graph_vertex_t *)1)
+
+#define CONSOLE_LOGIN_FMRI "svc:/system/console-login:default"
+#define FS_MINIMAL_FMRI "svc:/system/filesystem/minimal:default"
+
+static uu_list_pool_t *graph_edge_pool, *graph_vertex_pool;
+static uu_list_t *dgraph;
+static pthread_mutex_t dgraph_lock;
+
+/*
+ * milestone indicates the current subgraph. When NULL, it is the entire
+ * graph. When MILESTONE_NONE, it is the empty graph. Otherwise, it is all
+ * services on which the target vertex depends.
+ */
+static graph_vertex_t *milestone = NULL;
+static boolean_t initial_milestone_set = B_FALSE;
+static pthread_cond_t initial_milestone_cv = PTHREAD_COND_INITIALIZER;
+
+/* protected by dgraph_lock */
+static boolean_t sulogin_thread_running = B_FALSE;
+static boolean_t sulogin_running = B_FALSE;
+static boolean_t console_login_ready = B_FALSE;
+
+/* Number of services to come down to complete milestone transition. */
+static uint_t non_subgraph_svcs;
+
+/*
+ * These variables indicate what should be done when we reach the target
+ * milestone, i.e., when non_subgraph_svcs == 0. They are acted upon in
+ * dgraph_set_instance_state().
+ */
+static int halting = -1;
+static boolean_t go_single_user_mode = B_FALSE;
+static boolean_t go_to_level1 = B_FALSE;
+
+/*
+ * This tracks the legacy runlevel to ensure we signal init and manage
+ * utmpx entries correctly.
+ */
+static char current_runlevel = '\0';
+
+/* Number of single user threads currently running */
+static pthread_mutex_t single_user_thread_lock;
+static int single_user_thread_count = 0;
+
+/* Statistics for dependency cycle-checking */
+static u_longlong_t dep_inserts = 0;
+static u_longlong_t dep_cycle_ns = 0;
+static u_longlong_t dep_insert_ns = 0;
+
+
+static const char * const emsg_invalid_restarter =
+ "Restarter FMRI for %s is invalid. Transitioning to maintenance.\n";
+static const char * const console_login_fmri = CONSOLE_LOGIN_FMRI;
+static const char * const single_user_fmri = SCF_MILESTONE_SINGLE_USER;
+static const char * const multi_user_fmri = SCF_MILESTONE_MULTI_USER;
+static const char * const multi_user_svr_fmri = SCF_MILESTONE_MULTI_USER_SERVER;
+
+
+/*
+ * These services define the system being "up". If none of them can come
+ * online, then we will run sulogin on the console. Note that the install
+ * services are used in the miniroot and when installing additional CDs after
+ * the first. can_come_up() makes the decision, and a sulogin_thread() runs
+ * sulogin; the thread can be started by dgraph_set_instance_state() or
+ * single_user_thread().
+ *
+ * NOTE: can_come_up() relies on SCF_MILESTONE_SINGLE_USER being the first
+ * entry, which is only used when booting_to_single_user (boot -s) is set.
+ * This is because when doing a "boot -s", sulogin is started from specials.c
+ * after milestone/single-user comes online, for backwards compatibility.
+ * In this case, SCF_MILESTONE_SINGLE_USER needs to be part of up_svcs
+ * to ensure sulogin will be spawned if milestone/single-user cannot be reached.
+ */
+static const char * const up_svcs[] = {
+ SCF_MILESTONE_SINGLE_USER,
+ CONSOLE_LOGIN_FMRI,
+ "svc:/system/install-setup:default",
+ "svc:/system/install:default",
+ NULL
+};
+
+/* This array must have an element for each non-NULL element of up_svcs[]. */
+static graph_vertex_t *up_svcs_p[] = { NULL, NULL, NULL, NULL };
+
+/* These are for seed repository magic. See can_come_up(). */
+static const char * const manifest_import =
+ "svc:/system/manifest-import:default";
+static graph_vertex_t *manifest_import_p = NULL;
+
+
+static char target_milestone_as_runlevel(void);
+static void graph_runlevel_changed(char rl, int online);
+static int dgraph_set_milestone(const char *, scf_handle_t *, boolean_t);
+static void vertex_send_event(graph_vertex_t *v, restarter_event_type_t e);
+static boolean_t should_be_in_subgraph(graph_vertex_t *v);
+
+/*
+ * graph_vertex_compare()
+ *	This function can compare either int * (id) or graph_vertex_t * (gv)
+ * values, as the vertex id is always the first element of a
+ * graph_vertex structure.
+ */
+/* ARGSUSED */
+static int
+graph_vertex_compare(const void *lc_arg, const void *rc_arg, void *private)
+{
+ int lc_id = ((const graph_vertex_t *)lc_arg)->gv_id;
+ int rc_id = *(int *)rc_arg;
+
+ if (lc_id > rc_id)
+ return (1);
+ if (lc_id < rc_id)
+ return (-1);
+ return (0);
+}
+
+void
+graph_init()
+{
+ graph_edge_pool = startd_list_pool_create("graph_edges",
+ sizeof (graph_edge_t), offsetof(graph_edge_t, ge_link), NULL,
+ UU_LIST_POOL_DEBUG);
+ assert(graph_edge_pool != NULL);
+
+ graph_vertex_pool = startd_list_pool_create("graph_vertices",
+ sizeof (graph_vertex_t), offsetof(graph_vertex_t, gv_link),
+ graph_vertex_compare, UU_LIST_POOL_DEBUG);
+ assert(graph_vertex_pool != NULL);
+
+ (void) pthread_mutex_init(&dgraph_lock, &mutex_attrs);
+ (void) pthread_mutex_init(&single_user_thread_lock, &mutex_attrs);
+ dgraph = startd_list_create(graph_vertex_pool, NULL, UU_LIST_SORTED);
+ assert(dgraph != NULL);
+
+ if (!st->st_initial)
+ current_runlevel = utmpx_get_runlevel();
+
+ log_framework(LOG_DEBUG, "Initialized graph\n");
+}
+
+static graph_vertex_t *
+vertex_get_by_name(const char *name)
+{
+ int id;
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+
+ id = dict_lookup_byname(name);
+ if (id == -1)
+ return (NULL);
+
+ return (uu_list_find(dgraph, &id, NULL, NULL));
+}
+
+static graph_vertex_t *
+vertex_get_by_id(int id)
+{
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+
+ if (id == -1)
+ return (NULL);
+
+ return (uu_list_find(dgraph, &id, NULL, NULL));
+}
+
+/*
+ * Creates a new vertex with the given name, adds it to the graph, and returns
+ * a pointer to it. The graph lock must be held by this thread on entry.
+ */
+static graph_vertex_t *
+graph_add_vertex(const char *name)
+{
+ int id;
+ graph_vertex_t *v;
+ void *p;
+ uu_list_index_t idx;
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+
+ id = dict_insert(name);
+
+ v = startd_zalloc(sizeof (*v));
+
+ v->gv_id = id;
+
+ v->gv_name = startd_alloc(strlen(name) + 1);
+ (void) strcpy(v->gv_name, name);
+
+ v->gv_dependencies = startd_list_create(graph_edge_pool, v, 0);
+ v->gv_dependents = startd_list_create(graph_edge_pool, v, 0);
+
+ p = uu_list_find(dgraph, &id, NULL, &idx);
+ assert(p == NULL);
+
+ uu_list_node_init(v, &v->gv_link, graph_vertex_pool);
+ uu_list_insert(dgraph, v, idx);
+
+ return (v);
+}
+
+/*
+ * Removes v from the graph and frees it. The graph should be locked by this
+ * thread, and v should have no edges associated with it.
+ */
+static void
+graph_remove_vertex(graph_vertex_t *v)
+{
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+
+ assert(uu_list_numnodes(v->gv_dependencies) == 0);
+ assert(uu_list_numnodes(v->gv_dependents) == 0);
+
+ startd_free(v->gv_name, strlen(v->gv_name) + 1);
+ uu_list_destroy(v->gv_dependencies);
+ uu_list_destroy(v->gv_dependents);
+ uu_list_remove(dgraph, v);
+
+ startd_free(v, sizeof (graph_vertex_t));
+}
+
+static void
+graph_add_edge(graph_vertex_t *fv, graph_vertex_t *tv)
+{
+ graph_edge_t *e, *re;
+ int r;
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+
+ e = startd_alloc(sizeof (graph_edge_t));
+ re = startd_alloc(sizeof (graph_edge_t));
+
+ e->ge_parent = fv;
+ e->ge_vertex = tv;
+
+ re->ge_parent = tv;
+ re->ge_vertex = fv;
+
+ uu_list_node_init(e, &e->ge_link, graph_edge_pool);
+ r = uu_list_insert_before(fv->gv_dependencies, NULL, e);
+ assert(r == 0);
+
+ uu_list_node_init(re, &re->ge_link, graph_edge_pool);
+ r = uu_list_insert_before(tv->gv_dependents, NULL, re);
+ assert(r == 0);
+}
+
+static void
+graph_remove_edge(graph_vertex_t *v, graph_vertex_t *dv)
+{
+ graph_edge_t *e;
+
+ for (e = uu_list_first(v->gv_dependencies);
+ e != NULL;
+ e = uu_list_next(v->gv_dependencies, e)) {
+ if (e->ge_vertex == dv) {
+ uu_list_remove(v->gv_dependencies, e);
+ startd_free(e, sizeof (graph_edge_t));
+ break;
+ }
+ }
+
+ for (e = uu_list_first(dv->gv_dependents);
+ e != NULL;
+ e = uu_list_next(dv->gv_dependents, e)) {
+ if (e->ge_vertex == v) {
+ uu_list_remove(dv->gv_dependents, e);
+ startd_free(e, sizeof (graph_edge_t));
+ break;
+ }
+ }
+}
+
+static void
+graph_walk_dependents(graph_vertex_t *v, void (*func)(graph_vertex_t *, void *),
+ void *arg)
+{
+ graph_edge_t *e;
+
+ for (e = uu_list_first(v->gv_dependents);
+ e != NULL;
+ e = uu_list_next(v->gv_dependents, e))
+ func(e->ge_vertex, arg);
+}
+
+static void
+graph_walk_dependencies(graph_vertex_t *v, void (*func)(graph_vertex_t *,
+ void *), void *arg)
+{
+ graph_edge_t *e;
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+
+ for (e = uu_list_first(v->gv_dependencies);
+ e != NULL;
+ e = uu_list_next(v->gv_dependencies, e)) {
+
+ func(e->ge_vertex, arg);
+ }
+}
+
+/*
+ * Generic graph walking function.
+ *
+ * Given a vertex, this function will walk either dependencies
+ * (WALK_DEPENDENCIES) or dependents (WALK_DEPENDENTS) of a vertex recursively
+ * for the entire graph. It will avoid cycles and never visit the same vertex
+ * twice.
+ *
+ * We avoid traversing exclusion dependencies, because they are allowed to
+ * create cycles in the graph. When propagating satisfiability, there is no
+ * need to walk exclusion dependencies because exclude_all_satisfied() doesn't
+ * test for satisfiability.
+ *
+ * The walker takes two callbacks. The first is called before examining the
+ * dependents of each vertex. The second is called on each vertex after
+ * examining its dependents. This allows is_path_to() to construct a path only
+ * after the target vertex has been found.
+ */
+typedef enum {
+ WALK_DEPENDENTS,
+ WALK_DEPENDENCIES
+} graph_walk_dir_t;
+
+typedef int (*graph_walk_cb_t)(graph_vertex_t *, void *);
+
+typedef struct graph_walk_info {
+ graph_walk_dir_t gi_dir;
+ uchar_t *gi_visited; /* vertex bitmap */
+ int (*gi_pre)(graph_vertex_t *, void *);
+ void (*gi_post)(graph_vertex_t *, void *);
+ void *gi_arg; /* callback arg */
+ int gi_ret; /* return value */
+} graph_walk_info_t;
+
+static int
+graph_walk_recurse(graph_edge_t *e, graph_walk_info_t *gip)
+{
+ uu_list_t *list;
+ int r;
+ graph_vertex_t *v = e->ge_vertex;
+ int i;
+ uint_t b;
+
+ i = v->gv_id / 8;
+ b = 1 << (v->gv_id % 8);
+
+ /*
+ * Check to see if we've visited this vertex already.
+ */
+ if (gip->gi_visited[i] & b)
+ return (UU_WALK_NEXT);
+
+ gip->gi_visited[i] |= b;
+
+ /*
+ * Don't follow exclusions.
+ */
+ if (v->gv_type == GVT_GROUP && v->gv_depgroup == DEPGRP_EXCLUDE_ALL)
+ return (UU_WALK_NEXT);
+
+ /*
+ * Call pre-visit callback. If this doesn't terminate the walk,
+ * continue search.
+ */
+ if ((gip->gi_ret = gip->gi_pre(v, gip->gi_arg)) == UU_WALK_NEXT) {
+ /*
+ * Recurse using appropriate list.
+ */
+ if (gip->gi_dir == WALK_DEPENDENTS)
+ list = v->gv_dependents;
+ else
+ list = v->gv_dependencies;
+
+ r = uu_list_walk(list, (uu_walk_fn_t *)graph_walk_recurse,
+ gip, 0);
+ assert(r == 0);
+ }
+
+ /*
+ * Callbacks must return either UU_WALK_NEXT or UU_WALK_DONE.
+ */
+ assert(gip->gi_ret == UU_WALK_NEXT || gip->gi_ret == UU_WALK_DONE);
+
+ /*
+ * If given a post-callback, call the function for every vertex.
+ */
+ if (gip->gi_post != NULL)
+ (void) gip->gi_post(v, gip->gi_arg);
+
+ /*
+ * Preserve the callback's return value. If the callback returns
+ * UU_WALK_DONE, then we propagate that to the caller in order to
+ * terminate the walk.
+ */
+ return (gip->gi_ret);
+}
+
+static void
+graph_walk(graph_vertex_t *v, graph_walk_dir_t dir,
+ int (*pre)(graph_vertex_t *, void *),
+ void (*post)(graph_vertex_t *, void *), void *arg)
+{
+ graph_walk_info_t gi;
+ graph_edge_t fake;
+ size_t sz = dictionary->dict_new_id / 8 + 1;
+
+ gi.gi_visited = startd_zalloc(sz);
+ gi.gi_pre = pre;
+ gi.gi_post = post;
+ gi.gi_arg = arg;
+ gi.gi_dir = dir;
+ gi.gi_ret = 0;
+
+ /*
+ * Fake up an edge for the first iteration
+ */
+ fake.ge_vertex = v;
+ (void) graph_walk_recurse(&fake, &gi);
+
+ startd_free(gi.gi_visited, sz);
+}
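+
+/*
+ * Illustrative sketch (editorial addition, not part of the original commit):
+ * a minimal use of graph_walk() with only a pre-visit callback, counting the
+ * vertices reachable through dependent edges.  The caller is assumed to hold
+ * dgraph_lock.  Compiled out by default.
+ */
+#ifdef GRAPH_WALK_EXAMPLE
+/*ARGSUSED*/
+static int
+count_visit(graph_vertex_t *v, void *arg)
+{
+	(*(uint_t *)arg)++;		/* count this vertex */
+	return (UU_WALK_NEXT);		/* keep walking */
+}
+
+static uint_t
+count_dependents_example(graph_vertex_t *v)
+{
+	uint_t n = 0;
+
+	graph_walk(v, WALK_DEPENDENTS, count_visit, NULL, &n);
+
+	return (n - 1);			/* exclude the starting vertex */
+}
+#endif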
+
+typedef struct child_search {
+ int id; /* id of vertex to look for */
+ uint_t depth; /* recursion depth */
+ /*
+ * While the vertex is not found, path is NULL. After the search, if
+ * the vertex was found then path should point to a -1-terminated
+ * array of vertex id's which constitute the path to the vertex.
+ */
+ int *path;
+} child_search_t;
+
+static int
+child_pre(graph_vertex_t *v, void *arg)
+{
+ child_search_t *cs = arg;
+
+ cs->depth++;
+
+ if (v->gv_id == cs->id) {
+ cs->path = startd_alloc((cs->depth + 1) * sizeof (int));
+ cs->path[cs->depth] = -1;
+ return (UU_WALK_DONE);
+ }
+
+ return (UU_WALK_NEXT);
+}
+
+static void
+child_post(graph_vertex_t *v, void *arg)
+{
+ child_search_t *cs = arg;
+
+ cs->depth--;
+
+ if (cs->path != NULL)
+ cs->path[cs->depth] = v->gv_id;
+}
+
+/*
+ * Look for a path from 'from' to 'to'. If one exists, returns a pointer to
+ * a -1-terminated array of the ids of the vertices along the path. If
+ * there is no path, returns NULL.
+ */
+static int *
+is_path_to(graph_vertex_t *from, graph_vertex_t *to)
+{
+ child_search_t cs;
+
+ cs.id = to->gv_id;
+ cs.depth = 0;
+ cs.path = NULL;
+
+ graph_walk(from, WALK_DEPENDENCIES, child_pre, child_post, &cs);
+
+ return (cs.path);
+}
+
+/*
+ * Given an array of vertex ids as returned by is_path_to(), allocates a
+ * string of their names joined by newlines. Returns the string in *cpp and
+ * the size of the allocated buffer in *sz, and frees path.
+ */
+static void
+path_to_str(int *path, char **cpp, size_t *sz)
+{
+ int i;
+ graph_vertex_t *v;
+ size_t allocd, new_allocd;
+ char *new, *name;
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+ assert(path[0] != -1);
+
+ allocd = 1;
+ *cpp = startd_alloc(1);
+ (*cpp)[0] = '\0';
+
+ for (i = 0; path[i] != -1; ++i) {
+ name = NULL;
+
+ v = vertex_get_by_id(path[i]);
+
+ if (v == NULL)
+ name = "<deleted>";
+ else if (v->gv_type == GVT_INST || v->gv_type == GVT_SVC)
+ name = v->gv_name;
+
+ if (name != NULL) {
+ new_allocd = allocd + strlen(name) + 1;
+ new = startd_alloc(new_allocd);
+ (void) strcpy(new, *cpp);
+ (void) strcat(new, name);
+ (void) strcat(new, "\n");
+
+ startd_free(*cpp, allocd);
+
+ *cpp = new;
+ allocd = new_allocd;
+ }
+ }
+
+ startd_free(path, sizeof (int) * (i + 1));
+
+ *sz = allocd;
+}
+
+
+/*
+ * This function along with run_sulogin() implements an exclusion relationship
+ * between system/console-login and sulogin. run_sulogin() will fail if
+ * system/console-login is online, and the graph engine should call
+ * graph_clogin_start() to bring system/console-login online, which defers the
+ * start if sulogin is running.
+ */
+static void
+graph_clogin_start(graph_vertex_t *v)
+{
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+
+ if (sulogin_running)
+ console_login_ready = B_TRUE;
+ else
+ vertex_send_event(v, RESTARTER_EVENT_TYPE_START);
+}
+
+static void
+graph_su_start(graph_vertex_t *v)
+{
+ /*
+ * /etc/inittab used to have the initial /sbin/rcS as a 'sysinit'
+ * entry with a runlevel of 'S', before jumping to the final
+ * target runlevel (as set in initdefault). We mimic that legacy
+ * behavior here.
+ */
+ utmpx_set_runlevel('S', '0', B_FALSE);
+ vertex_send_event(v, RESTARTER_EVENT_TYPE_START);
+}
+
+static void
+graph_post_su_online(void)
+{
+ graph_runlevel_changed('S', 1);
+}
+
+static void
+graph_post_su_disable(void)
+{
+ graph_runlevel_changed('S', 0);
+}
+
+static void
+graph_post_mu_online(void)
+{
+ graph_runlevel_changed('2', 1);
+}
+
+static void
+graph_post_mu_disable(void)
+{
+ graph_runlevel_changed('2', 0);
+}
+
+static void
+graph_post_mus_online(void)
+{
+ graph_runlevel_changed('3', 1);
+}
+
+static void
+graph_post_mus_disable(void)
+{
+ graph_runlevel_changed('3', 0);
+}
+
+static struct special_vertex_info {
+ const char *name;
+ void (*start_f)(graph_vertex_t *);
+ void (*post_online_f)(void);
+ void (*post_disable_f)(void);
+} special_vertices[] = {
+ { CONSOLE_LOGIN_FMRI, graph_clogin_start, NULL, NULL },
+ { SCF_MILESTONE_SINGLE_USER, graph_su_start,
+ graph_post_su_online, graph_post_su_disable },
+ { SCF_MILESTONE_MULTI_USER, NULL,
+ graph_post_mu_online, graph_post_mu_disable },
+ { SCF_MILESTONE_MULTI_USER_SERVER, NULL,
+ graph_post_mus_online, graph_post_mus_disable },
+ { NULL },
+};
+
+
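+/*
+ * Send restarter event e for vertex v.  The switch below sanity-checks v's
+ * state for each event type (and counts _ADD_INSTANCE events in
+ * st_load_instances) before handing the event to the instance's restarter
+ * via restarter_protocol_send_event().
+ */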
+void
+vertex_send_event(graph_vertex_t *v, restarter_event_type_t e)
+{
+ switch (e) {
+ case RESTARTER_EVENT_TYPE_ADD_INSTANCE:
+ assert(v->gv_state == RESTARTER_STATE_UNINIT);
+
+ MUTEX_LOCK(&st->st_load_lock);
+ st->st_load_instances++;
+ MUTEX_UNLOCK(&st->st_load_lock);
+ break;
+
+ case RESTARTER_EVENT_TYPE_ENABLE:
+ log_framework(LOG_DEBUG, "Enabling %s.\n", v->gv_name);
+ assert(v->gv_state == RESTARTER_STATE_UNINIT ||
+ v->gv_state == RESTARTER_STATE_DISABLED ||
+ v->gv_state == RESTARTER_STATE_MAINT);
+ break;
+
+ case RESTARTER_EVENT_TYPE_DISABLE:
+ case RESTARTER_EVENT_TYPE_ADMIN_DISABLE:
+ log_framework(LOG_DEBUG, "Disabling %s.\n", v->gv_name);
+ assert(v->gv_state != RESTARTER_STATE_DISABLED);
+ break;
+
+ case RESTARTER_EVENT_TYPE_STOP:
+ log_framework(LOG_DEBUG, "Stopping %s.\n", v->gv_name);
+ assert(v->gv_state == RESTARTER_STATE_DEGRADED ||
+ v->gv_state == RESTARTER_STATE_ONLINE);
+ break;
+
+ case RESTARTER_EVENT_TYPE_START:
+ log_framework(LOG_DEBUG, "Starting %s.\n", v->gv_name);
+ assert(v->gv_state == RESTARTER_STATE_OFFLINE);
+ break;
+
+ case RESTARTER_EVENT_TYPE_REMOVE_INSTANCE:
+ case RESTARTER_EVENT_TYPE_ADMIN_DEGRADED:
+ case RESTARTER_EVENT_TYPE_ADMIN_REFRESH:
+ case RESTARTER_EVENT_TYPE_ADMIN_RESTART:
+ case RESTARTER_EVENT_TYPE_ADMIN_MAINT_OFF:
+ case RESTARTER_EVENT_TYPE_ADMIN_MAINT_ON:
+ case RESTARTER_EVENT_TYPE_ADMIN_MAINT_ON_IMMEDIATE:
+ case RESTARTER_EVENT_TYPE_DEPENDENCY_CYCLE:
+ case RESTARTER_EVENT_TYPE_INVALID_DEPENDENCY:
+ break;
+
+ default:
+#ifndef NDEBUG
+ uu_warn("%s:%d: Bad event %d.\n", __FILE__, __LINE__, e);
+#endif
+ abort();
+ }
+
+ restarter_protocol_send_event(v->gv_name, v->gv_restarter_channel, e);
+}
+
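+/*
+ * Detach v from its restarter: send _REMOVE_INSTANCE, remove the edge to
+ * the restarter vertex if one exists, and clear the restarter id and
+ * channel.
+ */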
+static void
+graph_unset_restarter(graph_vertex_t *v)
+{
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+ assert(v->gv_flags & GV_CONFIGURED);
+
+ vertex_send_event(v, RESTARTER_EVENT_TYPE_REMOVE_INSTANCE);
+
+ if (v->gv_restarter_id != -1) {
+ graph_vertex_t *rv;
+
+ rv = vertex_get_by_id(v->gv_restarter_id);
+ graph_remove_edge(v, rv);
+ }
+
+ v->gv_restarter_id = -1;
+ v->gv_restarter_channel = NULL;
+}
+
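+/*
+ * Delete a dependency group vertex which no longer has any dependents,
+ * removing its edges and any service or file vertices which are left
+ * unreferenced as a result.
+ */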
+static void
+delete_depgroup(graph_vertex_t *v)
+{
+ graph_edge_t *e;
+ graph_vertex_t *dv;
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+ assert(v->gv_type == GVT_GROUP);
+ assert(uu_list_numnodes(v->gv_dependents) == 0);
+
+ while ((e = uu_list_first(v->gv_dependencies)) != NULL) {
+ dv = e->ge_vertex;
+
+ graph_remove_edge(v, dv);
+
+ switch (dv->gv_type) {
+ case GVT_INST: /* instance dependency */
+ break;
+
+ case GVT_SVC: /* service dependency */
+ if (uu_list_numnodes(dv->gv_dependents) == 0 &&
+ uu_list_numnodes(dv->gv_dependencies) == 0)
+ graph_remove_vertex(dv);
+ break;
+
+ case GVT_FILE: /* file dependency */
+ assert(uu_list_numnodes(dv->gv_dependencies) == 0);
+ if (uu_list_numnodes(dv->gv_dependents) == 0)
+ graph_remove_vertex(dv);
+ break;
+
+ default:
+#ifndef NDEBUG
+ uu_warn("%s:%d: Unexpected node type %d", __FILE__,
+ __LINE__, dv->gv_type);
+#endif
+ abort();
+ }
+ }
+
+ graph_remove_vertex(v);
+}
+
+static int
+delete_instance_deps_cb(graph_edge_t *e, void **ptrs)
+{
+ graph_vertex_t *v = ptrs[0];
+ boolean_t delete_restarter_dep = (boolean_t)ptrs[1];
+ graph_vertex_t *dv;
+
+ dv = e->ge_vertex;
+
+ /*
+ * We have four possibilities here:
+ * - GVT_INST: restarter
+ * - GVT_GROUP - GVT_INST: instance dependency
+ * - GVT_GROUP - GVT_SVC - GVT_INST: service dependency
+ * - GVT_GROUP - GVT_FILE: file dependency
+ */
+ switch (dv->gv_type) {
+ case GVT_INST: /* restarter */
+ assert(dv->gv_id == v->gv_restarter_id);
+ if (delete_restarter_dep)
+ graph_remove_edge(v, dv);
+ break;
+
+ case GVT_GROUP: /* pg dependency */
+ graph_remove_edge(v, dv);
+ delete_depgroup(dv);
+ break;
+
+ case GVT_FILE:
+ /* These are currently not direct dependencies */
+
+ default:
+#ifndef NDEBUG
+ uu_warn("%s:%d: Bad vertex type %d.\n", __FILE__, __LINE__,
+ dv->gv_type);
+#endif
+ abort();
+ }
+
+ return (UU_WALK_NEXT);
+}
+
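+/*
+ * Remove the dependency groups (and, if delete_restarter_dep is set, the
+ * restarter edge) of the GVT_INST vertex v by walking its dependency list
+ * with delete_instance_deps_cb().
+ */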
+static void
+delete_instance_dependencies(graph_vertex_t *v, boolean_t delete_restarter_dep)
+{
+ void *ptrs[2];
+ int r;
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+ assert(v->gv_type == GVT_INST);
+
+ ptrs[0] = v;
+ ptrs[1] = (void *)delete_restarter_dep;
+
+ r = uu_list_walk(v->gv_dependencies,
+ (uu_walk_fn_t *)delete_instance_deps_cb, &ptrs, UU_WALK_ROBUST);
+ assert(r == 0);
+}
+
+/*
+ * int graph_insert_vertex_unconfigured()
+ * Insert a vertex without sending any restarter events. If the vertex
+ * already exists or creation is successful, return a pointer to it in *vp.
+ *
+ * If type is not GVT_GROUP, dt can remain unset.
+ *
+ * Returns 0 on success, EEXIST if a vertex for fmri already exists, or
+ * EINVAL if the arguments are invalid (i.e., fmri doesn't agree with type,
+ * or type doesn't agree with dt).
+ */
+static int
+graph_insert_vertex_unconfigured(const char *fmri, gv_type_t type,
+ depgroup_type_t dt, restarter_error_t rt, graph_vertex_t **vp)
+{
+ int r;
+ int i;
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+
+ switch (type) {
+ case GVT_SVC:
+ case GVT_INST:
+ if (strncmp(fmri, "svc:", sizeof ("svc:") - 1) != 0)
+ return (EINVAL);
+ break;
+
+ case GVT_FILE:
+ if (strncmp(fmri, "file:", sizeof ("file:") - 1) != 0)
+ return (EINVAL);
+ break;
+
+ case GVT_GROUP:
+ if (dt <= 0 || rt < 0)
+ return (EINVAL);
+ break;
+
+ default:
+#ifndef NDEBUG
+ uu_warn("%s:%d: Unknown type %d.\n", __FILE__, __LINE__, type);
+#endif
+ abort();
+ }
+
+ *vp = vertex_get_by_name(fmri);
+ if (*vp != NULL)
+ return (EEXIST);
+
+ *vp = graph_add_vertex(fmri);
+
+ (*vp)->gv_type = type;
+ (*vp)->gv_depgroup = dt;
+ (*vp)->gv_restart = rt;
+
+ (*vp)->gv_flags = 0;
+ (*vp)->gv_state = RESTARTER_STATE_NONE;
+
+ for (i = 0; special_vertices[i].name != NULL; ++i) {
+ if (strcmp(fmri, special_vertices[i].name) == 0) {
+ (*vp)->gv_start_f = special_vertices[i].start_f;
+ (*vp)->gv_post_online_f =
+ special_vertices[i].post_online_f;
+ (*vp)->gv_post_disable_f =
+ special_vertices[i].post_disable_f;
+ break;
+ }
+ }
+
+ (*vp)->gv_restarter_id = -1;
+ (*vp)->gv_restarter_channel = 0;
+
+ if (type == GVT_INST) {
+ char *sfmri;
+ graph_vertex_t *sv;
+
+ sfmri = inst_fmri_to_svc_fmri(fmri);
+ sv = vertex_get_by_name(sfmri);
+ if (sv == NULL) {
+ r = graph_insert_vertex_unconfigured(sfmri, GVT_SVC, 0,
+ 0, &sv);
+ assert(r == 0);
+ }
+ startd_free(sfmri, max_scf_fmri_size);
+
+ graph_add_edge(sv, *vp);
+ }
+
+ /*
+ * If this vertex is in the subgraph, mark it as such, for both
+ * GVT_INST and GVT_SVC vertices.
+ * A GVT_SVC vertex can only be in the subgraph if another instance
+ * depends on it, in which case it's already been added to the graph
+ * and marked as in the subgraph (by refresh_vertex()). If a
+ * GVT_SVC vertex was freshly added (by the code above), it means
+ * that it has no dependents, and cannot be in the subgraph.
+ * Regardless of this, we still check that gv_flags includes
+ * GV_INSUBGRAPH in the event that future behavior causes the above
+ * code to add a GVT_SVC vertex which should be in the subgraph.
+ */
+
+ (*vp)->gv_flags |= (should_be_in_subgraph(*vp) ? GV_INSUBGRAPH : 0);
+
+ return (0);
+}
+
+/*
+ * Returns 0 on success or ELOOP if the dependency would create a cycle.
+ */
+static int
+graph_insert_dependency(graph_vertex_t *fv, graph_vertex_t *tv, int **pathp)
+{
+ hrtime_t now;
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+
+ /* cycle detection */
+ now = gethrtime();
+
+ /* Don't follow exclusions. */
+ if (!(fv->gv_type == GVT_GROUP &&
+ fv->gv_depgroup == DEPGRP_EXCLUDE_ALL)) {
+ *pathp = is_path_to(tv, fv);
+ if (*pathp)
+ return (ELOOP);
+ }
+
+ dep_cycle_ns += gethrtime() - now;
+ ++dep_inserts;
+ now = gethrtime();
+
+ graph_add_edge(fv, tv);
+
+ dep_insert_ns += gethrtime() - now;
+
+ /* Check if the dependency adds the "to" vertex to the subgraph */
+ tv->gv_flags |= (should_be_in_subgraph(tv) ? GV_INSUBGRAPH : 0);
+
+ return (0);
+}
+
+static int
+inst_running(graph_vertex_t *v)
+{
+ assert(v->gv_type == GVT_INST);
+
+ if (v->gv_state == RESTARTER_STATE_ONLINE ||
+ v->gv_state == RESTARTER_STATE_DEGRADED)
+ return (1);
+
+ return (0);
+}
+
+/*
+ * The dependency evaluation functions return
+ * 1 - dependency satisfied
+ * 0 - dependency unsatisfied
+ * -1 - dependency unsatisfiable (without administrator intervention)
+ *
+ * The functions also take a boolean satbility argument. When true, the
+ * functions may recurse in order to determine satisfiability.
+ */
+static int require_any_satisfied(graph_vertex_t *, boolean_t);
+static int dependency_satisfied(graph_vertex_t *, boolean_t);
+
+/*
+ * A require_all dependency is unsatisfied if any elements are unsatisfied. It
+ * is unsatisfiable if any elements are unsatisfiable.
+ */
+static int
+require_all_satisfied(graph_vertex_t *groupv, boolean_t satbility)
+{
+ graph_edge_t *edge;
+ int i;
+ boolean_t any_unsatisfied;
+
+ if (uu_list_numnodes(groupv->gv_dependencies) == 0)
+ return (1);
+
+ any_unsatisfied = B_FALSE;
+
+ for (edge = uu_list_first(groupv->gv_dependencies);
+ edge != NULL;
+ edge = uu_list_next(groupv->gv_dependencies, edge)) {
+ i = dependency_satisfied(edge->ge_vertex, satbility);
+ if (i == 1)
+ continue;
+
+ log_framework(LOG_DEBUG,
+ "require_all(%s): %s is unsatisfi%s.\n", groupv->gv_name,
+ edge->ge_vertex->gv_name, i == 0 ? "ed" : "able");
+
+ if (!satbility)
+ return (0);
+
+ if (i == -1)
+ return (-1);
+
+ any_unsatisfied = B_TRUE;
+ }
+
+ return (any_unsatisfied ? 0 : 1);
+}
+
+/*
+ * A require_any dependency is satisfied if any element is satisfied. It is
+ * satisfiable if any element is satisfiable.
+ */
+static int
+require_any_satisfied(graph_vertex_t *groupv, boolean_t satbility)
+{
+ graph_edge_t *edge;
+ int s;
+ boolean_t satisfiable;
+
+ if (uu_list_numnodes(groupv->gv_dependencies) == 0)
+ return (1);
+
+ satisfiable = B_FALSE;
+
+ for (edge = uu_list_first(groupv->gv_dependencies);
+ edge != NULL;
+ edge = uu_list_next(groupv->gv_dependencies, edge)) {
+ s = dependency_satisfied(edge->ge_vertex, satbility);
+
+ if (s == 1)
+ return (1);
+
+ log_framework(LOG_DEBUG,
+ "require_any(%s): %s is unsatisfi%s.\n",
+ groupv->gv_name, edge->ge_vertex->gv_name,
+ s == 0 ? "ed" : "able");
+
+ if (satbility && s == 0)
+ satisfiable = B_TRUE;
+ }
+
+ return (!satbility || satisfiable ? 0 : -1);
+}
+
+/*
+ * An optional_all dependency only considers elements which are configured,
+ * enabled, and not in maintenance. If any are unsatisfied, then the dependency
+ * is unsatisfied.
+ *
+ * Offline dependencies which are waiting for a dependency to come online are
+ * unsatisfied. Offline dependencies which cannot possibly come online
+ * (unsatisfiable) are always considered satisfied.
+ */
+static int
+optional_all_satisfied(graph_vertex_t *groupv, boolean_t satbility)
+{
+ graph_edge_t *edge;
+ graph_vertex_t *v;
+ boolean_t any_qualified;
+ boolean_t any_unsatisfied;
+ int i;
+
+ any_qualified = B_FALSE;
+ any_unsatisfied = B_FALSE;
+
+ for (edge = uu_list_first(groupv->gv_dependencies);
+ edge != NULL;
+ edge = uu_list_next(groupv->gv_dependencies, edge)) {
+ v = edge->ge_vertex;
+
+ switch (v->gv_type) {
+ case GVT_INST:
+ /* Skip missing or disabled instances */
+ if ((v->gv_flags & (GV_CONFIGURED | GV_ENABLED)) !=
+ (GV_CONFIGURED | GV_ENABLED))
+ continue;
+
+ if (v->gv_state == RESTARTER_STATE_MAINT)
+ continue;
+
+ any_qualified = B_TRUE;
+ if (v->gv_state == RESTARTER_STATE_OFFLINE) {
+ /*
+ * For offline dependencies, treat unsatisfiable
+ * as satisfied.
+ */
+ i = dependency_satisfied(v, B_TRUE);
+ if (i == -1)
+ i = 1;
+ } else if (v->gv_state == RESTARTER_STATE_DISABLED) {
+ /*
+ * The service is enabled, but hasn't
+ * transitioned out of disabled yet. Treat it
+ * as unsatisfied (not unsatisfiable).
+ */
+ i = 0;
+ } else {
+ i = dependency_satisfied(v, satbility);
+ }
+ break;
+
+ case GVT_FILE:
+ any_qualified = B_TRUE;
+ i = dependency_satisfied(v, satbility);
+
+ break;
+
+ case GVT_SVC: {
+ boolean_t svc_any_qualified;
+ boolean_t svc_satisfied;
+ boolean_t svc_satisfiable;
+ graph_vertex_t *v2;
+ graph_edge_t *e2;
+
+ svc_any_qualified = B_FALSE;
+ svc_satisfied = B_FALSE;
+ svc_satisfiable = B_FALSE;
+
+ for (e2 = uu_list_first(v->gv_dependencies);
+ e2 != NULL;
+ e2 = uu_list_next(v->gv_dependencies, e2)) {
+ v2 = e2->ge_vertex;
+ assert(v2->gv_type == GVT_INST);
+
+ if ((v2->gv_flags &
+ (GV_CONFIGURED | GV_ENABLED)) !=
+ (GV_CONFIGURED | GV_ENABLED))
+ continue;
+
+ if (v2->gv_state == RESTARTER_STATE_MAINT)
+ continue;
+
+ svc_any_qualified = B_TRUE;
+
+ if (v2->gv_state == RESTARTER_STATE_OFFLINE) {
+ /*
+ * For offline dependencies, treat
+ * unsatisfiable as satisfied.
+ */
+ i = dependency_satisfied(v2, B_TRUE);
+ if (i == -1)
+ i = 1;
+ } else if (v2->gv_state ==
+ RESTARTER_STATE_DISABLED) {
+ i = 0;
+ } else {
+ i = dependency_satisfied(v2, satbility);
+ }
+
+ if (i == 1) {
+ svc_satisfied = B_TRUE;
+ break;
+ }
+ if (i == 0)
+ svc_satisfiable = B_TRUE;
+ }
+
+ if (!svc_any_qualified)
+ continue;
+ any_qualified = B_TRUE;
+ if (svc_satisfied) {
+ i = 1;
+ } else if (svc_satisfiable) {
+ i = 0;
+ } else {
+ i = -1;
+ }
+ break;
+ }
+
+ case GVT_GROUP:
+ default:
+#ifndef NDEBUG
+ uu_warn("%s:%d: Unexpected vertex type %d.\n", __FILE__,
+ __LINE__, v->gv_type);
+#endif
+ abort();
+ }
+
+ if (i == 1)
+ continue;
+
+ log_framework(LOG_DEBUG,
+ "optional_all(%s): %s is unsatisfi%s.\n", groupv->gv_name,
+ v->gv_name, i == 0 ? "ed" : "able");
+
+ if (!satbility)
+ return (0);
+ if (i == -1)
+ return (-1);
+ any_unsatisfied = B_TRUE;
+ }
+
+ if (!any_qualified)
+ return (1);
+
+ return (any_unsatisfied ? 0 : 1);
+}
+
+/*
+ * An exclude_all dependency is unsatisfied if any non-service element is
+ * satisfied or any service instance which is configured, enabled, and not in
+ * maintenance is satisfied. Usually when unsatisfied, it is also
+ * unsatisfiable.
+ */
+#define LOG_EXCLUDE(u, v) \
+ log_framework(LOG_DEBUG, "exclude_all(%s): %s is satisfied.\n", \
+ (u)->gv_name, (v)->gv_name)
+
+/* ARGSUSED */
+static int
+exclude_all_satisfied(graph_vertex_t *groupv, boolean_t satbility)
+{
+ graph_edge_t *edge, *e2;
+ graph_vertex_t *v, *v2;
+
+ for (edge = uu_list_first(groupv->gv_dependencies);
+ edge != NULL;
+ edge = uu_list_next(groupv->gv_dependencies, edge)) {
+ v = edge->ge_vertex;
+
+ switch (v->gv_type) {
+ case GVT_INST:
+ if ((v->gv_flags & GV_CONFIGURED) == 0)
+ continue;
+
+ switch (v->gv_state) {
+ case RESTARTER_STATE_ONLINE:
+ case RESTARTER_STATE_DEGRADED:
+ LOG_EXCLUDE(groupv, v);
+ return (v->gv_flags & GV_ENABLED ? -1 : 0);
+
+ case RESTARTER_STATE_OFFLINE:
+ case RESTARTER_STATE_UNINIT:
+ LOG_EXCLUDE(groupv, v);
+ return (0);
+
+ case RESTARTER_STATE_DISABLED:
+ case RESTARTER_STATE_MAINT:
+ continue;
+
+ default:
+#ifndef NDEBUG
+ uu_warn("%s:%d: Unexpected vertex state %d.\n",
+ __FILE__, __LINE__, v->gv_state);
+#endif
+ abort();
+ }
+ /* NOTREACHED */
+
+ case GVT_SVC:
+ break;
+
+ case GVT_FILE:
+ if (!file_ready(v))
+ continue;
+ LOG_EXCLUDE(groupv, v);
+ return (-1);
+
+ case GVT_GROUP:
+ default:
+#ifndef NDEBUG
+ uu_warn("%s:%d: Unexpected vertex type %d.\n", __FILE__,
+ __LINE__, v->gv_type);
+#endif
+ abort();
+ }
+
+ /* v represents a service */
+ if (uu_list_numnodes(v->gv_dependencies) == 0)
+ continue;
+
+ for (e2 = uu_list_first(v->gv_dependencies);
+ e2 != NULL;
+ e2 = uu_list_next(v->gv_dependencies, e2)) {
+ v2 = e2->ge_vertex;
+ assert(v2->gv_type == GVT_INST);
+
+ if ((v2->gv_flags & GV_CONFIGURED) == 0)
+ continue;
+
+ switch (v2->gv_state) {
+ case RESTARTER_STATE_ONLINE:
+ case RESTARTER_STATE_DEGRADED:
+ LOG_EXCLUDE(groupv, v2);
+ return (v2->gv_flags & GV_ENABLED ? -1 : 0);
+
+ case RESTARTER_STATE_OFFLINE:
+ case RESTARTER_STATE_UNINIT:
+ LOG_EXCLUDE(groupv, v2);
+ return (0);
+
+ case RESTARTER_STATE_DISABLED:
+ case RESTARTER_STATE_MAINT:
+ continue;
+
+ default:
+#ifndef NDEBUG
+ uu_warn("%s:%d: Unexpected vertex state %d.\n",
+ __FILE__, __LINE__, v2->gv_state);
+#endif
+ abort();
+ }
+ }
+ }
+
+ return (1);
+}
+
+/*
+ * int instance_satisfied()
+ * Determine if all the dependencies are satisfied for the supplied instance
+ * vertex. Return 1 if they are, 0 if they aren't, and -1 if they won't be
+ * without administrator intervention.
+ */
+static int
+instance_satisfied(graph_vertex_t *v, boolean_t satbility)
+{
+ assert(v->gv_type == GVT_INST);
+ assert(!inst_running(v));
+
+ return (require_all_satisfied(v, satbility));
+}
+
+/*
+ * Decide whether v can satisfy a dependency. v can either be a child of
+ * a group vertex, or of an instance vertex.
+ */
+static int
+dependency_satisfied(graph_vertex_t *v, boolean_t satbility)
+{
+ switch (v->gv_type) {
+ case GVT_INST:
+ if ((v->gv_flags & GV_CONFIGURED) == 0)
+ return (-1);
+
+ switch (v->gv_state) {
+ case RESTARTER_STATE_ONLINE:
+ case RESTARTER_STATE_DEGRADED:
+ return (1);
+
+ case RESTARTER_STATE_OFFLINE:
+ if (!satbility)
+ return (0);
+ return (instance_satisfied(v, satbility) != -1 ?
+ 0 : -1);
+
+ case RESTARTER_STATE_DISABLED:
+ case RESTARTER_STATE_MAINT:
+ return (-1);
+
+ case RESTARTER_STATE_UNINIT:
+ return (0);
+
+ default:
+#ifndef NDEBUG
+ uu_warn("%s:%d: Unexpected vertex state %d.\n",
+ __FILE__, __LINE__, v->gv_state);
+#endif
+ abort();
+ /* NOTREACHED */
+ }
+
+ case GVT_SVC:
+ if (uu_list_numnodes(v->gv_dependencies) == 0)
+ return (-1);
+ return (require_any_satisfied(v, satbility));
+
+ case GVT_FILE:
+ /* i.e., we assume files will not be automatically generated */
+ return (file_ready(v) ? 1 : -1);
+
+ case GVT_GROUP:
+ break;
+
+ default:
+#ifndef NDEBUG
+ uu_warn("%s:%d: Unexpected node type %d.\n", __FILE__, __LINE__,
+ v->gv_type);
+#endif
+ abort();
+ /* NOTREACHED */
+ }
+
+ switch (v->gv_depgroup) {
+ case DEPGRP_REQUIRE_ANY:
+ return (require_any_satisfied(v, satbility));
+
+ case DEPGRP_REQUIRE_ALL:
+ return (require_all_satisfied(v, satbility));
+
+ case DEPGRP_OPTIONAL_ALL:
+ return (optional_all_satisfied(v, satbility));
+
+ case DEPGRP_EXCLUDE_ALL:
+ return (exclude_all_satisfied(v, satbility));
+
+ default:
+#ifndef NDEBUG
+ uu_warn("%s:%d: Unknown dependency grouping %d.\n", __FILE__,
+ __LINE__, v->gv_depgroup);
+#endif
+ abort();
+ }
+}
+
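+/*
+ * If v is offline and its dependencies are satisfied, start it, either by
+ * sending a _START event or by calling the vertex's special start function
+ * if one is registered.
+ */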
+static void
+start_if_satisfied(graph_vertex_t *v)
+{
+ if (v->gv_state == RESTARTER_STATE_OFFLINE &&
+ instance_satisfied(v, B_FALSE) == 1) {
+ if (v->gv_start_f == NULL)
+ vertex_send_event(v, RESTARTER_EVENT_TYPE_START);
+ else
+ v->gv_start_f(v);
+ }
+}
+
+/*
+ * propagate_satbility()
+ *
+ * This function is used when the given vertex changes state in such a way that
+ * one of its dependents may become unsatisfiable. This happens when an
+ * instance transitions between offline -> online, or from !running ->
+ * maintenance, as well as when an instance is removed from the graph.
+ *
+ * We have to walk all of the dependents, since optional_all dependencies several
+ * levels up could become (un)satisfied, instead of unsatisfiable. For example,
+ *
+ * +-----+ optional_all +-----+ require_all +-----+
+ * | A |--------------->| B |-------------->| C |
+ * +-----+ +-----+ +-----+
+ *
+ * offline -> maintenance
+ *
+ * If C goes into maintenance, it's not enough simply to check B. Because A has
+ * an optional dependency, what was previously an unsatisfiable situation is now
+ * satisfied (B will never come online, even though its state hasn't changed).
+ *
+ * Note that it's not necessary to continue examining dependents after reaching
+ * an optional_all dependency. It's not possible for an optional_all dependency
+ * to change satisfiability without also coming online, in which case we get a
+ * start event and propagation continues naturally. However, it does no harm to
+ * continue propagating satisfiability (as it is a relatively rare event), and
+ * keeps the walker code simple and generic.
+ */
+/*ARGSUSED*/
+static int
+satbility_cb(graph_vertex_t *v, void *arg)
+{
+ if (v->gv_type == GVT_INST)
+ start_if_satisfied(v);
+
+ return (UU_WALK_NEXT);
+}
+
+static void
+propagate_satbility(graph_vertex_t *v)
+{
+ graph_walk(v, WALK_DEPENDENTS, satbility_cb, NULL, NULL);
+}
+
+static void propagate_stop(graph_vertex_t *, void *);
+
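+/*
+ * Walker callback which propagates a start through v's dependents: offline
+ * instances are started if they are now satisfied, service and group
+ * vertices recurse into their own dependents, and an exclude_all group
+ * instead propagates a stop, since something it excludes has come up.
+ */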
+/* ARGSUSED */
+static void
+propagate_start(graph_vertex_t *v, void *arg)
+{
+ switch (v->gv_type) {
+ case GVT_INST:
+ start_if_satisfied(v);
+ break;
+
+ case GVT_GROUP:
+ if (v->gv_depgroup == DEPGRP_EXCLUDE_ALL) {
+ graph_walk_dependents(v, propagate_stop,
+ (void *)RERR_RESTART);
+ break;
+ }
+ /* FALLTHROUGH */
+
+ case GVT_SVC:
+ graph_walk_dependents(v, propagate_start, NULL);
+ break;
+
+ case GVT_FILE:
+#ifndef NDEBUG
+ uu_warn("%s:%d: propagate_start() encountered GVT_FILE.\n",
+ __FILE__, __LINE__);
+#endif
+ abort();
+ /* NOTREACHED */
+
+ default:
+#ifndef NDEBUG
+ uu_warn("%s:%d: Unknown vertex type %d.\n", __FILE__, __LINE__,
+ v->gv_type);
+#endif
+ abort();
+ }
+}
+
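+/*
+ * Walker callback which propagates a stop: running instances are stopped
+ * when the error warrants it, services recurse into their dependents, and
+ * dependency groups stop their dependent instance only if the error is
+ * covered by the group's restart_on value.  An exclude_all group instead
+ * propagates a start, since something it excludes has gone down.
+ */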
+static void
+propagate_stop(graph_vertex_t *v, void *arg)
+{
+ graph_edge_t *e;
+ graph_vertex_t *svc;
+ restarter_error_t err = (restarter_error_t)arg;
+
+ switch (v->gv_type) {
+ case GVT_INST:
+ /* Restarter */
+ if (err > RERR_NONE && inst_running(v))
+ vertex_send_event(v, RESTARTER_EVENT_TYPE_STOP);
+ break;
+
+ case GVT_SVC:
+ graph_walk_dependents(v, propagate_stop, arg);
+ break;
+
+ case GVT_FILE:
+#ifndef NDEBUG
+ uu_warn("%s:%d: propagate_stop() encountered GVT_FILE.\n",
+ __FILE__, __LINE__);
+#endif
+ abort();
+ /* NOTREACHED */
+
+ case GVT_GROUP:
+ if (v->gv_depgroup == DEPGRP_EXCLUDE_ALL) {
+ graph_walk_dependents(v, propagate_start, NULL);
+ break;
+ }
+
+ if (err == RERR_NONE || err > v->gv_restart)
+ break;
+
+ assert(uu_list_numnodes(v->gv_dependents) == 1);
+ e = uu_list_first(v->gv_dependents);
+ svc = e->ge_vertex;
+
+ if (inst_running(svc))
+ vertex_send_event(svc, RESTARTER_EVENT_TYPE_STOP);
+ break;
+
+ default:
+#ifndef NDEBUG
+ uu_warn("%s:%d: Unknown vertex type %d.\n", __FILE__, __LINE__,
+ v->gv_type);
+#endif
+ abort();
+ }
+}
+
+/*
+ * void graph_enable_by_vertex()
+ * If admin is non-zero, this is an administrative request for change
+ * of the enabled property. Thus, send the ADMIN_DISABLE rather than
+ * a plain DISABLE restarter event.
+ */
+static void
+graph_enable_by_vertex(graph_vertex_t *vertex, int enable, int admin)
+{
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+ assert((vertex->gv_flags & GV_CONFIGURED));
+
+ vertex->gv_flags = (vertex->gv_flags & ~GV_ENABLED) |
+ (enable ? GV_ENABLED : 0);
+
+ if (enable) {
+ if (vertex->gv_state != RESTARTER_STATE_OFFLINE &&
+ vertex->gv_state != RESTARTER_STATE_DEGRADED &&
+ vertex->gv_state != RESTARTER_STATE_ONLINE)
+ vertex_send_event(vertex, RESTARTER_EVENT_TYPE_ENABLE);
+ } else {
+ if (vertex->gv_state != RESTARTER_STATE_DISABLED) {
+ if (admin)
+ vertex_send_event(vertex,
+ RESTARTER_EVENT_TYPE_ADMIN_DISABLE);
+ else
+ vertex_send_event(vertex,
+ RESTARTER_EVENT_TYPE_DISABLE);
+ }
+ }
+
+ /*
+ * Wait for state update from restarter before sending _START or
+ * _STOP.
+ */
+}
+
+static int configure_vertex(graph_vertex_t *, scf_instance_t *);
+
+/*
+ * Set the restarter for v to fmri_arg. That is, make sure a vertex for
+ * fmri_arg exists, make v depend on it, and send _ADD_INSTANCE for v. If
+ * v is already configured and fmri_arg indicates the current restarter, do
+ * nothing. If v is configured and fmri_arg is a new restarter, delete v's
+ * dependency on the restarter, send _REMOVE_INSTANCE for v, and set the new
+ * restarter. Returns 0 on success, EINVAL if the FMRI is invalid,
+ * ECONNABORTED if the repository connection is broken, and ELOOP
+ * if the dependency would create a cycle. In the last case, *pathp will
+ * point to a -1-terminated array of ids which compose the path from v to
+ * restarter_fmri.
+ */
+int
+graph_change_restarter(graph_vertex_t *v, const char *fmri_arg, scf_handle_t *h,
+ int **pathp)
+{
+ char *restarter_fmri = NULL;
+ graph_vertex_t *rv;
+ int err;
+ int id;
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+
+ if (fmri_arg[0] != '\0') {
+ err = fmri_canonify(fmri_arg, &restarter_fmri, B_TRUE);
+ if (err != 0) {
+ assert(err == EINVAL);
+ return (err);
+ }
+ }
+
+ if (restarter_fmri == NULL ||
+ strcmp(restarter_fmri, SCF_SERVICE_STARTD) == 0) {
+ if (v->gv_flags & GV_CONFIGURED) {
+ if (v->gv_restarter_id == -1) {
+ if (restarter_fmri != NULL)
+ startd_free(restarter_fmri,
+ max_scf_fmri_size);
+ return (0);
+ }
+
+ graph_unset_restarter(v);
+ }
+
+ /* Master restarter, nothing to do. */
+ v->gv_restarter_id = -1;
+ v->gv_restarter_channel = NULL;
+ vertex_send_event(v, RESTARTER_EVENT_TYPE_ADD_INSTANCE);
+ return (0);
+ }
+
+ if (v->gv_flags & GV_CONFIGURED) {
+ id = dict_lookup_byname(restarter_fmri);
+ if (id != -1 && v->gv_restarter_id == id) {
+ startd_free(restarter_fmri, max_scf_fmri_size);
+ return (0);
+ }
+
+ graph_unset_restarter(v);
+ }
+
+ err = graph_insert_vertex_unconfigured(restarter_fmri, GVT_INST, 0,
+ RERR_NONE, &rv);
+ startd_free(restarter_fmri, max_scf_fmri_size);
+ assert(err == 0 || err == EEXIST);
+
+ if (rv->gv_delegate_initialized == 0) {
+ rv->gv_delegate_channel = restarter_protocol_init_delegate(
+ rv->gv_name);
+ rv->gv_delegate_initialized = 1;
+ }
+ v->gv_restarter_id = rv->gv_id;
+ v->gv_restarter_channel = rv->gv_delegate_channel;
+
+ err = graph_insert_dependency(v, rv, pathp);
+ if (err != 0) {
+ assert(err == ELOOP);
+ return (ELOOP);
+ }
+
+ vertex_send_event(v, RESTARTER_EVENT_TYPE_ADD_INSTANCE);
+
+ if (!(rv->gv_flags & GV_CONFIGURED)) {
+ scf_instance_t *inst;
+
+ err = libscf_fmri_get_instance(h, rv->gv_name, &inst);
+ switch (err) {
+ case 0:
+ err = configure_vertex(rv, inst);
+ scf_instance_destroy(inst);
+ switch (err) {
+ case 0:
+ case ECANCELED:
+ break;
+
+ case ECONNABORTED:
+ return (ECONNABORTED);
+
+ default:
+ bad_error("configure_vertex", err);
+ }
+ break;
+
+ case ECONNABORTED:
+ return (ECONNABORTED);
+
+ case ENOENT:
+ break;
+
+ case ENOTSUP:
+ /*
+ * The fmri doesn't specify an instance - translate
+ * to EINVAL.
+ */
+ return (EINVAL);
+
+ case EINVAL:
+ default:
+ bad_error("libscf_fmri_get_instance", err);
+ }
+ }
+
+ return (0);
+}
+
+
+/*
+ * Add all of the instances of the service named by fmri to the graph.
+ * Returns
+ * 0 - success
+ * ENOENT - service indicated by fmri does not exist
+ *
+ * In both cases *reboundp will be B_TRUE if the handle was rebound, or B_FALSE
+ * otherwise.
+ */
+static int
+add_service(const char *fmri, scf_handle_t *h, boolean_t *reboundp)
+{
+ scf_service_t *svc;
+ scf_instance_t *inst;
+ scf_iter_t *iter;
+ char *inst_fmri;
+ int ret, r;
+
+ *reboundp = B_FALSE;
+
+ svc = safe_scf_service_create(h);
+ inst = safe_scf_instance_create(h);
+ iter = safe_scf_iter_create(h);
+ inst_fmri = startd_alloc(max_scf_fmri_size);
+
+rebound:
+ if (scf_handle_decode_fmri(h, fmri, NULL, svc, NULL, NULL, NULL,
+ SCF_DECODE_FMRI_EXACT) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ libscf_handle_rebind(h);
+ *reboundp = B_TRUE;
+ goto rebound;
+
+ case SCF_ERROR_NOT_FOUND:
+ ret = ENOENT;
+ goto out;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ bad_error("scf_handle_decode_fmri", scf_error());
+ }
+ }
+
+ if (scf_iter_service_instances(iter, svc) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ libscf_handle_rebind(h);
+ *reboundp = B_TRUE;
+ goto rebound;
+
+ case SCF_ERROR_DELETED:
+ ret = ENOENT;
+ goto out;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_iter_service_instances", scf_error());
+ }
+ }
+
+ for (;;) {
+ r = scf_iter_next_instance(iter, inst);
+ if (r == 0)
+ break;
+ if (r != 1) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ libscf_handle_rebind(h);
+ *reboundp = B_TRUE;
+ goto rebound;
+
+ case SCF_ERROR_DELETED:
+ ret = ENOENT;
+ goto out;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ bad_error("scf_iter_next_instance",
+ scf_error());
+ }
+ }
+
+ if (scf_instance_to_fmri(inst, inst_fmri, max_scf_fmri_size) <
+ 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ libscf_handle_rebind(h);
+ *reboundp = B_TRUE;
+ goto rebound;
+
+ case SCF_ERROR_DELETED:
+ continue;
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_instance_to_fmri", scf_error());
+ }
+ }
+
+ r = dgraph_add_instance(inst_fmri, inst, B_FALSE);
+ switch (r) {
+ case 0:
+ case ECANCELED:
+ break;
+
+ case EEXIST:
+ continue;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(h);
+ *reboundp = B_TRUE;
+ goto rebound;
+
+ case EINVAL:
+ default:
+ bad_error("dgraph_add_instance", r);
+ }
+ }
+
+ ret = 0;
+
+out:
+ startd_free(inst_fmri, max_scf_fmri_size);
+ scf_iter_destroy(iter);
+ scf_instance_destroy(inst);
+ scf_service_destroy(svc);
+ return (ret);
+}
+
+struct depfmri_info {
+ graph_vertex_t *v; /* GVT_GROUP vertex */
+ gv_type_t type; /* type of dependency */
+ const char *inst_fmri; /* FMRI of parental GVT_INST vert. */
+ const char *pg_name; /* Name of dependency pg */
+ scf_handle_t *h;
+ int err; /* return error code */
+ int **pathp; /* return circular dependency path */
+};
+
+/*
+ * Find or create a vertex for fmri and make info->v depend on it.
+ * Returns
+ * 0 - success
+ * nonzero - failure
+ *
+ * On failure, sets info->err to
+ * EINVAL - fmri is invalid
+ * fmri does not match info->type
+ * ELOOP - Adding the dependency creates a circular dependency. *info->pathp
+ * will point to an array of the ids of the members of the cycle.
+ * ECONNABORTED - repository connection was broken
+ * ECONNRESET - succeeded, but repository connection was reset
+ */
+static int
+process_dependency_fmri(const char *fmri, struct depfmri_info *info)
+{
+ int err;
+ graph_vertex_t *depgroup_v, *v;
+ char *fmri_copy, *cfmri;
+ size_t fmri_copy_sz;
+ const char *scope, *service, *instance, *pg;
+ scf_instance_t *inst;
+ boolean_t rebound;
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+
+ /* Get or create vertex for FMRI */
+ depgroup_v = info->v;
+
+ if (strncmp(fmri, "file:", sizeof ("file:") - 1) == 0) {
+ if (info->type != GVT_FILE) {
+ log_framework(LOG_NOTICE,
+ "FMRI \"%s\" is not allowed for the \"%s\" "
+ "dependency's type of instance %s.\n", fmri,
+ info->pg_name, info->inst_fmri);
+ return (info->err = EINVAL);
+ }
+
+ err = graph_insert_vertex_unconfigured(fmri, info->type, 0,
+ RERR_NONE, &v);
+ switch (err) {
+ case 0:
+ break;
+
+ case EEXIST:
+ assert(v->gv_type == GVT_FILE);
+ break;
+
+ case EINVAL: /* prevented above */
+ default:
+ bad_error("graph_insert_vertex_unconfigured", err);
+ }
+ } else {
+ if (info->type != GVT_INST) {
+ log_framework(LOG_NOTICE,
+ "FMRI \"%s\" is not allowed for the \"%s\" "
+ "dependency's type of instance %s.\n", fmri,
+ info->pg_name, info->inst_fmri);
+ return (info->err = EINVAL);
+ }
+
+ /*
+ * We must canonify fmri & add a vertex for it.
+ */
+ fmri_copy_sz = strlen(fmri) + 1;
+ fmri_copy = startd_alloc(fmri_copy_sz);
+ (void) strcpy(fmri_copy, fmri);
+
+ /* Determine if the FMRI is a property group or instance */
+ if (scf_parse_svc_fmri(fmri_copy, &scope, &service,
+ &instance, &pg, NULL) != 0) {
+ startd_free(fmri_copy, fmri_copy_sz);
+ log_framework(LOG_NOTICE,
+ "Dependency \"%s\" of %s has invalid FMRI "
+ "\"%s\".\n", info->pg_name, info->inst_fmri,
+ fmri);
+ return (info->err = EINVAL);
+ }
+
+ if (service == NULL || pg != NULL) {
+ startd_free(fmri_copy, fmri_copy_sz);
+ log_framework(LOG_NOTICE,
+ "Dependency \"%s\" of %s does not designate a "
+ "service or instance.\n", info->pg_name,
+ info->inst_fmri);
+ return (info->err = EINVAL);
+ }
+
+ if (scope == NULL || strcmp(scope, SCF_SCOPE_LOCAL) == 0) {
+ cfmri = uu_msprintf("svc:/%s%s%s",
+ service, instance ? ":" : "", instance ? instance :
+ "");
+ } else {
+ cfmri = uu_msprintf("svc://%s/%s%s%s",
+ scope, service, instance ? ":" : "", instance ?
+ instance : "");
+ }
+
+ startd_free(fmri_copy, fmri_copy_sz);
+
+ err = graph_insert_vertex_unconfigured(cfmri, instance ?
+ GVT_INST : GVT_SVC, instance ? 0 : DEPGRP_REQUIRE_ANY,
+ RERR_NONE, &v);
+ uu_free(cfmri);
+ switch (err) {
+ case 0:
+ break;
+
+ case EEXIST:
+ /* Verify v. */
+ if (instance != NULL)
+ assert(v->gv_type == GVT_INST);
+ else
+ assert(v->gv_type == GVT_SVC);
+ break;
+
+ default:
+ bad_error("graph_insert_vertex_unconfigured", err);
+ }
+ }
+
+ /* Add dependency from depgroup_v to new vertex */
+ info->err = graph_insert_dependency(depgroup_v, v, info->pathp);
+ switch (info->err) {
+ case 0:
+ break;
+
+ case ELOOP:
+ return (ELOOP);
+
+ default:
+ bad_error("graph_insert_dependency", info->err);
+ }
+
+ /* This must be after we insert the dependency, to avoid looping. */
+ switch (v->gv_type) {
+ case GVT_INST:
+ if ((v->gv_flags & GV_CONFIGURED) != 0)
+ break;
+
+ inst = safe_scf_instance_create(info->h);
+
+ rebound = B_FALSE;
+
+rebound:
+ err = libscf_lookup_instance(v->gv_name, inst);
+ switch (err) {
+ case 0:
+ err = configure_vertex(v, inst);
+ switch (err) {
+ case 0:
+ case ECANCELED:
+ break;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(info->h);
+ rebound = B_TRUE;
+ goto rebound;
+
+ default:
+ bad_error("configure_vertex", err);
+ }
+ break;
+
+ case ENOENT:
+ break;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(info->h);
+ rebound = B_TRUE;
+ goto rebound;
+
+ case EINVAL:
+ case ENOTSUP:
+ default:
+ bad_error("libscf_lookup_instance", err);
+ }
+
+ scf_instance_destroy(inst);
+
+ if (rebound)
+ return (info->err = ECONNRESET);
+ break;
+
+ case GVT_SVC:
+ (void) add_service(v->gv_name, info->h, &rebound);
+ if (rebound)
+ return (info->err = ECONNRESET);
+ }
+
+ return (0);
+}
+
+struct deppg_info {
+ graph_vertex_t *v; /* GVT_INST vertex */
+ int err; /* return error */
+ int **pathp; /* return circular dependency path */
+};
+
+/*
+ * Make info->v depend on a new GVT_GROUP node for this property group,
+ * and then call process_dependency_fmri() for the values of the entity
+ * property. Return 0 on success, or if something goes wrong return nonzero
+ * and set info->err to ECONNABORTED, EINVAL, or the error code returned by
+ * process_dependency_fmri().
+ */
+static int
+process_dependency_pg(scf_propertygroup_t *pg, struct deppg_info *info)
+{
+ scf_handle_t *h;
+ depgroup_type_t deptype;
+ struct depfmri_info linfo;
+ char *fmri, *pg_name;
+ size_t fmri_sz;
+ graph_vertex_t *depgrp;
+ scf_property_t *prop;
+ int err;
+ int empty;
+ scf_error_t scferr;
+ ssize_t len;
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+
+ h = scf_pg_handle(pg);
+
+ pg_name = startd_alloc(max_scf_name_size);
+
+ len = scf_pg_get_name(pg, pg_name, max_scf_name_size);
+ if (len < 0) {
+ startd_free(pg_name, max_scf_name_size);
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ return (info->err = ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ return (info->err = 0);
+
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_pg_get_name", scf_error());
+ }
+ }
+
+ /*
+ * Skip over empty dependency groups. Since dependency property
+ * groups are updated atomically, they are either empty or
+ * fully populated.
+ */
+ empty = depgroup_empty(h, pg);
+ if (empty < 0) {
+ log_error(LOG_INFO,
+ "Error reading dependency group \"%s\" of %s: %s\n",
+ pg_name, info->v->gv_name, scf_strerror(scf_error()));
+ startd_free(pg_name, max_scf_name_size);
+ return (info->err = EINVAL);
+
+ } else if (empty == 1) {
+ log_framework(LOG_DEBUG,
+ "Ignoring empty dependency group \"%s\" of %s\n",
+ pg_name, info->v->gv_name);
+ startd_free(pg_name, max_scf_name_size);
+ return (info->err = 0);
+ }
+
+ fmri_sz = strlen(info->v->gv_name) + 1 + len + 1;
+ fmri = startd_alloc(fmri_sz);
+
+ (void) snprintf(fmri, fmri_sz, "%s>%s", info->v->gv_name,
+ pg_name);
+
+ /* Validate the pg before modifying the graph */
+ deptype = depgroup_read_grouping(h, pg);
+ if (deptype == DEPGRP_UNSUPPORTED) {
+ log_error(LOG_INFO,
+ "Dependency \"%s\" of %s has an unknown grouping value.\n",
+ pg_name, info->v->gv_name);
+ startd_free(fmri, fmri_sz);
+ startd_free(pg_name, max_scf_name_size);
+ return (info->err = EINVAL);
+ }
+
+ prop = safe_scf_property_create(h);
+
+ if (scf_pg_get_property(pg, SCF_PROPERTY_ENTITIES, prop) != 0) {
+ scferr = scf_error();
+ scf_property_destroy(prop);
+ if (scferr == SCF_ERROR_DELETED) {
+ startd_free(fmri, fmri_sz);
+ startd_free(pg_name, max_scf_name_size);
+ return (info->err = 0);
+ } else if (scferr != SCF_ERROR_NOT_FOUND) {
+ startd_free(fmri, fmri_sz);
+ startd_free(pg_name, max_scf_name_size);
+ return (info->err = ECONNABORTED);
+ }
+
+ log_error(LOG_INFO,
+ "Dependency \"%s\" of %s is missing a \"%s\" property.\n",
+ pg_name, info->v->gv_name, SCF_PROPERTY_ENTITIES);
+
+ startd_free(fmri, fmri_sz);
+ startd_free(pg_name, max_scf_name_size);
+
+ return (info->err = EINVAL);
+ }
+
+ /* Create depgroup vertex for pg */
+ err = graph_insert_vertex_unconfigured(fmri, GVT_GROUP, deptype,
+ depgroup_read_restart(h, pg), &depgrp);
+ assert(err == 0);
+ startd_free(fmri, fmri_sz);
+
+ /* Add dependency from inst vertex to new vertex */
+ err = graph_insert_dependency(info->v, depgrp, info->pathp);
+ /* ELOOP can't happen because this should be a new vertex */
+ assert(err == 0);
+
+ linfo.v = depgrp;
+ linfo.type = depgroup_read_scheme(h, pg);
+ linfo.inst_fmri = info->v->gv_name;
+ linfo.pg_name = pg_name;
+ linfo.h = h;
+ linfo.err = 0;
+ linfo.pathp = info->pathp;
+ err = walk_property_astrings(prop, (callback_t)process_dependency_fmri,
+ &linfo);
+
+ scf_property_destroy(prop);
+ startd_free(pg_name, max_scf_name_size);
+
+ switch (err) {
+ case 0:
+ case EINTR:
+ return (info->err = linfo.err);
+
+ case ECONNABORTED:
+ case EINVAL:
+ return (info->err = err);
+
+ case ECANCELED:
+ return (info->err = 0);
+
+ case ECONNRESET:
+ return (info->err = ECONNABORTED);
+
+ default:
+ bad_error("walk_property_astrings", err);
+ /* NOTREACHED */
+ }
+}
+
+/*
+ * Build the dependency info for v from the repository. Returns 0 on success,
+ * ECONNABORTED on repository disconnection, EINVAL if the repository
+ * configuration is invalid, and ELOOP if a dependency would cause a cycle.
+ * In the last case, *pathp will point to a -1-terminated array of ids which
+ * constitute the rest of the dependency cycle.
+ */
+static int
+set_dependencies(graph_vertex_t *v, scf_instance_t *inst, int **pathp)
+{
+ struct deppg_info info;
+ int err;
+ uint_t old_configured;
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+
+ /*
+ * Mark the vertex as configured during dependency insertion to avoid
+ * dependency cycles (which can appear in the graph if one of the
+ * vertices is an exclusion-group).
+ */
+ old_configured = v->gv_flags & GV_CONFIGURED;
+ v->gv_flags |= GV_CONFIGURED;
+
+ info.err = 0;
+ info.v = v;
+ info.pathp = pathp;
+
+ err = walk_dependency_pgs(inst, (callback_t)process_dependency_pg,
+ &info);
+
+ if (!old_configured)
+ v->gv_flags &= ~GV_CONFIGURED;
+
+ switch (err) {
+ case 0:
+ case EINTR:
+ return (info.err);
+
+ case ECONNABORTED:
+ return (ECONNABORTED);
+
+ case ECANCELED:
+ /* Should get delete event, so return 0. */
+ return (0);
+
+ default:
+ bad_error("walk_dependency_pgs", err);
+ /* NOTREACHED */
+ }
+}
+
+
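+/*
+ * Log that the service named by fmri is being put into maintenance because
+ * it completes the dependency cycle described by path (as returned by
+ * is_path_to()).  Consumes path.
+ */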
+static void
+handle_cycle(const char *fmri, int *path)
+{
+ const char *cp;
+ size_t sz;
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+
+ path_to_str(path, (char **)&cp, &sz);
+
+ log_error(LOG_ERR, "Putting service %s into maintenance "
+ "because it completes a dependency cycle:\n%s", fmri ? fmri : "?",
+ cp);
+
+ startd_free((void *)cp, sz);
+}
+
+/*
+ * When run on the dependencies of a vertex, populates list with
+ * graph_edge_t's which point to the instance vertices (no GVT_GROUP nodes)
+ * on which the vertex depends.
+ */
+static int
+append_insts(graph_edge_t *e, uu_list_t *list)
+{
+ graph_vertex_t *v = e->ge_vertex;
+ graph_edge_t *new;
+ int r;
+
+ switch (v->gv_type) {
+ case GVT_INST:
+ case GVT_SVC:
+ break;
+
+ case GVT_GROUP:
+ r = uu_list_walk(v->gv_dependencies,
+ (uu_walk_fn_t *)append_insts, list, 0);
+ assert(r == 0);
+ return (UU_WALK_NEXT);
+
+ case GVT_FILE:
+ return (UU_WALK_NEXT);
+
+ default:
+#ifndef NDEBUG
+ uu_warn("%s:%d: Unexpected vertex type %d.\n", __FILE__,
+ __LINE__, v->gv_type);
+#endif
+ abort();
+ }
+
+ new = startd_alloc(sizeof (*new));
+ new->ge_vertex = v;
+ uu_list_node_init(new, &new->ge_link, graph_edge_pool);
+ r = uu_list_insert_before(list, NULL, new);
+ assert(r == 0);
+ return (UU_WALK_NEXT);
+}
+
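+/*
+ * Determine whether v belongs in the subgraph rooted at the current
+ * milestone: the milestone itself always does, and otherwise v qualifies
+ * only if some dependent in the subgraph reaches it through something other
+ * than an exclude_all group or, for an instance which isn't enabled, an
+ * optional_all group.
+ */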
+static boolean_t
+should_be_in_subgraph(graph_vertex_t *v)
+{
+ graph_edge_t *e;
+
+ if (v == milestone)
+ return (B_TRUE);
+
+ /*
+ * v is in the subgraph if any of its dependents are in the subgraph.
+ * Except for EXCLUDE_ALL dependents. And OPTIONAL dependents only
+ * count if we're enabled.
+ */
+ for (e = uu_list_first(v->gv_dependents);
+ e != NULL;
+ e = uu_list_next(v->gv_dependents, e)) {
+ graph_vertex_t *dv = e->ge_vertex;
+
+ if (!(dv->gv_flags & GV_INSUBGRAPH))
+ continue;
+
+ /*
+ * Don't include instances that are optional and disabled.
+ */
+ if (v->gv_type == GVT_INST && dv->gv_type == GVT_SVC) {
+
+ int in = 0;
+ graph_edge_t *ee;
+
+ for (ee = uu_list_first(dv->gv_dependents);
+ ee != NULL;
+ ee = uu_list_next(dv->gv_dependents, ee)) {
+
+ graph_vertex_t *ddv = ee->ge_vertex;
+
+ if (ddv->gv_type == GVT_GROUP &&
+ ddv->gv_depgroup == DEPGRP_EXCLUDE_ALL)
+ continue;
+
+ if (ddv->gv_type == GVT_GROUP &&
+ ddv->gv_depgroup == DEPGRP_OPTIONAL_ALL &&
+ !(v->gv_flags & GV_ENBLD_NOOVR))
+ continue;
+
+ in = 1;
+ }
+ if (!in)
+ continue;
+ }
+ if (v->gv_type == GVT_INST &&
+ dv->gv_type == GVT_GROUP &&
+ dv->gv_depgroup == DEPGRP_OPTIONAL_ALL &&
+ !(v->gv_flags & GV_ENBLD_NOOVR))
+ continue;
+
+ /* Don't include excluded services and instances */
+ if (dv->gv_type == GVT_GROUP &&
+ dv->gv_depgroup == DEPGRP_EXCLUDE_ALL)
+ continue;
+
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+/*
+ * Ensures that GV_INSUBGRAPH is set properly for v and its descendents. If
+ * any bits change, manipulate the repository appropriately. Returns 0 or
+ * ECONNABORTED.
+ */
+static int
+eval_subgraph(graph_vertex_t *v, scf_handle_t *h)
+{
+ boolean_t old = (v->gv_flags & GV_INSUBGRAPH) != 0;
+ boolean_t new;
+ graph_edge_t *e;
+ scf_instance_t *inst;
+ int ret = 0, r;
+
+ assert(milestone != NULL && milestone != MILESTONE_NONE);
+
+ new = should_be_in_subgraph(v);
+
+ if (new == old)
+ return (0);
+
+ log_framework(LOG_DEBUG, new ? "Adding %s to the subgraph.\n" :
+ "Removing %s from the subgraph.\n", v->gv_name);
+
+ v->gv_flags = (v->gv_flags & ~GV_INSUBGRAPH) |
+ (new ? GV_INSUBGRAPH : 0);
+
+ if (v->gv_type == GVT_INST && (v->gv_flags & GV_CONFIGURED)) {
+ int err;
+
+get_inst:
+ err = libscf_fmri_get_instance(h, v->gv_name, &inst);
+ if (err != 0) {
+ switch (err) {
+ case ECONNABORTED:
+ libscf_handle_rebind(h);
+ ret = ECONNABORTED;
+ goto get_inst;
+
+ case ENOENT:
+ break;
+
+ case EINVAL:
+ case ENOTSUP:
+ default:
+ bad_error("libscf_fmri_get_instance", err);
+ }
+ } else {
+ const char *f;
+
+ if (new) {
+ err = libscf_delete_enable_ovr(inst);
+ f = "libscf_delete_enable_ovr";
+ } else {
+ err = libscf_set_enable_ovr(inst, 0);
+ f = "libscf_set_enable_ovr";
+ }
+ scf_instance_destroy(inst);
+ switch (err) {
+ case 0:
+ case ECANCELED:
+ break;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(h);
+ /*
+ * We must continue so the graph is updated,
+ * but we must return ECONNABORTED so any
+ * libscf state held by any callers is reset.
+ */
+ ret = ECONNABORTED;
+ goto get_inst;
+
+ case EROFS:
+ case EPERM:
+ log_error(LOG_WARNING,
+ "Could not set %s/%s for %s: %s.\n",
+ SCF_PG_GENERAL_OVR, SCF_PROPERTY_ENABLED,
+ v->gv_name, strerror(err));
+ break;
+
+ default:
+ bad_error(f, err);
+ }
+ }
+ }
+
+ for (e = uu_list_first(v->gv_dependencies);
+ e != NULL;
+ e = uu_list_next(v->gv_dependencies, e)) {
+ r = eval_subgraph(e->ge_vertex, h);
+ if (r != 0) {
+ assert(r == ECONNABORTED);
+ ret = ECONNABORTED;
+ }
+ }
+
+ return (ret);
+}
+
+/*
+ * Delete the (property group) dependencies of v & create new ones based on
+ * inst. If doing so would create a cycle, log a message and put the instance
+ * into maintenance. Update GV_INSUBGRAPH flags as necessary. Returns 0 or
+ * ECONNABORTED.
+ */
+static int
+refresh_vertex(graph_vertex_t *v, scf_instance_t *inst)
+{
+ int err;
+ int *path;
+ char *fmri;
+ int r;
+ scf_handle_t *h = scf_instance_handle(inst);
+ uu_list_t *old_deps;
+ int ret = 0;
+ graph_edge_t *e;
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+ assert(v->gv_type == GVT_INST);
+
+ log_framework(LOG_DEBUG, "Graph engine: Refreshing %s.\n", v->gv_name);
+
+ if (milestone > MILESTONE_NONE) {
+ /*
+ * In case some of v's dependencies are being deleted we must
+ * make a list of them now for GV_INSUBGRAPH-flag evaluation
+ * after the new dependencies are in place.
+ */
+ old_deps = startd_list_create(graph_edge_pool, NULL, 0);
+
+ err = uu_list_walk(v->gv_dependencies,
+ (uu_walk_fn_t *)append_insts, old_deps, 0);
+ assert(err == 0);
+ }
+
+ delete_instance_dependencies(v, B_FALSE);
+
+ err = set_dependencies(v, inst, &path);
+ switch (err) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ ret = err;
+ goto out;
+
+ case EINVAL:
+ case ELOOP:
+ r = libscf_instance_get_fmri(inst, &fmri);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ ret = ECONNABORTED;
+ goto out;
+
+ case ECANCELED:
+ ret = 0;
+ goto out;
+
+ default:
+ bad_error("libscf_instance_get_fmri", r);
+ }
+
+ if (err == EINVAL) {
+ log_error(LOG_ERR, "Transitioning %s "
+ "to maintenance due to misconfiguration.\n",
+ fmri ? fmri : "?");
+ vertex_send_event(v,
+ RESTARTER_EVENT_TYPE_INVALID_DEPENDENCY);
+ } else {
+ handle_cycle(fmri, path);
+ vertex_send_event(v,
+ RESTARTER_EVENT_TYPE_DEPENDENCY_CYCLE);
+ }
+ startd_free(fmri, max_scf_fmri_size);
+ ret = 0;
+ goto out;
+
+ default:
+ bad_error("set_dependencies", err);
+ }
+
+ if (milestone > MILESTONE_NONE) {
+ boolean_t aborted = B_FALSE;
+
+ for (e = uu_list_first(old_deps);
+ e != NULL;
+ e = uu_list_next(old_deps, e)) {
+ if (eval_subgraph(e->ge_vertex, h) ==
+ ECONNABORTED)
+ aborted = B_TRUE;
+ }
+
+ for (e = uu_list_first(v->gv_dependencies);
+ e != NULL;
+ e = uu_list_next(v->gv_dependencies, e)) {
+ if (eval_subgraph(e->ge_vertex, h) ==
+ ECONNABORTED)
+ aborted = B_TRUE;
+ }
+
+ if (aborted) {
+ ret = ECONNABORTED;
+ goto out;
+ }
+ }
+
+ if (v->gv_state == RESTARTER_STATE_OFFLINE) {
+ if (instance_satisfied(v, B_FALSE) == 1) {
+ if (v->gv_start_f == NULL)
+ vertex_send_event(v,
+ RESTARTER_EVENT_TYPE_START);
+ else
+ v->gv_start_f(v);
+ }
+ }
+
+ ret = 0;
+
+out:
+ if (milestone > MILESTONE_NONE) {
+ void *cookie = NULL;
+
+ while ((e = uu_list_teardown(old_deps, &cookie)) != NULL)
+ startd_free(e, sizeof (*e));
+
+ uu_list_destroy(old_deps);
+ }
+
+ return (ret);
+}
+
+/*
+ * Set up v according to inst. That is, make sure it depends on its
+ * restarter and set up its dependencies. Send the ADD_INSTANCE command to
+ * the restarter, and send ENABLE or DISABLE as appropriate.
+ *
+ * Returns 0 on success, ECONNABORTED on repository disconnection, or
+ * ECANCELED if inst is deleted.
+ */
+static int
+configure_vertex(graph_vertex_t *v, scf_instance_t *inst)
+{
+ scf_handle_t *h;
+ scf_propertygroup_t *pg;
+ scf_snapshot_t *snap;
+ char *restarter_fmri = startd_alloc(max_scf_value_size);
+ int enabled, enabled_ovr;
+ int err;
+ int *path;
+
+ restarter_fmri[0] = '\0';
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+ assert(v->gv_type == GVT_INST);
+ assert((v->gv_flags & GV_CONFIGURED) == 0);
+
+ /* GV_INSUBGRAPH should already be set properly. */
+ assert(should_be_in_subgraph(v) ==
+ ((v->gv_flags & GV_INSUBGRAPH) != 0));
+
+ log_framework(LOG_DEBUG, "Graph adding %s.\n", v->gv_name);
+
+ h = scf_instance_handle(inst);
+
+ /*
+ * If the instance does not have a restarter property group,
+ * initialize its state to uninitialized/none, in case the restarter
+ * is not enabled.
+ */
+ pg = safe_scf_pg_create(h);
+
+ if (scf_instance_get_pg(inst, SCF_PG_RESTARTER, pg) != 0) {
+ instance_data_t idata;
+ uint_t count = 0, msecs = ALLOC_DELAY;
+
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ scf_pg_destroy(pg);
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ scf_pg_destroy(pg);
+ return (ECANCELED);
+
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_instance_get_pg", scf_error());
+ }
+
+ switch (err = libscf_instance_get_fmri(inst,
+ (char **)&idata.i_fmri)) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ case ECANCELED:
+ scf_pg_destroy(pg);
+ return (err);
+
+ default:
+ bad_error("libscf_instance_get_fmri", err);
+ }
+
+ idata.i_state = RESTARTER_STATE_NONE;
+ idata.i_next_state = RESTARTER_STATE_NONE;
+
+init_state:
+ switch (err = _restarter_commit_states(h, &idata,
+ RESTARTER_STATE_UNINIT, RESTARTER_STATE_NONE, NULL)) {
+ case 0:
+ break;
+
+ case ENOMEM:
+ ++count;
+ if (count < ALLOC_RETRY) {
+ (void) poll(NULL, 0, msecs);
+ msecs *= ALLOC_DELAY_MULT;
+ goto init_state;
+ }
+
+ uu_die("Insufficient memory.\n");
+ /* NOTREACHED */
+
+ case ECONNABORTED:
+ scf_pg_destroy(pg);
+ return (ECONNABORTED);
+
+ case ENOENT:
+ scf_pg_destroy(pg);
+ return (ECANCELED);
+
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ log_error(LOG_NOTICE, "Could not initialize state for "
+ "%s: %s.\n", idata.i_fmri, strerror(err));
+ break;
+
+ case EINVAL:
+ default:
+ bad_error("_restarter_commit_states", err);
+ }
+
+ startd_free((void *)idata.i_fmri, max_scf_fmri_size);
+ }
+
+ scf_pg_destroy(pg);
+
+ if (milestone != NULL) {
+ /*
+ * Make sure the enable-override is set properly before we
+ * read whether we should be enabled.
+ */
+ if (milestone == MILESTONE_NONE ||
+ !(v->gv_flags & GV_INSUBGRAPH)) {
+ switch (err = libscf_set_enable_ovr(inst, 0)) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ case ECANCELED:
+ return (err);
+
+ case EROFS:
+ log_error(LOG_WARNING,
+ "Could not set %s/%s for %s: %s.\n",
+ SCF_PG_GENERAL_OVR, SCF_PROPERTY_ENABLED,
+ v->gv_name, strerror(err));
+ break;
+
+ case EPERM:
+ uu_die("Permission denied.\n");
+ /* NOTREACHED */
+
+ default:
+ bad_error("libscf_set_enable_ovr", err);
+ }
+ } else {
+ assert(v->gv_flags & GV_INSUBGRAPH);
+ switch (err = libscf_delete_enable_ovr(inst)) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ case ECANCELED:
+ return (err);
+
+ case EPERM:
+ uu_die("Permission denied.\n");
+ /* NOTREACHED */
+
+ default:
+ bad_error("libscf_delete_enable_ovr", err);
+ }
+ }
+ }
+
+ err = libscf_get_basic_instance_data(h, inst, v->gv_name, &enabled,
+ &enabled_ovr, &restarter_fmri);
+ switch (err) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ case ECANCELED:
+ startd_free(restarter_fmri, max_scf_value_size);
+ return (err);
+
+ case ENOENT:
+ log_framework(LOG_DEBUG,
+ "Ignoring %s because it has no general property group.\n",
+ v->gv_name);
+ startd_free(restarter_fmri, max_scf_value_size);
+ return (0);
+
+ default:
+ bad_error("libscf_get_basic_instance_data", err);
+ }
+
+ if (enabled == -1) {
+ startd_free(restarter_fmri, max_scf_value_size);
+ return (0);
+ }
+
+ v->gv_flags = (v->gv_flags & ~GV_ENBLD_NOOVR) |
+ (enabled ? GV_ENBLD_NOOVR : 0);
+
+ if (enabled_ovr != -1)
+ enabled = enabled_ovr;
+
+ v->gv_state = RESTARTER_STATE_UNINIT;
+
+ snap = libscf_get_or_make_running_snapshot(inst, v->gv_name, B_TRUE);
+ scf_snapshot_destroy(snap);
+
+ /* Set up the restarter. (Sends _ADD_INSTANCE on success.) */
+ err = graph_change_restarter(v, restarter_fmri, h, &path);
+ if (err != 0) {
+ instance_data_t idata;
+ uint_t count = 0, msecs = ALLOC_DELAY;
+ const char *reason;
+
+ if (err == ECONNABORTED) {
+ startd_free(restarter_fmri, max_scf_value_size);
+ return (err);
+ }
+
+ assert(err == EINVAL || err == ELOOP);
+
+ if (err == EINVAL) {
+ log_framework(LOG_WARNING, emsg_invalid_restarter,
+ v->gv_name);
+ reason = "invalid_restarter";
+ } else {
+ handle_cycle(v->gv_name, path);
+ reason = "dependency_cycle";
+ }
+
+ startd_free(restarter_fmri, max_scf_value_size);
+
+ /*
+ * We didn't register the instance with the restarter, so we
+ * must set maintenance mode ourselves.
+ */
+ err = libscf_instance_get_fmri(inst, (char **)&idata.i_fmri);
+ if (err != 0) {
+ assert(err == ECONNABORTED || err == ECANCELED);
+ return (err);
+ }
+
+ idata.i_state = RESTARTER_STATE_NONE;
+ idata.i_next_state = RESTARTER_STATE_NONE;
+
+set_maint:
+ switch (err = _restarter_commit_states(h, &idata,
+ RESTARTER_STATE_MAINT, RESTARTER_STATE_NONE, reason)) {
+ case 0:
+ break;
+
+ case ENOMEM:
+ ++count;
+ if (count < ALLOC_RETRY) {
+ (void) poll(NULL, 0, msecs);
+ msecs *= ALLOC_DELAY_MULT;
+ goto set_maint;
+ }
+
+ uu_die("Insufficient memory.\n");
+ /* NOTREACHED */
+
+ case ECONNABORTED:
+ return (ECONNABORTED);
+
+ case ENOENT:
+ return (ECANCELED);
+
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ log_error(LOG_NOTICE, "Could not initialize state for "
+ "%s: %s.\n", idata.i_fmri, strerror(err));
+ break;
+
+ case EINVAL:
+ default:
+ bad_error("_restarter_commit_states", err);
+ }
+
+ startd_free((void *)idata.i_fmri, max_scf_fmri_size);
+
+ v->gv_state = RESTARTER_STATE_MAINT;
+
+ goto out;
+ }
+ startd_free(restarter_fmri, max_scf_value_size);
+
+ /* Add all the other dependencies. */
+ err = refresh_vertex(v, inst);
+ if (err != 0) {
+ assert(err == ECONNABORTED);
+ return (err);
+ }
+
+out:
+ v->gv_flags |= GV_CONFIGURED;
+
+ graph_enable_by_vertex(v, enabled, 0);
+
+ return (0);
+}
+
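+/*
+ * Carry out the final shutdown sequence: record that we are resetting by
+ * creating /etc/svc/volatile/resetting, kill dhcpagent unless root is
+ * NFS-mounted, run /usr/sbin/killall (TERM, then KILL), sync, unmount the
+ * remaining filesystems, and finally call uadmin() with the code stored in
+ * halting by dgraph_set_runlevel().
+ */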
+static void
+do_uadmin(void)
+{
+ int fd, left;
+ struct statvfs vfs;
+
+ const char * const resetting = "/etc/svc/volatile/resetting";
+
+ fd = creat(resetting, 0777);
+ if (fd >= 0)
+ startd_close(fd);
+ else
+ uu_warn("Could not create \"%s\"", resetting);
+
+ /* Kill dhcpagent if we're not using nfs for root */
+ if ((statvfs("/", &vfs) == 0) &&
+ (strncmp(vfs.f_basetype, "nfs", sizeof ("nfs") - 1) != 0))
+ (void) system("/usr/bin/pkill -x -u 0 dhcpagent");
+
+ (void) system("/usr/sbin/killall");
+ left = 5;
+ while (left > 0)
+ left = sleep(left);
+
+ (void) system("/usr/sbin/killall 9");
+ left = 10;
+ while (left > 0)
+ left = sleep(left);
+
+ sync();
+ sync();
+ sync();
+
+ (void) system("/sbin/umountall");
+ (void) system("/sbin/umount /tmp >/dev/null 2>&1");
+ (void) system("/sbin/umount /var/adm >/dev/null 2>&1");
+ (void) system("/sbin/umount /var/run >/dev/null 2>&1");
+ (void) system("/sbin/umount /var >/dev/null 2>&1");
+ (void) system("/sbin/umount /usr >/dev/null 2>&1");
+
+ uu_warn("The system is down.\n");
+
+ (void) uadmin(A_SHUTDOWN, halting, NULL);
+ uu_warn("uadmin() failed");
+
+ if (remove(resetting) != 0 && errno != ENOENT)
+ uu_warn("Could not remove \"%s\"", resetting);
+}
+
+/*
+ * If any of the up_svcs[] are online or satisfiable, return true. If they are
+ * all missing, disabled, in maintenance, or unsatisfiable, return false.
+ */
+boolean_t
+can_come_up(void)
+{
+ int i;
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+
+ /*
+ * If we are booting to single user (boot -s),
+ * SCF_MILESTONE_SINGLE_USER is needed to come up because startd
+ * spawns sulogin after single-user is online (see specials.c).
+ */
+ i = (booting_to_single_user ? 0 : 1);
+
+ for (; up_svcs[i] != NULL; ++i) {
+ if (up_svcs_p[i] == NULL) {
+ up_svcs_p[i] = vertex_get_by_name(up_svcs[i]);
+
+ if (up_svcs_p[i] == NULL)
+ continue;
+ }
+
+ /*
+ * Ignore unconfigured services (the ones that have been
+ * mentioned in a dependency from other services, but do
+ * not exist in the repository). Services which exist
+ * in the repository but lack a general/enabled
+ * property are also ignored.
+ */
+ if (!(up_svcs_p[i]->gv_flags & GV_CONFIGURED))
+ continue;
+
+ switch (up_svcs_p[i]->gv_state) {
+ case RESTARTER_STATE_ONLINE:
+ case RESTARTER_STATE_DEGRADED:
+ /*
+ * Deactivate verbose boot once a login service has been
+ * reached.
+ */
+ st->st_log_login_reached = 1;
+ /*FALLTHROUGH*/
+ case RESTARTER_STATE_UNINIT:
+ return (B_TRUE);
+
+ case RESTARTER_STATE_OFFLINE:
+ if (instance_satisfied(up_svcs_p[i], B_TRUE) != -1)
+ return (B_TRUE);
+ log_framework(LOG_DEBUG,
+ "can_come_up(): %s is unsatisfiable.\n",
+ up_svcs_p[i]->gv_name);
+ continue;
+
+ case RESTARTER_STATE_DISABLED:
+ case RESTARTER_STATE_MAINT:
+ log_framework(LOG_DEBUG,
+ "can_come_up(): %s is in state %s.\n",
+ up_svcs_p[i]->gv_name,
+ instance_state_str[up_svcs_p[i]->gv_state]);
+ continue;
+
+ default:
+#ifndef NDEBUG
+ uu_warn("%s:%d: Unexpected vertex state %d.\n",
+ __FILE__, __LINE__, up_svcs_p[i]->gv_state);
+#endif
+ abort();
+ }
+ }
+
+ /*
+ * In the seed repository, console-login is unsatisfiable because
+ * services are missing. To behave correctly in that case we don't want
+ * to return false until manifest-import is online.
+ */
+
+ if (manifest_import_p == NULL) {
+ manifest_import_p = vertex_get_by_name(manifest_import);
+
+ if (manifest_import_p == NULL)
+ return (B_FALSE);
+ }
+
+ switch (manifest_import_p->gv_state) {
+ case RESTARTER_STATE_ONLINE:
+ case RESTARTER_STATE_DEGRADED:
+ case RESTARTER_STATE_DISABLED:
+ case RESTARTER_STATE_MAINT:
+ break;
+
+ case RESTARTER_STATE_OFFLINE:
+ if (instance_satisfied(manifest_import_p, B_TRUE) == -1)
+ break;
+ /* FALLTHROUGH */
+
+ case RESTARTER_STATE_UNINIT:
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+/*
+ * Runs sulogin. Returns
+ * 0 - success
+ * EALREADY - sulogin is already running
+ * EBUSY - console-login is running
+ */
+static int
+run_sulogin(const char *msg)
+{
+ graph_vertex_t *v;
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+
+ if (sulogin_running)
+ return (EALREADY);
+
+ v = vertex_get_by_name(console_login_fmri);
+ if (v != NULL && inst_running(v))
+ return (EBUSY);
+
+ sulogin_running = B_TRUE;
+
+ MUTEX_UNLOCK(&dgraph_lock);
+
+ fork_sulogin(B_FALSE, msg);
+
+ MUTEX_LOCK(&dgraph_lock);
+
+ sulogin_running = B_FALSE;
+
+ if (console_login_ready) {
+ v = vertex_get_by_name(console_login_fmri);
+
+ if (v != NULL && v->gv_state == RESTARTER_STATE_OFFLINE &&
+ !inst_running(v)) {
+ if (v->gv_start_f == NULL)
+ vertex_send_event(v,
+ RESTARTER_EVENT_TYPE_START);
+ else
+ v->gv_start_f(v);
+ }
+
+ console_login_ready = B_FALSE;
+ }
+
+ return (0);
+}
+
+/*
+ * The sulogin thread runs sulogin while can_come_up() is false. run_sulogin()
+ * keeps sulogin from stepping on console-login's toes.
+ */
+/* ARGSUSED */
+static void *
+sulogin_thread(void *unused)
+{
+ MUTEX_LOCK(&dgraph_lock);
+
+ assert(sulogin_thread_running);
+
+ do
+ (void) run_sulogin("Console login service(s) cannot run\n");
+ while (!can_come_up());
+
+ sulogin_thread_running = B_FALSE;
+ MUTEX_UNLOCK(&dgraph_lock);
+
+ return (NULL);
+}
+
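+/*
+ * Thread which takes the system to single user or run level 1.  Unless we
+ * booted straight to single user, it first terminates user processes the way
+ * the legacy rcS.sh did, runs the rc1 "start" script if we are headed for
+ * run level 1, and then loops on run_sulogin() until sulogin exits.  The
+ * last such thread to finish clears the temporary options_ovr/milestone
+ * property and restores the configured milestone.
+ */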
+/* ARGSUSED */
+void *
+single_user_thread(void *unused)
+{
+ uint_t left;
+ scf_handle_t *h;
+ scf_instance_t *inst;
+ scf_property_t *prop;
+ scf_value_t *val;
+ const char *msg;
+ char *buf;
+ int r;
+
+ MUTEX_LOCK(&single_user_thread_lock);
+ single_user_thread_count++;
+
+ if (!booting_to_single_user) {
+ /*
+ * From rcS.sh: Look for ttymon, in.telnetd, in.rlogind and
+ * processes in their process groups so they can be terminated.
+ */
+ (void) fputs("svc.startd: Killing user processes: ", stdout);
+ (void) system("/usr/sbin/killall");
+ (void) system("/usr/sbin/killall 9");
+ (void) system("/usr/bin/pkill -TERM -v -u 0,1");
+
+ left = 5;
+ while (left > 0)
+ left = sleep(left);
+
+ (void) system("/usr/bin/pkill -KILL -v -u 0,1");
+ (void) puts("done.");
+ }
+
+ if (go_single_user_mode || booting_to_single_user) {
+ msg = "SINGLE USER MODE\n";
+ } else {
+ assert(go_to_level1);
+
+ fork_rc_script('1', "start", B_TRUE);
+
+ uu_warn("The system is ready for administration.\n");
+
+ msg = "";
+ }
+
+ MUTEX_UNLOCK(&single_user_thread_lock);
+
+ for (;;) {
+ MUTEX_LOCK(&dgraph_lock);
+ r = run_sulogin(msg);
+ MUTEX_UNLOCK(&dgraph_lock);
+ if (r == 0)
+ break;
+
+ assert(r == EALREADY || r == EBUSY);
+
+ left = 3;
+ while (left > 0)
+ left = sleep(left);
+ }
+
+ MUTEX_LOCK(&single_user_thread_lock);
+
+ /*
+ * If another single user thread has started, let it finish changing
+ * the run level.
+ */
+ if (single_user_thread_count > 1) {
+ single_user_thread_count--;
+ MUTEX_UNLOCK(&single_user_thread_lock);
+ return (NULL);
+ }
+
+ h = libscf_handle_create_bound_loop();
+ inst = scf_instance_create(h);
+ prop = safe_scf_property_create(h);
+ val = safe_scf_value_create(h);
+ buf = startd_alloc(max_scf_fmri_size);
+
+lookup:
+ if (scf_handle_decode_fmri(h, SCF_SERVICE_STARTD, NULL, NULL, inst,
+ NULL, NULL, SCF_DECODE_FMRI_EXACT) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ r = libscf_create_self(h);
+ if (r == 0)
+ goto lookup;
+ assert(r == ECONNABORTED);
+ /* FALLTHROUGH */
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ libscf_handle_rebind(h);
+ goto lookup;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ default:
+ bad_error("scf_handle_decode_fmri", scf_error());
+ }
+ }
+
+ MUTEX_LOCK(&dgraph_lock);
+
+ r = libscf_inst_delete_prop(inst, SCF_PG_OPTIONS_OVR,
+ SCF_PROPERTY_MILESTONE);
+ switch (r) {
+ case 0:
+ case ECANCELED:
+ break;
+
+ case ECONNABORTED:
+ MUTEX_UNLOCK(&dgraph_lock);
+ libscf_handle_rebind(h);
+ goto lookup;
+
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ log_error(LOG_WARNING, "Could not clear temporary milestone: "
+ "%s.\n", strerror(r));
+ break;
+
+ default:
+ bad_error("libscf_inst_delete_prop", r);
+ }
+
+ MUTEX_UNLOCK(&dgraph_lock);
+
+ r = libscf_get_milestone(inst, prop, val, buf, max_scf_fmri_size);
+ switch (r) {
+ case ECANCELED:
+ case ENOENT:
+ case EINVAL:
+ (void) strcpy(buf, "all");
+ /* FALLTHROUGH */
+
+ case 0:
+ uu_warn("Returning to milestone %s.\n", buf);
+ break;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(h);
+ goto lookup;
+
+ default:
+ bad_error("libscf_get_milestone", r);
+ }
+
+ r = dgraph_set_milestone(buf, h, B_FALSE);
+ switch (r) {
+ case 0:
+ case ECONNRESET:
+ case EALREADY:
+ case EINVAL:
+ case ENOENT:
+ break;
+
+ default:
+ bad_error("dgraph_set_milestone", r);
+ }
+
+ /*
+ * See graph_runlevel_changed().
+ */
+ MUTEX_LOCK(&dgraph_lock);
+ utmpx_set_runlevel(target_milestone_as_runlevel(), 'S', B_TRUE);
+ MUTEX_UNLOCK(&dgraph_lock);
+
+ startd_free(buf, max_scf_fmri_size);
+ scf_value_destroy(val);
+ scf_property_destroy(prop);
+ scf_instance_destroy(inst);
+ scf_handle_destroy(h);
+
+ /*
+ * We'll give ourselves 3 seconds to respond to all of the enablings
+ * that setting the milestone should have created before checking
+ * whether to run sulogin.
+ */
+ left = 3;
+ while (left > 0)
+ left = sleep(left);
+
+ MUTEX_LOCK(&dgraph_lock);
+ /*
+ * Clearing these variables will allow the sulogin thread to run. We
+ * check here in case there aren't any more state updates anytime soon.
+ */
+ go_to_level1 = go_single_user_mode = booting_to_single_user = B_FALSE;
+ if (!sulogin_thread_running && !can_come_up()) {
+ (void) startd_thread_create(sulogin_thread, NULL);
+ sulogin_thread_running = B_TRUE;
+ }
+ MUTEX_UNLOCK(&dgraph_lock);
+ single_user_thread_count--;
+ MUTEX_UNLOCK(&single_user_thread_lock);
+ return (NULL);
+}
+
+
+/*
+ * Dependency graph operations API. These are handle-independent thread-safe
+ * graph manipulation functions which are the entry points for the event
+ * threads below.
+ */
+
+/*
+ * If a configured vertex exists for inst_fmri, return EEXIST. If no vertex
+ * exists for inst_fmri, add one. Then fetch the restarter from inst, make
+ * this vertex dependent on it, and send _ADD_INSTANCE to the restarter.
+ * Fetch whether the instance should be enabled from inst and send _ENABLE or
+ * _DISABLE as appropriate. Finally rummage through inst's dependency
+ * property groups and add vertices and edges as appropriate. If anything
+ * goes wrong after sending _ADD_INSTANCE, send _ADMIN_MAINT_ON to put the
+ * instance in maintenance. Don't send _START or _STOP until we get a state
+ * update in case we're being restarted and the service is already running.
+ *
+ * To support booting to a milestone, we must also make sure all dependencies
+ * encountered are configured, if they exist in the repository.
+ *
+ * Returns 0 on success, ECONNABORTED on repository disconnection, EINVAL if
+ * inst_fmri is an invalid (or not canonical) FMRI, ECANCELED if inst is
+ * deleted, or EEXIST if a configured vertex for inst_fmri already exists.
+ */
+int
+dgraph_add_instance(const char *inst_fmri, scf_instance_t *inst,
+ boolean_t lock_graph)
+{
+ graph_vertex_t *v;
+ int err;
+
+ if (strcmp(inst_fmri, SCF_SERVICE_STARTD) == 0)
+ return (0);
+
+ /* Check for a vertex for inst_fmri. */
+ if (lock_graph) {
+ MUTEX_LOCK(&dgraph_lock);
+ } else {
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+ }
+
+ v = vertex_get_by_name(inst_fmri);
+
+ if (v != NULL) {
+ assert(v->gv_type == GVT_INST);
+
+ if (v->gv_flags & GV_CONFIGURED) {
+ if (lock_graph)
+ MUTEX_UNLOCK(&dgraph_lock);
+ return (EEXIST);
+ }
+ } else {
+ /* Add the vertex. */
+ err = graph_insert_vertex_unconfigured(inst_fmri, GVT_INST, 0,
+ RERR_NONE, &v);
+ if (err != 0) {
+ assert(err == EINVAL);
+ if (lock_graph)
+ MUTEX_UNLOCK(&dgraph_lock);
+ return (EINVAL);
+ }
+ }
+
+ err = configure_vertex(v, inst);
+
+ if (lock_graph)
+ MUTEX_UNLOCK(&dgraph_lock);
+
+ return (err);
+}
+
+/*
+ * Locate the vertex for this property group's instance. If it doesn't exist
+ * or is unconfigured, call dgraph_add_instance() & return. Otherwise fetch
+ * the restarter for the instance, and if it has changed, send
+ * _REMOVE_INSTANCE to the old restarter, remove the dependency, make sure the
+ * new restarter has a vertex, add a new dependency, and send _ADD_INSTANCE to
+ * the new restarter. Then fetch whether the instance should be enabled, and
+ * if it is different from what we had, or if we changed the restarter, send
+ * the appropriate _ENABLE or _DISABLE command.
+ *
+ * Returns 0 on success, ENOTSUP if the pg's parent is not an instance,
+ * ECONNABORTED on repository disconnection, ECANCELED if the instance is
+ * deleted, or -1 if the instance's general property group is deleted or if
+ * its enabled property is misconfigured.
+ */
+static int
+dgraph_update_general(scf_propertygroup_t *pg)
+{
+ scf_handle_t *h;
+ scf_instance_t *inst;
+ char *fmri;
+ char *restarter_fmri;
+ graph_vertex_t *v;
+ int err;
+ int enabled, enabled_ovr;
+ int oldflags;
+
+ /* Find the vertex for this service */
+ h = scf_pg_handle(pg);
+
+ inst = safe_scf_instance_create(h);
+
+ if (scf_pg_get_parent_instance(pg, inst) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ return (ENOTSUP);
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ return (0);
+
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_pg_get_parent_instance", scf_error());
+ }
+ }
+
+ err = libscf_instance_get_fmri(inst, &fmri);
+ switch (err) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ scf_instance_destroy(inst);
+ return (ECONNABORTED);
+
+ case ECANCELED:
+ scf_instance_destroy(inst);
+ return (0);
+
+ default:
+ bad_error("libscf_instance_get_fmri", err);
+ }
+
+ log_framework(LOG_DEBUG,
+ "Graph engine: Reloading general properties for %s.\n", fmri);
+
+ MUTEX_LOCK(&dgraph_lock);
+
+ v = vertex_get_by_name(fmri);
+ if (v == NULL || !(v->gv_flags & GV_CONFIGURED)) {
+ /* Will get the up-to-date properties. */
+ MUTEX_UNLOCK(&dgraph_lock);
+ err = dgraph_add_instance(fmri, inst, B_TRUE);
+ startd_free(fmri, max_scf_fmri_size);
+ scf_instance_destroy(inst);
+ return (err == ECANCELED ? 0 : err);
+ }
+
+ /* Read enabled & restarter from repository. */
+ restarter_fmri = startd_alloc(max_scf_value_size);
+ err = libscf_get_basic_instance_data(h, inst, v->gv_name, &enabled,
+ &enabled_ovr, &restarter_fmri);
+ if (err != 0 || enabled == -1) {
+ MUTEX_UNLOCK(&dgraph_lock);
+ scf_instance_destroy(inst);
+ startd_free(fmri, max_scf_fmri_size);
+
+ switch (err) {
+ case ENOENT:
+ case 0:
+ startd_free(restarter_fmri, max_scf_value_size);
+ return (-1);
+
+ case ECONNABORTED:
+ case ECANCELED:
+ startd_free(restarter_fmri, max_scf_value_size);
+ return (err);
+
+ default:
+ bad_error("libscf_get_basic_instance_data", err);
+ }
+ }
+
+ oldflags = v->gv_flags;
+ v->gv_flags = (v->gv_flags & ~GV_ENBLD_NOOVR) |
+ (enabled ? GV_ENBLD_NOOVR : 0);
+
+ if (enabled_ovr != -1)
+ enabled = enabled_ovr;
+
+ /*
+ * If GV_ENBLD_NOOVR has changed, then we need to re-evaluate the
+ * subgraph.
+ */
+ if (milestone > MILESTONE_NONE && v->gv_flags != oldflags)
+ (void) eval_subgraph(v, h);
+
+ scf_instance_destroy(inst);
+
+ /* Ignore restarter change for now. */
+
+ startd_free(restarter_fmri, max_scf_value_size);
+ startd_free(fmri, max_scf_fmri_size);
+
+ /*
+ * Always send _ENABLE or _DISABLE. We could avoid this if the
+ * restarter didn't change and the enabled value didn't change, but
+ * that's not easy to check and improbable anyway, so we'll just do
+ * this.
+ */
+ graph_enable_by_vertex(v, enabled, 1);
+
+ MUTEX_UNLOCK(&dgraph_lock);
+
+ return (0);
+}
+
+/*
+ * Delete all of the property group dependencies of v, update inst's running
+ * snapshot, and add the dependencies in the new snapshot. If any of the new
+ * dependencies would create a cycle, send _ADMIN_MAINT_ON. Otherwise
+ * reevaluate v's dependencies, send _START or _STOP as appropriate, and do
+ * the same for v's dependents.
+ *
+ * Returns
+ * 0 - success
+ * ECONNABORTED - repository connection broken
+ * ECANCELED - inst was deleted
+ * EINVAL - inst is invalid (e.g., missing general/enabled)
+ * -1 - libscf_snapshots_refresh() failed
+ */
+static int
+dgraph_refresh_instance(graph_vertex_t *v, scf_instance_t *inst)
+{
+ int r;
+ int enabled;
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+ assert(v->gv_type == GVT_INST);
+
+ /* Only refresh services with valid general/enabled properties. */
+ r = libscf_get_basic_instance_data(scf_instance_handle(inst), inst,
+ v->gv_name, &enabled, NULL, NULL);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ case ECANCELED:
+ return (r);
+
+ case ENOENT:
+ log_framework(LOG_DEBUG,
+ "Ignoring %s because it has no general property group.\n",
+ v->gv_name);
+ return (EINVAL);
+
+ default:
+ bad_error("libscf_get_basic_instance_data", r);
+ }
+
+ if (enabled == -1)
+ return (EINVAL);
+
+ r = libscf_snapshots_refresh(inst, v->gv_name);
+ if (r != 0) {
+ if (r != -1)
+ bad_error("libscf_snapshots_refresh", r);
+
+ /* error logged */
+ return (r);
+ }
+
+ r = refresh_vertex(v, inst);
+ if (r != 0 && r != ECONNABORTED)
+ bad_error("refresh_vertex", r);
+ return (r);
+}
+
+/*
+ * Returns 1 if any instances which directly depend on the passed instance
+ * (or its service) are running.
+ */
+static int
+has_running_nonsubgraph_dependents(graph_vertex_t *v)
+{
+ graph_vertex_t *vv;
+ graph_edge_t *e;
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+
+ for (e = uu_list_first(v->gv_dependents);
+ e != NULL;
+ e = uu_list_next(v->gv_dependents, e)) {
+
+ vv = e->ge_vertex;
+ if (vv->gv_type == GVT_INST) {
+ if (inst_running(vv) &&
+ ((vv->gv_flags & GV_INSUBGRAPH) == 0))
+ return (1);
+ } else {
+ /*
+ * For dependency group or service vertices, keep
+ * traversing to see if instances are running.
+ */
+ if (has_running_nonsubgraph_dependents(vv))
+ return (1);
+ }
+ }
+ return (0);
+}
+
+/*
+ * Disable the instance which makes up this dependency if it is running and
+ * not in the subgraph.  If the dependency instance is in the subgraph or is
+ * not running, continue by disabling all of its non-subgraph dependencies.
+ */
+static void
+disable_nonsubgraph_dependencies(graph_vertex_t *v, void *arg)
+{
+ int r;
+ scf_handle_t *h = (scf_handle_t *)arg;
+ scf_instance_t *inst = NULL;
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+
+ /* Continue recursing non-inst nodes */
+ if (v->gv_type != GVT_INST)
+ goto recurse;
+
+ /*
+ * For instances that are in the subgraph or already not running,
+ * skip them and attempt to disable their non-subgraph dependencies.
+ */
+ if ((v->gv_flags & GV_INSUBGRAPH) || (!inst_running(v)))
+ goto recurse;
+
+ /*
+ * If any of this instance's dependents are still running,
+ * do not disable it.
+ */
+ if (has_running_nonsubgraph_dependents(v))
+ return;
+
+ inst = scf_instance_create(h);
+ if (inst == NULL) {
+ log_error(LOG_WARNING, "Unable to gracefully disable instance:"
+ " %s due to lack of resources\n", v->gv_name);
+ goto disable;
+ }
+again:
+ r = scf_handle_decode_fmri(h, v->gv_name, NULL, NULL, inst,
+ NULL, NULL, SCF_DECODE_FMRI_EXACT);
+ if (r != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ libscf_handle_rebind(h);
+ goto again;
+
+ case SCF_ERROR_NOT_FOUND:
+ goto recurse;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("scf_handle_decode_fmri",
+ scf_error());
+ }
+ }
+ r = libscf_set_enable_ovr(inst, 0);
+ switch (r) {
+ case 0:
+ scf_instance_destroy(inst);
+ return;
+ case ECANCELED:
+ scf_instance_destroy(inst);
+ goto recurse;
+ case ECONNABORTED:
+ libscf_handle_rebind(h);
+ goto again;
+ case EPERM:
+ case EROFS:
+ log_error(LOG_WARNING,
+ "Could not set %s/%s for %s: %s.\n",
+ SCF_PG_GENERAL_OVR, SCF_PROPERTY_ENABLED,
+ v->gv_name, strerror(r));
+ goto disable;
+ default:
+ bad_error("libscf_set_enable_ovr", r);
+ }
+disable:
+ graph_enable_by_vertex(v, 0, 0);
+ return;
+recurse:
+ graph_walk_dependencies(v, disable_nonsubgraph_dependencies,
+ arg);
+}
+
+/*
+ * Find the vertex for inst_name. If it doesn't exist, return ENOENT.
+ * Otherwise set its state to state. If the instance has entered a state
+ * which requires automatic action, take it (Uninitialized: do
+ * dgraph_refresh_instance() without the snapshot update. Disabled: if the
+ * instance should be enabled, send _ENABLE. Offline: if the instance should
+ * be disabled, send _DISABLE, and if its dependencies are satisfied, send
+ * _START. Online, Degraded: if the instance wasn't running, update its start
+ * snapshot. Maintenance: no action.)
+ *
+ * Also fails with ECONNABORTED, or EINVAL if state is invalid.
+ */
+static int
+dgraph_set_instance_state(scf_handle_t *h, const char *inst_name,
+ restarter_instance_state_t state, restarter_error_t serr)
+{
+ graph_vertex_t *v;
+ int err = 0, r;
+ int was_running, up_or_down;
+ restarter_instance_state_t old_state;
+
+ MUTEX_LOCK(&dgraph_lock);
+
+ v = vertex_get_by_name(inst_name);
+ if (v == NULL) {
+ MUTEX_UNLOCK(&dgraph_lock);
+ return (ENOENT);
+ }
+
+ switch (state) {
+ case RESTARTER_STATE_UNINIT:
+ case RESTARTER_STATE_DISABLED:
+ case RESTARTER_STATE_OFFLINE:
+ case RESTARTER_STATE_ONLINE:
+ case RESTARTER_STATE_DEGRADED:
+ case RESTARTER_STATE_MAINT:
+ break;
+
+ default:
+ MUTEX_UNLOCK(&dgraph_lock);
+ return (EINVAL);
+ }
+
+ log_framework(LOG_DEBUG, "Graph noting %s %s -> %s.\n", v->gv_name,
+ instance_state_str[v->gv_state], instance_state_str[state]);
+
+ old_state = v->gv_state;
+ was_running = inst_running(v);
+
+ v->gv_state = state;
+
+ up_or_down = was_running ^ inst_running(v);
+
+ if (up_or_down && milestone != NULL && !inst_running(v) &&
+ ((v->gv_flags & GV_INSUBGRAPH) == 0 ||
+ milestone == MILESTONE_NONE)) {
+ --non_subgraph_svcs;
+ if (non_subgraph_svcs == 0) {
+ if (halting != -1) {
+ do_uadmin();
+ } else if (go_single_user_mode || go_to_level1) {
+ (void) startd_thread_create(single_user_thread,
+ NULL);
+ }
+ } else {
+ graph_walk_dependencies(v,
+ disable_nonsubgraph_dependencies, (void *)h);
+ }
+ }
+
+ switch (state) {
+ case RESTARTER_STATE_UNINIT: {
+ scf_instance_t *inst;
+
+ /* Initialize instance by refreshing it. */
+
+ err = libscf_fmri_get_instance(h, v->gv_name, &inst);
+ switch (err) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ MUTEX_UNLOCK(&dgraph_lock);
+ return (ECONNABORTED);
+
+ case ENOENT:
+ MUTEX_UNLOCK(&dgraph_lock);
+ return (0);
+
+ case EINVAL:
+ case ENOTSUP:
+ default:
+ bad_error("libscf_fmri_get_instance", err);
+ }
+
+ err = refresh_vertex(v, inst);
+ if (err == 0)
+ graph_enable_by_vertex(v, v->gv_flags & GV_ENABLED, 0);
+
+ scf_instance_destroy(inst);
+ break;
+ }
+
+ case RESTARTER_STATE_DISABLED:
+ /*
+ * If the instance should be disabled, no problem. Otherwise,
+ * send an enable command, which should result in the instance
+ * moving to OFFLINE.
+ */
+ if (v->gv_flags & GV_ENABLED) {
+ vertex_send_event(v, RESTARTER_EVENT_TYPE_ENABLE);
+ } else if (was_running && v->gv_post_disable_f) {
+ v->gv_post_disable_f();
+ }
+ break;
+
+ case RESTARTER_STATE_OFFLINE:
+ /*
+ * If the instance should be enabled, see if we can start it.
+ * Otherwise send a disable command.
+ */
+ if (v->gv_flags & GV_ENABLED) {
+ if (instance_satisfied(v, B_FALSE) == 1) {
+ if (v->gv_start_f == NULL) {
+ vertex_send_event(v,
+ RESTARTER_EVENT_TYPE_START);
+ } else {
+ v->gv_start_f(v);
+ }
+ } else {
+ log_framework(LOG_DEBUG,
+ "Dependencies of %s not satisfied, "
+ "not starting.\n", v->gv_name);
+ }
+ } else {
+ if (was_running && v->gv_post_disable_f)
+ v->gv_post_disable_f();
+ vertex_send_event(v, RESTARTER_EVENT_TYPE_DISABLE);
+ }
+ break;
+
+ case RESTARTER_STATE_ONLINE:
+ case RESTARTER_STATE_DEGRADED:
+ /*
+ * If the instance has just come up, update the start
+ * snapshot.
+ */
+ if (!was_running) {
+ /*
+ * Don't fire if we're just recovering state
+ * after a restart.
+ */
+ if (old_state != RESTARTER_STATE_UNINIT &&
+ v->gv_post_online_f)
+ v->gv_post_online_f();
+
+ r = libscf_snapshots_poststart(h, v->gv_name, B_TRUE);
+ switch (r) {
+ case 0:
+ case ENOENT:
+ /*
+ * If ENOENT, the instance must have been
+ * deleted. Pretend we were successful since
+ * we should get a delete event later.
+ */
+ break;
+
+ case ECONNABORTED:
+ MUTEX_UNLOCK(&dgraph_lock);
+ return (ECONNABORTED);
+
+ case EACCES:
+ case ENOTSUP:
+ default:
+ bad_error("libscf_snapshots_poststart", r);
+ }
+ }
+ if (!(v->gv_flags & GV_ENABLED))
+ vertex_send_event(v, RESTARTER_EVENT_TYPE_DISABLE);
+ break;
+
+ case RESTARTER_STATE_MAINT:
+ /* No action. */
+ break;
+
+ default:
+ /* Should have been caught above. */
+#ifndef NDEBUG
+ uu_warn("%s:%d: Uncaught case %d.\n", __FILE__, __LINE__,
+ state);
+#endif
+ abort();
+ }
+
+ /*
+ * If the service came up or went down, propagate the event. We must
+ * treat offline -> disabled as a start since it can satisfy
+ * optional_all dependencies. And we must treat !running -> maintenance
+ * as a start because maintenance satisfies optional and exclusion
+ * dependencies.
+ */
+ if (inst_running(v)) {
+ if (!was_running) {
+ log_framework(LOG_DEBUG, "Propagating start of %s.\n",
+ v->gv_name);
+
+ graph_walk_dependents(v, propagate_start, NULL);
+ } else if (serr == RERR_REFRESH) {
+ /* For refresh we'll get a message sans state change */
+
+ log_framework(LOG_DEBUG, "Propagating refresh of %s.\n",
+ v->gv_name);
+
+ graph_walk_dependents(v, propagate_stop, (void *)serr);
+ }
+ } else if (was_running) {
+ log_framework(LOG_DEBUG, "Propagating stop of %s.\n",
+ v->gv_name);
+
+ graph_walk_dependents(v, propagate_stop, (void *)serr);
+ } else if (v->gv_state == RESTARTER_STATE_DISABLED) {
+ log_framework(LOG_DEBUG, "Propagating disable of %s.\n",
+ v->gv_name);
+
+ graph_walk_dependents(v, propagate_start, NULL);
+ propagate_satbility(v);
+ } else if (v->gv_state == RESTARTER_STATE_MAINT) {
+ log_framework(LOG_DEBUG, "Propagating maintenance of %s.\n",
+ v->gv_name);
+
+ graph_walk_dependents(v, propagate_start, NULL);
+ propagate_satbility(v);
+ }
+
+ if (state != old_state && st->st_load_complete &&
+ !go_single_user_mode && !go_to_level1 &&
+ halting == -1) {
+ if (!can_come_up() && !sulogin_thread_running) {
+ (void) startd_thread_create(sulogin_thread, NULL);
+ sulogin_thread_running = B_TRUE;
+ }
+ }
+
+ MUTEX_UNLOCK(&dgraph_lock);
+
+ return (err);
+}
+
+
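+/*
+ * Remove the instance vertex v from the graph: detach it from its service
+ * vertex, clear any cached pointers to it (up_svcs_p[], manifest_import_p),
+ * and delete it.  If the service vertex is left without dependencies or
+ * dependents, remove it as well.
+ */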
+static void
+remove_inst_vertex(graph_vertex_t *v)
+{
+ graph_edge_t *e;
+ graph_vertex_t *sv;
+ int i;
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+ assert(uu_list_numnodes(v->gv_dependents) == 1);
+
+ e = uu_list_first(v->gv_dependents);
+ sv = e->ge_vertex;
+ graph_remove_edge(sv, v);
+
+ for (i = 0; up_svcs[i] != NULL; ++i) {
+ if (up_svcs_p[i] == v)
+ up_svcs_p[i] = NULL;
+ }
+
+ if (manifest_import_p == v)
+ manifest_import_p = NULL;
+
+ graph_remove_vertex(v);
+
+ if (uu_list_numnodes(sv->gv_dependencies) == 0 &&
+ uu_list_numnodes(sv->gv_dependents) == 0)
+ graph_remove_vertex(sv);
+}
+
+/*
+ * If a vertex for fmri exists and it is enabled, send _DISABLE to the
+ * restarter. If it is running, send _STOP. Send _REMOVE_INSTANCE. Delete
+ * all property group dependencies, and the dependency on the restarter,
+ * disposing of vertices as appropriate. If other vertices depend on this
+ * one, mark it unconfigured and return. Otherwise remove the vertex. Always
+ * returns 0.
+ */
+static int
+dgraph_remove_instance(const char *fmri, scf_handle_t *h)
+{
+ graph_vertex_t *v;
+ graph_edge_t *e;
+ uu_list_t *old_deps;
+ int err;
+
+ log_framework(LOG_DEBUG, "Graph engine: Removing %s.\n", fmri);
+
+ MUTEX_LOCK(&dgraph_lock);
+
+ v = vertex_get_by_name(fmri);
+ if (v == NULL) {
+ MUTEX_UNLOCK(&dgraph_lock);
+ return (0);
+ }
+
+ /* Send restarter delete event. */
+ if (v->gv_flags & GV_CONFIGURED)
+ graph_unset_restarter(v);
+
+ if (milestone > MILESTONE_NONE) {
+ /*
+ * Make a list of v's current dependencies so we can
+ * reevaluate their GV_INSUBGRAPH flags after the dependencies
+ * are removed.
+ */
+ old_deps = startd_list_create(graph_edge_pool, NULL, 0);
+
+ err = uu_list_walk(v->gv_dependencies,
+ (uu_walk_fn_t *)append_insts, old_deps, 0);
+ assert(err == 0);
+ }
+
+ delete_instance_dependencies(v, B_TRUE);
+
+ /*
+ * Deleting an instance can both satisfy and unsatisfy dependencies,
+ * depending on their type. First propagate the stop as a RERR_RESTART
+ * event -- deletion isn't a fault, just a normal stop. This gives
+ * dependent services the chance to do a clean shutdown. Then, mark
+ * the service as unconfigured and propagate the start event for the
+ * optional_all dependencies that might have become satisfied.
+ */
+ graph_walk_dependents(v, propagate_stop, (void *)RERR_RESTART);
+
+ v->gv_flags &= ~GV_CONFIGURED;
+
+ graph_walk_dependents(v, propagate_start, NULL);
+ propagate_satbility(v);
+
+ /*
+ * If there are no (non-service) dependents, the vertex can be
+ * completely removed.
+ */
+ if (v != milestone && uu_list_numnodes(v->gv_dependents) == 1)
+ remove_inst_vertex(v);
+
+ if (milestone > MILESTONE_NONE) {
+ void *cookie = NULL;
+
+ while ((e = uu_list_teardown(old_deps, &cookie)) != NULL) {
+ while (eval_subgraph(e->ge_vertex, h) == ECONNABORTED)
+ libscf_handle_rebind(h);
+
+ startd_free(e, sizeof (*e));
+ }
+
+ uu_list_destroy(old_deps);
+ }
+
+ MUTEX_UNLOCK(&dgraph_lock);
+
+ return (0);
+}
+
+/*
+ * Return the eventual (maybe current) milestone in the form of a
+ * legacy runlevel.
+ */
+static char
+target_milestone_as_runlevel()
+{
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+
+ if (milestone == NULL)
+ return ('3');
+ else if (milestone == MILESTONE_NONE)
+ return ('0');
+
+ if (strcmp(milestone->gv_name, multi_user_fmri) == 0)
+ return ('2');
+ else if (strcmp(milestone->gv_name, single_user_fmri) == 0)
+ return ('S');
+ else if (strcmp(milestone->gv_name, multi_user_svr_fmri) == 0)
+ return ('3');
+
+#ifndef NDEBUG
+ (void) fprintf(stderr, "%s:%d: Unknown milestone name \"%s\".\n",
+ __FILE__, __LINE__, milestone->gv_name);
+#endif
+ abort();
+ /* NOTREACHED */
+}
+
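+/*
+ * Map from legacy run level to the signal signal_init() sends to init when
+ * that run level is reached.
+ */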
+static struct {
+ char rl;
+ int sig;
+} init_sigs[] = {
+ { 'S', SIGBUS },
+ { '0', SIGINT },
+ { '1', SIGQUIT },
+ { '2', SIGILL },
+ { '3', SIGTRAP },
+ { '4', SIGIOT },
+ { '5', SIGEMT },
+ { '6', SIGFPE },
+ { 0, 0 }
+};
+
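+/*
+ * Send init the signal which corresponds to run level rl.  init's pid is
+ * looked up with zone_getattr(ZONE_ATTR_INITPID); failure to find or signal
+ * init is logged and otherwise ignored.
+ */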
+static void
+signal_init(char rl)
+{
+ pid_t init_pid;
+ int i;
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+
+ if (zone_getattr(getzoneid(), ZONE_ATTR_INITPID, &init_pid,
+ sizeof (init_pid)) != sizeof (init_pid)) {
+ log_error(LOG_NOTICE, "Could not get pid to signal init.\n");
+ return;
+ }
+
+ for (i = 0; init_sigs[i].rl != 0; ++i)
+ if (init_sigs[i].rl == rl)
+ break;
+
+ if (init_sigs[i].rl != 0) {
+ if (kill(init_pid, init_sigs[i].sig) != 0) {
+ switch (errno) {
+ case EPERM:
+ case ESRCH:
+ log_error(LOG_NOTICE, "Could not signal init: "
+ "%s.\n", strerror(errno));
+ break;
+
+ case EINVAL:
+ default:
+ bad_error("kill", errno);
+ }
+ }
+ }
+}
+
+/*
+ * This is called when one of the major milestones changes state, or when
+ * init is signalled and tells us it was told to change runlevel. We wait
+ * to reach the milestone because this allows /etc/inittab entries to retain
+ * some boot ordering: historically, entries could place themselves before/after
+ * the running of /sbin/rcX scripts but we can no longer make the
+ * distinction because the /sbin/rcX scripts no longer exist as punctuation
+ * marks in /etc/inittab.
+ *
+ * Also, we only trigger an update when we reach the eventual target
+ * milestone: without this, an /etc/inittab entry marked only for
+ * runlevel 2 would be executed for runlevel 3, which is not how
+ * /etc/inittab entries work.
+ *
+ * If we're single user coming online, then we set utmpx to the target
+ * runlevel so that legacy scripts can work as expected.
+ */
+static void
+graph_runlevel_changed(char rl, int online)
+{
+ char trl;
+
+ assert(PTHREAD_MUTEX_HELD(&dgraph_lock));
+
+ trl = target_milestone_as_runlevel();
+
+ if (online) {
+ if (rl == trl) {
+ signal_init(trl);
+ current_runlevel = rl;
+ } else if (rl == 'S') {
+ /*
+ * At boot, set the entry early for the benefit of the
+ * legacy init scripts.
+ */
+ utmpx_set_runlevel(trl, 'S', B_FALSE);
+ }
+ } else {
+ if (rl == '3' && trl == '2') {
+ signal_init(trl);
+ current_runlevel = rl;
+ } else if (rl == '2' && trl == 'S') {
+ signal_init(trl);
+ current_runlevel = rl;
+ }
+ }
+}
+
+/*
+ * Move to a backwards-compatible runlevel by executing the appropriate
+ * /etc/rc?.d/K* scripts and/or setting the milestone.
+ *
+ * Returns
+ * 0 - success
+ * ECONNRESET - success, but handle was reset
+ * ECONNABORTED - repository connection broken
+ * ECANCELED - pg was deleted
+ */
+static int
+dgraph_set_runlevel(scf_propertygroup_t *pg, scf_property_t *prop)
+{
+ char rl;
+ scf_handle_t *h;
+ int r;
+ const char *ms = NULL; /* what to commit as options/milestone */
+ boolean_t rebound = B_FALSE;
+ int mark_rl = 0;
+
+ const char * const stop = "stop";
+
+ r = libscf_extract_runlevel(prop, &rl);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ case ECANCELED:
+ return (r);
+
+ case EINVAL:
+ case ENOENT:
+ log_error(LOG_WARNING, "runlevel property is misconfigured; "
+ "ignoring.\n");
+ /* delete the bad property */
+ goto nolock_out;
+
+ default:
+ bad_error("libscf_extract_runlevel", r);
+ }
+
+ switch (rl) {
+ case 's':
+ rl = 'S';
+ /* FALLTHROUGH */
+
+ case 'S':
+ case '2':
+ case '3':
+ /*
+ * These cases cause a milestone change, so
+ * graph_runlevel_changed() will eventually deal with
+ * signalling init.
+ */
+ break;
+
+ case '0':
+ case '1':
+ case '4':
+ case '5':
+ case '6':
+ mark_rl = 1;
+ break;
+
+ default:
+ log_framework(LOG_NOTICE, "Unknown runlevel '%c'.\n", rl);
+ ms = NULL;
+ goto nolock_out;
+ }
+
+ h = scf_pg_handle(pg);
+
+ MUTEX_LOCK(&dgraph_lock);
+
+ /*
+ * Since this triggers no milestone changes, force it by hand.
+ */
+ if (current_runlevel == '4' && rl == '3')
+ mark_rl = 1;
+
+ if (rl == current_runlevel) {
+ ms = NULL;
+ goto out;
+ }
+
+ log_framework(LOG_DEBUG, "Changing to runlevel '%c'.\n", rl);
+
+ /*
+ * Make sure stop rc scripts see the new settings via who -r.
+ */
+ utmpx_set_runlevel(rl, current_runlevel, B_TRUE);
+
+ /*
+ * Some run levels don't have a direct correspondence to any
+ * milestones, so we have to signal init directly.
+ */
+ if (mark_rl) {
+ current_runlevel = rl;
+ signal_init(rl);
+ }
+
+ switch (rl) {
+ case 'S':
+ uu_warn("The system is coming down for administration. "
+ "Please wait.\n");
+ fork_rc_script(rl, stop, B_FALSE);
+ ms = single_user_fmri;
+ go_single_user_mode = B_TRUE;
+ break;
+
+ case '0':
+ fork_rc_script(rl, stop, B_TRUE);
+ halting = AD_HALT;
+ goto uadmin;
+
+ case '5':
+ fork_rc_script(rl, stop, B_TRUE);
+ halting = AD_POWEROFF;
+ goto uadmin;
+
+ case '6':
+ fork_rc_script(rl, stop, B_TRUE);
+ halting = AD_BOOT;
+ goto uadmin;
+
+uadmin:
+ uu_warn("The system is coming down. Please wait.\n");
+ ms = "none";
+
+ /*
+ * We can't wait until all services are offline since this
+ * thread is responsible for taking them offline. Instead we
+ * set halting to the second argument for uadmin() and call
+ * do_uadmin() from dgraph_set_instance_state() when
+ * appropriate.
+ */
+ break;
+
+ case '1':
+ if (current_runlevel != 'S') {
+ uu_warn("Changing to state 1.\n");
+ fork_rc_script(rl, stop, B_FALSE);
+ } else {
+ uu_warn("The system is coming up for administration. "
+ "Please wait.\n");
+ }
+ ms = single_user_fmri;
+ go_to_level1 = B_TRUE;
+ break;
+
+ case '2':
+ if (current_runlevel == '3' || current_runlevel == '4')
+ fork_rc_script(rl, stop, B_FALSE);
+ ms = multi_user_fmri;
+ break;
+
+ case '3':
+ case '4':
+ ms = "all";
+ break;
+
+ default:
+#ifndef NDEBUG
+ (void) fprintf(stderr, "%s:%d: Uncaught case %d ('%c').\n",
+ __FILE__, __LINE__, rl, rl);
+#endif
+ abort();
+ }
+
+out:
+ MUTEX_UNLOCK(&dgraph_lock);
+
+nolock_out:
+ switch (r = libscf_clear_runlevel(pg, ms)) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(h);
+ rebound = B_TRUE;
+ goto nolock_out;
+
+ case ECANCELED:
+ break;
+
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ log_error(LOG_NOTICE, "Could not delete \"%s/%s\" property: "
+ "%s.\n", SCF_PG_OPTIONS, "runlevel", strerror(r));
+ break;
+
+ default:
+ bad_error("libscf_clear_runlevel", r);
+ }
+
+ return (rebound ? ECONNRESET : 0);
+}
+
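+/*
+ * uu_list_walk() callback used by dgraph_set_milestone() (and recursively by
+ * itself) to set GV_INSUBGRAPH on every vertex reachable from the milestone.
+ * Instances reached through an optional_all dependency group are skipped if
+ * they are not enabled, and exclude_all groups are marked but not descended
+ * into.
+ */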
+static int
+mark_subgraph(graph_edge_t *e, void *arg)
+{
+ graph_vertex_t *v;
+ int r;
+ int optional = (int)arg;
+
+ v = e->ge_vertex;
+
+ /* If it's already in the subgraph, skip. */
+ if (v->gv_flags & GV_INSUBGRAPH)
+ return (UU_WALK_NEXT);
+
+ /*
+ * Keep track of whether the walk has entered an optional dependency group.
+ */
+ if (v->gv_type == GVT_GROUP && v->gv_depgroup == DEPGRP_OPTIONAL_ALL) {
+ optional = 1;
+ }
+ /*
+ * Don't mark this vertex (or descend into it) if we are in an optional
+ * dependency group and the instance is disabled.
+ */
+ if (optional && (v->gv_type == GVT_INST) &&
+ (!(v->gv_flags & GV_ENBLD_NOOVR)))
+ return (UU_WALK_NEXT);
+
+ v->gv_flags |= GV_INSUBGRAPH;
+
+ /* Skip all excluded dependencies. */
+ if (v->gv_type == GVT_GROUP && v->gv_depgroup == DEPGRP_EXCLUDE_ALL)
+ return (UU_WALK_NEXT);
+
+ r = uu_list_walk(v->gv_dependencies, (uu_walk_fn_t *)mark_subgraph,
+ (void *)optional, 0);
+ assert(r == 0);
+ return (UU_WALK_NEXT);
+}
+
+/*
+ * "Restrict" the graph to dependencies of fmri. We implement it by walking
+ * all services, override-disabling those which are not descendents of the
+ * instance, and removing any enable-override for the rest. milestone is set
+ * to the vertex which represents fmri so that the other graph operations may
+ * act appropriately.
+ *
+ * If norepository is true, the function will not change the repository.
+ *
+ * Returns
+ * 0 - success
+ * ECONNRESET - success, but handle was rebound
+ * EINVAL - fmri is invalid (error is logged)
+ * EALREADY - the milestone is already set to fmri
+ * ENOENT - a configured vertex does not exist for fmri (an error is logged)
+ */
+static int
+dgraph_set_milestone(const char *fmri, scf_handle_t *h, boolean_t norepository)
+{
+ const char *cfmri, *fs;
+ graph_vertex_t *nm, *v;
+ int ret = 0, r;
+ scf_instance_t *inst;
+ boolean_t isall, isnone, rebound = B_FALSE;
+
+ /* Validate fmri */
+ isall = (strcmp(fmri, "all") == 0);
+ isnone = (strcmp(fmri, "none") == 0);
+
+ if (!isall && !isnone) {
+ if (fmri_canonify(fmri, (char **)&cfmri, B_FALSE) == EINVAL)
+ goto reject;
+
+ if (strcmp(cfmri, single_user_fmri) != 0 &&
+ strcmp(cfmri, multi_user_fmri) != 0 &&
+ strcmp(cfmri, multi_user_svr_fmri) != 0) {
+ startd_free((void *)cfmri, max_scf_fmri_size);
+reject:
+ log_framework(LOG_WARNING,
+ "Rejecting request for invalid milestone \"%s\".\n",
+ fmri);
+ return (EINVAL);
+ }
+ }
+
+ inst = safe_scf_instance_create(h);
+
+ MUTEX_LOCK(&dgraph_lock);
+
+ if (milestone == NULL) {
+ if (isall) {
+ log_framework(LOG_DEBUG,
+ "Milestone already set to all.\n");
+ ret = EALREADY;
+ goto out;
+ }
+ } else if (milestone == MILESTONE_NONE) {
+ if (isnone) {
+ log_framework(LOG_DEBUG,
+ "Milestone already set to none.\n");
+ ret = EALREADY;
+ goto out;
+ }
+ } else {
+ if (!isall && !isnone &&
+ strcmp(cfmri, milestone->gv_name) == 0) {
+ log_framework(LOG_DEBUG,
+ "Milestone already set to %s.\n", cfmri);
+ ret = EALREADY;
+ goto out;
+ }
+ }
+
+ if (!isall && !isnone) {
+ nm = vertex_get_by_name(cfmri);
+ if (nm == NULL || !(nm->gv_flags & GV_CONFIGURED)) {
+ log_framework(LOG_WARNING, "Cannot set milestone to %s "
+ "because no such service exists.\n", cfmri);
+ ret = ENOENT;
+ goto out;
+ }
+ }
+
+ log_framework(LOG_DEBUG, "Changing milestone to %s.\n", fmri);
+
+ /*
+ * Set milestone, removing the old one if this was the last reference.
+ */
+ if (milestone > MILESTONE_NONE &&
+ (milestone->gv_flags & GV_CONFIGURED) == 0)
+ remove_inst_vertex(milestone);
+
+ if (isall)
+ milestone = NULL;
+ else if (isnone)
+ milestone = MILESTONE_NONE;
+ else
+ milestone = nm;
+
+ /* Clear all GV_INSUBGRAPH bits. */
+ for (v = uu_list_first(dgraph); v != NULL; v = uu_list_next(dgraph, v))
+ v->gv_flags &= ~GV_INSUBGRAPH;
+
+ if (!isall && !isnone) {
+ /* Set GV_INSUBGRAPH for milestone & descendants. */
+ milestone->gv_flags |= GV_INSUBGRAPH;
+
+ r = uu_list_walk(milestone->gv_dependencies,
+ (uu_walk_fn_t *)mark_subgraph, NULL, 0);
+ assert(r == 0);
+ }
+
+ /* Un-override services in the subgraph & override-disable the rest. */
+ if (norepository)
+ goto out;
+
+ non_subgraph_svcs = 0;
+ for (v = uu_list_first(dgraph);
+ v != NULL;
+ v = uu_list_next(dgraph, v)) {
+ if (v->gv_type != GVT_INST ||
+ (v->gv_flags & GV_CONFIGURED) == 0)
+ continue;
+
+again:
+ r = scf_handle_decode_fmri(h, v->gv_name, NULL, NULL, inst,
+ NULL, NULL, SCF_DECODE_FMRI_EXACT);
+ if (r != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ libscf_handle_rebind(h);
+ rebound = B_TRUE;
+ goto again;
+
+ case SCF_ERROR_NOT_FOUND:
+ continue;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ case SCF_ERROR_NOT_BOUND:
+ bad_error("scf_handle_decode_fmri",
+ scf_error());
+ }
+ }
+
+ if (isall || (v->gv_flags & GV_INSUBGRAPH)) {
+ r = libscf_delete_enable_ovr(inst);
+ fs = "libscf_delete_enable_ovr";
+ } else {
+ assert(isnone || (v->gv_flags & GV_INSUBGRAPH) == 0);
+
+ if (inst_running(v))
+ ++non_subgraph_svcs;
+
+ if (has_running_nonsubgraph_dependents(v))
+ continue;
+
+ r = libscf_set_enable_ovr(inst, 0);
+ fs = "libscf_set_enable_ovr";
+ }
+ switch (r) {
+ case 0:
+ case ECANCELED:
+ break;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(h);
+ rebound = B_TRUE;
+ goto again;
+
+ case EPERM:
+ case EROFS:
+ log_error(LOG_WARNING,
+ "Could not set %s/%s for %s: %s.\n",
+ SCF_PG_GENERAL_OVR, SCF_PROPERTY_ENABLED,
+ v->gv_name, strerror(r));
+ break;
+
+ default:
+ bad_error(fs, r);
+ }
+ }
+
+ if (halting != -1) {
+ if (non_subgraph_svcs > 1)
+ uu_warn("%d system services are now being stopped.\n",
+ non_subgraph_svcs);
+ else if (non_subgraph_svcs == 1)
+ uu_warn("One system service is now being stopped.\n");
+ else if (non_subgraph_svcs == 0)
+ do_uadmin();
+ }
+
+ ret = rebound ? ECONNRESET : 0;
+
+out:
+ MUTEX_UNLOCK(&dgraph_lock);
+ if (!isall && !isnone)
+ startd_free((void *)cfmri, max_scf_fmri_size);
+ scf_instance_destroy(inst);
+ return (ret);
+}
+
+
+/*
+ * Returns 0, ECONNABORTED, or EINVAL.
+ */
+static int
+handle_graph_update_event(scf_handle_t *h, graph_protocol_event_t *e)
+{
+ int r;
+
+ switch (e->gpe_type) {
+ case GRAPH_UPDATE_RELOAD_GRAPH:
+ log_error(LOG_WARNING,
+ "graph_event: reload graph unimplemented\n");
+ break;
+
+ case GRAPH_UPDATE_STATE_CHANGE: {
+ protocol_states_t *states = e->gpe_data;
+
+ switch (r = dgraph_set_instance_state(h, e->gpe_inst,
+ states->ps_state, states->ps_err)) {
+ case 0:
+ case ENOENT:
+ break;
+
+ case ECONNABORTED:
+ return (ECONNABORTED);
+
+ case EINVAL:
+ default:
+#ifndef NDEBUG
+ (void) fprintf(stderr, "dgraph_set_instance_state() "
+ "failed with unexpected error %d at %s:%d.\n", r,
+ __FILE__, __LINE__);
+#endif
+ abort();
+ }
+
+ startd_free(states, sizeof (protocol_states_t));
+ break;
+ }
+
+ default:
+ log_error(LOG_WARNING,
+ "graph_event_loop received an unknown event: %d\n",
+ e->gpe_type);
+ break;
+ }
+
+ return (0);
+}
+
+/*
+ * graph_event_thread()
+ * Wait for state changes from the restarters.
+ */
+/*ARGSUSED*/
+void *
+graph_event_thread(void *unused)
+{
+ scf_handle_t *h;
+ int err;
+
+ h = libscf_handle_create_bound_loop();
+
+ /*CONSTCOND*/
+ while (1) {
+ graph_protocol_event_t *e;
+
+ MUTEX_LOCK(&gu->gu_lock);
+
+ while (gu->gu_wakeup == 0)
+ (void) pthread_cond_wait(&gu->gu_cv, &gu->gu_lock);
+
+ gu->gu_wakeup = 0;
+
+ while ((e = graph_event_dequeue()) != NULL) {
+ MUTEX_LOCK(&e->gpe_lock);
+ MUTEX_UNLOCK(&gu->gu_lock);
+
+ while ((err = handle_graph_update_event(h, e)) ==
+ ECONNABORTED)
+ libscf_handle_rebind(h);
+
+ if (err == 0)
+ graph_event_release(e);
+ else
+ graph_event_requeue(e);
+
+ MUTEX_LOCK(&gu->gu_lock);
+ }
+
+ MUTEX_UNLOCK(&gu->gu_lock);
+ }
+
+ /*
+ * Unreachable for now -- there's currently no graceful cleanup
+ * called on exit().
+ */
+ MUTEX_UNLOCK(&gu->gu_lock);
+ scf_handle_destroy(h);
+ return (NULL);
+}
+
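+/*
+ * Determine which milestone to boot to: if -m milestone=<fmri> was specified
+ * (st->st_subgraph), record it in options_ovr/milestone; otherwise read
+ * options/milestone.  Then add the milestone instance to the graph and make
+ * it the current milestone, reverting to "all" on any error.
+ */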
+static void
+set_initial_milestone(scf_handle_t *h)
+{
+ scf_instance_t *inst;
+ char *fmri, *cfmri;
+ size_t sz;
+ int r;
+
+ inst = safe_scf_instance_create(h);
+ fmri = startd_alloc(max_scf_fmri_size);
+
+ /*
+ * If -m milestone= was specified, we want to set options_ovr/milestone
+ * to it. Otherwise we want to read what the milestone should be set
+ * to. Either way we need our inst.
+ */
+get_self:
+ if (scf_handle_decode_fmri(h, SCF_SERVICE_STARTD, NULL, NULL, inst,
+ NULL, NULL, SCF_DECODE_FMRI_EXACT) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ libscf_handle_rebind(h);
+ goto get_self;
+
+ case SCF_ERROR_NOT_FOUND:
+ if (st->st_subgraph != NULL &&
+ st->st_subgraph[0] != '\0') {
+ sz = strlcpy(fmri, st->st_subgraph,
+ max_scf_fmri_size);
+ assert(sz < max_scf_fmri_size);
+ } else {
+ fmri[0] = '\0';
+ }
+ break;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ default:
+ bad_error("scf_handle_decode_fmri", scf_error());
+ }
+ } else {
+ if (st->st_subgraph != NULL && st->st_subgraph[0] != '\0') {
+ scf_propertygroup_t *pg;
+
+ pg = safe_scf_pg_create(h);
+
+ sz = strlcpy(fmri, st->st_subgraph, max_scf_fmri_size);
+ assert(sz < max_scf_fmri_size);
+
+ r = libscf_inst_get_or_add_pg(inst, SCF_PG_OPTIONS_OVR,
+ SCF_PG_OPTIONS_OVR_TYPE, SCF_PG_OPTIONS_OVR_FLAGS,
+ pg);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(h);
+ goto get_self;
+
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ log_error(LOG_WARNING, "Could not set %s/%s: "
+ "%s.\n", SCF_PG_OPTIONS_OVR,
+ SCF_PROPERTY_MILESTONE, strerror(r));
+ /* FALLTHROUGH */
+
+ case ECANCELED:
+ sz = strlcpy(fmri, st->st_subgraph,
+ max_scf_fmri_size);
+ assert(sz < max_scf_fmri_size);
+ break;
+
+ default:
+ bad_error("libscf_inst_get_or_add_pg", r);
+ }
+
+ r = libscf_clear_runlevel(pg, fmri);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(h);
+ goto get_self;
+
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ log_error(LOG_WARNING, "Could not set %s/%s: "
+ "%s.\n", SCF_PG_OPTIONS_OVR,
+ SCF_PROPERTY_MILESTONE, strerror(r));
+ /* FALLTHROUGH */
+
+ case ECANCELED:
+ sz = strlcpy(fmri, st->st_subgraph,
+ max_scf_fmri_size);
+ assert(sz < max_scf_fmri_size);
+ break;
+
+ default:
+ bad_error("libscf_clear_runlevel", r);
+ }
+
+ scf_pg_destroy(pg);
+ } else {
+ scf_property_t *prop;
+ scf_value_t *val;
+
+ prop = safe_scf_property_create(h);
+ val = safe_scf_value_create(h);
+
+ r = libscf_get_milestone(inst, prop, val, fmri,
+ max_scf_fmri_size);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(h);
+ goto get_self;
+
+ case EINVAL:
+ log_error(LOG_WARNING, "Milestone property is "
+ "misconfigured. Defaulting to \"all\".\n");
+ /* FALLTHROUGH */
+
+ case ECANCELED:
+ case ENOENT:
+ fmri[0] = '\0';
+ break;
+
+ default:
+ bad_error("libscf_get_milestone", r);
+ }
+
+ scf_value_destroy(val);
+ scf_property_destroy(prop);
+ }
+ }
+
+ if (fmri[0] == '\0' || strcmp(fmri, "all") == 0)
+ goto out;
+
+ if (strcmp(fmri, "none") != 0) {
+retry:
+ if (scf_handle_decode_fmri(h, fmri, NULL, NULL, inst, NULL,
+ NULL, SCF_DECODE_FMRI_EXACT) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_INVALID_ARGUMENT:
+ log_error(LOG_WARNING,
+ "Requested milestone \"%s\" is invalid. "
+ "Reverting to \"all\".\n", fmri);
+ goto out;
+
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ log_error(LOG_WARNING, "Requested milestone "
+ "\"%s\" does not specify an instance. "
+ "Reverting to \"all\".\n", fmri);
+ goto out;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ libscf_handle_rebind(h);
+ goto retry;
+
+ case SCF_ERROR_NOT_FOUND:
+ log_error(LOG_WARNING, "Requested milestone "
+ "\"%s\" not in repository. Reverting to "
+ "\"all\".\n", fmri);
+ goto out;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ default:
+ bad_error("scf_handle_decode_fmri",
+ scf_error());
+ }
+ }
+
+ r = fmri_canonify(fmri, &cfmri, B_FALSE);
+ assert(r == 0);
+
+ r = dgraph_add_instance(cfmri, inst, B_TRUE);
+ startd_free(cfmri, max_scf_fmri_size);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ goto retry;
+
+ case EINVAL:
+ log_error(LOG_WARNING,
+ "Requested milestone \"%s\" is invalid. "
+ "Reverting to \"all\".\n", fmri);
+ goto out;
+
+ case ECANCELED:
+ log_error(LOG_WARNING,
+ "Requested milestone \"%s\" not "
+ "in repository. Reverting to \"all\".\n",
+ fmri);
+ goto out;
+
+ case EEXIST:
+ default:
+ bad_error("dgraph_add_instance", r);
+ }
+ }
+
+ log_console(LOG_INFO, "Booting to milestone \"%s\".\n", fmri);
+
+ r = dgraph_set_milestone(fmri, h, B_FALSE);
+ switch (r) {
+ case 0:
+ case ECONNRESET:
+ case EALREADY:
+ break;
+
+ case EINVAL:
+ case ENOENT:
+ default:
+ bad_error("dgraph_set_milestone", r);
+ }
+
+out:
+ startd_free(fmri, max_scf_fmri_size);
+ scf_instance_destroy(inst);
+}
+
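+/*
+ * Called when svc.startd has been restarted rather than started at boot
+ * (!st->st_initial; see graph_thread()): re-read options/milestone from our
+ * own instance and re-apply it to the graph without modifying the
+ * repository.
+ */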
+void
+set_restart_milestone(scf_handle_t *h)
+{
+ scf_instance_t *inst;
+ scf_property_t *prop;
+ scf_value_t *val;
+ char *fmri;
+ int r;
+
+ inst = safe_scf_instance_create(h);
+
+get_self:
+ if (scf_handle_decode_fmri(h, SCF_SERVICE_STARTD, NULL, NULL,
+ inst, NULL, NULL, SCF_DECODE_FMRI_EXACT) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ libscf_handle_rebind(h);
+ goto get_self;
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ default:
+ bad_error("scf_handle_decode_fmri", scf_error());
+ }
+
+ scf_instance_destroy(inst);
+ return;
+ }
+
+ prop = safe_scf_property_create(h);
+ val = safe_scf_value_create(h);
+ fmri = startd_alloc(max_scf_fmri_size);
+
+ r = libscf_get_milestone(inst, prop, val, fmri, max_scf_fmri_size);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(h);
+ goto get_self;
+
+ case ECANCELED:
+ case ENOENT:
+ case EINVAL:
+ goto out;
+
+ default:
+ bad_error("libscf_get_milestone", r);
+ }
+
+ r = dgraph_set_milestone(fmri, h, B_TRUE);
+ switch (r) {
+ case 0:
+ case ECONNRESET:
+ case EALREADY:
+ case EINVAL:
+ case ENOENT:
+ break;
+
+ default:
+ bad_error("dgraph_set_milestone", r);
+ }
+
+out:
+ startd_free(fmri, max_scf_fmri_size);
+ scf_value_destroy(val);
+ scf_property_destroy(prop);
+ scf_instance_destroy(inst);
+}
+
+/*
+ * void *graph_thread(void *)
+ *
+ * Graph management thread.
+ */
+/*ARGSUSED*/
+void *
+graph_thread(void *arg)
+{
+ scf_handle_t *h;
+ int err;
+
+ h = libscf_handle_create_bound_loop();
+
+ if (st->st_initial)
+ set_initial_milestone(h);
+
+ MUTEX_LOCK(&dgraph_lock);
+ initial_milestone_set = B_TRUE;
+ err = pthread_cond_broadcast(&initial_milestone_cv);
+ assert(err == 0);
+ MUTEX_UNLOCK(&dgraph_lock);
+
+ libscf_populate_graph(h);
+
+ if (!st->st_initial)
+ set_restart_milestone(h);
+
+ MUTEX_LOCK(&st->st_load_lock);
+ st->st_load_complete = 1;
+ (void) pthread_cond_broadcast(&st->st_load_cv);
+ MUTEX_UNLOCK(&st->st_load_lock);
+
+ MUTEX_LOCK(&dgraph_lock);
+ /*
+ * Now that we've set st_load_complete we need to check can_come_up()
+ * since if we booted to a milestone, then there won't be any more
+ * state updates.
+ */
+ if (!go_single_user_mode && !go_to_level1 &&
+ halting == -1) {
+ if (!can_come_up() && !sulogin_thread_running) {
+ (void) startd_thread_create(sulogin_thread, NULL);
+ sulogin_thread_running = B_TRUE;
+ }
+ }
+ MUTEX_UNLOCK(&dgraph_lock);
+
+ (void) pthread_mutex_lock(&gu->gu_freeze_lock);
+
+ /*CONSTCOND*/
+ while (1) {
+ (void) pthread_cond_wait(&gu->gu_freeze_cv,
+ &gu->gu_freeze_lock);
+ }
+
+ /*
+ * Unreachable for now -- there's currently no graceful cleanup
+ * called on exit().
+ */
+ (void) pthread_mutex_unlock(&gu->gu_freeze_lock);
+ scf_handle_destroy(h);
+
+ return (NULL);
+}
+
+
+/*
+ * int next_action()
+ * Given an array of timestamps 'a' with 'num' elements, find the
+ * lowest non-zero timestamp and return its index. If there are no
+ * non-zero elements, return -1.
+ */
+static int
+next_action(hrtime_t *a, int num)
+{
+ hrtime_t t = 0;
+ int i = 0, smallest = -1;
+
+ for (i = 0; i < num; i++) {
+ if (t == 0) {
+ t = a[i];
+ smallest = i;
+ } else if (a[i] != 0 && a[i] < t) {
+ t = a[i];
+ smallest = i;
+ }
+ }
+
+ if (t == 0)
+ return (-1);
+ else
+ return (smallest);
+}
+
+/*
+ * void process_actions()
+ * Process actions requested by the administrator. Possibilities include:
+ * refresh, restart, maintenance mode off, maintenance mode on,
+ * maintenance mode immediate, and degraded.
+ *
+ * The set of pending actions is represented in the repository as a
+ * per-instance property group, with each action being a single property
+ * in that group. This property group is converted to an array, with each
+ * action type having an array slot. The actions in the array at the
+ * time process_actions() is called are acted on in the order of the
+ * timestamp (which is the value stored in the slot). A value of zero
+ * indicates that there is no pending action of the type associated with
+ * a particular slot.
+ *
+ * Sending an action event multiple times before the restarter has a
+ * chance to process that action will force it to be run at the last
+ * timestamp where it appears in the ordering.
+ *
+ * Turning maintenance mode on trumps all other actions.
+ *
+ * Returns 0 or ECONNABORTED.
+ */
+static int
+process_actions(scf_handle_t *h, scf_propertygroup_t *pg, scf_instance_t *inst)
+{
+ scf_property_t *prop = NULL;
+ scf_value_t *val = NULL;
+ scf_type_t type;
+ graph_vertex_t *vertex;
+ admin_action_t a;
+ int i, ret = 0, r;
+ hrtime_t action_ts[NACTIONS];
+ char *inst_name;
+
+ r = libscf_instance_get_fmri(inst, &inst_name);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ return (ECONNABORTED);
+
+ case ECANCELED:
+ return (0);
+
+ default:
+ bad_error("libscf_instance_get_fmri", r);
+ }
+
+ MUTEX_LOCK(&dgraph_lock);
+
+ vertex = vertex_get_by_name(inst_name);
+ if (vertex == NULL) {
+ MUTEX_UNLOCK(&dgraph_lock);
+ log_framework(LOG_DEBUG, "%s: Can't find graph vertex. "
+ "The instance must have been removed.\n", inst_name);
+ return (0);
+ }
+
+ prop = safe_scf_property_create(h);
+ val = safe_scf_value_create(h);
+
+ for (i = 0; i < NACTIONS; i++) {
+ if (scf_pg_get_property(pg, admin_actions[i], prop) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ goto out;
+
+ case SCF_ERROR_NOT_FOUND:
+ action_ts[i] = 0;
+ continue;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_pg_get_property", scf_error());
+ }
+ }
+
+ if (scf_property_type(prop, &type) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ action_ts[i] = 0;
+ continue;
+
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_property_type", scf_error());
+ }
+ }
+
+ if (type != SCF_TYPE_INTEGER) {
+ action_ts[i] = 0;
+ continue;
+ }
+
+ if (scf_property_get_value(prop, val) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ goto out;
+
+ case SCF_ERROR_NOT_FOUND:
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ action_ts[i] = 0;
+ continue;
+
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_property_get_value",
+ scf_error());
+ }
+ }
+
+ r = scf_value_get_integer(val, &action_ts[i]);
+ assert(r == 0);
+ }
+
+ a = ADMIN_EVENT_MAINT_ON_IMMEDIATE;
+ if (action_ts[ADMIN_EVENT_MAINT_ON_IMMEDIATE] ||
+ action_ts[ADMIN_EVENT_MAINT_ON]) {
+ a = action_ts[ADMIN_EVENT_MAINT_ON_IMMEDIATE] ?
+ ADMIN_EVENT_MAINT_ON_IMMEDIATE : ADMIN_EVENT_MAINT_ON;
+
+ vertex_send_event(vertex, admin_events[a]);
+ r = libscf_unset_action(h, pg, a, action_ts[a]);
+ switch (r) {
+ case 0:
+ case EACCES:
+ break;
+
+ case ECONNABORTED:
+ ret = ECONNABORTED;
+ goto out;
+
+ case EPERM:
+ uu_die("Insufficient privilege.\n");
+ /* NOTREACHED */
+
+ default:
+ bad_error("libscf_unset_action", r);
+ }
+ }
+
+ while ((a = next_action(action_ts, NACTIONS)) != -1) {
+ log_framework(LOG_DEBUG,
+ "Graph: processing %s action for %s.\n", admin_actions[a],
+ inst_name);
+
+ if (a == ADMIN_EVENT_REFRESH) {
+ r = dgraph_refresh_instance(vertex, inst);
+ switch (r) {
+ case 0:
+ case ECANCELED:
+ case EINVAL:
+ case -1:
+ break;
+
+ case ECONNABORTED:
+ /* pg & inst are reset now, so just return. */
+ ret = ECONNABORTED;
+ goto out;
+
+ default:
+ bad_error("dgraph_refresh_instance", r);
+ }
+ }
+
+ vertex_send_event(vertex, admin_events[a]);
+
+ r = libscf_unset_action(h, pg, a, action_ts[a]);
+ switch (r) {
+ case 0:
+ case EACCES:
+ break;
+
+ case ECONNABORTED:
+ ret = ECONNABORTED;
+ goto out;
+
+ case EPERM:
+ uu_die("Insufficient privilege.\n");
+ /* NOTREACHED */
+
+ default:
+ bad_error("libscf_unset_action", r);
+ }
+
+ action_ts[a] = 0;
+ }
+
+out:
+ MUTEX_UNLOCK(&dgraph_lock);
+
+ scf_property_destroy(prop);
+ scf_value_destroy(val);
+ startd_free(inst_name, max_scf_fmri_size);
+ return (ret);
+}
+
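+/*
+ * The action loop in process_actions() above depends on next_action(),
+ * presumably defined earlier in this file, to pick the pending
+ * administrative action with the earliest nonzero timestamp.  A minimal
+ * sketch of that selection logic, assuming the same hrtime_t timestamp
+ * array used by process_actions() (illustrative only, not the actual
+ * implementation):
+ *
+ *	static int
+ *	next_action_sketch(hrtime_t *array, int len)
+ *	{
+ *		int i, candidate = -1;
+ *
+ *		for (i = 0; i < len; i++) {
+ *			if (array[i] != 0 && (candidate == -1 ||
+ *			    array[i] < array[candidate]))
+ *				candidate = i;
+ *		}
+ *
+ *		return (candidate);
+ *	}
+ */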
+/*
+ * inst and pg_name are scratch space, and are unset on entry.
+ * Returns
+ * 0 - success
+ * ECONNRESET - success, but repository handle rebound
+ * ECONNABORTED - repository connection broken
+ */
+static int
+process_pg_event(scf_handle_t *h, scf_propertygroup_t *pg, scf_instance_t *inst,
+ char *pg_name)
+{
+ int r;
+ scf_property_t *prop;
+ scf_value_t *val;
+ char *fmri;
+ boolean_t rebound = B_FALSE, rebind_inst = B_FALSE;
+
+ if (scf_pg_get_name(pg, pg_name, max_scf_value_size) < 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ return (0);
+
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_pg_get_name", scf_error());
+ }
+ }
+
+ if (strcmp(pg_name, SCF_PG_GENERAL) == 0 ||
+ strcmp(pg_name, SCF_PG_GENERAL_OVR) == 0) {
+ r = dgraph_update_general(pg);
+ switch (r) {
+ case 0:
+ case ENOTSUP:
+ case ECANCELED:
+ return (0);
+
+ case ECONNABORTED:
+ return (ECONNABORTED);
+
+ case -1:
+ /* Error should have been logged. */
+ return (0);
+
+ default:
+ bad_error("dgraph_update_general", r);
+ }
+ } else if (strcmp(pg_name, SCF_PG_RESTARTER_ACTIONS) == 0) {
+ if (scf_pg_get_parent_instance(pg, inst) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ /* Ignore commands on services. */
+ return (0);
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_pg_get_parent_instance",
+ scf_error());
+ }
+ }
+
+ return (process_actions(h, pg, inst));
+ }
+
+ if (strcmp(pg_name, SCF_PG_OPTIONS) != 0 &&
+ strcmp(pg_name, SCF_PG_OPTIONS_OVR) != 0)
+ return (0);
+
+ /*
+ * We only care about the options[_ovr] property groups of our own
+ * instance, so get the fmri and compare. Plus, once we know it's
+ * correct, if the repository connection is broken we know exactly what
+ * property group we were operating on, and can look it up again.
+ */
+ if (scf_pg_get_parent_instance(pg, inst) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ return (0);
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_pg_get_parent_instance",
+ scf_error());
+ }
+ }
+
+ switch (r = libscf_instance_get_fmri(inst, &fmri)) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ return (ECONNABORTED);
+
+ case ECANCELED:
+ return (0);
+
+ default:
+ bad_error("libscf_instance_get_fmri", r);
+ }
+
+ if (strcmp(fmri, SCF_SERVICE_STARTD) != 0) {
+ startd_free(fmri, max_scf_fmri_size);
+ return (0);
+ }
+
+ prop = safe_scf_property_create(h);
+ val = safe_scf_value_create(h);
+
+ if (strcmp(pg_name, SCF_PG_OPTIONS_OVR) == 0) {
+ /* See if we need to set the runlevel. */
+ /* CONSTCOND */
+ if (0) {
+rebind_pg:
+ libscf_handle_rebind(h);
+ rebound = B_TRUE;
+
+ r = libscf_lookup_instance(SCF_SERVICE_STARTD, inst);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ goto rebind_pg;
+
+ case ENOENT:
+ goto out;
+
+ case EINVAL:
+ case ENOTSUP:
+ bad_error("libscf_lookup_instance", r);
+ }
+
+ if (scf_instance_get_pg(inst, pg_name, pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_NOT_FOUND:
+ goto out;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ goto rebind_pg;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ default:
+ bad_error("scf_instance_get_pg",
+ scf_error());
+ }
+ }
+ }
+
+ if (scf_pg_get_property(pg, "runlevel", prop) == 0) {
+ r = dgraph_set_runlevel(pg, prop);
+ switch (r) {
+ case ECONNRESET:
+ rebound = B_TRUE;
+ rebind_inst = B_TRUE;
+ /* FALLTHROUGH */
+
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ goto rebind_pg;
+
+ case ECANCELED:
+ goto out;
+
+ default:
+ bad_error("dgraph_set_runlevel", r);
+ }
+ } else {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ goto rebind_pg;
+
+ case SCF_ERROR_DELETED:
+ goto out;
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_pg_get_property", scf_error());
+ }
+ }
+ }
+
+ if (rebind_inst) {
+lookup_inst:
+ r = libscf_lookup_instance(SCF_SERVICE_STARTD, inst);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(h);
+ rebound = B_TRUE;
+ goto lookup_inst;
+
+ case ENOENT:
+ goto out;
+
+ case EINVAL:
+ case ENOTSUP:
+ bad_error("libscf_lookup_instance", r);
+ }
+ }
+
+ r = libscf_get_milestone(inst, prop, val, fmri, max_scf_fmri_size);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(h);
+ rebound = B_TRUE;
+ goto lookup_inst;
+
+ case EINVAL:
+ log_error(LOG_NOTICE,
+ "%s/%s property of %s is misconfigured.\n", pg_name,
+ SCF_PROPERTY_MILESTONE, SCF_SERVICE_STARTD);
+ /* FALLTHROUGH */
+
+ case ECANCELED:
+ case ENOENT:
+ (void) strcpy(fmri, "all");
+ break;
+
+ default:
+ bad_error("libscf_get_milestone", r);
+ }
+
+ r = dgraph_set_milestone(fmri, h, B_FALSE);
+ switch (r) {
+ case 0:
+ case ECONNRESET:
+ case EALREADY:
+ break;
+
+ case EINVAL:
+ log_error(LOG_WARNING, "Milestone %s is invalid.\n", fmri);
+ break;
+
+ case ENOENT:
+ log_error(LOG_WARNING, "Milestone %s does not exist.\n", fmri);
+ break;
+
+ default:
+ bad_error("dgraph_set_milestone", r);
+ }
+
+out:
+ startd_free(fmri, max_scf_fmri_size);
+ scf_value_destroy(val);
+ scf_property_destroy(prop);
+
+ return (rebound ? ECONNRESET : 0);
+}
+
+static void
+process_delete(char *fmri, scf_handle_t *h)
+{
+ char *lfmri;
+ const char *inst_name, *pg_name;
+
+ lfmri = safe_strdup(fmri);
+
+ /* Determine if the FMRI is a property group or instance */
+ if (scf_parse_svc_fmri(lfmri, NULL, NULL, &inst_name, &pg_name,
+ NULL) != SCF_SUCCESS) {
+ log_error(LOG_WARNING,
+ "Received invalid FMRI \"%s\" from repository server.\n",
+ fmri);
+ } else if (inst_name != NULL && pg_name == NULL) {
+ (void) dgraph_remove_instance(fmri, h);
+ }
+
+ free(lfmri);
+}
+
+/*ARGSUSED*/
+void *
+repository_event_thread(void *unused)
+{
+ scf_handle_t *h;
+ scf_propertygroup_t *pg;
+ scf_instance_t *inst;
+ char *fmri = startd_alloc(max_scf_fmri_size);
+ char *pg_name = startd_alloc(max_scf_value_size);
+ int r;
+
+ h = libscf_handle_create_bound_loop();
+
+ pg = safe_scf_pg_create(h);
+ inst = safe_scf_instance_create(h);
+
+retry:
+ if (_scf_notify_add_pgtype(h, SCF_GROUP_FRAMEWORK) != SCF_SUCCESS) {
+ if (scf_error() == SCF_ERROR_CONNECTION_BROKEN) {
+ libscf_handle_rebind(h);
+ } else {
+ log_error(LOG_WARNING,
+ "Couldn't set up repository notification "
+ "for property group type %s: %s\n",
+ SCF_GROUP_FRAMEWORK, scf_strerror(scf_error()));
+
+ (void) sleep(1);
+ }
+
+ goto retry;
+ }
+
+ /*CONSTCOND*/
+ while (1) {
+ ssize_t res;
+
+ /* Note: fmri is only set on delete events. */
+ res = _scf_notify_wait(pg, fmri, max_scf_fmri_size);
+ if (res < 0) {
+ libscf_handle_rebind(h);
+ goto retry;
+ } else if (res == 0) {
+ /*
+ * property group modified. inst and pg_name are
+ * pre-allocated scratch space.
+ */
+ if (scf_pg_update(pg) < 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ continue;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ log_error(LOG_WARNING,
+ "Lost repository event due to "
+ "disconnection.\n");
+ libscf_handle_rebind(h);
+ goto retry;
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_pg_update", scf_error());
+ }
+ }
+
+ r = process_pg_event(h, pg, inst, pg_name);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ log_error(LOG_WARNING, "Lost repository event "
+ "due to disconnection.\n");
+ libscf_handle_rebind(h);
+ /* FALLTHROUGH */
+
+ case ECONNRESET:
+ goto retry;
+
+ default:
+ bad_error("process_pg_event", r);
+ }
+ } else {
+ /* service, instance, or pg deleted. */
+ process_delete(fmri, h);
+ }
+ }
+
+ /*NOTREACHED*/
+ return (NULL);
+}
+
+void
+graph_engine_start()
+{
+ int err;
+
+ (void) startd_thread_create(graph_thread, NULL);
+
+ MUTEX_LOCK(&dgraph_lock);
+ while (!initial_milestone_set) {
+ err = pthread_cond_wait(&initial_milestone_cv, &dgraph_lock);
+ assert(err == 0);
+ }
+ MUTEX_UNLOCK(&dgraph_lock);
+
+ (void) startd_thread_create(repository_event_thread, NULL);
+ (void) startd_thread_create(graph_event_thread, NULL);
+}
diff --git a/usr/src/cmd/svc/startd/libscf.c b/usr/src/cmd/svc/startd/libscf.c
new file mode 100644
index 0000000000..5da07354a6
--- /dev/null
+++ b/usr/src/cmd/svc/startd/libscf.c
@@ -0,0 +1,3844 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/contract/process.h>
+#include <assert.h>
+#include <errno.h>
+#include <libscf.h>
+#include <libscf_priv.h>
+#include <poll.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "startd.h"
+
+#define SMF_SNAPSHOT_RUNNING "running"
+
+char *
+inst_fmri_to_svc_fmri(const char *fmri)
+{
+ char *buf, *sfmri;
+ const char *scope, *svc;
+ int r;
+ boolean_t local;
+
+ buf = startd_alloc(max_scf_fmri_size);
+ sfmri = startd_alloc(max_scf_fmri_size);
+
+ (void) strcpy(buf, fmri);
+
+ r = scf_parse_svc_fmri(buf, &scope, &svc, NULL, NULL, NULL);
+ assert(r == 0);
+
+ local = strcmp(scope, SCF_SCOPE_LOCAL) == 0;
+
+ (void) snprintf(sfmri, max_scf_fmri_size, "svc:%s%s/%s",
+ local ? "" : "//", local ? "" : scope, svc);
+
+ startd_free(buf, max_scf_fmri_size);
+
+ return (sfmri);
+}
+
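+/*
+ * A hedged usage sketch for inst_fmri_to_svc_fmri() above: the instance
+ * FMRI is only an example, and the returned buffer (max_scf_fmri_size
+ * bytes) must be released with startd_free() by the caller.
+ *
+ *	char *svc_fmri = inst_fmri_to_svc_fmri("svc:/network/ssh:default");
+ *	... svc_fmri now holds "svc:/network/ssh" ...
+ *	startd_free(svc_fmri, max_scf_fmri_size);
+ */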
+/*
+ * Wrapper for the scf_*_create() functions. On SCF_ERROR_NO_MEMORY and
+ * SCF_ERROR_NO_RESOURCES, retries or dies. So this can only fail with
+ * SCF_ERROR_INVALID_ARGUMENT, if h is NULL.
+ */
+void *
+libscf_object_create(void *f(scf_handle_t *), scf_handle_t *h)
+{
+ void *o;
+ uint_t try, msecs;
+ scf_error_t err;
+
+ o = f(h);
+ if (o != NULL)
+ return (o);
+ err = scf_error();
+ if (err != SCF_ERROR_NO_MEMORY && err != SCF_ERROR_NO_RESOURCES)
+ return (NULL);
+
+ msecs = ALLOC_DELAY;
+
+ for (try = 0; try < ALLOC_RETRY; ++try) {
+ (void) poll(NULL, 0, msecs);
+ msecs *= ALLOC_DELAY_MULT;
+ o = f(h);
+ if (o != NULL)
+ return (o);
+ err = scf_error();
+ if (err != SCF_ERROR_NO_MEMORY && err != SCF_ERROR_NO_RESOURCES)
+ return (NULL);
+ }
+
+ uu_die("Insufficient memory.\n");
+ /* NOTREACHED */
+}
+
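+/*
+ * The safe_scf_*_create() calls used throughout this file are presumably
+ * thin wrappers over libscf_object_create().  A sketch of what one such
+ * wrapper might look like (names are illustrative; the cast matches the
+ * generic function-pointer parameter above):
+ *
+ *	scf_property_t *
+ *	safe_scf_property_create(scf_handle_t *h)
+ *	{
+ *		return (libscf_object_create(
+ *		    (void *(*)(scf_handle_t *))scf_property_create, h));
+ *	}
+ */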
+scf_snapshot_t *
+libscf_get_running_snapshot(scf_instance_t *inst)
+{
+ scf_handle_t *h;
+ scf_snapshot_t *snap;
+
+ h = scf_instance_handle(inst);
+ if (h == NULL)
+ return (NULL);
+
+ snap = scf_snapshot_create(h);
+ if (snap == NULL)
+ return (NULL);
+
+ if (scf_instance_get_snapshot(inst, SMF_SNAPSHOT_RUNNING, snap) == 0)
+ return (snap);
+
+ scf_snapshot_destroy(snap);
+ return (NULL);
+}
+
+/*
+ * Make sure a service has a "running" snapshot. If it doesn't, make one from
+ * the editing configuration.
+ */
+scf_snapshot_t *
+libscf_get_or_make_running_snapshot(scf_instance_t *inst, const char *fmri,
+ boolean_t retake)
+{
+ scf_handle_t *h;
+ scf_snapshot_t *snap;
+
+ h = scf_instance_handle(inst);
+
+ snap = scf_snapshot_create(h);
+ if (snap == NULL)
+ goto err;
+
+ if (scf_instance_get_snapshot(inst, SMF_SNAPSHOT_RUNNING, snap) == 0)
+ return (snap);
+
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_DELETED:
+ scf_snapshot_destroy(snap);
+ return (NULL);
+
+ default:
+err:
+ log_error(LOG_NOTICE,
+ "Could not check for running snapshot of %s (%s).\n", fmri,
+ scf_strerror(scf_error()));
+ scf_snapshot_destroy(snap);
+ return (NULL);
+ }
+
+ if (_scf_snapshot_take_new(inst, SMF_SNAPSHOT_RUNNING, snap) == 0) {
+ log_framework(LOG_DEBUG, "Took running snapshot for %s.\n",
+ fmri);
+ } else {
+ if (retake && scf_error() == SCF_ERROR_BACKEND_READONLY)
+ restarter_mark_pending_snapshot(fmri,
+ RINST_RETAKE_RUNNING);
+ else
+ log_error(LOG_DEBUG,
+ "Could not create running snapshot for %s "
+ "(%s).\n", fmri, scf_strerror(scf_error()));
+
+ scf_snapshot_destroy(snap);
+ snap = NULL;
+ }
+
+ return (snap);
+}
+
+/*
+ * When a service comes up, point the "start" snapshot at the "running"
+ * snapshot. Returns 0 on success, ENOTSUP if fmri designates something other
+ * than an instance, ECONNABORTED, ENOENT if the instance does not exist, or
+ * EACCES.
+ */
+int
+libscf_snapshots_poststart(scf_handle_t *h, const char *fmri, boolean_t retake)
+{
+ scf_instance_t *inst = NULL;
+ scf_snapshot_t *running, *start = NULL;
+ int ret = 0, r;
+
+ r = libscf_fmri_get_instance(h, fmri, &inst);
+ switch (r) {
+ case 0:
+ break;
+
+ case ENOTSUP:
+ case ECONNABORTED:
+ case ENOENT:
+ return (r);
+
+ case EINVAL:
+ default:
+ assert(0);
+ abort();
+ }
+
+ start = safe_scf_snapshot_create(h);
+
+again:
+ running = libscf_get_or_make_running_snapshot(inst, fmri, retake);
+ if (running == NULL) {
+ ret = 0;
+ goto out;
+ }
+
+lookup:
+ if (scf_instance_get_snapshot(inst, "start", start) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_NOT_FOUND:
+ if (_scf_snapshot_take_new(inst, "start", start) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ ret = ENOENT;
+ goto out;
+
+ case SCF_ERROR_EXISTS:
+ goto lookup;
+
+ case SCF_ERROR_NO_RESOURCES:
+ uu_die("Repository server out of "
+ "resources.\n");
+ /* NOTREACHED */
+
+ case SCF_ERROR_BACKEND_READONLY:
+ goto readonly;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ uu_die("Insufficient privileges.\n");
+ /* NOTREACHED */
+
+ case SCF_ERROR_BACKEND_ACCESS:
+ ret = EACCES;
+ goto out;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ bad_error("_scf_snapshot_take_new",
+ scf_error());
+ }
+ }
+ break;
+
+ case SCF_ERROR_DELETED:
+ ret = ENOENT;
+ goto out;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ bad_error("scf_instance_get_snapshot", scf_error());
+ }
+ }
+
+ if (_scf_snapshot_attach(running, start) == 0) {
+ log_framework(LOG_DEBUG, "Updated \"start\" snapshot for %s.\n",
+ fmri);
+ } else {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ scf_snapshot_destroy(running);
+ goto again;
+
+ case SCF_ERROR_NO_RESOURCES:
+ uu_die("Repository server out of resources.\n");
+ /* NOTREACHED */
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ uu_die("Insufficient privileges.\n");
+ /* NOTREACHED */
+
+ case SCF_ERROR_BACKEND_ACCESS:
+ ret = EACCES;
+ goto out;
+
+ case SCF_ERROR_BACKEND_READONLY:
+readonly:
+ if (retake)
+ restarter_mark_pending_snapshot(fmri,
+ RINST_RETAKE_START);
+ break;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_SET:
+ bad_error("_scf_snapshot_attach", scf_error());
+ }
+ }
+
+out:
+ scf_snapshot_destroy(start);
+ scf_snapshot_destroy(running);
+ scf_instance_destroy(inst);
+
+ return (ret);
+}
+
+/*
+ * Before a refresh, update the "running" snapshot from the editing
+ * configuration.
+ *
+ * Returns 0 on success and -1 on failure.
+ */
+int
+libscf_snapshots_refresh(scf_instance_t *inst, const char *fmri)
+{
+ scf_handle_t *h;
+ scf_snapshot_t *snap = NULL;
+ boolean_t err = 1;
+
+ h = scf_instance_handle(inst);
+ if (h == NULL)
+ goto out;
+
+ snap = scf_snapshot_create(h);
+ if (snap == NULL)
+ goto out;
+
+ if (scf_instance_get_snapshot(inst, SMF_SNAPSHOT_RUNNING, snap) == 0) {
+ if (_scf_snapshot_take_attach(inst, snap) == 0)
+ err = 0;
+ } else {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ err = 0;
+ goto out;
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_NOT_SET:
+ assert(0);
+ abort();
+ /* NOTREACHED */
+
+ default:
+ goto out;
+ }
+
+ log_error(LOG_DEBUG,
+ "Service %s has no %s snapshot; creating one.\n", fmri,
+ SMF_SNAPSHOT_RUNNING);
+
+ if (_scf_snapshot_take_new(inst, SMF_SNAPSHOT_RUNNING,
+ snap) == 0)
+ err = 0;
+ }
+
+out:
+ scf_snapshot_destroy(snap);
+
+ if (!err)
+ return (0);
+
+ log_error(LOG_WARNING,
+ "Could not update \"running\" snapshot for refresh of %s.\n", fmri);
+ return (-1);
+}
+
+/*
+ * int libscf_read_single_astring()
+ * Reads a single astring value of the requested property into the
+ * pre-allocated buffer (conventionally of size max_scf_value_size).
+ * Multiple values constitute an error.
+ *
+ * Returns 0 on success or LIBSCF_PROPERTY_ABSENT or LIBSCF_PROPERTY_ERROR.
+ */
+static int
+libscf_read_single_astring(scf_handle_t *h, scf_property_t *prop, char **ret)
+{
+ scf_value_t *val = safe_scf_value_create(h);
+ int r = 0;
+
+ if (scf_property_get_value(prop, val) == -1) {
+ if (scf_error() == SCF_ERROR_NOT_FOUND)
+ r = LIBSCF_PROPERTY_ABSENT;
+ else
+ r = LIBSCF_PROPERTY_ERROR;
+ goto read_single_astring_fail;
+ }
+
+ if (scf_value_get_astring(val, *ret, max_scf_value_size) <= 0) {
+ r = LIBSCF_PROPERTY_ERROR;
+ goto read_single_astring_fail;
+ }
+
+read_single_astring_fail:
+ scf_value_destroy(val);
+ return (r);
+}
+
+static int
+libscf_read_state(const scf_propertygroup_t *pg, const char *prop_name,
+ restarter_instance_state_t *state)
+{
+ scf_handle_t *h;
+ scf_property_t *prop;
+ char *char_state = startd_alloc(max_scf_value_size);
+ int ret = 0;
+
+ h = scf_pg_handle(pg);
+ prop = safe_scf_property_create(h);
+
+ if (scf_pg_get_property(pg, prop_name, prop) == -1) {
+ if (scf_error() == SCF_ERROR_NOT_FOUND)
+ ret = LIBSCF_PROPERTY_ABSENT;
+ else
+ ret = LIBSCF_PROPERTY_ERROR;
+ } else {
+ ret = libscf_read_single_astring(h, prop, &char_state);
+ if (ret != 0) {
+ if (ret != LIBSCF_PROPERTY_ABSENT)
+ ret = LIBSCF_PROPERTY_ERROR;
+ } else {
+ *state = restarter_string_to_state(char_state);
+ ret = 0;
+ }
+ }
+
+ startd_free(char_state, max_scf_value_size);
+ scf_property_destroy(prop);
+ return (ret);
+}
+
+/*
+ * int libscf_read_states(const scf_propertygroup_t *,
+ * restarter_instance_state_t *, restarter_instance_state_t *)
+ *
+ * Set the current state and next_state values for the given service instance.
+ * Returns 0 on success, or a libscf error code on failure.
+ */
+int
+libscf_read_states(const scf_propertygroup_t *pg,
+ restarter_instance_state_t *state, restarter_instance_state_t *next_state)
+{
+ int state_ret, next_state_ret, ret;
+
+ state_ret = libscf_read_state(pg, SCF_PROPERTY_STATE, state);
+ next_state_ret = libscf_read_state(pg, SCF_PROPERTY_NEXT_STATE,
+ next_state);
+
+ if (state_ret == LIBSCF_PROPERTY_ERROR ||
+ next_state_ret == LIBSCF_PROPERTY_ERROR) {
+ ret = LIBSCF_PROPERTY_ERROR;
+ } else if (state_ret == 0 && next_state_ret == 0) {
+ ret = 0;
+ } else if (state_ret == LIBSCF_PROPERTY_ABSENT &&
+ next_state_ret == LIBSCF_PROPERTY_ABSENT) {
+ *state = RESTARTER_STATE_UNINIT;
+ *next_state = RESTARTER_STATE_NONE;
+ ret = 0;
+ } else if (state_ret == LIBSCF_PROPERTY_ABSENT ||
+ next_state_ret == LIBSCF_PROPERTY_ABSENT) {
+ log_framework(LOG_DEBUG,
+ "Only one repository state exists, setting "
+ "restarter states to MAINTENANCE and NONE\n");
+ *state = RESTARTER_STATE_MAINT;
+ *next_state = RESTARTER_STATE_NONE;
+ ret = 0;
+ } else {
+ ret = LIBSCF_PROPERTY_ERROR;
+ }
+
+read_states_out:
+ return (ret);
+}
+
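+/*
+ * A caller sketch for libscf_read_states() (assumptions: pg has already
+ * been set to the instance's restarter property group, and the caller only
+ * needs the two states; error handling is abbreviated):
+ *
+ *	restarter_instance_state_t cur, next;
+ *
+ *	if (libscf_read_states(pg, &cur, &next) != 0) {
+ *		log_framework(LOG_DEBUG, "Could not read stored states.\n");
+ *	} else if (cur == RESTARTER_STATE_UNINIT) {
+ *		... freshly imported instance: no states were stored yet ...
+ *	}
+ */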
+/*
+ * depgroup_empty()
+ *
+ * Returns 0 if not empty.
+ * Returns 1 if empty.
+ * Returns -1 on error (check scf_error()).
+ */
+int
+depgroup_empty(scf_handle_t *h, scf_propertygroup_t *pg)
+{
+ int empty = 1;
+ scf_iter_t *iter;
+ scf_property_t *prop;
+ int ret;
+
+ iter = safe_scf_iter_create(h);
+ prop = safe_scf_property_create(h);
+
+ if (scf_iter_pg_properties(iter, pg) != SCF_SUCCESS)
+ return (-1);
+
+ ret = scf_iter_next_property(iter, prop);
+ if (ret < 0)
+ return (-1);
+
+ if (ret == 1)
+ empty = 0;
+
+ scf_property_destroy(prop);
+ scf_iter_destroy(iter);
+
+ return (empty);
+}
+
+gv_type_t
+depgroup_read_scheme(scf_handle_t *h, scf_propertygroup_t *pg)
+{
+ scf_property_t *prop;
+ char *scheme = startd_alloc(max_scf_value_size);
+ gv_type_t ret;
+
+ prop = safe_scf_property_create(h);
+
+ if (scf_pg_get_property(pg, SCF_PROPERTY_TYPE, prop) == -1 ||
+ libscf_read_single_astring(h, prop, &scheme) != 0) {
+ scf_property_destroy(prop);
+ startd_free(scheme, max_scf_value_size);
+ return (GVT_UNSUPPORTED);
+ }
+
+ if (strcmp(scheme, "service") == 0)
+ ret = GVT_INST;
+ else if (strcmp(scheme, "path") == 0)
+ ret = GVT_FILE;
+ else
+ ret = GVT_UNSUPPORTED;
+
+ startd_free(scheme, max_scf_value_size);
+ scf_property_destroy(prop);
+ return (ret);
+}
+
+depgroup_type_t
+depgroup_read_grouping(scf_handle_t *h, scf_propertygroup_t *pg)
+{
+ char *grouping = startd_alloc(max_scf_value_size);
+ depgroup_type_t ret;
+ scf_property_t *prop = safe_scf_property_create(h);
+
+ if (scf_pg_get_property(pg, SCF_PROPERTY_GROUPING, prop) == -1 ||
+ libscf_read_single_astring(h, prop, &grouping) != 0) {
+ scf_property_destroy(prop);
+ startd_free(grouping, max_scf_value_size);
+ return (DEPGRP_UNSUPPORTED);
+ }
+
+ if (strcmp(grouping, SCF_DEP_REQUIRE_ANY) == 0)
+ ret = DEPGRP_REQUIRE_ANY;
+ else if (strcmp(grouping, SCF_DEP_REQUIRE_ALL) == 0)
+ ret = DEPGRP_REQUIRE_ALL;
+ else if (strcmp(grouping, SCF_DEP_OPTIONAL_ALL) == 0)
+ ret = DEPGRP_OPTIONAL_ALL;
+ else if (strcmp(grouping, SCF_DEP_EXCLUDE_ALL) == 0)
+ ret = DEPGRP_EXCLUDE_ALL;
+ else {
+ ret = DEPGRP_UNSUPPORTED;
+ }
+ startd_free(grouping, max_scf_value_size);
+ scf_property_destroy(prop);
+ return (ret);
+}
+
+restarter_error_t
+depgroup_read_restart(scf_handle_t *h, scf_propertygroup_t *pg)
+{
+ scf_property_t *prop = safe_scf_property_create(h);
+ char *restart_on = startd_alloc(max_scf_value_size);
+ restarter_error_t ret;
+
+ if (scf_pg_get_property(pg, SCF_PROPERTY_RESTART_ON, prop) == -1 ||
+ libscf_read_single_astring(h, prop, &restart_on) != 0) {
+ startd_free(restart_on, max_scf_value_size);
+ scf_property_destroy(prop);
+ return (RERR_UNSUPPORTED);
+ }
+
+ if (strcmp(restart_on, SCF_DEP_RESET_ON_ERROR) == 0)
+ ret = RERR_FAULT;
+ else if (strcmp(restart_on, SCF_DEP_RESET_ON_RESTART) == 0)
+ ret = RERR_RESTART;
+ else if (strcmp(restart_on, SCF_DEP_RESET_ON_REFRESH) == 0)
+ ret = RERR_REFRESH;
+ else if (strcmp(restart_on, SCF_DEP_RESET_ON_NONE) == 0)
+ ret = RERR_NONE;
+ else
+ ret = RERR_UNSUPPORTED;
+
+ startd_free(restart_on, max_scf_value_size);
+ scf_property_destroy(prop);
+ return (ret);
+}
+
+/*
+ * int get_boolean()
+ * Fetches the value of a boolean property of the given property group.
+ * Returns
+ * 0 - success
+ * ECONNABORTED - repository connection broken
+ * ECANCELED - pg was deleted
+ * ENOENT - the property doesn't exist or has no values
+ * EINVAL - the property has the wrong type
+ * the property is not single-valued
+ */
+static int
+get_boolean(scf_propertygroup_t *pg, const char *propname, uint8_t *valuep)
+{
+ scf_handle_t *h;
+ scf_property_t *prop;
+ scf_value_t *val;
+ int ret = 0, r;
+ scf_type_t type;
+
+ h = scf_pg_handle(pg);
+ prop = safe_scf_property_create(h);
+ val = safe_scf_value_create(h);
+
+ if (scf_pg_get_property(pg, propname, prop) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ ret = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_NOT_FOUND:
+ ret = ENOENT;
+ goto out;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_pg_get_property", scf_error());
+ }
+ }
+
+ if (scf_property_type(prop, &type) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ ret = ENOENT;
+ goto out;
+
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_property_type", scf_error());
+ }
+ }
+
+ if (type != SCF_TYPE_BOOLEAN) {
+ ret = EINVAL;
+ goto out;
+ }
+
+ if (scf_property_get_value(prop, val) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_NOT_FOUND:
+ ret = ENOENT;
+ goto out;
+
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ ret = EINVAL;
+ goto out;
+
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_property_get_value", scf_error());
+ }
+ }
+
+ r = scf_value_get_boolean(val, valuep);
+ assert(r == 0);
+
+out:
+ scf_value_destroy(val);
+ scf_property_destroy(prop);
+ return (ret);
+}
+
+/*
+ * int get_count()
+ * Fetches the value of a count property of the given property group.
+ * Returns
+ * 0 - success
+ * ECONNABORTED - repository connection broken
+ * unknown libscf error
+ * ECANCELED - pg was deleted
+ * ENOENT - the property doesn't exist or has no values
+ * EINVAL - the property has the wrong type
+ * the property is not single-valued
+ */
+static int
+get_count(scf_propertygroup_t *pg, const char *propname, uint64_t *valuep)
+{
+ scf_handle_t *h;
+ scf_property_t *prop;
+ scf_value_t *val;
+ int ret = 0, r;
+
+ h = scf_pg_handle(pg);
+ prop = safe_scf_property_create(h);
+ val = safe_scf_value_create(h);
+
+ if (scf_pg_get_property(pg, propname, prop) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ ret = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_NOT_FOUND:
+ ret = ENOENT;
+ goto out;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_pg_get_property", scf_error());
+ }
+ }
+
+ if (scf_property_is_type(prop, SCF_TYPE_COUNT) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_TYPE_MISMATCH:
+ ret = EINVAL;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ ret = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_property_is_type", scf_error());
+ }
+ }
+
+ if (scf_property_get_value(prop, val) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ ret = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_NOT_FOUND:
+ ret = ENOENT;
+ goto out;
+
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ ret = EINVAL;
+ goto out;
+
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_property_get_value", scf_error());
+ }
+ }
+
+ r = scf_value_get_count(val, valuep);
+ assert(r == 0);
+
+out:
+ scf_value_destroy(val);
+ scf_property_destroy(prop);
+ return (ret);
+}
+
+
+static void
+get_restarter(scf_handle_t *h, scf_propertygroup_t *pg, char **restarter)
+{
+ scf_property_t *prop = safe_scf_property_create(h);
+
+ if (scf_pg_get_property(pg, SCF_PROPERTY_RESTARTER, prop) == -1 ||
+ libscf_read_single_astring(h, prop, restarter) != 0)
+ *restarter[0] = '\0';
+
+ scf_property_destroy(prop);
+}
+
+/*
+ * int libscf_instance_get_fmri(scf_instance_t *, char **)
+ * Given a valid SCF instance, return its FMRI. Returns 0 on success,
+ * ECONNABORTED, or ECANCELED if inst is deleted.
+ */
+int
+libscf_instance_get_fmri(scf_instance_t *inst, char **retp)
+{
+ char *inst_fmri = startd_alloc(max_scf_fmri_size);
+
+ inst_fmri[0] = 0;
+ if (scf_instance_to_fmri(inst, inst_fmri, max_scf_fmri_size) <= 0) {
+ startd_free(inst_fmri, max_scf_fmri_size);
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ return (ECANCELED);
+
+ case SCF_ERROR_NOT_SET:
+ assert(0);
+ abort();
+ }
+ }
+
+ *retp = inst_fmri;
+ return (0);
+}
+
+/*
+ * int libscf_fmri_get_instance(scf_handle_t *, const char *,
+ * scf_instance_t **)
+ * Given a valid SCF handle and an FMRI, return the SCF instance that matches
+ * exactly. The instance must be released using scf_instance_destroy().
+ * Returns 0 on success, EINVAL if the FMRI is invalid, ENOTSUP if the FMRI
+ * is valid but designates something other than an instance, ECONNABORTED if
+ * the repository connection is broken, or ENOENT if the instance does not
+ * exist.
+ */
+int
+libscf_fmri_get_instance(scf_handle_t *h, const char *fmri,
+ scf_instance_t **instp)
+{
+ scf_instance_t *inst;
+ int r;
+
+ inst = safe_scf_instance_create(h);
+
+ r = libscf_lookup_instance(fmri, inst);
+
+ if (r == 0)
+ *instp = inst;
+ else
+ scf_instance_destroy(inst);
+
+ return (r);
+}
+
+int
+libscf_lookup_instance(const char *fmri, scf_instance_t *inst)
+{
+ if (scf_handle_decode_fmri(scf_instance_handle(inst), fmri, NULL, NULL,
+ inst, NULL, NULL, SCF_DECODE_FMRI_EXACT) != SCF_SUCCESS) {
+ switch (scf_error()) {
+ case SCF_ERROR_INVALID_ARGUMENT:
+ return (EINVAL);
+
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ return (ENOTSUP);
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_NOT_FOUND:
+ return (ENOENT);
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ default:
+ bad_error("scf_handle_decode_fmri", scf_error());
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * int libscf_get_basic_instance_data()
+ * Read enabled, enabled_ovr, and restarter_fmri (into an allocated
+ * buffer) for inst. Returns 0, ECONNABORTED if the connection to the
+ * repository is broken, ECANCELED if inst is deleted, or ENOENT if inst
+ * has no general property group.
+ *
+ * On success, restarter_fmri may be NULL. If general/enabled was missing
+ * or invalid, *enabledp will be -1 and a debug message is logged.
+ */
+int
+libscf_get_basic_instance_data(scf_handle_t *h, scf_instance_t *inst,
+ const char *fmri, int *enabledp, int *enabled_ovrp, char **restarter_fmri)
+{
+ scf_propertygroup_t *pg;
+ int r;
+ uint8_t enabled_8;
+
+ pg = safe_scf_pg_create(h);
+
+ if (enabled_ovrp == NULL)
+ goto enabled;
+
+ if (scf_instance_get_pg_composed(inst, NULL, SCF_PG_GENERAL_OVR, pg) !=
+ 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ scf_pg_destroy(pg);
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ scf_pg_destroy(pg);
+ return (ECANCELED);
+
+ case SCF_ERROR_NOT_FOUND:
+ *enabled_ovrp = -1;
+ break;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_instance_get_pg_composed", scf_error());
+ }
+ } else {
+ switch (r = get_boolean(pg, SCF_PROPERTY_ENABLED, &enabled_8)) {
+ case 0:
+ *enabled_ovrp = enabled_8;
+ break;
+
+ case ECONNABORTED:
+ case ECANCELED:
+ scf_pg_destroy(pg);
+ return (r);
+
+ case ENOENT:
+ case EINVAL:
+ *enabled_ovrp = -1;
+ break;
+
+ default:
+ bad_error("get_boolean", r);
+ }
+ }
+
+enabled:
+ /*
+ * Since general/restarter can be at the service level, we must do
+ * a composed lookup. These properties are immediate, though, so we
+ * must use the "editing" snapshot. Technically enabled shouldn't be
+ * at the service level, but looking it up composed, too, doesn't
+ * hurt.
+ */
+ if (scf_instance_get_pg_composed(inst, NULL, SCF_PG_GENERAL, pg) != 0) {
+ scf_pg_destroy(pg);
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ return (ECANCELED);
+
+ case SCF_ERROR_NOT_FOUND:
+ return (ENOENT);
+
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_instance_get_pg_composed", scf_error());
+ }
+ }
+
+ switch (r = get_boolean(pg, SCF_PROPERTY_ENABLED, &enabled_8)) {
+ case 0:
+ *enabledp = enabled_8;
+ break;
+
+ case ECONNABORTED:
+ case ECANCELED:
+ scf_pg_destroy(pg);
+ return (r);
+
+ case ENOENT:
+ /*
+ * DEBUG because this happens when svccfg import creates
+ * a temporary service.
+ */
+ log_framework(LOG_DEBUG,
+ "general/enabled property of %s is missing.\n", fmri);
+ *enabledp = -1;
+ break;
+
+ case EINVAL:
+ log_framework(LOG_ERR,
+ "general/enabled property of %s is invalid.\n", fmri);
+ *enabledp = -1;
+ break;
+
+ default:
+ bad_error("get_boolean", r);
+ }
+
+ if (restarter_fmri != NULL)
+ get_restarter(h, pg, restarter_fmri);
+
+ scf_pg_destroy(pg);
+
+ return (0);
+}
+
+
+/*
+ * Sets pg to the property group of inst named by "name". If it doesn't
+ * exist, it is added.
+ *
+ * Fails with
+ * ECONNABORTED - repository disconnection or unknown libscf error
+ * ECANCELED - inst is deleted
+ * EPERM - permission is denied
+ * EACCES - backend denied access
+ * EROFS - backend readonly
+ */
+int
+libscf_inst_get_or_add_pg(scf_instance_t *inst, const char *name,
+ const char *type, uint32_t flags, scf_propertygroup_t *pg)
+{
+ uint32_t f;
+
+again:
+ if (scf_instance_get_pg(inst, name, pg) == 0) {
+ if (scf_pg_get_flags(pg, &f) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ goto add;
+
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_pg_get_flags", scf_error());
+ }
+ }
+
+ if (f == flags)
+ return (0);
+
+ if (scf_pg_delete(pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ break;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ return (EPERM);
+
+ case SCF_ERROR_BACKEND_ACCESS:
+ return (EACCES);
+
+ case SCF_ERROR_BACKEND_READONLY:
+ return (EROFS);
+
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_pg_delete", scf_error());
+ }
+ }
+ } else {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ return (ECANCELED);
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_instance_get_pg", scf_error());
+ }
+ }
+
+add:
+ if (scf_instance_add_pg(inst, name, type, flags, pg) == 0)
+ return (0);
+
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ return (ECANCELED);
+
+ case SCF_ERROR_EXISTS:
+ goto again;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ return (EPERM);
+
+ case SCF_ERROR_BACKEND_ACCESS:
+ return (EACCES);
+
+ case SCF_ERROR_BACKEND_READONLY:
+ return (EROFS);
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_instance_add_pg", scf_error());
+ /* NOTREACHED */
+ }
+}
+
+/*
+ * Returns
+ * 0 - success
+ * ECONNABORTED - repository connection broken
+ * - unknown libscf error
+ * ECANCELED - the property group was deleted
+ */
+static int
+transaction_add_set(scf_transaction_t *tx, scf_transaction_entry_t *ent,
+ const char *pname, scf_type_t ty)
+{
+ for (;;) {
+ if (scf_transaction_property_change_type(tx, ent, pname,
+ ty) == 0)
+ return (0);
+
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ return (ECANCELED);
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_IN_USE:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_transaction_property_change_type",
+ scf_error());
+ }
+
+ if (scf_transaction_property_new(tx, ent, pname, ty) == 0)
+ return (0);
+
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ return (ECANCELED);
+
+ case SCF_ERROR_EXISTS:
+ break;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_IN_USE:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_transaction_property_new", scf_error());
+ /* NOTREACHED */
+ }
+ }
+}
+
+/*
+ * Returns
+ * 0 - success
+ * ECONNABORTED - repository connection broken
+ * - unknown libscf error
+ * ECANCELED - pg was deleted
+ * EPERM
+ * EACCES
+ * EROFS
+ */
+static int
+pg_set_prop_value(scf_propertygroup_t *pg, const char *pname, scf_value_t *v)
+{
+ scf_handle_t *h;
+ scf_transaction_t *tx;
+ scf_transaction_entry_t *e;
+ scf_type_t ty;
+ scf_error_t scfe;
+ int ret, r;
+
+ h = scf_pg_handle(pg);
+ tx = safe_scf_transaction_create(h);
+ e = safe_scf_entry_create(h);
+
+ ty = scf_value_type(v);
+ assert(ty != SCF_TYPE_INVALID);
+
+ for (;;) {
+ if (scf_transaction_start(tx, pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ ret = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ ret = EPERM;
+ goto out;
+
+ case SCF_ERROR_BACKEND_ACCESS:
+ ret = EACCES;
+ goto out;
+
+ case SCF_ERROR_BACKEND_READONLY:
+ ret = EROFS;
+ goto out;
+
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_transaction_start", ret);
+ }
+ }
+
+ ret = transaction_add_set(tx, e, pname, ty);
+ switch (ret) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ case ECANCELED:
+ goto out;
+
+ default:
+ bad_error("transaction_add_set", ret);
+ }
+
+ r = scf_entry_add_value(e, v);
+ assert(r == 0);
+
+ r = scf_transaction_commit(tx);
+ if (r == 1)
+ break;
+ if (r != 0) {
+ scfe = scf_error();
+ scf_transaction_reset(tx);
+ switch (scfe) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ ret = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ ret = EPERM;
+ goto out;
+
+ case SCF_ERROR_BACKEND_ACCESS:
+ ret = EACCES;
+ goto out;
+
+ case SCF_ERROR_BACKEND_READONLY:
+ ret = EROFS;
+ goto out;
+
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_transaction_commit", scfe);
+ }
+ }
+
+ scf_transaction_reset(tx);
+
+ if (scf_pg_update(pg) == -1) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ ret = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_pg_update", scf_error());
+ }
+ }
+ }
+
+ ret = 0;
+
+out:
+ scf_transaction_destroy(tx);
+ scf_entry_destroy(e);
+ return (ret);
+}
+
+/*
+ * Returns
+ * 0 - success
+ * ECONNABORTED - repository connection broken
+ * - unknown libscf error
+ * ECANCELED - inst was deleted
+ * EPERM
+ * EACCES
+ * EROFS
+ */
+int
+libscf_inst_set_boolean_prop(scf_instance_t *inst, const char *pgname,
+ const char *pgtype, uint32_t pgflags, const char *pname, int val)
+{
+ scf_handle_t *h;
+ scf_propertygroup_t *pg = NULL;
+ scf_value_t *v;
+ int ret = 0;
+
+ h = scf_instance_handle(inst);
+ pg = safe_scf_pg_create(h);
+ v = safe_scf_value_create(h);
+
+ ret = libscf_inst_get_or_add_pg(inst, pgname, pgtype, pgflags, pg);
+ switch (ret) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ case ECANCELED:
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ goto out;
+
+ default:
+ bad_error("libscf_inst_get_or_add_pg", ret);
+ }
+
+ scf_value_set_boolean(v, val);
+
+ ret = pg_set_prop_value(pg, pname, v);
+ switch (ret) {
+ case 0:
+ case ECONNABORTED:
+ case ECANCELED:
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ break;
+
+ default:
+ bad_error("pg_set_prop_value", ret);
+ }
+
+out:
+ scf_pg_destroy(pg);
+ scf_value_destroy(v);
+ return (ret);
+}
+
+/*
+ * Returns
+ * 0 - success
+ * ECONNABORTED - repository connection broken
+ * - unknown libscf error
+ * ECANCELED - inst was deleted
+ * EPERM
+ * EACCES
+ * EROFS
+ */
+int
+libscf_inst_set_count_prop(scf_instance_t *inst, const char *pgname,
+ const char *pgtype, uint32_t pgflags, const char *pname, uint64_t count)
+{
+ scf_handle_t *h;
+ scf_propertygroup_t *pg = NULL;
+ scf_value_t *v;
+ int ret = 0;
+
+ h = scf_instance_handle(inst);
+ pg = safe_scf_pg_create(h);
+ v = safe_scf_value_create(h);
+
+ ret = libscf_inst_get_or_add_pg(inst, pgname, pgtype, pgflags, pg);
+ switch (ret) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ case ECANCELED:
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ goto out;
+
+ default:
+ bad_error("libscf_inst_get_or_add_pg", ret);
+ }
+
+ scf_value_set_count(v, count);
+
+ ret = pg_set_prop_value(pg, pname, v);
+ switch (ret) {
+ case 0:
+ case ECONNABORTED:
+ case ECANCELED:
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ break;
+
+ default:
+ bad_error("pg_set_prop_value", ret);
+ }
+
+out:
+ scf_pg_destroy(pg);
+ scf_value_destroy(v);
+ return (ret);
+}
+
+/*
+ * Returns 0 on success, ECONNABORTED if the repository connection is broken,
+ * ECANCELED if inst is deleted, EROFS if the backend is readonly, or EPERM if
+ * permission was denied.
+ */
+int
+libscf_set_enable_ovr(scf_instance_t *inst, int enable)
+{
+ return (libscf_inst_set_boolean_prop(inst, SCF_PG_GENERAL_OVR,
+ SCF_PG_GENERAL_OVR_TYPE, SCF_PG_GENERAL_OVR_FLAGS,
+ SCF_PROPERTY_ENABLED, enable));
+}
+
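+/*
+ * A sketch of how an enable override (the mechanism behind a temporary,
+ * non-persistent enable) might be written through the helper above; inst
+ * is assumed to be a valid instance handle and error handling is
+ * abbreviated:
+ *
+ *	switch (libscf_set_enable_ovr(inst, 1)) {
+ *	case 0:
+ *		break;
+ *	case ECONNABORTED:
+ *		... rebind the repository handle and retry ...
+ *	case ECANCELED:
+ *		... the instance was deleted ...
+ *	default:
+ *		log_framework(LOG_DEBUG,
+ *		    "Could not set enable override.\n");
+ *	}
+ */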
+/*
+ * Returns
+ * 0 - success
+ * ECONNABORTED - repository connection broken
+ * ECANCELED - inst was deleted
+ * EPERM
+ * EACCES
+ * EROFS
+ */
+int
+libscf_inst_delete_prop(scf_instance_t *inst, const char *pgname,
+ const char *pname)
+{
+ scf_handle_t *h;
+ scf_propertygroup_t *pg;
+ scf_transaction_t *tx;
+ scf_transaction_entry_t *e;
+ scf_error_t serr;
+ int ret = 0, r;
+
+ h = scf_instance_handle(inst);
+ pg = safe_scf_pg_create(h);
+
+ if (scf_instance_get_pg(inst, pgname, pg) != 0) {
+ scf_pg_destroy(pg);
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ return (ECANCELED);
+
+ case SCF_ERROR_NOT_FOUND:
+ return (0);
+
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_instance_get_pg", scf_error());
+ }
+ }
+
+ tx = safe_scf_transaction_create(h);
+ e = safe_scf_entry_create(h);
+
+ for (;;) {
+ if (scf_transaction_start(tx, pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ ret = 0;
+ goto out;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ ret = EPERM;
+ goto out;
+
+ case SCF_ERROR_BACKEND_ACCESS:
+ ret = EACCES;
+ goto out;
+
+ case SCF_ERROR_BACKEND_READONLY:
+ ret = EROFS;
+ goto out;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_transaction_start", scf_error());
+ }
+ }
+
+ if (scf_transaction_property_delete(tx, e, pname) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_NOT_FOUND:
+ ret = 0;
+ goto out;
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ bad_error("scf_transaction_property_delete",
+ scf_error());
+ }
+ }
+
+ r = scf_transaction_commit(tx);
+ if (r == 1)
+ break;
+ if (r != 0) {
+ serr = scf_error();
+ scf_transaction_reset(tx);
+ switch (serr) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ ret = 0;
+ goto out;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ ret = EPERM;
+ goto out;
+
+ case SCF_ERROR_BACKEND_ACCESS:
+ ret = EACCES;
+ goto out;
+
+ case SCF_ERROR_BACKEND_READONLY:
+ ret = EROFS;
+ goto out;
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_BOUND:
+ bad_error("scf_transaction_commit", serr);
+ }
+ }
+
+ scf_transaction_reset(tx);
+
+ if (scf_pg_update(pg) == -1) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ ret = 0;
+ goto out;
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_NOT_BOUND:
+ bad_error("scf_pg_update", scf_error());
+ }
+ }
+ }
+
+out:
+ scf_transaction_destroy(tx);
+ (void) scf_entry_destroy(e);
+ scf_pg_destroy(pg);
+ return (ret);
+}
+
+/*
+ * Returns 0, ECONNABORTED, ECANCELED, or EPERM.
+ */
+int
+libscf_delete_enable_ovr(scf_instance_t *inst)
+{
+ return (libscf_inst_delete_prop(inst, SCF_PG_GENERAL_OVR,
+ SCF_PROPERTY_ENABLED));
+}
+
+/*
+ * Fails with
+ * ECONNABORTED - repository connection was broken
+ * ECANCELED - pg was deleted
+ * ENOENT - pg has no milestone property
+ * EINVAL - the milestone property is misconfigured
+ */
+static int
+pg_get_milestone(scf_propertygroup_t *pg, scf_property_t *prop,
+ scf_value_t *val, char *buf, size_t buf_sz)
+{
+ if (scf_pg_get_property(pg, SCF_PROPERTY_MILESTONE, prop) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ return (ECANCELED);
+
+ case SCF_ERROR_NOT_FOUND:
+ return (ENOENT);
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_pg_get_property", scf_error());
+ }
+ }
+
+ if (scf_property_get_value(prop, val) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ case SCF_ERROR_NOT_FOUND:
+ return (EINVAL);
+
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_property_get_value", scf_error());
+ }
+ }
+
+ if (scf_value_get_astring(val, buf, buf_sz) < 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_TYPE_MISMATCH:
+ return (EINVAL);
+
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_value_get_astring", scf_error());
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * Fails with
+ * ECONNABORTED - repository connection was broken
+ * ECANCELED - inst was deleted
+ * ENOENT - inst has no milestone property
+ * EINVAL - the milestone property is misconfigured
+ */
+int
+libscf_get_milestone(scf_instance_t *inst, scf_property_t *prop,
+ scf_value_t *val, char *buf, size_t buf_sz)
+{
+ scf_propertygroup_t *pg;
+ int r;
+
+ pg = safe_scf_pg_create(scf_instance_handle(inst));
+
+ if (scf_instance_get_pg(inst, SCF_PG_OPTIONS_OVR, pg) == 0) {
+ switch (r = pg_get_milestone(pg, prop, val, buf, buf_sz)) {
+ case 0:
+ case ECONNABORTED:
+ case EINVAL:
+ goto out;
+
+ case ECANCELED:
+ case ENOENT:
+ break;
+
+ default:
+ bad_error("pg_get_milestone", r);
+ }
+ } else {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ r = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ r = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_instance_get_pg", scf_error());
+ }
+ }
+
+ if (scf_instance_get_pg(inst, SCF_PG_OPTIONS, pg) == 0) {
+ r = pg_get_milestone(pg, prop, val, buf, buf_sz);
+ } else {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ r = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ r = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_NOT_FOUND:
+ r = ENOENT;
+ break;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_instance_get_pg", scf_error());
+ }
+ }
+
+out:
+ scf_pg_destroy(pg);
+
+ return (r);
+}
+
+/*
+ * Get the runlevel character from the runlevel property of the given property
+ * group. Fails with
+ * ECONNABORTED - repository connection was broken
+ * ECANCELED - prop's property group was deleted
+ * ENOENT - the property has no values
+ * EINVAL - the property has more than one value
+ * the property is of the wrong type
+ * the property value is malformed
+ */
+int
+libscf_extract_runlevel(scf_property_t *prop, char *rlp)
+{
+ scf_value_t *val;
+ char buf[2];
+
+ val = safe_scf_value_create(scf_property_handle(prop));
+
+ if (scf_property_get_value(prop, val) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_NOT_SET:
+ return (ENOENT);
+
+ case SCF_ERROR_DELETED:
+ return (ECANCELED);
+
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ return (EINVAL);
+
+ case SCF_ERROR_NOT_FOUND:
+ return (ENOENT);
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("scf_property_get_value", scf_error());
+ }
+ }
+
+ if (scf_value_get_astring(val, buf, sizeof (buf)) < 0) {
+ if (scf_error() != SCF_ERROR_TYPE_MISMATCH)
+ bad_error("scf_value_get_astring", scf_error());
+
+ return (EINVAL);
+ }
+
+ if (buf[0] == '\0' || buf[1] != '\0')
+ return (EINVAL);
+
+ *rlp = buf[0];
+
+ return (0);
+}
+
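+/*
+ * A hedged usage sketch for libscf_extract_runlevel(): prop is assumed to
+ * already refer to the options_ovr/runlevel property, and the runlevel set
+ * checked here is the conventional System V one (the mapping to milestones
+ * is handled by dgraph_set_runlevel(), called from the graph code above):
+ *
+ *	char rl;
+ *
+ *	if (libscf_extract_runlevel(prop, &rl) != 0) {
+ *		log_framework(LOG_DEBUG,
+ *		    "runlevel property is malformed.\n");
+ *	} else if (strchr("Ss0123456", rl) == NULL) {
+ *		log_framework(LOG_DEBUG, "Unknown runlevel '%c'.\n", rl);
+ *	}
+ */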
+/*
+ * Delete the "runlevel" property from the given property group. Also set the
+ * "milestone" property to the given string. Fails with ECONNABORTED,
+ * ECANCELED, EPERM, EACCES, or EROFS.
+ */
+int
+libscf_clear_runlevel(scf_propertygroup_t *pg, const char *milestone)
+{
+ scf_handle_t *h;
+ scf_transaction_t *tx;
+ scf_transaction_entry_t *e_rl, *e_ms;
+ scf_value_t *val;
+ scf_error_t serr;
+ boolean_t isempty = B_TRUE;
+ int ret = 0, r;
+
+ h = scf_pg_handle(pg);
+ tx = safe_scf_transaction_create(h);
+ e_rl = safe_scf_entry_create(h);
+ e_ms = safe_scf_entry_create(h);
+ val = safe_scf_value_create(h);
+
+ if (milestone) {
+ r = scf_value_set_astring(val, milestone);
+ assert(r == 0);
+ }
+
+ for (;;) {
+ if (scf_transaction_start(tx, pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ ret = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ ret = EPERM;
+ goto out;
+
+ case SCF_ERROR_BACKEND_ACCESS:
+ ret = EACCES;
+ goto out;
+
+ case SCF_ERROR_BACKEND_READONLY:
+ ret = EROFS;
+ goto out;
+
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_transaction_start", scf_error());
+ }
+ }
+
+ if (scf_transaction_property_delete(tx, e_rl,
+ "runlevel") == 0) {
+ isempty = B_FALSE;
+ } else {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ ret = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ bad_error("scf_transaction_property_delete",
+ scf_error());
+ }
+ }
+
+ if (milestone) {
+ ret = transaction_add_set(tx, e_ms,
+ SCF_PROPERTY_MILESTONE, SCF_TYPE_ASTRING);
+ switch (ret) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ case ECANCELED:
+ goto out;
+
+ default:
+ bad_error("transaction_add_set", ret);
+ }
+
+ isempty = B_FALSE;
+
+ r = scf_entry_add_value(e_ms, val);
+ assert(r == 0);
+ }
+
+ if (isempty)
+ goto out;
+
+ r = scf_transaction_commit(tx);
+ if (r == 1)
+ break;
+ if (r != 0) {
+ serr = scf_error();
+ scf_transaction_reset(tx);
+ switch (serr) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ ret = EPERM;
+ goto out;
+
+ case SCF_ERROR_BACKEND_ACCESS:
+ ret = EACCES;
+ goto out;
+
+ case SCF_ERROR_BACKEND_READONLY:
+ ret = EROFS;
+ goto out;
+
+ default:
+ bad_error("scf_transaction_commit", serr);
+ }
+ }
+
+ scf_transaction_reset(tx);
+
+ if (scf_pg_update(pg) == -1) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_NOT_SET:
+ ret = ECANCELED;
+ goto out;
+
+ default:
+ assert(0);
+ abort();
+ }
+ }
+ }
+
+out:
+ scf_transaction_destroy(tx);
+ scf_entry_destroy(e_rl);
+ scf_entry_destroy(e_ms);
+ scf_value_destroy(val);
+ return (ret);
+}
+
+/*
+ * int libscf_get_template_values(scf_instance_t *, scf_snapshot_t *,
+ * char **, char **)
+ *
+ * Return template values for inst in *common_name suitable for use in
+ * restarter_inst_t->ri_common_name. Called by restarter_insert_inst().
+ *
+ * Returns 0 on success, ECANCELED if the instance is deleted, ECHILD if
+ * a value fetch failed for a property, ENOENT if the instance has no
+ * tm_common_name property group or the property group is deleted, and
+ * ECONNABORTED if the repository connection is broken.
+ */
+int
+libscf_get_template_values(scf_instance_t *inst, scf_snapshot_t *snap,
+ char **common_name, char **c_common_name)
+{
+ scf_handle_t *h;
+ scf_propertygroup_t *pg = NULL;
+ scf_property_t *prop = NULL;
+ int ret = 0, r;
+ char *cname = startd_alloc(max_scf_value_size);
+ char *c_cname = startd_alloc(max_scf_value_size);
+
+ h = scf_instance_handle(inst);
+ pg = safe_scf_pg_create(h);
+ prop = safe_scf_property_create(h);
+
+ /*
+ * The tm_common_name property group, as with all template property
+ * groups, is optional.
+ */
+ if (scf_instance_get_pg_composed(inst, snap, SCF_PG_TM_COMMON_NAME, pg)
+ == -1) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ ret = ECANCELED;
+ goto template_values_out;
+
+ case SCF_ERROR_NOT_FOUND:
+ goto template_values_out;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto template_values_out;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_instance_get_pg_composed", scf_error());
+ }
+ }
+
+ /*
+ * The name we want is stored in a property named after the current locale.
+ */
+ if (st->st_locale != NULL) {
+ if (scf_pg_get_property(pg, st->st_locale, prop) == -1) {
+ startd_free(cname, max_scf_value_size);
+
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto template_values_out;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_pg_get_property", scf_error());
+ }
+ } else {
+ if ((r = libscf_read_single_astring(h, prop, &cname)) !=
+ 0) {
+ if (r != LIBSCF_PROPERTY_ABSENT)
+ ret = ECHILD;
+ startd_free(cname, max_scf_value_size);
+ goto template_values_out;
+ }
+
+ *common_name = cname;
+ }
+ }
+
+ /*
+ * Also pull out the C locale name, as a fallback for the case where
+ * the service offers no localized name.
+ */
+ if (scf_pg_get_property(pg, "C", prop) == -1) {
+ startd_free(c_cname, max_scf_value_size);
+
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ ret = ENOENT;
+ goto template_values_out;
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto template_values_out;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_pg_get_property", scf_error());
+ }
+ } else {
+ if ((r = libscf_read_single_astring(h, prop, &c_cname)) != 0) {
+ if (r != LIBSCF_PROPERTY_ABSENT)
+ ret = ECHILD;
+ goto template_values_out;
+ }
+
+ *c_common_name = c_cname;
+ }
+
+
+template_values_out:
+ scf_property_destroy(prop);
+ scf_pg_destroy(pg);
+
+ return (ret);
+}
+
+/*
+ * int libscf_get_startd_properties(scf_instance_t *, scf_snapshot_t *,
+ * uint_t *, char **)
+ *
+ * Return startd settings for inst in *flags suitable for use in
+ * restarter_inst_t->ri_flags. Called by restarter_insert_inst().
+ *
+ * Returns 0 on success, ECANCELED if the instance is deleted, ECHILD if
+ * a value fetch failed for a property, ENOENT if the instance has no
+ * general property group or the property group is deleted, and
+ * ECONNABORTED if the repository connection is broken.
+ */
+int
+libscf_get_startd_properties(scf_instance_t *inst,
+ scf_snapshot_t *snap, uint_t *flags, char **prefixp)
+{
+ scf_handle_t *h;
+ scf_propertygroup_t *pg = NULL;
+ scf_property_t *prop = NULL;
+ int style = RINST_CONTRACT;
+ char *style_str = startd_alloc(max_scf_value_size);
+ int ret = 0, r;
+
+ h = scf_instance_handle(inst);
+ pg = safe_scf_pg_create(h);
+ prop = safe_scf_property_create(h);
+
+ /*
+ * The startd property group is optional.
+ */
+ if (scf_instance_get_pg_composed(inst, snap, SCF_PG_STARTD, pg) == -1) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ ret = ECANCELED;
+ goto instance_flags_out;
+
+ case SCF_ERROR_NOT_FOUND:
+ ret = ENOENT;
+ goto instance_flags_out;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto instance_flags_out;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_instance_get_pg_composed", scf_error());
+ }
+ }
+
+ /*
+ * 1. Duration property.
+ */
+ if (scf_pg_get_property(pg, SCF_PROPERTY_DURATION, prop) == -1) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ ret = ENOENT;
+ goto instance_flags_out;
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto instance_flags_out;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_pg_get_property", scf_error());
+ }
+ } else {
+ errno = 0;
+ if ((r = libscf_read_single_astring(h, prop, &style_str))
+ != 0) {
+ if (r != LIBSCF_PROPERTY_ABSENT)
+ ret = ECHILD;
+ goto instance_flags_out;
+ }
+
+ if (strcmp(style_str, "child") == 0)
+ style = RINST_WAIT;
+ else if (strcmp(style_str, "transient") == 0)
+ style = RINST_TRANSIENT;
+ }
+
+ /*
+ * 2. utmpx prefix property.
+ */
+ if (scf_pg_get_property(pg, SCF_PROPERTY_UTMPX_PREFIX, prop) == 0) {
+ errno = 0;
+ if ((r = libscf_read_single_astring(h, prop, prefixp)) != 0) {
+ if (r != LIBSCF_PROPERTY_ABSENT)
+ ret = ECHILD;
+ goto instance_flags_out;
+ }
+ } else {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ ret = ENOENT;
+ goto instance_flags_out;
+
+ case SCF_ERROR_NOT_FOUND:
+ goto instance_flags_out;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto instance_flags_out;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_pg_get_property", scf_error());
+ }
+ }
+
+instance_flags_out:
+ startd_free(style_str, max_scf_value_size);
+ *flags = (*flags & ~RINST_STYLE_MASK) | style;
+
+ scf_property_destroy(prop);
+ scf_pg_destroy(pg);
+
+ return (ret);
+}
+
+/*
+ * int libscf_read_method_ids(scf_handle_t *, scf_instance_t *, const char *,
+ *   ctid_t *, ctid_t *, pid_t *)
+ *
+ * Sets the given variables to the instance's primary and transient contract
+ * IDs and its start PID. Returns 0, ECONNABORTED, or ECANCELED.
+ */
+int
+libscf_read_method_ids(scf_handle_t *h, scf_instance_t *inst, const char *fmri,
+ ctid_t *primary, ctid_t *transient, pid_t *start_pid)
+{
+ scf_propertygroup_t *pg = NULL;
+ scf_property_t *prop = NULL;
+ scf_value_t *val = NULL;
+ uint64_t p, t;
+ int ret = 0;
+
+ *primary = 0;
+ *transient = 0;
+ *start_pid = -1;
+
+ pg = safe_scf_pg_create(h);
+ prop = safe_scf_property_create(h);
+ val = safe_scf_value_create(h);
+
+ if (scf_instance_get_pg(inst, SCF_PG_RESTARTER, pg) == -1) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto read_id_err;
+
+ case SCF_ERROR_DELETED:
+ ret = ECANCELED;
+ goto read_id_err;
+
+ case SCF_ERROR_NOT_FOUND:
+ goto read_id_err;
+
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_instance_get_pg", scf_error());
+ }
+ }
+
+ ret = get_count(pg, SCF_PROPERTY_CONTRACT, &p);
+ switch (ret) {
+ case 0:
+ break;
+
+ case EINVAL:
+ log_error(LOG_NOTICE,
+ "%s: Ignoring %s/%s: multivalued or not of type count\n",
+ fmri, SCF_PG_RESTARTER, SCF_PROPERTY_CONTRACT);
+ /* FALLTHROUGH */
+ case ENOENT:
+ ret = 0;
+ goto read_trans;
+
+ case ECONNABORTED:
+ case ECANCELED:
+ goto read_id_err;
+
+ default:
+ bad_error("get_count", ret);
+ }
+
+ *primary = p;
+
+read_trans:
+ ret = get_count(pg, SCF_PROPERTY_TRANSIENT_CONTRACT, &t);
+ switch (ret) {
+ case 0:
+ break;
+
+ case EINVAL:
+ log_error(LOG_NOTICE,
+ "%s: Ignoring %s/%s: multivalued or not of type count\n",
+ fmri, SCF_PG_RESTARTER, SCF_PROPERTY_TRANSIENT_CONTRACT);
+ /* FALLTHROUGH */
+
+ case ENOENT:
+ ret = 0;
+ goto read_pid_only;
+
+ case ECONNABORTED:
+ case ECANCELED:
+ goto read_id_err;
+
+ default:
+ bad_error("get_count", ret);
+ }
+
+ *transient = t;
+
+read_pid_only:
+ ret = get_count(pg, SCF_PROPERTY_START_PID, &p);
+ switch (ret) {
+ case 0:
+ break;
+
+ case EINVAL:
+ log_error(LOG_NOTICE,
+ "%s: Ignoring %s/%s: multivalued or not of type count\n",
+ fmri, SCF_PG_RESTARTER, SCF_PROPERTY_START_PID);
+ /* FALLTHROUGH */
+ case ENOENT:
+ ret = 0;
+ goto read_id_err;
+
+ case ECONNABORTED:
+ case ECANCELED:
+ goto read_id_err;
+
+ default:
+ bad_error("get_count", ret);
+ }
+
+ *start_pid = p;
+
+read_id_err:
+ scf_value_destroy(val);
+ scf_property_destroy(prop);
+ scf_pg_destroy(pg);
+ return (ret);
+}
+
+/*
+ * Returns with
+ * 0 - success
+ * ECONNABORTED - repository connection broken or unknown libscf error
+ * ECANCELED - s_inst was deleted
+ * EPERM
+ * EACCES
+ * EROFS
+ */
+int
+libscf_write_start_pid(scf_instance_t *s_inst, pid_t pid)
+{
+ scf_handle_t *h;
+ scf_transaction_entry_t *t_pid;
+ scf_value_t *v_pid;
+ scf_propertygroup_t *pg;
+ int ret = 0;
+
+ h = scf_instance_handle(s_inst);
+
+ pg = safe_scf_pg_create(h);
+ t_pid = safe_scf_entry_create(h);
+ v_pid = safe_scf_value_create(h);
+
+get_pg:
+ ret = libscf_inst_get_or_add_pg(s_inst, SCF_PG_RESTARTER,
+ SCF_PG_RESTARTER_TYPE, SCF_PG_RESTARTER_FLAGS, pg);
+ switch (ret) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ case ECANCELED:
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ goto write_start_err;
+
+ default:
+ bad_error("libscf_inst_get_or_add_pg", ret);
+ }
+
+ scf_value_set_count(v_pid, pid);
+
+ ret = pg_set_prop_value(pg, SCF_PROPERTY_START_PID, v_pid);
+ switch (ret) {
+ case 0:
+ case ECONNABORTED:
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ break;
+
+ case ECANCELED:
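+ /*
+ * The property group was deleted after we fetched or created it;
+ * go back and re-create it.
+ */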
+ goto get_pg;
+
+ default:
+ bad_error("pg_set_prop_value", ret);
+ }
+
+write_start_err:
+ scf_entry_destroy(t_pid);
+ scf_value_destroy(v_pid);
+ scf_pg_destroy(pg);
+
+ return (ret);
+}
+
+/*
+ * Add a property indicating the instance log file. If the dir is
+ * equal to LOG_PREFIX_EARLY, then the property restarter/alt_logfile
+ * of the instance is used; otherwise, restarter/logfile is used.
+ *
+ * Returns
+ * 0 - success
+ * ECONNABORTED
+ * ECANCELED
+ * EPERM
+ * EACCES
+ * EROFS
+ * EAGAIN
+ */
+int
+libscf_note_method_log(scf_instance_t *inst, const char *dir, const char *file)
+{
+ scf_handle_t *h;
+ scf_value_t *v;
+ scf_propertygroup_t *pg;
+ int ret = 0;
+ char *logname;
+ const char *propname;
+
+ h = scf_instance_handle(inst);
+ pg = safe_scf_pg_create(h);
+ v = safe_scf_value_create(h);
+
+ logname = uu_msprintf("%s%s", dir, file);
+
+ if (logname == NULL) {
+ ret = errno;
+ goto out;
+ }
+
+ ret = libscf_inst_get_or_add_pg(inst, SCF_PG_RESTARTER,
+ SCF_PG_RESTARTER_TYPE, SCF_PG_RESTARTER_FLAGS, pg);
+ switch (ret) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ case ECANCELED:
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ goto out;
+
+ default:
+ bad_error("libscf_inst_get_or_add_pg", ret);
+ }
+
+ (void) scf_value_set_astring(v, logname);
+
+ if (strcmp(LOG_PREFIX_EARLY, dir) == 0)
+ propname = SCF_PROPERTY_ALT_LOGFILE;
+ else
+ propname = SCF_PROPERTY_LOGFILE;
+
+ ret = pg_set_prop_value(pg, propname, v);
+ switch (ret) {
+ case 0:
+ case ECONNABORTED:
+ case ECANCELED:
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ break;
+
+ default:
+ bad_error("pg_set_prop_value", ret);
+ }
+
+out:
+ scf_pg_destroy(pg);
+ scf_value_destroy(v);
+ uu_free(logname);
+ return (ret);
+}
+
+/*
+ * Returns
+ * 0 - success
+ * ENAMETOOLONG - name is too long
+ * ECONNABORTED
+ * ECANCELED
+ * EPERM
+ * EACCES
+ * EROFS
+ */
+int
+libscf_write_method_status(scf_instance_t *s_inst, const char *name,
+ int status)
+{
+ scf_handle_t *h;
+ scf_transaction_t *tx;
+ scf_transaction_entry_t *e_time, *e_stat;
+ scf_value_t *v_time, *v_stat;
+ scf_propertygroup_t *pg;
+ int ret = 0, r;
+ char pname[30];
+ struct timeval tv;
+ scf_error_t scfe;
+
+ if (strlen(name) + sizeof ("_method_waitstatus") > sizeof (pname))
+ return (ENAMETOOLONG);
+
+ h = scf_instance_handle(s_inst);
+
+ pg = safe_scf_pg_create(h);
+ tx = safe_scf_transaction_create(h);
+ e_time = safe_scf_entry_create(h);
+ v_time = safe_scf_value_create(h);
+ e_stat = safe_scf_entry_create(h);
+ v_stat = safe_scf_value_create(h);
+
+get_pg:
+ ret = libscf_inst_get_or_add_pg(s_inst, SCF_PG_RESTARTER,
+ SCF_PG_RESTARTER_TYPE, SCF_PG_RESTARTER_FLAGS, pg);
+ switch (ret) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ case ECANCELED:
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ goto out;
+
+ default:
+ bad_error("libscf_inst_get_or_add_pg", ret);
+ }
+
+ (void) gettimeofday(&tv, NULL);
+
+ r = scf_value_set_time(v_time, tv.tv_sec, tv.tv_usec * 1000);
+ assert(r == 0);
+
+ scf_value_set_integer(v_stat, status);
+
+ for (;;) {
+ if (scf_transaction_start(tx, pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ ret = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ ret = EPERM;
+ goto out;
+
+ case SCF_ERROR_BACKEND_ACCESS:
+ ret = EACCES;
+ goto out;
+
+ case SCF_ERROR_BACKEND_READONLY:
+ ret = EROFS;
+ goto out;
+
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_transaction_start", scf_error());
+ }
+ }
+
+ (void) snprintf(pname, sizeof (pname), "%s_method_timestamp",
+ name);
+ ret = transaction_add_set(tx, e_time, pname, SCF_TYPE_TIME);
+ switch (ret) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ case ECANCELED:
+ goto out;
+
+ default:
+ bad_error("transaction_add_set", ret);
+ }
+
+ r = scf_entry_add_value(e_time, v_time);
+ assert(r == 0);
+
+ (void) snprintf(pname, sizeof (pname), "%s_method_waitstatus",
+ name);
+ ret = transaction_add_set(tx, e_stat, pname, SCF_TYPE_INTEGER);
+ switch (ret) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ case ECANCELED:
+ goto out;
+
+ default:
+ bad_error("transaction_add_set", ret);
+ }
+
+ r = scf_entry_add_value(e_stat, v_stat);
+ if (r != 0)
+ bad_error("scf_entry_add_value", scf_error());
+
+ r = scf_transaction_commit(tx);
+ if (r == 1)
+ break;
+ if (r != 0) {
+ scfe = scf_error();
+ scf_transaction_reset_all(tx);
+ switch (scfe) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ ret = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ ret = EPERM;
+ goto out;
+
+ case SCF_ERROR_BACKEND_ACCESS:
+ ret = EACCES;
+ goto out;
+
+ case SCF_ERROR_BACKEND_READONLY:
+ ret = EROFS;
+ goto out;
+
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_transaction_commit", scfe);
+ }
+ }
+
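+ /*
+ * A commit return of 0 means our copy of the property group was
+ * out of date. Reset the transaction, refresh the group with
+ * scf_pg_update() below, and retry.
+ */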
+ scf_transaction_reset_all(tx);
+
+ if (scf_pg_update(pg) == -1) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ ret = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_pg_update", scf_error());
+ }
+ }
+ }
+
+out:
+ scf_transaction_destroy(tx);
+ scf_entry_destroy(e_time);
+ scf_value_destroy(v_time);
+ scf_entry_destroy(e_stat);
+ scf_value_destroy(v_stat);
+ scf_pg_destroy(pg);
+
+ return (ret);
+}
+
+/*
+ * Call dgraph_add_instance() for each instance in the repository.
+ */
+void
+libscf_populate_graph(scf_handle_t *h)
+{
+ scf_scope_t *scope;
+ scf_service_t *svc;
+ scf_instance_t *inst;
+ scf_iter_t *svc_iter;
+ scf_iter_t *inst_iter;
+ int ret;
+
+ scope = safe_scf_scope_create(h);
+ svc = safe_scf_service_create(h);
+ inst = safe_scf_instance_create(h);
+ svc_iter = safe_scf_iter_create(h);
+ inst_iter = safe_scf_iter_create(h);
+
+ if ((ret = scf_handle_get_local_scope(h, scope)) !=
+ SCF_SUCCESS)
+ uu_die("retrieving local scope failed: %d\n", ret);
+
+ if (scf_iter_scope_services(svc_iter, scope) == -1)
+ uu_die("walking local scope's services failed\n");
+
+ while (scf_iter_next_service(svc_iter, svc) > 0) {
+ if (scf_iter_service_instances(inst_iter, svc) == -1)
+ uu_die("unable to walk service's instances");
+
+ while (scf_iter_next_instance(inst_iter, inst) > 0) {
+ char *fmri;
+
+ if (libscf_instance_get_fmri(inst, &fmri) == 0) {
+ int err;
+
+ err = dgraph_add_instance(fmri, inst, B_TRUE);
+ if (err != 0 && err != EEXIST)
+ log_error(LOG_WARNING,
+ "Failed to add %s (%s).\n", fmri,
+ strerror(err));
+ startd_free(fmri, max_scf_fmri_size);
+ }
+ }
+ }
+
+ scf_iter_destroy(inst_iter);
+ scf_iter_destroy(svc_iter);
+ scf_instance_destroy(inst);
+ scf_service_destroy(svc);
+ scf_scope_destroy(scope);
+}
+
+/*
+ * Monitors get handled differently since there can be multiple of them.
+ *
+ * Returns the exec string on success. On failure, returns NULL with errno
+ * set to LIBSCF_PGROUP_ABSENT if the method is not defined, to
+ * LIBSCF_PROPERTY_ABSENT if the exec property is missing, or to
+ * LIBSCF_PROPERTY_ERROR on other failures.
+ */
+char *
+libscf_get_method(scf_handle_t *h, int type, restarter_inst_t *inst,
+ scf_snapshot_t *snap, method_restart_t *restart_on, uint_t *cte_mask,
+ uint8_t *need_sessionp, uint64_t *timeout, uint8_t *timeout_retry)
+{
+ scf_instance_t *scf_inst = NULL;
+ scf_propertygroup_t *pg = NULL, *pg_startd = NULL;
+ scf_property_t *prop = NULL;
+ const char *name;
+ char *method = startd_alloc(max_scf_value_size);
+ char *ig = startd_alloc(max_scf_value_size);
+ char *restart = startd_alloc(max_scf_value_size);
+ char *ret;
+ int error = 0, r;
+
+ scf_inst = safe_scf_instance_create(h);
+ pg = safe_scf_pg_create(h);
+ pg_startd = safe_scf_pg_create(h);
+ prop = safe_scf_property_create(h);
+
+ ret = NULL;
+
+ *restart_on = METHOD_RESTART_UNKNOWN;
+
+ switch (type) {
+ case METHOD_START:
+ name = "start";
+ break;
+ case METHOD_STOP:
+ name = "stop";
+ break;
+ case METHOD_REFRESH:
+ name = "refresh";
+ break;
+ default:
+ error = LIBSCF_PROPERTY_ERROR;
+ goto get_method_cleanup;
+ }
+
+ if (scf_handle_decode_fmri(h, inst->ri_i.i_fmri, NULL, NULL, scf_inst,
+ NULL, NULL, SCF_DECODE_FMRI_EXACT) == -1) {
+ log_error(LOG_WARNING,
+ "%s: get_method decode instance FMRI failed: %s\n",
+ inst->ri_i.i_fmri, scf_strerror(scf_error()));
+ error = LIBSCF_PROPERTY_ERROR;
+ goto get_method_cleanup;
+ }
+
+ if (scf_instance_get_pg_composed(scf_inst, snap, name, pg) == -1) {
+ if (scf_error() == SCF_ERROR_NOT_FOUND)
+ error = LIBSCF_PGROUP_ABSENT;
+ else
+ error = LIBSCF_PROPERTY_ERROR;
+ goto get_method_cleanup;
+ }
+
+ if (scf_pg_get_property(pg, SCF_PROPERTY_EXEC, prop) == -1) {
+ if (scf_error() == SCF_ERROR_NOT_FOUND)
+ error = LIBSCF_PROPERTY_ABSENT;
+ else
+ error = LIBSCF_PROPERTY_ERROR;
+ goto get_method_cleanup;
+ }
+
+ error = libscf_read_single_astring(h, prop, &method);
+ if (error != 0) {
+ log_error(LOG_WARNING,
+ "%s: get_method failed: can't get a single astring "
+ "from %s/%s\n", inst->ri_i.i_fmri, name, SCF_PROPERTY_EXEC);
+ goto get_method_cleanup;
+ }
+
+ error = expand_method_tokens(method, scf_inst, snap, type, &ret);
+ if (error != 0) {
+ log_instance(inst, B_TRUE, "Could not expand method tokens "
+ "in \"%s\": %s", method, ret);
+ error = LIBSCF_PROPERTY_ERROR;
+ goto get_method_cleanup;
+ }
+
+ r = get_count(pg, SCF_PROPERTY_TIMEOUT, timeout);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ error = LIBSCF_PROPERTY_ERROR;
+ goto get_method_cleanup;
+
+ case EINVAL:
+ log_instance(inst, B_TRUE, "%s/%s is multi-valued or not of "
+ "type count. Using infinite timeout.", name,
+ SCF_PROPERTY_TIMEOUT);
+ /* FALLTHROUGH */
+ case ECANCELED:
+ case ENOENT:
+ *timeout = METHOD_TIMEOUT_INFINITE;
+ break;
+
+ default:
+ bad_error("get_count", r);
+ }
+
+ /* Both 0 and -1 (ugh) are considered infinite timeouts. */
+ if (*timeout == -1 || *timeout == 0)
+ *timeout = METHOD_TIMEOUT_INFINITE;
+
+ if (scf_instance_get_pg_composed(scf_inst, snap, SCF_PG_STARTD,
+ pg_startd) == -1) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_DELETED:
+ error = LIBSCF_PROPERTY_ERROR;
+ goto get_method_cleanup;
+
+ case SCF_ERROR_NOT_FOUND:
+ *cte_mask = 0;
+ break;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_instance_get_pg_composed", scf_error());
+ }
+ } else {
+ if (scf_pg_get_property(pg_startd, SCF_PROPERTY_IGNORE,
+ prop) == -1) {
+ if (scf_error() == SCF_ERROR_NOT_FOUND)
+ *cte_mask = 0;
+ else {
+ error = LIBSCF_PROPERTY_ERROR;
+ goto get_method_cleanup;
+ }
+ } else {
+ error = libscf_read_single_astring(h, prop, &ig);
+ if (error != 0) {
+ log_error(LOG_WARNING,
+ "%s: get_method failed: can't get a single "
+ "astring from %s/%s\n", inst->ri_i.i_fmri,
+ name, SCF_PROPERTY_IGNORE);
+ goto get_method_cleanup;
+ }
+
+ if (strcmp(ig, "core") == 0)
+ *cte_mask = CT_PR_EV_CORE;
+ else if (strcmp(ig, "signal") == 0)
+ *cte_mask = CT_PR_EV_SIGNAL;
+ else if (strcmp(ig, "core,signal") == 0 ||
+ strcmp(ig, "signal,core") == 0)
+ *cte_mask = CT_PR_EV_CORE | CT_PR_EV_SIGNAL;
+ else
+ *cte_mask = 0;
+ }
+
+ r = get_boolean(pg_startd, SCF_PROPERTY_NEED_SESSION,
+ need_sessionp);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ error = LIBSCF_PROPERTY_ERROR;
+ goto get_method_cleanup;
+
+ case ECANCELED:
+ case ENOENT:
+ case EINVAL:
+ *need_sessionp = 0;
+ break;
+
+ default:
+ bad_error("get_boolean", r);
+ }
+
+ /*
+ * Determine whether the service has overridden retry after
+ * method timeout. Default to retry if no value is
+ * specified.
+ */
+ r = get_boolean(pg_startd, SCF_PROPERTY_TIMEOUT_RETRY,
+ timeout_retry);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ error = LIBSCF_PROPERTY_ERROR;
+ goto get_method_cleanup;
+
+ case ECANCELED:
+ case ENOENT:
+ case EINVAL:
+ *timeout_retry = 1;
+ break;
+
+ default:
+ bad_error("get_boolean", r);
+ }
+ }
+
+ if (type != METHOD_START)
+ goto get_method_cleanup;
+
+ /* Only start methods need to honor the restart_on property. */
+
+ if (scf_pg_get_property(pg, SCF_PROPERTY_RESTART_ON, prop) == -1) {
+ if (scf_error() == SCF_ERROR_NOT_FOUND)
+ *restart_on = METHOD_RESTART_ALL;
+ else
+ error = LIBSCF_PROPERTY_ERROR;
+ goto get_method_cleanup;
+ }
+
+ error = libscf_read_single_astring(h, prop, &restart);
+ if (error != 0) {
+ log_error(LOG_WARNING,
+ "%s: get_method failed: can't get a single astring "
+ "from %s/%s\n", inst->ri_i.i_fmri, name,
+ SCF_PROPERTY_RESTART_ON);
+ goto get_method_cleanup;
+ }
+
+ if (strcmp(restart, "all") == 0)
+ *restart_on = METHOD_RESTART_ALL;
+ else if (strcmp(restart, "external_fault") == 0)
+ *restart_on = METHOD_RESTART_EXTERNAL_FAULT;
+ else if (strcmp(restart, "any_fault") == 0)
+ *restart_on = METHOD_RESTART_ANY_FAULT;
+
+get_method_cleanup:
+ startd_free(ig, max_scf_value_size);
+ startd_free(method, max_scf_value_size);
+ startd_free(restart, max_scf_value_size);
+
+ scf_instance_destroy(scf_inst);
+ scf_pg_destroy(pg);
+ scf_pg_destroy(pg_startd);
+ scf_property_destroy(prop);
+
+ if (error != 0 && ret != NULL) {
+ free(ret);
+ ret = NULL;
+ }
+
+ errno = error;
+ return (ret);
+}
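+
+/*
+ * Illustrative caller sketch (the variable names are hypothetical): a NULL
+ * return from libscf_get_method() means failure, with errno carrying the
+ * LIBSCF_* code, e.g.
+ *
+ *	m = libscf_get_method(h, METHOD_START, inst, snap, &restart_on,
+ *	    &cte_mask, &need_session, &timeout, &timeout_retry);
+ *	if (m == NULL && errno == LIBSCF_PGROUP_ABSENT) {
+ *		(no start method is defined for this instance)
+ *	}
+ */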
+
+/*
+ * Returns 1 if we've reached the fault threshold, 0 otherwise.
+ */
+int
+update_fault_count(restarter_inst_t *inst, int type)
+{
+ assert(type == FAULT_COUNT_INCR || type == FAULT_COUNT_RESET);
+
+ if (type == FAULT_COUNT_INCR) {
+ inst->ri_i.i_fault_count++;
+ log_framework(LOG_INFO, "%s: Increasing fault count to %d\n",
+ inst->ri_i.i_fmri, inst->ri_i.i_fault_count);
+ }
+ if (type == FAULT_COUNT_RESET)
+ inst->ri_i.i_fault_count = 0;
+
+ if (inst->ri_i.i_fault_count >= FAULT_THRESHOLD)
+ return (1);
+
+ return (0);
+}
+
+/*
+ * int libscf_unset_action()
+ * Delete any pending timestamp for the specified action which is
+ * older than the supplied ts.
+ *
+ * Returns 0 on success, ECONNABORTED, EACCES, or EPERM on failure.
+ */
+int
+libscf_unset_action(scf_handle_t *h, scf_propertygroup_t *pg,
+ admin_action_t a, hrtime_t ts)
+{
+ scf_transaction_t *t;
+ scf_transaction_entry_t *e;
+ scf_property_t *prop;
+ scf_value_t *val;
+ hrtime_t rep_ts;
+ int ret = 0, r;
+
+ t = safe_scf_transaction_create(h);
+ e = safe_scf_entry_create(h);
+ prop = safe_scf_property_create(h);
+ val = safe_scf_value_create(h);
+
+ for (;;) {
+ if (scf_pg_update(pg) == -1) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto unset_action_cleanup;
+
+ case SCF_ERROR_DELETED:
+ goto unset_action_cleanup;
+
+ case SCF_ERROR_NOT_SET:
+ assert(0);
+ abort();
+ }
+ }
+
+ if (scf_transaction_start(t, pg) == -1) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto unset_action_cleanup;
+
+ case SCF_ERROR_DELETED:
+ goto unset_action_cleanup;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ ret = EPERM;
+ goto unset_action_cleanup;
+
+ case SCF_ERROR_BACKEND_ACCESS:
+ case SCF_ERROR_BACKEND_READONLY:
+ ret = EACCES;
+ goto unset_action_cleanup;
+
+ case SCF_ERROR_IN_USE:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_SET:
+ assert(0);
+ abort();
+ }
+ }
+
+ /* Return failure only if the property hasn't been deleted. */
+ if (scf_pg_get_property(pg, admin_actions[a], prop) == -1) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto unset_action_cleanup;
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_NOT_FOUND:
+ goto unset_action_cleanup;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ assert(0);
+ abort();
+ }
+ }
+
+ if (scf_property_get_value(prop, val) == -1) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto unset_action_cleanup;
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_NOT_FOUND:
+ goto unset_action_cleanup;
+
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ /*
+ * More than one value was associated with
+ * this property -- this is incorrect. Take
+ * the opportunity to clean up and clear the
+ * entire property.
+ */
+ rep_ts = ts;
+ break;
+
+ case SCF_ERROR_NOT_SET:
+ assert(0);
+ abort();
+ }
+ } else if (scf_value_get_integer(val, &rep_ts) == -1) {
+ assert(scf_error() == SCF_ERROR_TYPE_MISMATCH);
+ rep_ts = 0;
+ }
+
+ /* Repository ts is more current. Don't clear the action. */
+ if (rep_ts > ts)
+ goto unset_action_cleanup;
+
+ r = scf_transaction_property_change_type(t, e,
+ admin_actions[a], SCF_TYPE_INTEGER);
+ assert(r == 0);
+
+ r = scf_transaction_commit(t);
+ if (r == 1)
+ break;
+
+ if (r != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto unset_action_cleanup;
+
+ case SCF_ERROR_DELETED:
+ break;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ ret = EPERM;
+ goto unset_action_cleanup;
+
+ case SCF_ERROR_BACKEND_ACCESS:
+ case SCF_ERROR_BACKEND_READONLY:
+ ret = EACCES;
+ goto unset_action_cleanup;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ assert(0);
+ abort();
+ }
+ }
+
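+ /*
+ * The commit did not succeed: either our view of the property
+ * group was stale or the group was deleted out from under us.
+ * Reset the transaction and retry, re-fetching the group with
+ * scf_pg_update() at the top of the loop.
+ */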
+ scf_transaction_reset(t);
+ }
+
+unset_action_cleanup:
+ scf_transaction_destroy(t);
+ scf_entry_destroy(e);
+ scf_property_destroy(prop);
+ scf_value_destroy(val);
+
+ return (ret);
+}
+
+/*
+ * Decorates & binds hndl. hndl must be unbound. Returns
+ * 0 - success
+ * -1 - repository server is not running or is out of resources
+ */
+static int
+handle_decorate_and_bind(scf_handle_t *hndl)
+{
+ scf_value_t *door_dec_value;
+
+ door_dec_value = safe_scf_value_create(hndl);
+
+ /*
+ * Decorate if alternate door path set.
+ */
+ if (st->st_door_path) {
+ if (scf_value_set_astring(door_dec_value, st->st_door_path) !=
+ 0)
+ uu_die("$STARTD_ALT_DOOR is too long.\n");
+
+ if (scf_handle_decorate(hndl, "door_path", door_dec_value) != 0)
+ bad_error("scf_handle_decorate", scf_error());
+ }
+
+ scf_value_destroy(door_dec_value);
+
+ if (scf_handle_bind(hndl) == 0)
+ return (0);
+
+ switch (scf_error()) {
+ case SCF_ERROR_NO_SERVER:
+ case SCF_ERROR_NO_RESOURCES:
+ return (-1);
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_IN_USE:
+ default:
+ bad_error("scf_handle_bind", scf_error());
+ /* NOTREACHED */
+ }
+}
+
+scf_handle_t *
+libscf_handle_create_bound(scf_version_t v)
+{
+ scf_handle_t *hndl = scf_handle_create(v);
+
+ if (hndl == NULL)
+ return (hndl);
+
+ if (handle_decorate_and_bind(hndl) == 0)
+ return (hndl);
+
+ scf_handle_destroy(hndl);
+ return (NULL);
+}
+
+void
+libscf_handle_rebind(scf_handle_t *h)
+{
+ (void) scf_handle_unbind(h);
+
+ MUTEX_LOCK(&st->st_configd_live_lock);
+
+ /*
+ * Try to rebind the handle before sleeping in case the server isn't
+ * really dead.
+ */
+ while (handle_decorate_and_bind(h) != 0)
+ (void) pthread_cond_wait(&st->st_configd_live_cv,
+ &st->st_configd_live_lock);
+
+ MUTEX_UNLOCK(&st->st_configd_live_lock);
+}
+
+/*
+ * Create a handle and try to bind it until it succeeds. Always returns
+ * a bound handle.
+ */
+scf_handle_t *
+libscf_handle_create_bound_loop()
+{
+ scf_handle_t *h;
+
+ while ((h = scf_handle_create(SCF_VERSION)) == NULL) {
+ /* This should have been caught earlier. */
+ assert(scf_error() != SCF_ERROR_VERSION_MISMATCH);
+ (void) sleep(2);
+ }
+
+ if (handle_decorate_and_bind(h) != 0)
+ libscf_handle_rebind(h);
+
+ return (h);
+}
+
+/*
+ * Call cb for each dependency property group of inst. cb is invoked with
+ * a pointer to the scf_propertygroup_t and arg. If the repository connection
+ * is broken, returns ECONNABORTED. If inst is deleted, returns ECANCELED.
+ * If cb returns non-zero, the walk is stopped and EINTR is returned.
+ * Otherwise returns 0.
+ */
+int
+walk_dependency_pgs(scf_instance_t *inst, callback_t cb, void *arg)
+{
+ scf_handle_t *h;
+ scf_snapshot_t *snap;
+ scf_iter_t *iter;
+ scf_propertygroup_t *pg;
+ int r;
+
+ h = scf_instance_handle(inst);
+
+ iter = safe_scf_iter_create(h);
+ pg = safe_scf_pg_create(h);
+
+ snap = libscf_get_running_snapshot(inst);
+
+ if (scf_iter_instance_pgs_typed_composed(iter, inst, snap,
+ SCF_GROUP_DEPENDENCY) != 0) {
+ scf_snapshot_destroy(snap);
+ scf_pg_destroy(pg);
+ scf_iter_destroy(iter);
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ return (ECANCELED);
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ assert(0);
+ abort();
+ }
+ }
+
+ for (;;) {
+ r = scf_iter_next_pg(iter, pg);
+ if (r == 0)
+ break;
+ if (r == -1) {
+ assert(scf_error() == SCF_ERROR_CONNECTION_BROKEN);
+ scf_snapshot_destroy(snap);
+ scf_pg_destroy(pg);
+ scf_iter_destroy(iter);
+ return (ECONNABORTED);
+ }
+
+ r = cb(pg, arg);
+
+ if (r != 0)
+ break;
+ }
+
+ scf_snapshot_destroy(snap);
+ scf_pg_destroy(pg);
+ scf_iter_destroy(iter);
+
+ return (r == 0 ? 0 : EINTR);
+}
+
+/*
+ * Call cb for each of the string values of prop. cb is invoked with
+ * a pointer to the string and arg. If the connection to the repository is
+ * broken, ECONNABORTED is returned. If the property is deleted, ECANCELED is
+ * returned. If the property does not have astring type, EINVAL is returned.
+ * If cb returns non-zero, the walk is stopped and EINTR is returned.
+ * Otherwise 0 is returned.
+ */
+int
+walk_property_astrings(scf_property_t *prop, callback_t cb, void *arg)
+{
+ scf_handle_t *h;
+ scf_value_t *val;
+ scf_iter_t *iter;
+ char *buf;
+ int r;
+ ssize_t sz;
+
+ if (scf_property_is_type(prop, SCF_TYPE_ASTRING) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ return (ECANCELED);
+
+ case SCF_ERROR_TYPE_MISMATCH:
+ return (EINVAL);
+
+ case SCF_ERROR_NOT_SET:
+ assert(0);
+ abort();
+ }
+ }
+
+ h = scf_property_handle(prop);
+
+ val = safe_scf_value_create(h);
+ iter = safe_scf_iter_create(h);
+
+ if (scf_iter_property_values(iter, prop) != 0) {
+ scf_iter_destroy(iter);
+ scf_value_destroy(val);
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ return (ECANCELED);
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_SET:
+ assert(0);
+ abort();
+ }
+ }
+
+ buf = startd_alloc(max_scf_value_size);
+
+ for (;;) {
+ r = scf_iter_next_value(iter, val);
+ if (r < 0) {
+ assert(scf_error() == SCF_ERROR_CONNECTION_BROKEN);
+ startd_free(buf, max_scf_value_size);
+ scf_iter_destroy(iter);
+ scf_value_destroy(val);
+ return (ECONNABORTED);
+ }
+ if (r == 0)
+ break;
+
+ sz = scf_value_get_astring(val, buf, max_scf_value_size);
+ assert(sz >= 0);
+
+ r = cb(buf, arg);
+
+ if (r != 0)
+ break;
+ }
+
+ startd_free(buf, max_scf_value_size);
+ scf_value_destroy(val);
+ scf_iter_destroy(iter);
+
+ return (r == 0 ? 0 : EINTR);
+}
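+
+/*
+ * Illustrative sketch only; the callback below is hypothetical and is typed
+ * to match the cb(buf, arg) invocation above:
+ *
+ *	static int
+ *	print_value(void *value, void *arg)
+ *	{
+ *		log_framework(LOG_DEBUG, "%s: value \"%s\"\n", (char *)arg,
+ *		    (char *)value);
+ *		return (0);	(returning non-zero would stop the walk)
+ *	}
+ *
+ *	(void) walk_property_astrings(prop, print_value, (void *)fmri);
+ */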
+
+/*
+ * Returns 0 or ECONNABORTED.
+ */
+int
+libscf_create_self(scf_handle_t *h)
+{
+ scf_scope_t *scope;
+ scf_service_t *svc;
+ scf_instance_t *inst;
+ instance_data_t idata;
+ int ret = 0, r;
+ ctid_t ctid;
+ uint64_t uint64;
+ uint_t count = 0, msecs = ALLOC_DELAY;
+
+ const char * const startd_svc = "system/svc/restarter";
+ const char * const startd_inst = "default";
+
+ /* If SCF_SERVICE_STARTD changes, our strings must change, too. */
+ assert(strcmp(SCF_SERVICE_STARTD,
+ "svc:/system/svc/restarter:default") == 0);
+
+ scope = safe_scf_scope_create(h);
+ svc = safe_scf_service_create(h);
+ inst = safe_scf_instance_create(h);
+
+ if (scf_handle_get_scope(h, SCF_SCOPE_LOCAL, scope) != 0) {
+ assert(scf_error() == SCF_ERROR_CONNECTION_BROKEN);
+ ret = ECONNABORTED;
+ goto out;
+ }
+
+get_svc:
+ if (scf_scope_get_service(scope, startd_svc, svc) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_DELETED:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_scope_get_service", scf_error());
+ }
+
+add_svc:
+ if (scf_scope_add_service(scope, startd_svc, svc) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_DELETED:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_EXISTS:
+ goto get_svc;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ case SCF_ERROR_BACKEND_ACCESS:
+ case SCF_ERROR_BACKEND_READONLY:
+ uu_warn("Could not create %s: %s\n",
+ SCF_SERVICE_STARTD,
+ scf_strerror(scf_error()));
+ goto out;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_scope_add_service", scf_error());
+ }
+ }
+ }
+
+ if (scf_service_get_instance(svc, startd_inst, NULL) == 0)
+ goto out;
+
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_DELETED:
+ goto add_svc;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_service_get_instance", scf_error());
+ }
+
+add_inst:
+ if (scf_service_add_instance(svc, startd_inst, inst) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ ret = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_EXISTS:
+ break;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ case SCF_ERROR_BACKEND_ACCESS:
+ uu_die("Could not create %s: %s\n", SCF_SERVICE_STARTD,
+ scf_strerror(scf_error()));
+ /* NOTREACHED */
+
+ case SCF_ERROR_BACKEND_READONLY:
+ log_error(LOG_NOTICE,
+ "Could not create %s: backend readonly.\n",
+ SCF_SERVICE_STARTD);
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ goto add_svc;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_service_add_instance", scf_error());
+ }
+ }
+
+ /* Set start time. */
+ idata.i_fmri = SCF_SERVICE_STARTD;
+ idata.i_state = RESTARTER_STATE_NONE;
+ idata.i_next_state = RESTARTER_STATE_NONE;
+set_state:
+ switch (r = _restarter_commit_states(h, &idata, RESTARTER_STATE_ONLINE,
+ RESTARTER_STATE_NONE, NULL)) {
+ case 0:
+ break;
+
+ case ENOMEM:
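+ /*
+ * Repository allocation failure: wait, multiplying the delay
+ * each time, and retry up to ALLOC_RETRY attempts before
+ * giving up.
+ */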
+ ++count;
+ if (count < ALLOC_RETRY) {
+ (void) poll(NULL, 0, msecs);
+ msecs *= ALLOC_DELAY_MULT;
+ goto set_state;
+ }
+
+ uu_die("Insufficient memory.\n");
+ /* NOTREACHED */
+
+ case ECONNABORTED:
+ ret = ECONNABORTED;
+ goto out;
+
+ case ENOENT:
+ goto add_inst;
+
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ uu_warn("Could not timestamp %s: %s\n", idata.i_fmri,
+ strerror(r));
+ break;
+
+ case EINVAL:
+ default:
+ bad_error("_restarter_commit_states", r);
+ }
+
+ /* Set general/enabled. */
+ ret = libscf_inst_set_boolean_prop(inst, SCF_PG_GENERAL,
+ SCF_PG_GENERAL_TYPE, SCF_PG_GENERAL_FLAGS, SCF_PROPERTY_ENABLED, 1);
+ switch (ret) {
+ case 0:
+ case ECONNABORTED:
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ break;
+
+ case ECANCELED:
+ goto add_inst;
+
+ default:
+ bad_error("libscf_inst_set_boolean_prop", ret);
+ }
+
+ ret = libscf_write_start_pid(inst, getpid());
+ switch (ret) {
+ case 0:
+ case ECONNABORTED:
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ break;
+
+ case ECANCELED:
+ goto add_inst;
+
+ default:
+ bad_error("libscf_write_start_pid", ret);
+ }
+
+ ctid = proc_get_ctid();
+ if (ctid > 0) {
+
+ uint64 = (uint64_t)ctid;
+ ret = libscf_inst_set_count_prop(inst,
+ SCF_PG_RESTARTER, SCF_PG_RESTARTER_TYPE,
+ SCF_PG_RESTARTER_FLAGS, SCF_PROPERTY_CONTRACT, uint64);
+
+ switch (ret) {
+ case 0:
+ case ECONNABORTED:
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ break;
+
+ case ECANCELED:
+ goto add_inst;
+
+ default:
+ bad_error("libscf_inst_set_count_prop", ret);
+ }
+ }
+
+ ret = libscf_note_method_log(inst, LOG_PREFIX_EARLY,
+ STARTD_DEFAULT_LOG);
+ if (ret == 0) {
+ ret = libscf_note_method_log(inst, LOG_PREFIX_NORMAL,
+ STARTD_DEFAULT_LOG);
+ }
+
+ switch (ret) {
+ case ECONNABORTED:
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ case EAGAIN:
+ break;
+
+ case ECANCELED:
+ goto add_inst;
+
+ default:
+ bad_error("libscf_note_method_log", ret);
+ }
+
+out:
+ scf_instance_destroy(inst);
+ scf_service_destroy(svc);
+ scf_scope_destroy(scope);
+ return (ret);
+}
+
+/*
+ * Returns
+ * 0 - success
+ * ENOENT - SCF_SERVICE_STARTD does not exist in repository
+ * EPERM
+ * EACCES
+ * EROFS
+ */
+int
+libscf_set_reconfig(int set)
+{
+ scf_handle_t *h;
+ scf_instance_t *inst;
+ scf_propertygroup_t *pg;
+ int ret = 0;
+
+ h = libscf_handle_create_bound_loop();
+ inst = safe_scf_instance_create(h);
+ pg = safe_scf_pg_create(h);
+
+again:
+ if (scf_handle_decode_fmri(h, SCF_SERVICE_STARTD, NULL, NULL,
+ inst, NULL, NULL, SCF_DECODE_FMRI_EXACT) == -1) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ libscf_handle_rebind(h);
+ goto again;
+
+ case SCF_ERROR_NOT_FOUND:
+ ret = ENOENT;
+ goto reconfig_out;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ bad_error("scf_handle_decode_fmri", scf_error());
+ }
+ }
+
+ ret = libscf_inst_set_boolean_prop(inst, "system", SCF_GROUP_FRAMEWORK,
+ SCF_PG_FLAG_NONPERSISTENT, "reconfigure", set);
+ switch (ret) {
+ case 0:
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ break;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(h);
+ goto again;
+
+ case ECANCELED:
+ ret = ENOENT;
+ break;
+
+ default:
+ bad_error("libscf_inst_set_boolean_prop", ret);
+ }
+
+reconfig_out:
+ scf_pg_destroy(pg);
+ scf_instance_destroy(inst);
+ scf_handle_destroy(h);
+ return (ret);
+}
+
+/*
+ * Set inst->ri_m_inst to the scf instance for inst. If it has been deleted,
+ * set inst->ri_mi_deleted to true. If the repository connection is broken, it
+ * is rebound with libscf_handle_rebound().
+ */
+void
+libscf_reget_instance(restarter_inst_t *inst)
+{
+ scf_handle_t *h;
+ int r;
+
+ h = scf_instance_handle(inst->ri_m_inst);
+
+again:
+ r = libscf_lookup_instance(inst->ri_i.i_fmri, inst->ri_m_inst);
+ switch (r) {
+ case 0:
+ case ENOENT:
+ inst->ri_mi_deleted = (r == ENOENT);
+ return;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(h);
+ goto again;
+
+ case EINVAL:
+ case ENOTSUP:
+ default:
+ bad_error("libscf_lookup_instance", r);
+ }
+}
diff --git a/usr/src/cmd/svc/startd/log.c b/usr/src/cmd/svc/startd/log.c
new file mode 100644
index 0000000000..32dd39b756
--- /dev/null
+++ b/usr/src/cmd/svc/startd/log.c
@@ -0,0 +1,656 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * log.c - debugging and logging functions
+ *
+ * Logging destinations
+ * svc.startd(1M) supports three logging destinations: the system log, a
+ * daemon-specific log (in the /var/svc/log hierarchy by default), and the
+ * standard output. Any or all of these destinations may be used to
+ * communicate a specific message; the audiences for each destination differ.
+ *
+ * Generic messages associated with svc.startd(1M) are made by the
+ * log_framework() and log_error() functions. For these messages, svc.startd
+ * logs under its own name and under the LOG_DAEMON facility when issuing
+ * events to the system log. By design, severities below LOG_NOTICE are never
+ * issued to the system log.
+ *
+ * Messages associated with a specific service instance are logged using the
+ * log_instance() or log_instance_fmri() functions. These messages are always
+ * sent to the appropriate per-instance log file.
+ *
+ * In the case of verbose or debug boot, the log_transition() function
+ * displays messages regarding instance transitions to the system console,
+ * until the expected login services are available.
+ *
+ * Finally, log_console() displays messages to the system consoles and
+ * the master restarter log file. This is used when booting to a milestone
+ * other than 'all'.
+ *
+ * Logging detail
+ * The constants for severity from <syslog.h> are reused, with a specific
+ * convention here. (It is worth noting that the #define values for the LOG_
+ * levels are such that more important severities have lower values.) The
+ * severity determines the importance of the event, and its addressability by
+ * the administrator. Each severity level's use is defined below, along with
+ * an illustrative example.
+ *
+ * LOG_EMERG Not used presently.
+ *
+ * LOG_ALERT An unrecoverable operation requiring external
+ * intervention has occurred. Includes an inability to
+ * write to the smf(5) repository (due to svc.configd(1M)
+ * absence, due to permissions failures, etc.). Message
+ * should identify component at fault.
+ *
+ * LOG_CRIT An unrecoverable operation internal to svc.startd(1M)
+ * has occurred. Failure should be recoverable by restart
+ * of svc.startd(1M).
+ *
+ * LOG_ERR An smf(5) event requiring administrative intervention
+ * has occurred. Includes instance being moved to the
+ * maintenance state.
+ *
+ * LOG_WARNING A potentially destabilizing smf(5) event not requiring
+ * administrative intervention has occurred.
+ *
+ * LOG_NOTICE A noteworthy smf(5) event has occurred. Includes
+ * individual instance failures.
+ *
+ * LOG_INFO A noteworthy operation internal to svc.startd(1M) has
+ * occurred. Includes recoverable failures or otherwise
+ * unexpected outcomes.
+ *
+ * LOG_DEBUG An internal operation only of interest to a
+ * svc.startd(1M) developer has occurred.
+ *
+ * Logging configuration
+ * While the logging output can be configured using the -d and -v flags at
+ * invocation, the preferred approach is to set the logging property values
+ * in the options property group of the svc.startd default instance. The
+ * valid values are "quiet", "verbose", and "debug". "quiet" is the default;
+ * "verbose" and "debug" allow LOG_INFO and LOG_DEBUG logging requests to
+ * reach the daemon-specific log, respectively.
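+ *
+ * For example (shown for illustration only), an administrator could raise
+ * the logging level with svccfg(1M), typically followed by a refresh so
+ * that svc.startd re-reads its options:
+ *
+ *	# svccfg -s svc:/system/svc/restarter:default \
+ *	    setprop options/logging = astring: debug
+ *	# svcadm refresh svc:/system/svc/restarter:default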
+ */
+
+#include <sys/stat.h>
+#include <sys/statvfs.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <kstat.h>
+#include <libgen.h>
+#include <libintl.h>
+#include <libuutil.h>
+#include <locale.h>
+#include <malloc.h>
+#include <pthread.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <strings.h>
+#include <syslog.h>
+#include <unistd.h>
+#include <zone.h>
+
+#include "startd.h"
+
+
+#define LOGBUF_SZ (60 * 80) /* 60 lines */
+
+static FILE *logfile = NULL;
+
+#ifndef NDEBUG
+/*
+ * This is a circular buffer for all (even those not emitted externally)
+ * logging messages. To read it properly you should start after the first
+ * null, go until the second, and then go back to the beginning until the
+ * first null. Or use ::startd_log in mdb.
+ */
+/* LINTED unused */
+static const size_t logbuf_sz = LOGBUF_SZ; /* For mdb */
+static char logbuf[LOGBUF_SZ] = "";
+static pthread_mutex_t logbuf_mutex = PTHREAD_MUTEX_INITIALIZER;
+#endif
+
+static void
+xstrftime_poststart(char *buf, size_t bufsize, struct timeval *time)
+{
+ long sec, usec;
+
+ sec = time->tv_sec - st->st_start_time.tv_sec;
+ usec = time->tv_usec - st->st_start_time.tv_usec;
+
+ if (usec < 0) {
+ sec -= 1;
+ usec += 1000000;
+ }
+
+ (void) snprintf(buf, bufsize, "start + %ld.%02lds", sec, usec / 10000);
+}
+
+static void
+vlog_prefix(int severity, const char *prefix, const char *format, va_list args)
+{
+ char buf[512], *cp;
+ char timebuf[LOG_DATE_SIZE];
+ struct timeval now;
+ struct tm ltime;
+
+#ifdef NDEBUG
+ if (severity > st->st_log_level_min)
+ return;
+#endif
+
+ if (gettimeofday(&now, NULL) != 0)
+ (void) fprintf(stderr, "gettimeofday(3C) failed: %s\n",
+ strerror(errno));
+
+ if (st->st_log_timezone_known)
+ (void) strftime(timebuf, sizeof (timebuf), "%b %e %T",
+ localtime_r(&now.tv_sec, &ltime));
+ else
+ xstrftime_poststart(timebuf, sizeof (timebuf), &now);
+
+ (void) snprintf(buf, sizeof (buf), "%s/%d%s: ", timebuf, pthread_self(),
+ prefix);
+ cp = strchr(buf, '\0');
+ (void) vsnprintf(cp, sizeof (buf) - (cp - buf), format, args);
+
+#ifndef NDEBUG
+ /* Copy into logbuf. */
+ (void) pthread_mutex_lock(&logbuf_mutex);
+ if (strlen(logbuf) + strlen(buf) + 1 <= sizeof (logbuf))
+ (void) strcat(logbuf, buf);
+ else
+ (void) strlcpy(logbuf, buf, sizeof (logbuf));
+ (void) pthread_mutex_unlock(&logbuf_mutex);
+
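+ /*
+ * In debug builds every message is captured in logbuf above; the
+ * severity filter that NDEBUG builds applied on entry is only
+ * applied now, before the message is emitted externally.
+ */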
+ if (severity > st->st_log_level_min)
+ return;
+#endif
+
+ if (st->st_log_flags & STARTD_LOG_FILE && logfile)
+ (void) fputs(buf, logfile);
+ if (st->st_log_flags & STARTD_LOG_TERMINAL)
+ (void) fputs(buf, stdout);
+
+ if (st->st_log_timezone_known)
+ vsyslog(severity, format, args);
+
+ if (st->st_log_flags & STARTD_LOG_FILE && logfile)
+ (void) fflush(logfile);
+}
+
+/*PRINTFLIKE2*/
+void
+log_error(int severity, const char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ vlog_prefix(severity, " ERROR", format, args);
+ va_end(args);
+}
+
+/*PRINTFLIKE2*/
+void
+log_framework(int severity, const char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ vlog_prefix(severity, "", format, args);
+ va_end(args);
+}
+
+/*
+ * void log_preexec()
+ *
+ * log_preexec() should be invoked prior to any exec(2) calls, to prevent the
+ * logfile and syslogd file descriptors from being leaked to child processes.
+ * Why openlog(3C) lacks a close-on-exec option is a minor mystery.
+ */
+void
+log_preexec()
+{
+ closelog();
+}
+
+/*
+ * void setlog()
+ * Close file descriptors and redirect output.
+ */
+void
+setlog(const char *logstem)
+{
+ int fd;
+ char logfile[PATH_MAX];
+
+ closefrom(0);
+
+ (void) open("/dev/null", O_RDONLY);
+
+ (void) snprintf(logfile, PATH_MAX, "%s/%s", st->st_log_prefix, logstem);
+
+ (void) umask(fmask);
+ fd = open(logfile, O_WRONLY|O_CREAT|O_APPEND,
+ S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH);
+ (void) umask(dmask);
+
+ if (fd == -1)
+ return;
+
+ (void) dup2(fd, 1);
+ (void) dup2(fd, 2);
+
+ if (fd != 1 && fd != 2)
+ startd_close(fd);
+}
+
+static int
+log_dir_writeable(const char *path)
+{
+ int fd;
+ struct statvfs svb;
+
+ if ((fd = open(path, O_RDONLY, 0644)) == -1)
+ return (-1);
+
+ if (fstatvfs(fd, &svb) == -1) {
+ (void) close(fd);
+ return (-1);
+ }
+
+ if (svb.f_flag & ST_RDONLY) {
+ (void) close(fd);
+
+ fd = -1;
+ }
+
+ return (fd);
+}
+
+static void
+vlog_instance(const char *fmri, const char *logstem, boolean_t canlog,
+ const char *format, va_list args)
+{
+ char logfile[PATH_MAX];
+ char *message;
+ char omessage[1024];
+ int fd, err;
+ char timebuf[LOG_DATE_SIZE];
+ struct tm ltime;
+ struct timeval now;
+
+ (void) snprintf(logfile, PATH_MAX, "%s/%s", st->st_log_prefix,
+ logstem);
+
+ (void) umask(fmask);
+ fd = open(logfile, O_WRONLY|O_CREAT|O_APPEND,
+ S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH);
+ err = errno;
+ (void) umask(dmask);
+
+ if (fd == -1) {
+ if (canlog)
+ log_error(LOG_NOTICE, "Could not log for %s: open(%s) "
+ "failed with %s.\n", fmri, logfile, strerror(err));
+
+ return;
+ }
+
+ (void) vsnprintf(omessage, sizeof (omessage), format, args);
+
+ if (gettimeofday(&now, NULL) != 0)
+ (void) fprintf(stderr, "gettimeofday(3C) failed: %s\n",
+ strerror(errno));
+
+ if (st->st_log_timezone_known)
+ (void) strftime(timebuf, sizeof (timebuf), "%b %e %T",
+ localtime_r(&now.tv_sec, &ltime));
+ else
+ xstrftime_poststart(timebuf, sizeof (timebuf), &now);
+
+ message = uu_msprintf("[ %s %s ]\n", timebuf, omessage);
+
+ if (message == NULL) {
+ if (canlog)
+ log_error(LOG_NOTICE, "Could not log for %s: %s.\n",
+ fmri, uu_strerror(uu_error()));
+ } else {
+ if (write(fd, message, strlen(message)) < 0 && canlog)
+ log_error(LOG_NOTICE, "Could not log for %s: write(%d) "
+ "failed with %s.\n", fmri, fd,
+ strerror(errno));
+
+ uu_free(message);
+ }
+
+ if (close(fd) != 0 && canlog)
+ log_framework(LOG_NOTICE, "close(%d) failed: %s.\n", fd,
+ strerror(errno));
+}
+
+/*
+ * void log_instance(const restarter_inst_t *, boolean_t, const char *, ...)
+ *
+ * The log_instance() format is "[ month day time message ]". (The
+ * brackets distinguish svc.startd messages from method output.) We avoid
+ * calling log_*() functions on error when canlog is not set, since we may
+ * be called from a child process.
+ *
+ * When adding new calls to this function, consider: If this is called before
+ * any instances have started, then it should be called with canlog clear,
+ * lest we spew errors to the console when booted on the miniroot.
+ */
+/*PRINTFLIKE3*/
+void
+log_instance(const restarter_inst_t *inst, boolean_t canlog,
+ const char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ vlog_instance(inst->ri_i.i_fmri, inst->ri_logstem, canlog, format,
+ args);
+ va_end(args);
+}
+
+/*
+ * void log_instance_fmri(const char *, const char *,boolean_t, const char *,
+ * ...)
+ *
+ * The log_instance_fmri() format is "[ month day time message ]". (The
+ * brackets distinguish svc.startd messages from method output.) We avoid
+ * calling log_*() functions on error when canlog is not set, since we may
+ * be called from a child process.
+ *
+ * For new calls to this function, see the warning in log_instance()'s
+ * comment.
+ */
+/*PRINTFLIKE4*/
+void
+log_instance_fmri(const char *fmri, const char *logstem, boolean_t canlog,
+ const char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ vlog_instance(fmri, logstem, canlog, format, args);
+ va_end(args);
+}
+
+/*
+ * void log_transition(const restarter_inst_t *, start_outcome_t)
+ *
+ * The log_transition() format is
+ *
+ * [ _service_fmri_ _participle_ (_common_name_) ]
+ *
+ * Again, brackets separate messages from specific service instance output to
+ * the console.
+ */
+void
+log_transition(const restarter_inst_t *inst, start_outcome_t outcome)
+{
+ char *message;
+ char omessage[1024];
+ char *action;
+ int severity;
+
+ if (outcome == START_REQUESTED) {
+ char *cname = NULL;
+
+ cname = inst->ri_common_name;
+ if (cname == NULL)
+ cname = inst->ri_C_common_name;
+
+ if (!(st->st_boot_flags & STARTD_BOOT_VERBOSE))
+ return;
+
+ if (inst->ri_start_index > 1)
+ return;
+
+ if (cname)
+ (void) snprintf(omessage, sizeof (omessage), " (%s)",
+ cname);
+ else
+ *omessage = '\0';
+
+ action = gettext("starting");
+
+ message = uu_msprintf("[ %s %s%s ]\n",
+ inst->ri_i.i_fmri + strlen("svc:/"), action,
+ omessage);
+
+ severity = LOG_INFO;
+ } else {
+ switch (outcome) {
+ case START_FAILED_REPEATEDLY:
+ action = gettext("failed repeatedly");
+ break;
+ case START_FAILED_CONFIGURATION:
+ action = gettext("misconfigured");
+ break;
+ case START_FAILED_FATAL:
+ action = gettext("failed fatally");
+ break;
+ case START_FAILED_TIMEOUT_FATAL:
+ action = gettext("timed out, fault threshold reached");
+ break;
+ case START_FAILED_OTHER:
+ action = gettext("failed");
+ break;
+ case START_REQUESTED:
+ assert(outcome != START_REQUESTED);
+ /*FALLTHROUGH*/
+ default:
+ action = gettext("outcome unknown?");
+ }
+
+ message = uu_msprintf("[ %s %s %s ]\n",
+ inst->ri_i.i_fmri + strlen("svc:/"), action,
+ gettext("(see 'svcs -x' for details)"));
+
+ severity = LOG_ERR;
+ }
+
+
+ if (message == NULL) {
+ log_error(LOG_NOTICE,
+ "Could not log boot message for %s: %s.\n",
+ inst->ri_i.i_fmri, uu_strerror(uu_error()));
+ } else {
+ if (!st->st_log_login_reached) {
+ /*LINTED*/
+ if (fprintf(stderr, message) < 0)
+ log_error(LOG_NOTICE, "Could not log for %s: "
+ "fprintf() failed with %s.\n",
+ inst->ri_i.i_fmri, strerror(errno));
+ } else {
+ log_framework(severity, "%s %s\n",
+ inst->ri_i.i_fmri + strlen("svc:/"), action);
+ }
+
+ uu_free(message);
+ }
+}
+
+/*
+ * log_console - log a message to the consoles and to syslog
+ *
+ * This logs a message as-is to the console (and auxiliary consoles),
+ * as well as to the master restarter log.
+ */
+/*PRINTFLIKE2*/
+void
+log_console(int severity, const char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ vlog_prefix(severity, "", format, args);
+ va_end(args);
+
+ va_start(args, format);
+ (void) vfprintf(stderr, format, args);
+ va_end(args);
+}
+
+/*
+ * void log_init()
+ *
+ * Set up the log files, if necessary, for the current invocation. This
+ * function should be called before any other functions in this file. Set the
+ * syslog(3C) logging mask such that severities of the importance of
+ * LOG_NOTICE and above are passed through, but lower severity messages are
+ * masked out.
+ *
+ * It may be called multiple times to change the logging configuration due to
+ * administrative request.
+ */
+void
+log_init()
+{
+ int dirfd, logfd;
+ char *dir;
+ struct stat sb;
+
+ if (st->st_start_time.tv_sec == 0) {
+ if (getzoneid() != GLOBAL_ZONEID) {
+ st->st_start_time.tv_sec = time(NULL);
+ } else {
+ /*
+ * We need to special-case the BOOT_TIME utmp entry, and
+ * drag that value out of the kernel if it's there.
+ */
+ kstat_ctl_t *kc;
+ kstat_t *ks;
+ kstat_named_t *boot;
+
+ if (((kc = kstat_open()) != 0) &&
+ ((ks = kstat_lookup(kc, "unix", 0, "system_misc"))
+ != NULL) &&
+ (kstat_read(kc, ks, NULL) != -1) &&
+ ((boot = kstat_data_lookup(ks, "boot_time")) !=
+ NULL)) {
+ /*
+ * If we're here, then we've successfully found
+ * the boot_time kstat... use its value.
+ */
+ st->st_start_time.tv_sec = boot->value.ul;
+ } else {
+ st->st_start_time.tv_sec = time(NULL);
+ }
+
+ if (kc)
+ (void) kstat_close(kc);
+ }
+ }
+
+ /*
+ * Establish our timezone if the appropriate directory is available.
+ */
+ if (!st->st_log_timezone_known && stat(FS_TIMEZONE_DIR, &sb) == 0) {
+ tzset();
+ st->st_log_timezone_known = 1;
+ }
+
+ /*
+ * Establish our locale if the appropriate directory is available. Set
+ * the locale string from the environment so we can extract template
+ * information correctly, if the locale directories aren't yet
+ * available.
+ */
+ if (st->st_locale != NULL)
+ free(st->st_locale);
+
+ if ((st->st_locale = getenv("LC_ALL")) == NULL)
+ if ((st->st_locale = getenv("LC_MESSAGES")) == NULL)
+ st->st_locale = getenv("LANG");
+
+ if (!st->st_log_locale_known && stat(FS_LOCALE_DIR, &sb) == 0) {
+ (void) setlocale(LC_ALL, "");
+ st->st_locale = setlocale(LC_MESSAGES, "");
+ if (st->st_locale)
+ st->st_log_locale_known = 1;
+
+ (void) textdomain(TEXT_DOMAIN);
+ }
+
+ if (st->st_locale) {
+ st->st_locale = safe_strdup(st->st_locale);
+ xstr_sanitize(st->st_locale);
+ }
+
+ if (logfile) {
+ (void) fclose(logfile);
+ logfile = NULL;
+ }
+
+ /*
+ * Set syslog(3C) behaviour in all cases.
+ */
+ closelog();
+ openlog("svc.startd", LOG_PID | LOG_CONS, LOG_DAEMON);
+ (void) setlogmask(LOG_UPTO(LOG_NOTICE));
+
+ if ((dirfd = log_dir_writeable(LOG_PREFIX_NORMAL)) == -1) {
+ if ((dirfd = log_dir_writeable(LOG_PREFIX_EARLY)) == -1)
+ return;
+ else
+ dir = LOG_PREFIX_EARLY;
+ } else {
+ dir = LOG_PREFIX_NORMAL;
+ }
+
+ st->st_log_prefix = dir;
+
+ (void) umask(fmask);
+ if ((logfd = openat(dirfd, STARTD_DEFAULT_LOG, O_CREAT | O_RDWR,
+ 0644)) == -1) {
+ (void) close(dirfd);
+ (void) umask(dmask);
+ return;
+ }
+
+ (void) close(dirfd);
+ (void) umask(dmask);
+
+ if ((logfile = fdopen(logfd, "a")) == NULL)
+ if (errno != EROFS)
+ log_error(LOG_WARNING, "can't open logfile %s/%s",
+ dir, STARTD_DEFAULT_LOG);
+
+ if (logfile &&
+ fcntl(fileno(logfile), F_SETFD, FD_CLOEXEC) == -1)
+ log_error(LOG_WARNING,
+ "couldn't mark logfile close-on-exec: %s\n",
+ strerror(errno));
+}
diff --git a/usr/src/cmd/svc/startd/method.c b/usr/src/cmd/svc/startd/method.c
new file mode 100644
index 0000000000..c4257fdf2e
--- /dev/null
+++ b/usr/src/cmd/svc/startd/method.c
@@ -0,0 +1,1137 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * method.c - method execution functions
+ *
+ * This file contains the routines needed to run a method: a fork(2)-exec(2)
+ * invocation monitored using either the contract filesystem or waitpid(2).
+ * (Plain fork1(2) support is provided in fork.c.)
+ *
+ * Contract Transfer
+ * When we restart a service, we want to transfer any contracts that the old
+ * service's contract inherited. This means that (a) we must not abandon the
+ * old contract when the service dies and (b) we must write the id of the old
+ * contract into the terms of the new contract. There should be limits to
+ * (a), though, since we don't want to keep the contract around forever. To
+ * this end we'll say that services in the offline state may have a contract
+ * to be transferred and services in the disabled or maintenance states cannot.
+ * This means that when a service transitions from online (or degraded) to
+ * offline, the contract should be preserved, and when the service transitions
+ * from offline to online (i.e., the start method), we'll transfer inherited
+ * contracts.
+ */
+
+#include <sys/contract/process.h>
+#include <sys/ctfs.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/uio.h>
+#include <sys/wait.h>
+#include <alloca.h>
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <libcontract.h>
+#include <libcontract_priv.h>
+#include <libgen.h>
+#include <librestart.h>
+#include <libscf.h>
+#include <limits.h>
+#include <port.h>
+#include <sac.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+
+#include "startd.h"
+
+#define SBIN_SH "/sbin/sh"
+
+/*
+ * Mapping from restart_on method-type to contract events. Must correspond to
+ * enum method_restart_t.
+ */
+static uint_t method_events[] = {
+ /* METHOD_RESTART_ALL */
+ CT_PR_EV_HWERR | CT_PR_EV_SIGNAL | CT_PR_EV_CORE | CT_PR_EV_EMPTY,
+ /* METHOD_RESTART_EXTERNAL_FAULT */
+ CT_PR_EV_HWERR | CT_PR_EV_SIGNAL,
+ /* METHOD_RESTART_ANY_FAULT */
+ CT_PR_EV_HWERR | CT_PR_EV_SIGNAL | CT_PR_EV_CORE
+};
+
+/*
+ * method_record_start(restarter_inst_t *)
+ * Record a service start for rate limiting. Place the current time
+ * in the circular array of instance starts.
+ */
+static void
+method_record_start(restarter_inst_t *inst)
+{
+ int index = inst->ri_start_index++ % RINST_START_TIMES;
+
+ inst->ri_start_time[index] = gethrtime();
+}
+
+/*
+ * method_rate_critical(restarter_inst_t *)
+ * Return true if the average start interval is less than the permitted
+ * interval. Implicit success if insufficient measurements for an
+ * average exist.
+ */
+static int
+method_rate_critical(restarter_inst_t *inst)
+{
+ uint_t n = inst->ri_start_index;
+ hrtime_t avg_ns = 0;
+
+ if (inst->ri_start_index < RINST_START_TIMES)
+ return (0);
+
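+ /*
+ * ri_start_time[] is a circular buffer: slot (n - 1) % RINST_START_TIMES
+ * holds the most recent start and slot n % RINST_START_TIMES holds the
+ * oldest retained start, so this is that span divided by the number of
+ * intervals between them.
+ */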
+ avg_ns =
+ (inst->ri_start_time[(n - 1) % RINST_START_TIMES] -
+ inst->ri_start_time[n % RINST_START_TIMES]) /
+ (RINST_START_TIMES - 1);
+
+ return (avg_ns < RINST_FAILURE_RATE_NS);
+}
+
+/*
+ * int method_is_transient()
+ * Determine if the method for the given instance is transient,
+ * from a contract perspective. Return 1 if it is, and 0 if it isn't.
+ */
+static int
+method_is_transient(restarter_inst_t *inst, int type)
+{
+ if (instance_is_transient_style(inst) || type != METHOD_START)
+ return (1);
+ else
+ return (0);
+}
+
+/*
+ * void method_store_contract()
+ * Store the newly created contract id into local structures and
+ * the repository. If the repository connection is broken it is rebound.
+ */
+static void
+method_store_contract(restarter_inst_t *inst, int type, ctid_t *cid)
+{
+ int r;
+ boolean_t primary;
+
+ if ((errno = contract_latest(cid)) != 0)
+ uu_die("%s: Couldn't get new contract's id", inst->ri_i.i_fmri);
+
+ primary = !method_is_transient(inst, type);
+
+ if (!primary) {
+ if (inst->ri_i.i_transient_ctid != 0) {
+ log_framework(LOG_INFO,
+ "%s: transient ctid expected to be 0 but "
+ "was set to %ld\n", inst->ri_i.i_fmri,
+ inst->ri_i.i_transient_ctid);
+ }
+
+ inst->ri_i.i_transient_ctid = *cid;
+ } else {
+ if (inst->ri_i.i_primary_ctid != 0) {
+ /*
+ * There was an old contract that we transferred.
+ * Remove it.
+ */
+ method_remove_contract(inst, B_TRUE, B_FALSE);
+ }
+
+ if (inst->ri_i.i_primary_ctid != 0) {
+ log_framework(LOG_INFO,
+ "%s: primary ctid expected to be 0 but "
+ "was set to %ld\n", inst->ri_i.i_fmri,
+ inst->ri_i.i_primary_ctid);
+ }
+
+ inst->ri_i.i_primary_ctid = *cid;
+ inst->ri_i.i_primary_ctid_stopped = 0;
+
+ contract_hash_store(*cid, inst->ri_id);
+ }
+
+again:
+ if (inst->ri_mi_deleted)
+ return;
+
+ r = restarter_store_contract(inst->ri_m_inst, *cid, primary ?
+ RESTARTER_CONTRACT_PRIMARY : RESTARTER_CONTRACT_TRANSIENT);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ inst->ri_mi_deleted = B_TRUE;
+ break;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(scf_instance_handle(inst->ri_m_inst));
+ /* FALLTHROUGH */
+
+ case EBADF:
+ libscf_reget_instance(inst);
+ goto again;
+
+ case ENOMEM:
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ uu_die("%s: Couldn't store contract id %ld",
+ inst->ri_i.i_fmri, *cid);
+ /* NOTREACHED */
+
+ case EINVAL:
+ default:
+ bad_error("restarter_store_contract", r);
+ }
+}
+
+/*
+ * void method_remove_contract()
+ * Remove any non-permanent contracts from internal structures and
+ * the repository, then abandon them. This function returns nothing; if
+ * inst has been deleted from the repository, only the in-memory contract
+ * state is cleared.
+ *
+ * If the repository connection was broken, it is rebound.
+ */
+void
+method_remove_contract(restarter_inst_t *inst, boolean_t primary,
+ boolean_t abandon)
+{
+ ctid_t * const ctidp = primary ? &inst->ri_i.i_primary_ctid :
+ &inst->ri_i.i_transient_ctid;
+
+ int r;
+
+ assert(*ctidp != 0);
+
+ log_framework(LOG_DEBUG, "Removing %s contract %lu for %s.\n",
+ primary ? "primary" : "transient", *ctidp, inst->ri_i.i_fmri);
+
+ if (abandon)
+ contract_abandon(*ctidp);
+
+again:
+ if (inst->ri_mi_deleted) {
+ r = ECANCELED;
+ goto out;
+ }
+
+ r = restarter_remove_contract(inst->ri_m_inst, *ctidp, primary ?
+ RESTARTER_CONTRACT_PRIMARY : RESTARTER_CONTRACT_TRANSIENT);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ inst->ri_mi_deleted = B_TRUE;
+ break;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(scf_instance_handle(inst->ri_m_inst));
+ /* FALLTHROUGH */
+
+ case EBADF:
+ libscf_reget_instance(inst);
+ goto again;
+
+ case ENOMEM:
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ log_error(LOG_INFO, "%s: Couldn't remove contract id %ld: "
+ "%s.\n", inst->ri_i.i_fmri, *ctidp, strerror(r));
+ break;
+
+ case EINVAL:
+ default:
+ bad_error("restarter_remove_contract", r);
+ }
+
+out:
+ if (primary)
+ contract_hash_remove(*ctidp);
+
+ *ctidp = 0;
+}
+
+/*
+ * int method_ready_contract(restarter_inst_t *, int, method_restart_t, int)
+ *
+ * Activate a contract template for the type method of inst. type,
+ * restart_on, and cte_mask dictate the critical events term of the contract.
+ * Returns
+ * 0 - success
+ * ECANCELED - inst has been deleted from the repository
+ */
+static int
+method_ready_contract(restarter_inst_t *inst, int type,
+ method_restart_t restart_on, uint_t cte_mask)
+{
+ int tmpl, err, istrans, iswait, ret;
+ uint_t cevents, fevents;
+
+ /*
+ * Correctly supporting wait-style services is tricky without
+ * rearchitecting startd to cope with multiple event sources
+ * simultaneously trying to stop an instance. Until a better
+ * solution is implemented, we avoid this problem for
+ * wait-style services by making contract events fatal and
+ * letting the wait code alone handle stopping the service.
+ */
+ iswait = instance_is_wait_style(inst);
+ istrans = method_is_transient(inst, type);
+
+ tmpl = open64(CTFS_ROOT "/process/template", O_RDWR);
+ if (tmpl == -1)
+ uu_die("Could not create contract template");
+
+ /*
+ * We assume non-login processes are unlikely to create
+ * multiple process groups, and set CT_PR_PGRPONLY for all
+ * wait-style services' contracts.
+ */
+ err = ct_pr_tmpl_set_param(tmpl, CT_PR_INHERIT | CT_PR_REGENT |
+ (iswait ? CT_PR_PGRPONLY : 0));
+ assert(err == 0);
+
+ if (istrans) {
+ cevents = 0;
+ fevents = 0;
+ } else {
+ assert(restart_on >= 0);
+ assert(restart_on <= METHOD_RESTART_ANY_FAULT);
+ cevents = method_events[restart_on] & ~cte_mask;
+ fevents = iswait ?
+ (method_events[restart_on] & ~cte_mask & CT_PR_ALLFATAL) :
+ 0;
+ }
+
+ err = ct_tmpl_set_critical(tmpl, cevents);
+ assert(err == 0);
+
+ err = ct_tmpl_set_informative(tmpl, 0);
+ assert(err == 0);
+ err = ct_pr_tmpl_set_fatal(tmpl, fevents);
+ assert(err == 0);
+
+ err = ct_tmpl_set_cookie(tmpl, istrans ? METHOD_OTHER_COOKIE :
+ METHOD_START_COOKIE);
+ assert(err == 0);
+
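+ /*
+ * If an old primary contract was preserved across the restart (see
+ * "Contract Transfer" above), write its id into the transfer term of
+ * the new template so inherited contracts follow the service.
+ */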
+ if (type == METHOD_START && inst->ri_i.i_primary_ctid != 0) {
+ ret = ct_pr_tmpl_set_transfer(tmpl, inst->ri_i.i_primary_ctid);
+ switch (ret) {
+ case 0:
+ break;
+
+ case ENOTEMPTY:
+ /* No contracts for you! */
+ method_remove_contract(inst, B_TRUE, B_TRUE);
+ if (inst->ri_mi_deleted) {
+ ret = ECANCELED;
+ goto out;
+ }
+ break;
+
+ case EINVAL:
+ case ESRCH:
+ case EACCES:
+ default:
+ bad_error("ct_pr_tmpl_set_transfer", ret);
+ }
+ }
+
+ err = ct_tmpl_activate(tmpl);
+ assert(err == 0);
+
+ ret = 0;
+
+out:
+ err = close(tmpl);
+ assert(err == 0);
+
+ return (ret);
+}
+
+static const char *method_names[] = { "start", "stop", "refresh" };
+
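+/*
+ * void exec_method()
+ * Body of the forked child: mark utmpx, switch logging to the instance
+ * log, start a new session if need_session is set, apply the method
+ * context (credentials, project, pool), build the SMF environment, and
+ * exec the method command under /sbin/sh. Does not return on success;
+ * exits with a diagnostic status on failure.
+ */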
+static void
+exec_method(const restarter_inst_t *inst, int type, const char *method,
+ struct method_context *mcp, uint8_t need_session)
+{
+ char *cmd;
+ const char *errf;
+ char **nenv;
+
+ cmd = uu_msprintf("exec %s", method);
+
+ if (inst->ri_utmpx_prefix != NULL && inst->ri_utmpx_prefix[0] != '\0')
+ (void) utmpx_mark_init(getpid(), inst->ri_utmpx_prefix);
+
+ setlog(inst->ri_logstem);
+ log_instance(inst, B_FALSE, "Executing %s method (\"%s\")",
+ method_names[type], method);
+
+ if (need_session)
+ (void) setpgrp();
+
+ /* Set credentials. */
+ errno = restarter_set_method_context(mcp, &errf);
+ if (errno != 0) {
+ (void) fputs("svc.startd could not set context for method: ",
+ stderr);
+
+ if (errno == -1) {
+ if (strcmp(errf, "core_set_process_path") == 0) {
+ (void) fputs("Could not set corefile path.\n",
+ stderr);
+ } else if (strcmp(errf, "setproject") == 0) {
+ (void) fprintf(stderr, "%s: a resource control "
+ "assignment failed\n", errf);
+ } else if (strcmp(errf, "pool_set_binding") == 0) {
+ (void) fprintf(stderr, "%s: a system error "
+ "occurred\n", errf);
+ } else {
+#ifndef NDEBUG
+ uu_warn("%s:%d: Bad function name \"%s\" for "
+ "error %d from "
+ "restarter_set_method_context().\n",
+ __FILE__, __LINE__, errf, errno);
+#endif
+ abort();
+ }
+
+ exit(1);
+ }
+
+ if (errf != NULL && strcmp(errf, "pool_set_binding") == 0) {
+ switch (errno) {
+ case ENOENT:
+ (void) fprintf(stderr, "%s: the pool could not "
+ "be found\n", errf);
+ break;
+
+ case EBADF:
+ (void) fprintf(stderr, "%s: the configuration "
+ "is invalid\n", errf);
+ break;
+
+ default:
+#ifndef NDEBUG
+ uu_warn("%s:%d: Bad error %d for function %s "
+ "in restarter_set_method_context().\n",
+ __FILE__, __LINE__, errno, errf);
+#endif
+ abort();
+ }
+
+ exit(SMF_EXIT_ERR_CONFIG);
+ }
+
+ if (errf != NULL) {
+ perror(errf);
+
+ switch (errno) {
+ case EINVAL:
+ case EPERM:
+ case ENOENT:
+ case ENAMETOOLONG:
+ case ERANGE:
+ case ESRCH:
+ exit(SMF_EXIT_ERR_CONFIG);
+ /* NOTREACHED */
+
+ default:
+ exit(1);
+ }
+ }
+
+ switch (errno) {
+ case ENOMEM:
+ (void) fputs("Out of memory.\n", stderr);
+ exit(1);
+ /* NOTREACHED */
+
+ case ENOENT:
+ (void) fputs("Missing passwd entry for user.\n",
+ stderr);
+ exit(SMF_EXIT_ERR_CONFIG);
+ /* NOTREACHED */
+
+ default:
+#ifndef NDEBUG
+ uu_warn("%s:%d: Bad miscellaneous error %d from "
+ "restarter_set_method_context().\n", __FILE__,
+ __LINE__, errno);
+#endif
+ abort();
+ }
+ }
+
+ nenv = set_smf_env(mcp->env, mcp->env_sz, NULL, inst, method);
+
+ log_preexec();
+
+ (void) execle(SBIN_SH, SBIN_SH, "-c", cmd, NULL, nenv);
+
+ exit(10);
+}
+
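+/*
+ * void write_status()
+ * Record the exit status of the named method for the instance in the
+ * repository, re-fetching the instance and retrying if the repository
+ * connection is broken; if the instance has been deleted, ri_mi_deleted
+ * is set instead.
+ */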
+static void
+write_status(restarter_inst_t *inst, const char *mname, int stat)
+{
+ int r;
+
+again:
+ if (inst->ri_mi_deleted)
+ return;
+
+ r = libscf_write_method_status(inst->ri_m_inst, mname, stat);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ libscf_reget_instance(inst);
+ goto again;
+
+ case ECANCELED:
+ inst->ri_mi_deleted = B_TRUE;
+ break;
+
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ log_framework(LOG_INFO, "Could not write exit status "
+ "for %s method of %s: %s.\n", mname,
+ inst->ri_i.i_fmri, strerror(r));
+ break;
+
+ case ENAMETOOLONG:
+ default:
+ bad_error("libscf_write_method_status", r);
+ }
+}
+
+/*
+ * int method_run()
+ * Execute the type method of instp. If it requires a fork(), wait for it
+ * to return and return its exit code in *exit_code. Otherwise set
+ * *exit_code to 0 if the method succeeds & -1 if it fails. If the
+ * repository connection is broken, it is rebound, but inst may not be
+ * reset.
+ * Returns
+ * 0 - success
+ * EINVAL - A correct method or method context couldn't be retrieved.
+ * EIO - Contract kill failed.
+ * EFAULT - Method couldn't be executed successfully.
+ * ELOOP - Retry threshold exceeded.
+ * ECANCELED - inst was deleted from the repository before method was run
+ * ERANGE - Timeout retry threshold exceeded.
+ * EAGAIN - Failed due to external cause, retry.
+ */
+int
+method_run(restarter_inst_t **instp, int type, int *exit_code)
+{
+ char *method;
+ int ret_status;
+ pid_t pid;
+ method_restart_t restart_on;
+ uint_t cte_mask;
+ uint8_t need_session;
+ scf_handle_t *h;
+ scf_snapshot_t *snap;
+ const char *mname;
+ const char *errstr;
+ struct method_context *mcp;
+ int result = 0, timeout_fired = 0;
+ int sig, r;
+ boolean_t transient;
+ uint64_t timeout;
+ uint8_t timeout_retry;
+ ctid_t ctid;
+ int ctfd = -1;
+ ct_evthdl_t ctev;
+ uint_t evtype;
+ restarter_inst_t *inst = *instp;
+ int id = inst->ri_id;
+
+ assert(PTHREAD_MUTEX_HELD(&inst->ri_lock));
+ assert(instance_in_transition(inst));
+
+ if (inst->ri_mi_deleted)
+ return (ECANCELED);
+
+ *exit_code = 0;
+
+ assert(0 <= type && type <= 2);
+ mname = method_names[type];
+
+ if (type == METHOD_START)
+ inst->ri_pre_online_hook();
+
+ h = scf_instance_handle(inst->ri_m_inst);
+
+ snap = scf_snapshot_create(h);
+ if (snap == NULL ||
+ scf_instance_get_snapshot(inst->ri_m_inst, "running", snap) != 0) {
+ log_framework(LOG_DEBUG,
+ "Could not get running snapshot for %s. "
+ "Using editing version to run method %s.\n",
+ inst->ri_i.i_fmri, mname);
+ scf_snapshot_destroy(snap);
+ snap = NULL;
+ }
+
+ /*
+ * After this point, we may be logging to the instance log.
+ * Make sure we've noted where that log is as a property of
+ * the instance.
+ */
+ r = libscf_note_method_log(inst->ri_m_inst, st->st_log_prefix,
+ inst->ri_logstem);
+ if (r != 0) {
+ log_framework(LOG_WARNING,
+ "%s: couldn't note log location: %s\n",
+ inst->ri_i.i_fmri, strerror(r));
+ }
+
+ if ((method = libscf_get_method(h, type, inst, snap, &restart_on,
+ &cte_mask, &need_session, &timeout, &timeout_retry)) == NULL) {
+ if (errno == LIBSCF_PGROUP_ABSENT) {
+ log_framework(LOG_DEBUG,
+ "%s: instance has no method property group '%s'.\n",
+ inst->ri_i.i_fmri, mname);
+ if (type == METHOD_REFRESH)
+ log_instance(inst, B_TRUE, "No '%s' method "
+ "defined. Treating as :true.", mname);
+ else
+ log_instance(inst, B_TRUE, "Method property "
+ "group '%s' is not present.", mname);
+ scf_snapshot_destroy(snap);
+ return (0);
+ } else if (errno == LIBSCF_PROPERTY_ABSENT) {
+ log_framework(LOG_DEBUG,
+ "%s: instance has no '%s/exec' method property.\n",
+ inst->ri_i.i_fmri, mname);
+ log_instance(inst, B_TRUE, "Method property '%s/exec "
+ "is not present.", mname);
+ scf_snapshot_destroy(snap);
+ return (0);
+ } else {
+ log_error(LOG_WARNING,
+ "%s: instance libscf_get_method failed\n",
+ inst->ri_i.i_fmri);
+ scf_snapshot_destroy(snap);
+ return (EINVAL);
+ }
+ }
+
+ /* open service contract if stopping a non-transient service */
+ if (type == METHOD_STOP && (!instance_is_transient_style(inst))) {
+ if (inst->ri_i.i_primary_ctid == 0) {
+ /* service is not running, nothing to stop */
+ log_framework(LOG_DEBUG, "%s: instance has no primary "
+ "contract, no service to stop.\n",
+ inst->ri_i.i_fmri);
+ scf_snapshot_destroy(snap);
+ return (0);
+ }
+ if ((ctfd = contract_open(inst->ri_i.i_primary_ctid, "process",
+ "events", O_RDONLY)) < 0) {
+ result = EFAULT;
+ log_instance(inst, B_TRUE, "Could not open service "
+ "contract %ld. Stop method not run.\n",
+ inst->ri_i.i_primary_ctid);
+ goto out;
+ }
+ }
+
+ if (restarter_is_null_method(method)) {
+ log_framework(LOG_DEBUG, "%s: null method succeeds\n",
+ inst->ri_i.i_fmri);
+
+ log_instance(inst, B_TRUE, "Executing %s method (null)", mname);
+
+ if (type == METHOD_START)
+ write_status(inst, mname, 0);
+ goto out;
+ }
+
+ sig = restarter_is_kill_method(method);
+ if (sig >= 0) {
+
+ if (inst->ri_i.i_primary_ctid == 0) {
+ log_error(LOG_ERR, "%s: :kill with no contract\n",
+ inst->ri_i.i_fmri);
+ result = EINVAL;
+ goto out;
+ }
+
+ log_framework(LOG_DEBUG,
+ "%s: :killing contract with signal %d\n",
+ inst->ri_i.i_fmri, sig);
+
+ log_instance(inst, B_TRUE, "Executing %s method (:kill)",
+ mname);
+
+ if (contract_kill(inst->ri_i.i_primary_ctid, sig,
+ inst->ri_i.i_fmri) != 0) {
+ result = EIO;
+ goto out;
+ } else
+ goto assured_kill;
+ }
+
+ log_framework(LOG_DEBUG, "%s: forking to run method %s\n",
+ inst->ri_i.i_fmri, method);
+
+ errstr = restarter_get_method_context(RESTARTER_METHOD_CONTEXT_VERSION,
+ inst->ri_m_inst, snap, mname, method, &mcp);
+
+ if (errstr != NULL) {
+ log_error(LOG_WARNING, "%s: %s\n", inst->ri_i.i_fmri, errstr);
+ result = EINVAL;
+ goto out;
+ }
+
+ r = method_ready_contract(inst, type, restart_on, cte_mask);
+ if (r != 0) {
+ assert(r == ECANCELED);
+ assert(inst->ri_mi_deleted);
+ restarter_free_method_context(mcp);
+ result = ECANCELED;
+ goto out;
+ }
+
+ /*
+ * Validate safety of method contexts, to save children work.
+ */
+ if (!restarter_rm_libs_loadable())
+ log_framework(LOG_DEBUG, "%s: method contexts limited "
+ "to root-accessible libraries\n", inst->ri_i.i_fmri);
+
+ /*
+ * If the service is restarting too quickly, send it to
+ * maintenance.
+ */
+ if (type == METHOD_START) {
+ method_record_start(inst);
+ if (method_rate_critical(inst)) {
+ log_instance(inst, B_TRUE, "Restarting too quickly, "
+ "changing state to maintenance");
+ result = ELOOP;
+ goto out;
+ }
+ }
+
+ pid = startd_fork1(NULL);
+ if (pid == 0)
+ exec_method(inst, type, method, mcp, need_session);
+
+ if (pid == -1) {
+ log_error(LOG_WARNING,
+ "%s: Couldn't fork to execute method %s\n",
+ inst->ri_i.i_fmri, method);
+ result = EFAULT;
+ goto out;
+ }
+
+ restarter_free_method_context(mcp);
+
+ /*
+ * Get the contract id, decide whether it is primary or transient, and
+ * stash it in inst & the repository.
+ */
+ method_store_contract(inst, type, &ctid);
+
+ /*
+ * Similarly for the start method PID.
+ */
+ if (type == METHOD_START && !inst->ri_mi_deleted)
+ (void) libscf_write_start_pid(inst->ri_m_inst, pid);
+
+ if (instance_is_wait_style(inst) && type == METHOD_START) {
+ /* Wait style instances don't get timeouts on start methods. */
+ if (wait_register(pid, inst->ri_i.i_fmri, 1, 0)) {
+ log_error(LOG_WARNING,
+ "%s: couldn't register %ld for wait\n",
+ inst->ri_i.i_fmri, pid);
+ result = EFAULT;
+ goto contract_out;
+ }
+ write_status(inst, mname, 0);
+
+ } else {
+ int r, err;
+ time_t start_time;
+ time_t end_time;
+
+ /*
+ * Because on upgrade/live-upgrade we may have no chance
+ * to override faulty timeout values on the way to
+ * manifest import, all services on the path to manifest
+ * import are treated the same as INFINITE timeout services.
+ */
+
+ start_time = time(NULL);
+ if (timeout != METHOD_TIMEOUT_INFINITE && !is_timeout_ovr(inst))
+ timeout_insert(inst, ctid, timeout);
+ else
+ timeout = METHOD_TIMEOUT_INFINITE;
+
+ /* Unlock the instance while waiting for the method. */
+ MUTEX_UNLOCK(&inst->ri_lock);
+
+ do
+ r = waitpid(pid, &ret_status, 0);
+ while (r == -1 && errno == EINTR);
+ if (r == -1)
+ err = errno;
+
+ /* Re-grab the lock. */
+ inst = inst_lookup_by_id(id);
+
+ /*
+ * inst can't be removed, as the removal thread waits
+ * for completion of this one.
+ */
+ assert(inst != NULL);
+ *instp = inst;
+
+ if (inst->ri_timeout != NULL && inst->ri_timeout->te_fired)
+ timeout_fired = 1;
+
+ timeout_remove(inst, ctid);
+
+ log_framework(LOG_DEBUG,
+ "%s method for %s exited with status %d.\n", mname,
+ inst->ri_i.i_fmri, WEXITSTATUS(ret_status));
+
+ if (r == -1) {
+ log_error(LOG_WARNING,
+ "Couldn't waitpid() for %s method of %s (%s).\n",
+ mname, inst->ri_i.i_fmri, strerror(err));
+ result = EFAULT;
+ goto contract_out;
+ }
+
+ if (type == METHOD_START)
+ write_status(inst, mname, ret_status);
+
+ /* return ERANGE if this service doesn't retry on timeout */
+ if (timeout_fired == 1 && timeout_retry == 0) {
+ result = ERANGE;
+ goto contract_out;
+ }
+
+ if (!WIFEXITED(ret_status)) {
+ /*
+ * If method didn't exit itself (it was killed by an
+ * external entity, etc.), consider the entire
+ * method_run as failed.
+ */
+ if (WIFSIGNALED(ret_status)) {
+ char buf[SIG2STR_MAX];
+ (void) sig2str(WTERMSIG(ret_status), buf);
+
+ log_error(LOG_WARNING, "%s: Method \"%s\" "
+ "failed due to signal %s.\n",
+ inst->ri_i.i_fmri, method, buf);
+ log_instance(inst, B_TRUE, "Method \"%s\" "
+ "failed due to signal %s", mname, buf);
+ } else {
+ log_error(LOG_WARNING, "%s: Method \"%s\" "
+ "failed with exit status %d.\n",
+ inst->ri_i.i_fmri, method,
+ WEXITSTATUS(ret_status));
+ log_instance(inst, B_TRUE, "Method \"%s\" "
+ "failed with exit status %d", mname,
+ WEXITSTATUS(ret_status));
+ }
+ result = EAGAIN;
+ goto contract_out;
+ }
+
+ *exit_code = WEXITSTATUS(ret_status);
+ if (*exit_code != 0) {
+ log_error(LOG_WARNING,
+ "%s: Method \"%s\" failed with exit status %d.\n",
+ inst->ri_i.i_fmri, method, WEXITSTATUS(ret_status));
+ }
+
+ log_instance(inst, B_TRUE, "Method \"%s\" exited with status "
+ "%d", mname, *exit_code);
+
+ if (*exit_code != 0)
+ goto contract_out;
+
+ end_time = time(NULL);
+
+ /* Give service contract remaining seconds to empty */
+ if (timeout != METHOD_TIMEOUT_INFINITE)
+ timeout -= (end_time - start_time);
+ }
+
+assured_kill:
+ /*
+ * For stop methods, assure that the service contract has emptied
+ * before returning.
+ */
+ if (type == METHOD_STOP && (!instance_is_transient_style(inst)) &&
+ !(contract_is_empty(inst->ri_i.i_primary_ctid))) {
+
+ if (timeout != METHOD_TIMEOUT_INFINITE)
+ timeout_insert(inst, inst->ri_i.i_primary_ctid,
+ timeout);
+
+ for (;;) {
+ do {
+ r = ct_event_read_critical(ctfd, &ctev);
+ } while (r == EINTR);
+ if (r != 0)
+ break;
+
+ evtype = ct_event_get_type(ctev);
+ ct_event_free(ctev);
+ if (evtype == CT_PR_EV_EMPTY)
+ break;
+ }
+ if (r) {
+ result = EFAULT;
+ log_instance(inst, B_TRUE, "Error reading service "
+ "contract %ld.\n", inst->ri_i.i_primary_ctid);
+ }
+
+ if (timeout != METHOD_TIMEOUT_INFINITE)
+ if (inst->ri_timeout->te_fired)
+ result = EFAULT;
+
+ timeout_remove(inst, inst->ri_i.i_primary_ctid);
+ }
+
+contract_out:
+ /* Abandon contracts for transient methods & methods that fail. */
+ transient = method_is_transient(inst, type);
+ if ((transient || *exit_code != 0 || result != 0) &&
+ (restarter_is_kill_method(method) < 0))
+ method_remove_contract(inst, !transient, B_TRUE);
+
+out:
+ if (ctfd >= 0)
+ (void) close(ctfd);
+ scf_snapshot_destroy(snap);
+ free(method);
+ return (result);
+}
+
+/*
+ * The method thread executes a service method to effect a state transition.
+ * The next_state of info->sf_id should be non-_NONE on entrance, and it will
+ * be _NONE on exit (state will either be what next_state was (on success), or
+ * it will be _MAINT (on error)).
+ *
+ * There are six classes of methods to consider: start & other (stop, refresh)
+ * for each of "normal" services, wait services, and transient services. For
+ * each, the method must be fetched from the repository & executed. fork()ed
+ * methods must be waited on, except for the start method of wait services
+ * (which must be registered with the wait subsystem via wait_register()). If
+ * the method succeeded (returned 0), then for start methods its contract
+ * should be recorded as the primary contract for the service. For other
+ * methods, it should be abandoned. If the method fails, then depending on
+ * the failure, either the method should be reexecuted or the service should
+ * be put into maintenance. Either way the contract should be abandoned.
+ */
+void *
+method_thread(void *arg)
+{
+ fork_info_t *info = arg;
+ restarter_inst_t *inst;
+ scf_handle_t *local_handle;
+ scf_instance_t *s_inst = NULL;
+ int r, exit_code;
+ boolean_t retryable;
+ const char *aux;
+
+ assert(0 <= info->sf_method_type && info->sf_method_type <= 2);
+
+ /* Get (and lock) the restarter_inst_t. */
+ inst = inst_lookup_by_id(info->sf_id);
+
+ assert(inst->ri_method_thread != 0);
+ assert(instance_in_transition(inst) == 1);
+
+ /*
+ * We cannot leave this function with inst in transition, because
+ * protocol.c withholds messages for inst otherwise.
+ */
+
+ log_framework(LOG_DEBUG, "method_thread() running %s method for %s.\n",
+ method_names[info->sf_method_type], inst->ri_i.i_fmri);
+
+ local_handle = libscf_handle_create_bound_loop();
+
+rebind_retry:
+ /* get scf_instance_t */
+ switch (r = libscf_fmri_get_instance(local_handle, inst->ri_i.i_fmri,
+ &s_inst)) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(local_handle);
+ goto rebind_retry;
+
+ case ENOENT:
+ /*
+ * It's not there, but we need to call this so protocol.c
+ * doesn't think it's in transition anymore.
+ */
+ (void) restarter_instance_update_states(local_handle, inst,
+ inst->ri_i.i_state, RESTARTER_STATE_NONE, RERR_NONE,
+ NULL);
+ goto out;
+
+ case EINVAL:
+ case ENOTSUP:
+ default:
+ bad_error("libscf_fmri_get_instance", r);
+ }
+
+ inst->ri_m_inst = s_inst;
+ inst->ri_mi_deleted = B_FALSE;
+
+retry:
+ if (info->sf_method_type == METHOD_START)
+ log_transition(inst, START_REQUESTED);
+
+ r = method_run(&inst, info->sf_method_type, &exit_code);
+
+ if (r == 0 && exit_code == 0) {
+ /* Success! */
+ assert(inst->ri_i.i_next_state != RESTARTER_STATE_NONE);
+
+ /*
+ * When a stop method succeeds, remove the primary contract of
+ * the service, unless we're going to offline, in which case
+ * retain the contract so we can transfer inherited contracts to
+ * the replacement service.
+ */
+
+ if (info->sf_method_type == METHOD_STOP &&
+ inst->ri_i.i_primary_ctid != 0) {
+ if (inst->ri_i.i_next_state == RESTARTER_STATE_OFFLINE)
+ inst->ri_i.i_primary_ctid_stopped = 1;
+ else
+ method_remove_contract(inst, B_TRUE, B_TRUE);
+ }
+ /*
+ * We don't care whether the handle was rebound because this is
+ * the last thing we do with it.
+ */
+ (void) restarter_instance_update_states(local_handle, inst,
+ inst->ri_i.i_next_state, RESTARTER_STATE_NONE,
+ info->sf_event_type, NULL);
+
+ (void) update_fault_count(inst, FAULT_COUNT_RESET);
+
+ goto out;
+ }
+
+ /* Failure. Retry or go to maintenance. */
+
+ if (r != 0 && r != EAGAIN) {
+ retryable = B_FALSE;
+ } else {
+ switch (exit_code) {
+ case SMF_EXIT_ERR_CONFIG:
+ case SMF_EXIT_ERR_NOSMF:
+ case SMF_EXIT_ERR_PERM:
+ case SMF_EXIT_ERR_FATAL:
+ retryable = B_FALSE;
+ break;
+
+ default:
+ retryable = B_TRUE;
+ }
+ }
+
+ if (retryable && update_fault_count(inst, FAULT_COUNT_INCR) != 1)
+ goto retry;
+
+ /* maintenance */
+ if (r == ELOOP)
+ log_transition(inst, START_FAILED_REPEATEDLY);
+ else if (r == ERANGE)
+ log_transition(inst, START_FAILED_TIMEOUT_FATAL);
+ else if (exit_code == SMF_EXIT_ERR_CONFIG)
+ log_transition(inst, START_FAILED_CONFIGURATION);
+ else if (exit_code == SMF_EXIT_ERR_FATAL)
+ log_transition(inst, START_FAILED_FATAL);
+ else
+ log_transition(inst, START_FAILED_OTHER);
+
+ if (r == ELOOP)
+ aux = "restarting_too_quickly";
+ else if (retryable)
+ aux = "fault_threshold_reached";
+ else
+ aux = "method_failed";
+
+ (void) restarter_instance_update_states(local_handle, inst,
+ RESTARTER_STATE_MAINT, RESTARTER_STATE_NONE, RERR_FAULT,
+ (char *)aux);
+
+ if (!method_is_transient(inst, info->sf_method_type) &&
+ inst->ri_i.i_primary_ctid != 0)
+ method_remove_contract(inst, B_TRUE, B_TRUE);
+
+out:
+ inst->ri_method_thread = 0;
+ MUTEX_UNLOCK(&inst->ri_lock);
+ (void) pthread_cond_broadcast(&inst->ri_method_cv);
+
+ scf_instance_destroy(s_inst);
+ scf_handle_destroy(local_handle);
+ startd_free(info, sizeof (fork_info_t));
+ return (NULL);
+}
diff --git a/usr/src/cmd/svc/startd/misc.c b/usr/src/cmd/svc/startd/misc.c
new file mode 100644
index 0000000000..68981951cc
--- /dev/null
+++ b/usr/src/cmd/svc/startd/misc.c
@@ -0,0 +1,170 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * misc.c - miscellaneous and utility functions
+ */
+
+#include <sys/stat.h>
+#include <sys/statvfs.h>
+#include <sys/types.h>
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <libscf_priv.h>
+#include <libuutil.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <syslog.h>
+#include <unistd.h>
+
+#include "startd.h"
+
+void
+startd_close(int fd)
+{
+ if (close(fd) == 0)
+ return;
+
+ log_error(LOG_WARNING, "close(%d) failed: %s\n", fd, strerror(errno));
+ abort();
+}
+
+void
+startd_fclose(FILE *fp)
+{
+ if (fclose(fp) == 0)
+ return;
+
+ log_error(LOG_WARNING, "fclose() failed\n");
+ abort();
+}
+
+/*
+ * Canonify fmri. On success, sets *retp to a string which should be freed
+ * with startd_free( , max_scf_fmri_size) and returns 0. On failure returns
+ * EINVAL.
+ *
+ * If 'isinstance' is non-zero, then return EINVAL if the FMRI specifies
+ * anything other than an instance.
+ */
+int
+fmri_canonify(const char *fmri, char **retp, boolean_t isinstance)
+{
+ char *cf;
+
+ cf = startd_alloc(max_scf_fmri_size);
+
+ if (isinstance) {
+ const char *instance, *pg;
+
+ /*
+ * Verify that this fmri specifies an instance, using
+ * scf_parse_svc_fmri().
+ */
+ if (strlcpy(cf, fmri, max_scf_fmri_size) >= max_scf_fmri_size ||
+ scf_parse_svc_fmri(cf, NULL, NULL, &instance, &pg,
+ NULL) != 0) {
+ startd_free(cf, max_scf_fmri_size);
+ return (EINVAL);
+ }
+
+ if (instance == NULL || pg != NULL) {
+ startd_free(cf, max_scf_fmri_size);
+ return (EINVAL);
+ }
+ }
+
+ if (scf_canonify_fmri(fmri, cf, max_scf_fmri_size) < 0) {
+ startd_free(cf, max_scf_fmri_size);
+ return (EINVAL);
+ }
+
+ *retp = cf;
+ return (0);
+}
+
+/*
+ * int fs_is_read_only(char *, ulong_t *)
+ * Returns 1 if the given path is that of a filesystem with the ST_RDONLY flag
+ * set. 0 if ST_RDONLY is unset. -1 if the statvfs(2) call failed. If the
+ * second parameter is non-NULL, the fsid for the requested filesystem is
+ * written to the given address on success.
+ */
+int
+fs_is_read_only(char *path, ulong_t *fsidp)
+{
+ int err;
+ struct statvfs sfb;
+
+ do {
+ err = statvfs(path, &sfb);
+ } while (err == -1 && errno == EINTR);
+
+ if (err)
+ return (-1);
+
+ if (fsidp != NULL)
+ *fsidp = sfb.f_fsid;
+
+ if (sfb.f_flag & ST_RDONLY)
+ return (1);
+
+ return (0);
+}
+
+/*
+ * int fs_remount(char *)
+ * Attempt to remount the given filesystem read-write, so that we can unlock
+ * the repository (or handle other similar failures).
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+int
+fs_remount(char *path)
+{
+ if (fork_mount(path, "remount,rw"))
+ return (-1);
+
+ return (0);
+}
+
+/*
+ * void xstr_sanitize(char *s)
+ * In-place transform any non-alphanumeric characters (or '_') to '_'
+ * characters.
+ */
+void
+xstr_sanitize(char *s)
+{
+ for (; *s != '\0'; s++)
+ if (!isalnum(*s) && *s != '_')
+ *s = '_';
+}
diff --git a/usr/src/cmd/svc/startd/proc.c b/usr/src/cmd/svc/startd/proc.c
new file mode 100644
index 0000000000..405ac9ae60
--- /dev/null
+++ b/usr/src/cmd/svc/startd/proc.c
@@ -0,0 +1,60 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * This is in a separate file because procfs.h cannot be included if
+ * _FILE_OFFSET_BITS=64 is defined.
+ */
+
+#include <procfs.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
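+/*
+ * ctid_t proc_get_ctid()
+ * Return the contract id of the calling process, read from
+ * /proc/self/psinfo, or -1 if psinfo cannot be opened or read.
+ */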
+ctid_t
+proc_get_ctid()
+{
+ id_t ctid;
+ int fd;
+ ssize_t bytes;
+ psinfo_t psinfo;
+
+ fd = open("/proc/self/psinfo", O_RDONLY);
+ if (fd < 0)
+ return (-1);
+
+ bytes = read(fd, &psinfo, sizeof (psinfo));
+ if (bytes == sizeof (psinfo))
+ ctid = psinfo.pr_contract;
+ else
+ ctid = -1;
+
+ (void) close(fd);
+ return (ctid);
+}
diff --git a/usr/src/cmd/svc/startd/protocol.c b/usr/src/cmd/svc/startd/protocol.c
new file mode 100644
index 0000000000..b75f3ad266
--- /dev/null
+++ b/usr/src/cmd/svc/startd/protocol.c
@@ -0,0 +1,420 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * protocol.c - protocols between graph engine and restarters
+ *
+ * The graph engine uses restarter_protocol_send_event() to send a
+ * restarter_event_type_t to the restarter. For delegated restarters,
+ * this is published on the GPEC queue for the restarter, which can
+ * then be consumed by the librestart interfaces. For services managed
+ * by svc.startd, the event is stored on the local restarter_queue list,
+ * where it can be dequeued by the restarter.
+ *
+ * The svc.startd restarter uses graph_protocol_send_event() to send
+ * a graph_event_type_t to the graph engine when an instance's states are
+ * updated.
+ *
+ * The graph engine uses restarter_protocol_init_delegate() to
+ * register its interest in a particular delegated restarter's instance
+ * state events. The state_cb() registered on the event channel then
+ * invokes graph_protocol_send_event() to communicate the update to
+ * the graph engine.
+ */
+
+#include <assert.h>
+#include <libintl.h>
+#include <libsysevent.h>
+#include <pthread.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <strings.h>
+#include <sys/time.h>
+#include <errno.h>
+#include <libuutil.h>
+
+#include <librestart.h>
+#include <librestart_priv.h>
+
+#include "protocol.h"
+#include "startd.h"
+
+/* Local event queue structures. */
+typedef struct graph_protocol_event_queue {
+ uu_list_t *gpeq_event_list;
+ pthread_mutex_t gpeq_lock;
+} graph_protocol_event_queue_t;
+
+typedef struct restarter_protocol_event_queue {
+ uu_list_t *rpeq_event_list;
+ pthread_mutex_t rpeq_lock;
+} restarter_protocol_event_queue_t;
+
+static uu_list_pool_t *restarter_protocol_event_queue_pool;
+static restarter_protocol_event_queue_t *restarter_queue;
+
+static uu_list_pool_t *graph_protocol_event_queue_pool;
+static graph_protocol_event_queue_t *graph_queue;
+
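+/*
+ * void graph_protocol_init()
+ * Create the list pool and queue used to deliver graph protocol events
+ * to the graph engine, and initialize the queue lock and event list.
+ */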
+void
+graph_protocol_init()
+{
+ graph_protocol_event_queue_pool = startd_list_pool_create(
+ "graph_protocol_events", sizeof (graph_protocol_event_t),
+ offsetof(graph_protocol_event_t, gpe_link), NULL,
+ UU_LIST_POOL_DEBUG);
+
+ graph_queue = startd_zalloc(sizeof (graph_protocol_event_queue_t));
+
+ (void) pthread_mutex_init(&graph_queue->gpeq_lock, &mutex_attrs);
+ graph_queue->gpeq_event_list = startd_list_create(
+ graph_protocol_event_queue_pool, graph_queue, NULL);
+}
+
+/*
+ * "data" will be freed by the consumer
+ */
+static void
+graph_event_enqueue(const char *inst, graph_event_type_t event,
+ protocol_states_t *data)
+{
+ graph_protocol_event_t *e;
+
+ e = startd_zalloc(sizeof (graph_protocol_event_t));
+
+ if (inst != NULL) {
+ int size = strlen(inst) + 1;
+ e->gpe_inst = startd_alloc(size);
+ e->gpe_inst_sz = size;
+ (void) strlcpy(e->gpe_inst, inst, size);
+ }
+ e->gpe_type = event;
+ e->gpe_data = data;
+
+ (void) pthread_mutex_init(&e->gpe_lock, &mutex_attrs);
+
+ MUTEX_LOCK(&graph_queue->gpeq_lock);
+ uu_list_node_init(e, &e->gpe_link, graph_protocol_event_queue_pool);
+ if (uu_list_insert_before(graph_queue->gpeq_event_list, NULL, e) == -1)
+ uu_die("failed to enqueue graph event (%s: %s)\n",
+ e->gpe_inst, uu_strerror(uu_error()));
+
+ MUTEX_UNLOCK(&graph_queue->gpeq_lock);
+}
+
+void
+graph_event_release(graph_protocol_event_t *e)
+{
+ uu_list_node_fini(e, &e->gpe_link, graph_protocol_event_queue_pool);
+ (void) pthread_mutex_destroy(&e->gpe_lock);
+ if (e->gpe_inst != NULL)
+ startd_free(e->gpe_inst, e->gpe_inst_sz);
+ startd_free(e, sizeof (graph_protocol_event_t));
+}
+
+/*
+ * graph_protocol_event_t *graph_event_dequeue()
+ * The caller must hold gu_lock, and is expected to be a single thread.
+ * It is allowed to utilize graph_event_requeue() and abort processing
+ * on the event. If graph_event_requeue() is not called, the caller is
+ * expected to call graph_event_release() when finished.
+ */
+graph_protocol_event_t *
+graph_event_dequeue()
+{
+ graph_protocol_event_t *e;
+
+ MUTEX_LOCK(&graph_queue->gpeq_lock);
+
+ e = uu_list_first(graph_queue->gpeq_event_list);
+ if (e == NULL) {
+ MUTEX_UNLOCK(&graph_queue->gpeq_lock);
+ return (NULL);
+ }
+
+ if (uu_list_next(graph_queue->gpeq_event_list, e) != NULL)
+ gu->gu_wakeup = 1;
+ uu_list_remove(graph_queue->gpeq_event_list, e);
+ MUTEX_UNLOCK(&graph_queue->gpeq_lock);
+
+ return (e);
+}
+
+/*
+ * void graph_event_requeue()
+ * Requeue the event back at the head of the queue.
+ */
+void
+graph_event_requeue(graph_protocol_event_t *e)
+{
+ assert(e != NULL);
+
+ log_framework(LOG_DEBUG, "Requeing event\n");
+
+ MUTEX_LOCK(&graph_queue->gpeq_lock);
+ if (uu_list_insert_after(graph_queue->gpeq_event_list, NULL, e) == -1)
+ uu_die("failed to requeue graph event (%s: %s)\n",
+ e->gpe_inst, uu_strerror(uu_error()));
+
+ MUTEX_UNLOCK(&graph_queue->gpeq_lock);
+}
+
+void
+graph_protocol_send_event(const char *inst, graph_event_type_t event,
+ protocol_states_t *data)
+{
+ graph_event_enqueue(inst, event, data);
+ MUTEX_LOCK(&gu->gu_lock);
+ gu->gu_wakeup = 1;
+ (void) pthread_cond_broadcast(&gu->gu_cv);
+ MUTEX_UNLOCK(&gu->gu_lock);
+}
+
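+/*
+ * void restarter_protocol_init()
+ * Create the list pool and queue used to deliver restarter protocol
+ * events to the restarter for instances managed by svc.startd itself.
+ */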
+void
+restarter_protocol_init()
+{
+ restarter_protocol_event_queue_pool = startd_list_pool_create(
+ "restarter_protocol_events", sizeof (restarter_protocol_event_t),
+ offsetof(restarter_protocol_event_t, rpe_link), NULL,
+ UU_LIST_POOL_DEBUG);
+
+ restarter_queue = startd_zalloc(
+ sizeof (restarter_protocol_event_queue_t));
+
+ (void) pthread_mutex_init(&restarter_queue->rpeq_lock, &mutex_attrs);
+ restarter_queue->rpeq_event_list = startd_list_create(
+ restarter_protocol_event_queue_pool, restarter_queue, NULL);
+
+ log_framework(LOG_DEBUG, "Initialized restarter protocol\n");
+}
+
+/*
+ * void restarter_event_enqueue()
+ * Enqueue a restarter event.
+ */
+static void
+restarter_event_enqueue(const char *inst, restarter_event_type_t event)
+{
+ restarter_protocol_event_t *e;
+ int r;
+
+ /* Allocate and populate the event structure. */
+ e = startd_zalloc(sizeof (restarter_protocol_event_t));
+
+ e->rpe_inst = startd_alloc(strlen(inst) + 1);
+ (void) strlcpy(e->rpe_inst, inst, strlen(inst)+1);
+ e->rpe_type = event;
+
+ MUTEX_LOCK(&restarter_queue->rpeq_lock);
+ uu_list_node_init(e, &e->rpe_link, restarter_protocol_event_queue_pool);
+ r = uu_list_insert_before(restarter_queue->rpeq_event_list, NULL, e);
+ assert(r == 0);
+
+ MUTEX_UNLOCK(&restarter_queue->rpeq_lock);
+
+}
+
+void
+restarter_event_release(restarter_protocol_event_t *e)
+{
+ uu_list_node_fini(e, &e->rpe_link, restarter_protocol_event_queue_pool);
+ startd_free(e->rpe_inst, strlen(e->rpe_inst) + 1);
+ startd_free(e, sizeof (restarter_protocol_event_t));
+}
+
+/*
+ * restarter_protocol_event_t *restarter_event_dequeue()
+ * Dequeue a restarter protocol event. The caller is expected to be
+ * a single thread. It is allowed to utilize restarter_event_requeue()
+ * and abort processing on the event. The caller is expected to call
+ * restarter_event_release() when finished.
+ */
+restarter_protocol_event_t *
+restarter_event_dequeue()
+{
+ restarter_protocol_event_t *e = NULL;
+
+ MUTEX_LOCK(&restarter_queue->rpeq_lock);
+
+ e = uu_list_first(restarter_queue->rpeq_event_list);
+ if (e == NULL) {
+ MUTEX_UNLOCK(&restarter_queue->rpeq_lock);
+ return (NULL);
+ }
+
+ if (uu_list_next(restarter_queue->rpeq_event_list, e) != NULL)
+ ru->restarter_update_wakeup = 1;
+ uu_list_remove(restarter_queue->rpeq_event_list, e);
+ MUTEX_UNLOCK(&restarter_queue->rpeq_lock);
+
+ return (e);
+}
+
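+/*
+ * int state_cb()
+ * Subscription callback for instance state events published by a
+ * delegated restarter. Decodes the sysevent attribute list and forwards
+ * the state change to the graph engine via graph_protocol_send_event().
+ * Returns EAGAIN so the event is redelivered when the attribute list
+ * cannot be retrieved.
+ */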
+static int
+state_cb(sysevent_t *syse, void *cookie)
+{
+ char *fmri = (char *)cookie;
+ char *instance_name;
+ nvlist_t *attr_list = NULL;
+ int state, next_state;
+ protocol_states_t *states;
+ int err;
+
+ /*
+ * Might fail due to a bad event or a lack of memory. Try
+ * the callback again to see if it goes better the next time.
+ */
+ if (sysevent_get_attr_list(syse, &attr_list) != 0)
+ return (EAGAIN);
+
+ if ((nvlist_lookup_int32(attr_list, RESTARTER_NAME_STATE,
+ &state) != 0) ||
+ (nvlist_lookup_int32(attr_list, RESTARTER_NAME_NEXT_STATE,
+ &next_state) != 0) ||
+ (nvlist_lookup_int32(attr_list, RESTARTER_NAME_ERROR, &err) != 0) ||
+ (nvlist_lookup_string(attr_list, RESTARTER_NAME_INSTANCE,
+ &instance_name) != 0))
+ uu_die("%s: can't decode nvlist\n", fmri);
+
+ states = startd_alloc(sizeof (protocol_states_t));
+ states->ps_state = state;
+ states->ps_state_next = next_state;
+ states->ps_err = err;
+
+ graph_protocol_send_event(instance_name, GRAPH_UPDATE_STATE_CHANGE,
+ states);
+
+ log_framework(LOG_DEBUG, "%s: state updates for %s (%d, %d)\n", fmri,
+ instance_name, state, next_state);
+ nvlist_free(attr_list);
+ return (0);
+}
+
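+/*
+ * evchan_t *restarter_protocol_init_delegate()
+ * For a delegated restarter, bind its delegate and master event channels
+ * and subscribe to instance state events on the master channel with
+ * state_cb(). Returns the delegate channel, or NULL if fmri names the
+ * master restarter.
+ */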
+evchan_t *
+restarter_protocol_init_delegate(char *fmri)
+{
+ char *delegate_channel_name, *master_channel_name, *sid;
+ evchan_t *delegate_channel, *master_channel;
+
+ /* master restarter -- nothing to do */
+ if (strcmp(fmri, SCF_SERVICE_STARTD) == 0)
+ return (NULL);
+
+ log_framework(LOG_DEBUG, "%s: Intializing protocol for delegate\n",
+ fmri);
+
+ if ((delegate_channel_name = _restarter_get_channel_name(fmri,
+ RESTARTER_CHANNEL_DELEGATE)) == NULL ||
+ (master_channel_name = _restarter_get_channel_name(fmri,
+ RESTARTER_CHANNEL_MASTER)) == NULL ||
+ (sid = strdup("svc.startd")) == NULL)
+ uu_die("Allocation failure\n");
+
+ if (sysevent_evc_bind(delegate_channel_name, &delegate_channel,
+ EVCH_CREAT|EVCH_HOLD_PEND) != 0)
+ uu_die("%s: sysevent_evc_bind failed: %s\n",
+ delegate_channel_name, strerror(errno));
+ if (sysevent_evc_bind(master_channel_name, &master_channel,
+ EVCH_CREAT|EVCH_HOLD_PEND) != 0)
+ uu_die("%s: sysevent_evc_bind failed: %s\n",
+ master_channel_name, strerror(errno));
+ log_framework(LOG_DEBUG,
+ "%s: Bound to channel %s (delegate), %s (master)\n", fmri,
+ delegate_channel_name, master_channel_name);
+
+ if (sysevent_evc_subscribe(master_channel, sid, EC_ALL,
+ state_cb, fmri, EVCH_SUB_KEEP) != 0)
+ uu_die("%s: Failed to subscribe to channel %s with "
+ "subscriber id %s: %s\n", fmri,
+ master_channel_name, sid, strerror(errno));
+ log_framework(LOG_DEBUG,
+ "%s: Subscribed to channel %s with subscriber id %s\n", fmri,
+ master_channel_name, "svc.startd");
+
+ free(delegate_channel_name);
+ free(master_channel_name);
+ free(sid);
+
+ return (delegate_channel);
+}
+
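+/*
+ * void restarter_protocol_send_event()
+ * Deliver a restarter event for inst. With no delegate channel the event
+ * is queued locally and the restarter event thread is woken; otherwise
+ * the event is published on the delegate's channel. ADD_INSTANCE events
+ * also decrement the count of instances awaiting load.
+ */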
+void
+restarter_protocol_send_event(const char *inst, evchan_t *chan,
+ restarter_event_type_t event)
+{
+ nvlist_t *attr;
+
+ /*
+ * If the service is managed by the master restarter,
+ * queue the event locally.
+ */
+ if (chan == NULL) {
+ restarter_event_enqueue(inst, event);
+ MUTEX_LOCK(&ru->restarter_update_lock);
+ ru->restarter_update_wakeup = 1;
+ (void) pthread_cond_broadcast(&ru->restarter_update_cv);
+ MUTEX_UNLOCK(&ru->restarter_update_lock);
+ return;
+ }
+
+ /*
+ * Otherwise, send the event to the delegate.
+ */
+ log_framework(LOG_DEBUG, "Sending %s to channel 0x%p for %s.\n",
+ event_names[event], chan, inst);
+ if (nvlist_alloc(&attr, NV_UNIQUE_NAME, 0) != 0 ||
+ nvlist_add_uint32(attr, RESTARTER_NAME_TYPE, event) != 0 ||
+ nvlist_add_string(attr, RESTARTER_NAME_INSTANCE, (char *)inst) != 0)
+ uu_die("Allocation failure\n");
+
+ if (sysevent_evc_publish(chan, "protocol", "restarter", "com.sun",
+ "svc.startd", attr, EVCH_NOSLEEP) != 0) {
+ if (errno == EAGAIN)
+ uu_die("%s: queue is full\n", inst);
+ uu_die("%s: can't publish event: %s\n", inst, strerror(errno));
+ }
+ nvlist_free(attr);
+
+ if (event != RESTARTER_EVENT_TYPE_ADD_INSTANCE) {
+ /*
+ * Not relevant for graph loading.
+ */
+ return;
+ }
+
+ /*
+ * For the purposes of loading state after interruption, this is
+ * sufficient, as svc.startd(1M) won't receive events on the contracts
+ * associated with each delegate.
+ */
+ MUTEX_LOCK(&st->st_load_lock);
+ if (--st->st_load_instances == 0)
+ (void) pthread_cond_broadcast(&st->st_load_cv);
+ MUTEX_UNLOCK(&st->st_load_lock);
+
+}
diff --git a/usr/src/cmd/svc/startd/protocol.h b/usr/src/cmd/svc/startd/protocol.h
new file mode 100644
index 0000000000..f65aa57e72
--- /dev/null
+++ b/usr/src/cmd/svc/startd/protocol.h
@@ -0,0 +1,106 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _PROTOCOL_H
+#define _PROTOCOL_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <startd.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum {
+ GRAPH_UPDATE_RELOAD_GRAPH,
+ GRAPH_UPDATE_ADD_INSTANCE,
+ GRAPH_UPDATE_STATE_CHANGE
+} graph_event_type_t;
+
+typedef struct protocol_states {
+ restarter_instance_state_t ps_state;
+ restarter_instance_state_t ps_state_next;
+ restarter_error_t ps_err;
+} protocol_states_t;
+
+
+typedef struct graph_protocol_event {
+ char *gpe_inst;
+ size_t gpe_inst_sz;
+ graph_event_type_t gpe_type;
+ protocol_states_t *gpe_data;
+
+ uu_list_node_t gpe_link;
+ pthread_mutex_t gpe_lock;
+} graph_protocol_event_t;
+
+typedef struct graph_update {
+ pthread_mutex_t gu_lock;
+ pthread_cond_t gu_cv;
+ int gu_wakeup;
+
+ pthread_mutex_t gu_freeze_lock;
+ pthread_cond_t gu_freeze_cv;
+ int gu_freeze_wakeup;
+} graph_update_t;
+
+typedef struct restarter_protocol_event {
+ char *rpe_inst;
+ restarter_event_type_t rpe_type;
+
+ uu_list_node_t rpe_link;
+} restarter_protocol_event_t;
+
+typedef struct restarter_update {
+ pthread_mutex_t restarter_update_lock;
+ pthread_cond_t restarter_update_cv;
+ int restarter_update_wakeup;
+} restarter_update_t;
+
+extern restarter_update_t *ru;
+extern graph_update_t *gu;
+
+void graph_protocol_init();
+void graph_protocol_send_event(const char *, graph_event_type_t,
+ protocol_states_t *);
+graph_protocol_event_t *graph_event_dequeue();
+void graph_event_requeue(graph_protocol_event_t *);
+void graph_event_release(graph_protocol_event_t *);
+
+void restarter_protocol_init();
+evchan_t *restarter_protocol_init_delegate(char *);
+void restarter_protocol_send_event(const char *, evchan_t *,
+ restarter_event_type_t);
+restarter_protocol_event_t *restarter_event_dequeue();
+void restarter_event_requeue(restarter_protocol_event_t *);
+void restarter_event_release(restarter_protocol_event_t *);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _PROTOCOL_H */
diff --git a/usr/src/cmd/svc/startd/restarter.c b/usr/src/cmd/svc/startd/restarter.c
new file mode 100644
index 0000000000..096ffde037
--- /dev/null
+++ b/usr/src/cmd/svc/startd/restarter.c
@@ -0,0 +1,2308 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * restarter.c - service manipulation
+ *
+ * This component manages services whose restarter is svc.startd, the standard
+ * restarter. It translates restarter protocol events from the graph engine
+ * into actions on processes, as a delegated restarter would do.
+ *
+ * The master restarter manages a number of always-running threads:
+ * - restarter event thread: events from the graph engine
+ * - timeout thread: thread to fire queued timeouts
+ * - contract thread: thread to handle contract events
+ * - wait thread: thread to handle wait-based services
+ *
+ * The other threads are created as-needed:
+ * - per-instance method threads
+ * - per-instance event processing threads
+ *
+ * The interaction of all threads must result in the following conditions
+ * being satisfied (on a per-instance basis):
+ * - restarter events must be processed in order
+ * - method execution must be serialized
+ * - instance delete must be held until outstanding methods are complete
+ * - contract events shouldn't be processed while a method is running
+ * - timeouts should fire even when a method is running
+ *
+ * Service instances are represented by restarter_inst_t's and are kept in the
+ * instance_list list.
+ *
+ * Service States
+ * The current state of a service instance is kept in
+ * restarter_inst_t->ri_i.i_state. If transition to a new state could take
+ * some time, then before we effect the transition we set
+ * restarter_inst_t->ri_i.i_next_state to the target state, and afterwards we
+ * rotate i_next_state to i_state and set i_next_state to
+ * RESTARTER_STATE_NONE. So usually i_next_state is _NONE when ri_lock is not
+ * held. The exception is when we launch methods, which are done with
+ * a separate thread. To keep any other threads from grabbing ri_lock before
+ * method_thread() does, we set ri_method_thread to the thread id of the
+ * method thread, and when it is nonzero any thread with a different thread id
+ * waits on ri_method_cv.
+ *
+ * Method execution is serialized by blocking on ri_method_cv in
+ * inst_lookup_by_id() and waiting for a 0 value of ri_method_thread. This
+ * also prevents the instance structure from being deleted until all
+ * outstanding operations such as method_thread() have finished.
+ *
+ * Lock ordering:
+ *
+ * dgraph_lock [can be held when taking:]
+ * utmpx_lock
+ * dictionary->dict_lock
+ * st->st_load_lock
+ * wait_info_lock
+ * ru->restarter_update_lock
+ * restarter_queue->rpeq_lock
+ * instance_list.ril_lock
+ * inst->ri_lock
+ * st->st_configd_live_lock
+ *
+ * instance_list.ril_lock
+ * graph_queue->gpeq_lock
+ * gu->gu_lock
+ * st->st_configd_live_lock
+ * dictionary->dict_lock
+ * inst->ri_lock
+ * graph_queue->gpeq_lock
+ * gu->gu_lock
+ * tu->tu_lock
+ * tq->tq_lock
+ * inst->ri_queue_lock
+ * wait_info_lock
+ * bp->cb_lock
+ * utmpx_lock
+ *
+ * single_user_thread_lock
+ * wait_info_lock
+ * utmpx_lock
+ *
+ * gu_freeze_lock
+ *
+ * logbuf_mutex nests inside pretty much everything.
+ */
+
+#include <sys/contract/process.h>
+#include <sys/ctfs.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/uio.h>
+#include <sys/wait.h>
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <libcontract.h>
+#include <libcontract_priv.h>
+#include <libintl.h>
+#include <librestart.h>
+#include <librestart_priv.h>
+#include <libuutil.h>
+#include <limits.h>
+#include <poll.h>
+#include <port.h>
+#include <pthread.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <strings.h>
+#include <unistd.h>
+
+#include "startd.h"
+#include "protocol.h"
+
+static uu_list_pool_t *restarter_instance_pool;
+static restarter_instance_list_t instance_list;
+
+static uu_list_pool_t *restarter_queue_pool;
+
+/*ARGSUSED*/
+static int
+restarter_instance_compare(const void *lc_arg, const void *rc_arg,
+ void *private)
+{
+ int lc_id = ((const restarter_inst_t *)lc_arg)->ri_id;
+ int rc_id = *(int *)rc_arg;
+
+ if (lc_id > rc_id)
+ return (1);
+ if (lc_id < rc_id)
+ return (-1);
+ return (0);
+}
+
+static restarter_inst_t *
+inst_lookup_by_name(const char *name)
+{
+ int id;
+
+ id = dict_lookup_byname(name);
+ if (id == -1)
+ return (NULL);
+
+ return (inst_lookup_by_id(id));
+}
+
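+/*
+ * restarter_inst_t *inst_lookup_by_id(int)
+ * Look up an instance by id and return it with ri_lock held, or NULL if
+ * it is not in the instance list. If a method thread other than the
+ * caller is running for the instance, wait on ri_method_cv until it
+ * finishes so that method execution stays serialized.
+ */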
+restarter_inst_t *
+inst_lookup_by_id(int id)
+{
+ restarter_inst_t *inst;
+
+ MUTEX_LOCK(&instance_list.ril_lock);
+ inst = uu_list_find(instance_list.ril_instance_list, &id, NULL, NULL);
+ if (inst != NULL)
+ MUTEX_LOCK(&inst->ri_lock);
+ MUTEX_UNLOCK(&instance_list.ril_lock);
+
+ if (inst != NULL) {
+ while (inst->ri_method_thread != 0 &&
+ !pthread_equal(inst->ri_method_thread, pthread_self())) {
+ ++inst->ri_method_waiters;
+ (void) pthread_cond_wait(&inst->ri_method_cv,
+ &inst->ri_lock);
+ assert(inst->ri_method_waiters > 0);
+ --inst->ri_method_waiters;
+ }
+ }
+
+ return (inst);
+}
+
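+/*
+ * static restarter_inst_t *inst_lookup_queue(const char *)
+ * Look up an instance by name and return it with ri_queue_lock held, or
+ * NULL if it is not in the instance list.
+ */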
+static restarter_inst_t *
+inst_lookup_queue(const char *name)
+{
+ int id;
+ restarter_inst_t *inst;
+
+ id = dict_lookup_byname(name);
+ if (id == -1)
+ return (NULL);
+
+ MUTEX_LOCK(&instance_list.ril_lock);
+ inst = uu_list_find(instance_list.ril_instance_list, &id, NULL, NULL);
+ if (inst != NULL)
+ MUTEX_LOCK(&inst->ri_queue_lock);
+ MUTEX_UNLOCK(&instance_list.ril_lock);
+
+ return (inst);
+}
+
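+/*
+ * const char *service_style(int)
+ * Map the RINST_STYLE_MASK bits of an instance's flags to a printable
+ * style name ("contract", "transient", or "wait").
+ */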
+const char *
+service_style(int flags)
+{
+ switch (flags & RINST_STYLE_MASK) {
+ case RINST_CONTRACT: return ("contract");
+ case RINST_TRANSIENT: return ("transient");
+ case RINST_WAIT: return ("wait");
+
+ default:
+#ifndef NDEBUG
+ uu_warn("%s:%d: Bad flags 0x%x.\n", __FILE__, __LINE__, flags);
+#endif
+ abort();
+ /* NOTREACHED */
+ }
+}
+
+/*
+ * Fails with ECONNABORTED or ECANCELED.
+ */
+static int
+check_contract(restarter_inst_t *inst, boolean_t primary,
+ scf_instance_t *scf_inst)
+{
+ ctid_t *ctidp;
+ int fd, r;
+
+ ctidp = primary ? &inst->ri_i.i_primary_ctid :
+ &inst->ri_i.i_transient_ctid;
+
+ assert(*ctidp >= 1);
+
+ fd = contract_open(*ctidp, NULL, "status", O_RDONLY);
+ if (fd >= 0) {
+ r = close(fd);
+ assert(r == 0);
+ return (0);
+ }
+
+ r = restarter_remove_contract(scf_inst, *ctidp, primary ?
+ RESTARTER_CONTRACT_PRIMARY : RESTARTER_CONTRACT_TRANSIENT);
+ switch (r) {
+ case 0:
+ case ECONNABORTED:
+ case ECANCELED:
+ *ctidp = 0;
+ return (r);
+
+ case ENOMEM:
+ uu_die("Out of memory\n");
+ /* NOTREACHED */
+
+ case EPERM:
+ uu_die("Insufficient privilege.\n");
+ /* NOTREACHED */
+
+ case EACCES:
+ uu_die("Repository backend access denied.\n");
+ /* NOTREACHED */
+
+ case EROFS:
+ log_error(LOG_INFO, "Could not remove unusable contract id %ld "
+ "for %s from repository.\n", *ctidp, inst->ri_i.i_fmri);
+ return (0);
+
+ case EINVAL:
+ case EBADF:
+ default:
+ assert(0);
+ abort();
+ /* NOTREACHED */
+ }
+}
+
+static int stop_instance(scf_handle_t *, restarter_inst_t *, stop_cause_t);
+
+/*
+ * int restarter_insert_inst(scf_handle_t *, char *)
+ * If the inst is already in the restarter list, return its id. If the inst
+ * is not in the restarter list, initialize a restarter_inst_t, initialize its
+ * states, insert it into the list, and return 0.
+ *
+ * Fails with
+ * ENOENT - name is not in the repository
+ */
+static int
+restarter_insert_inst(scf_handle_t *h, const char *name)
+{
+ int id, r;
+ restarter_inst_t *inst;
+ uu_list_index_t idx;
+ scf_service_t *scf_svc;
+ scf_instance_t *scf_inst;
+ scf_snapshot_t *snap;
+ scf_propertygroup_t *pg;
+ char *svc_name, *inst_name;
+ char logfilebuf[PATH_MAX];
+ char *c;
+ boolean_t do_commit_states;
+ restarter_instance_state_t state, next_state;
+ protocol_states_t *ps;
+ pid_t start_pid;
+
+ MUTEX_LOCK(&instance_list.ril_lock);
+
+ /*
+ * We don't use inst_lookup_by_name() here because we want the lookup
+ * & insert to be atomic.
+ */
+ id = dict_lookup_byname(name);
+ if (id != -1) {
+ inst = uu_list_find(instance_list.ril_instance_list, &id, NULL,
+ &idx);
+ if (inst != NULL) {
+ MUTEX_UNLOCK(&instance_list.ril_lock);
+ return (0);
+ }
+ }
+
+ /* Allocate an instance */
+ inst = startd_zalloc(sizeof (restarter_inst_t));
+ inst->ri_utmpx_prefix = startd_alloc(max_scf_value_size);
+ inst->ri_utmpx_prefix[0] = '\0';
+
+ inst->ri_i.i_fmri = startd_alloc(strlen(name) + 1);
+ (void) strcpy((char *)inst->ri_i.i_fmri, name);
+
+ inst->ri_queue = startd_list_create(restarter_queue_pool, inst, 0);
+
+ /*
+ * id shouldn't be -1 since we use the same dictionary as graph.c, but
+ * just in case.
+ */
+ inst->ri_id = (id != -1 ? id : dict_insert(name));
+
+ special_online_hooks_get(name, &inst->ri_pre_online_hook,
+ &inst->ri_post_online_hook, &inst->ri_post_offline_hook);
+
+ scf_svc = safe_scf_service_create(h);
+ scf_inst = safe_scf_instance_create(h);
+ pg = safe_scf_pg_create(h);
+ svc_name = startd_alloc(max_scf_name_size);
+ inst_name = startd_alloc(max_scf_name_size);
+
+rep_retry:
+ if (scf_handle_decode_fmri(h, name, NULL, scf_svc, scf_inst, NULL,
+ NULL, SCF_DECODE_FMRI_EXACT) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ libscf_handle_rebind(h);
+ goto rep_retry;
+
+ case SCF_ERROR_NOT_FOUND:
+deleted:
+ MUTEX_UNLOCK(&instance_list.ril_lock);
+ startd_free(inst_name, max_scf_name_size);
+ startd_free(svc_name, max_scf_name_size);
+ scf_pg_destroy(pg);
+ scf_instance_destroy(scf_inst);
+ scf_service_destroy(scf_svc);
+ startd_free((void *)inst->ri_i.i_fmri,
+ strlen(inst->ri_i.i_fmri) + 1);
+ startd_free(inst, sizeof (restarter_inst_t));
+ return (ENOENT);
+ }
+
+ uu_die("Can't decode FMRI %s: %s\n", name,
+ scf_strerror(scf_error()));
+ }
+
+ /*
+ * If there's no running snapshot, then we execute using the editing
+ * snapshot. Pending snapshots will be taken later.
+ */
+ snap = libscf_get_running_snapshot(scf_inst);
+
+ if ((scf_service_get_name(scf_svc, svc_name, max_scf_name_size) < 0) ||
+ (scf_instance_get_name(scf_inst, inst_name, max_scf_name_size) <
+ 0)) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_SET:
+ break;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ libscf_handle_rebind(h);
+ goto rep_retry;
+
+ default:
+ assert(0);
+ abort();
+ }
+
+ scf_snapshot_destroy(snap);
+ goto deleted;
+ }
+
+ /*
+ * If the restarter group is missing, use uninit/none. Otherwise,
+ * we're probably being restarted & don't want to mess up the states
+ * that are there.
+ */
+ state = RESTARTER_STATE_UNINIT;
+ next_state = RESTARTER_STATE_NONE;
+
+ r = scf_instance_get_pg(scf_inst, SCF_PG_RESTARTER, pg);
+ if (r != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ libscf_handle_rebind(h);
+ goto rep_retry;
+
+ case SCF_ERROR_NOT_SET:
+ scf_snapshot_destroy(snap);
+ goto deleted;
+
+ case SCF_ERROR_NOT_FOUND:
+ /*
+ * This shouldn't happen since the graph engine should
+ * have initialized the state to uninitialized/none if
+ * there was no restarter pg. In case somebody
+ * deleted it, though....
+ */
+ do_commit_states = B_TRUE;
+ break;
+
+ default:
+ assert(0);
+ abort();
+ }
+ } else {
+ r = libscf_read_states(pg, &state, &next_state);
+ if (r != 0) {
+ do_commit_states = B_TRUE;
+ } else {
+ if (next_state != RESTARTER_STATE_NONE) {
+ /*
+ * Force next_state to _NONE since we
+ * don't look for method processes.
+ */
+ next_state = RESTARTER_STATE_NONE;
+ do_commit_states = B_TRUE;
+ } else {
+ /*
+ * Inform the restarter of our state without
+ * changing the STIME in the repository.
+ */
+ ps = startd_alloc(sizeof (*ps));
+ inst->ri_i.i_state = ps->ps_state = state;
+ inst->ri_i.i_next_state = ps->ps_state_next =
+ next_state;
+
+ graph_protocol_send_event(inst->ri_i.i_fmri,
+ GRAPH_UPDATE_STATE_CHANGE, ps);
+
+ do_commit_states = B_FALSE;
+ }
+ }
+ }
+
+ switch (libscf_get_startd_properties(scf_inst, snap, &inst->ri_flags,
+ &inst->ri_utmpx_prefix)) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(h);
+ goto rep_retry;
+
+ case ECANCELED:
+ scf_snapshot_destroy(snap);
+ startd_free(inst->ri_utmpx_prefix, max_scf_value_size);
+ goto deleted;
+
+ case ENOENT:
+ /*
+ * This is odd, because the graph engine should have required
+ * the general property group. So we'll just use default
+ * flags in anticipation of the graph engine sending us
+ * REMOVE_INSTANCE when it finds out that the general property
+ * group has been deleted.
+ */
+ inst->ri_flags = RINST_CONTRACT;
+ break;
+
+ default:
+ assert(0);
+ abort();
+ }
+
+ switch (libscf_get_template_values(scf_inst, snap,
+ &inst->ri_common_name, &inst->ri_C_common_name)) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(h);
+ goto rep_retry;
+
+ case ECANCELED:
+ scf_snapshot_destroy(snap);
+ startd_free(inst->ri_common_name, max_scf_value_size);
+ inst->ri_common_name = NULL;
+ goto deleted;
+
+ case ECHILD:
+ case ENOENT:
+ break;
+
+ default:
+ assert(0);
+ abort();
+ }
+
+ switch (libscf_read_method_ids(h, scf_inst, inst->ri_i.i_fmri,
+ &inst->ri_i.i_primary_ctid, &inst->ri_i.i_transient_ctid,
+ &start_pid)) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(h);
+ goto rep_retry;
+
+ case ECANCELED:
+ scf_snapshot_destroy(snap);
+ goto deleted;
+
+ default:
+ assert(0);
+ abort();
+ }
+
+ if (inst->ri_i.i_primary_ctid >= 1) {
+ contract_hash_store(inst->ri_i.i_primary_ctid, inst->ri_id);
+
+ switch (check_contract(inst, B_TRUE, scf_inst)) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(h);
+ goto rep_retry;
+
+ case ECANCELED:
+ scf_snapshot_destroy(snap);
+ goto deleted;
+
+ default:
+ assert(0);
+ abort();
+ }
+ }
+
+ if (inst->ri_i.i_transient_ctid >= 1) {
+ switch (check_contract(inst, B_FALSE, scf_inst)) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(h);
+ goto rep_retry;
+
+ case ECANCELED:
+ scf_snapshot_destroy(snap);
+ goto deleted;
+
+ default:
+ assert(0);
+ abort();
+ }
+ }
+
+ /* There are no more failures to back out of, so add it to the list. */
+ (void) pthread_mutex_init(&inst->ri_lock, &mutex_attrs);
+ (void) pthread_mutex_init(&inst->ri_queue_lock, &mutex_attrs);
+ MUTEX_LOCK(&inst->ri_lock);
+ MUTEX_LOCK(&inst->ri_queue_lock);
+
+ (void) pthread_cond_init(&inst->ri_method_cv, NULL);
+
+ uu_list_node_init(inst, &inst->ri_link, restarter_instance_pool);
+ uu_list_insert(instance_list.ril_instance_list, inst, idx);
+ MUTEX_UNLOCK(&instance_list.ril_lock);
+
+ if (start_pid != -1 &&
+ (inst->ri_flags & RINST_STYLE_MASK) == RINST_WAIT) {
+ int ret;
+ ret = wait_register(start_pid, inst->ri_i.i_fmri, 0, 1);
+ if (ret == -1) {
+ /*
+ * Implication: if we can't reregister the
+ * instance, we will start another one. Two
+ * instances may or may not result in a resource
+ * conflict.
+ */
+ log_error(LOG_WARNING,
+ "%s: couldn't reregister %ld for wait\n",
+ inst->ri_i.i_fmri, start_pid);
+ } else if (ret == 1) {
+ /*
+ * Leading PID has exited.
+ */
+ (void) stop_instance(h, inst, RSTOP_EXIT);
+ }
+ }
+
+
+ scf_pg_destroy(pg);
+
+ if (do_commit_states)
+ (void) restarter_instance_update_states(h, inst, state,
+ next_state, RERR_NONE, NULL);
+
+ (void) snprintf(logfilebuf, PATH_MAX, "%s:%s", svc_name, inst_name);
+ for (c = logfilebuf; *c != '\0'; c++)
+ if (*c == '/')
+ *c = '-';
+
+ if ((inst->ri_logstem = uu_msprintf("%s%s", logfilebuf, LOG_SUFFIX)) ==
+ NULL)
+ uu_die("Allocation failure\n");
+
+ log_framework(LOG_DEBUG, "%s is a %s-style service\n", name,
+ service_style(inst->ri_flags));
+
+ MUTEX_UNLOCK(&inst->ri_queue_lock);
+ MUTEX_UNLOCK(&inst->ri_lock);
+
+ startd_free(svc_name, max_scf_name_size);
+ startd_free(inst_name, max_scf_name_size);
+ scf_snapshot_destroy(snap);
+ scf_instance_destroy(scf_inst);
+ scf_service_destroy(scf_svc);
+
+ log_framework(LOG_DEBUG, "%s: inserted instance into restarter list\n",
+ name);
+
+ return (0);
+}
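+
+/*
+ * The lock ordering used above is instance_list.ril_lock before an
+ * instance's ri_lock, which is taken before its ri_queue_lock.
+ * restarter_delete_inst() below drops ri_lock before picking up ril_lock
+ * for the same reason.
+ */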
+
+static void
+restarter_delete_inst(restarter_inst_t *ri)
+{
+ int id;
+ restarter_inst_t *rip;
+ void *cookie = NULL;
+ restarter_instance_qentry_t *e;
+
+ assert(PTHREAD_MUTEX_HELD(&ri->ri_lock));
+
+ /*
+ * Must drop the instance lock so we can pick up the instance_list
+ * lock & remove the instance.
+ */
+ id = ri->ri_id;
+ MUTEX_UNLOCK(&ri->ri_lock);
+
+ MUTEX_LOCK(&instance_list.ril_lock);
+
+ rip = uu_list_find(instance_list.ril_instance_list, &id, NULL, NULL);
+ if (rip == NULL) {
+ MUTEX_UNLOCK(&instance_list.ril_lock);
+ return;
+ }
+
+ assert(ri == rip);
+
+ uu_list_remove(instance_list.ril_instance_list, ri);
+
+ log_framework(LOG_DEBUG, "%s: deleted instance from restarter list\n",
+ ri->ri_i.i_fmri);
+
+ MUTEX_UNLOCK(&instance_list.ril_lock);
+
+ /*
+ * We can lock the instance without holding the instance_list lock
+ * since we removed the instance from the list.
+ */
+ MUTEX_LOCK(&ri->ri_lock);
+ MUTEX_LOCK(&ri->ri_queue_lock);
+
+ if (ri->ri_i.i_primary_ctid >= 1)
+ contract_hash_remove(ri->ri_i.i_primary_ctid);
+
+ while (ri->ri_method_thread != 0 || ri->ri_method_waiters > 0)
+ (void) pthread_cond_wait(&ri->ri_method_cv, &ri->ri_lock);
+
+ while ((e = uu_list_teardown(ri->ri_queue, &cookie)) != NULL)
+ startd_free(e, sizeof (*e));
+ uu_list_destroy(ri->ri_queue);
+
+ startd_free((void *)ri->ri_i.i_fmri, strlen(ri->ri_i.i_fmri) + 1);
+ free(ri->ri_logstem);
+ startd_free(ri->ri_utmpx_prefix, max_scf_value_size);
+ (void) pthread_mutex_destroy(&ri->ri_lock);
+ (void) pthread_mutex_destroy(&ri->ri_queue_lock);
+ startd_free(ri, sizeof (restarter_inst_t));
+}
+
+/*
+ * instance_is_wait_style()
+ *
+ * Returns 1 if the given instance is a "wait-style" service instance.
+ */
+int
+instance_is_wait_style(restarter_inst_t *inst)
+{
+ assert(PTHREAD_MUTEX_HELD(&inst->ri_lock));
+ return ((inst->ri_flags & RINST_STYLE_MASK) == RINST_WAIT);
+}
+
+/*
+ * instance_is_transient_style()
+ *
+ * Returns 1 if the given instance is a transient service instance.
+ */
+int
+instance_is_transient_style(restarter_inst_t *inst)
+{
+ assert(PTHREAD_MUTEX_HELD(&inst->ri_lock));
+ return ((inst->ri_flags & RINST_STYLE_MASK) == RINST_TRANSIENT);
+}
+
+/*
+ * instance_in_transition()
+ * Returns 1 if instance is in transition, 0 if not
+ */
+int
+instance_in_transition(restarter_inst_t *inst)
+{
+ assert(PTHREAD_MUTEX_HELD(&inst->ri_lock));
+ if (inst->ri_i.i_next_state == RESTARTER_STATE_NONE)
+ return (0);
+ return (1);
+}
+
+/*
+ * Returns
+ * 0 - success
+ * ECONNRESET - success, but h was rebound
+ */
+int
+restarter_instance_update_states(scf_handle_t *h, restarter_inst_t *ri,
+ restarter_instance_state_t new_state,
+ restarter_instance_state_t new_state_next, restarter_error_t err, char *aux)
+{
+ protocol_states_t *states;
+ int e;
+ uint_t retry_count = 0, msecs = ALLOC_DELAY;
+ boolean_t rebound = B_FALSE;
+
+ assert(PTHREAD_MUTEX_HELD(&ri->ri_lock));
+
+retry:
+ e = _restarter_commit_states(h, &ri->ri_i, new_state, new_state_next,
+ aux);
+ switch (e) {
+ case 0:
+ break;
+
+ case ENOMEM:
+ ++retry_count;
+ if (retry_count < ALLOC_RETRY) {
+ (void) poll(NULL, 0, msecs);
+ msecs *= ALLOC_DELAY_MULT;
+ goto retry;
+ }
+
+ /* Like startd_alloc(). */
+ uu_die("Insufficient memory.\n");
+ /* NOTREACHED */
+
+ case ECONNABORTED:
+ libscf_handle_rebind(h);
+ rebound = B_TRUE;
+ goto retry;
+
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ log_error(LOG_NOTICE, "Could not commit state change for %s "
+ "to repository: %s.\n", ri->ri_i.i_fmri, strerror(e));
+ /* FALLTHROUGH */
+
+ case ENOENT:
+ ri->ri_i.i_state = new_state;
+ ri->ri_i.i_next_state = new_state_next;
+ break;
+
+ case EINVAL:
+ default:
+ bad_error("_restarter_commit_states", e);
+ }
+
+ states = startd_alloc(sizeof (protocol_states_t));
+ states->ps_state = new_state;
+ states->ps_state_next = new_state_next;
+ states->ps_err = err;
+ graph_protocol_send_event(ri->ri_i.i_fmri, GRAPH_UPDATE_STATE_CHANGE,
+ (void *)states);
+
+ if (new_state == RESTARTER_STATE_ONLINE)
+ ri->ri_post_online_hook();
+
+ return (rebound ? ECONNRESET : 0);
+}
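+
+/*
+ * For example, when an ENABLE event arrives for an uninitialized or
+ * disabled instance, enable_inst() below calls:
+ *
+ *     (void) restarter_instance_update_states(h, inst,
+ *         RESTARTER_STATE_OFFLINE, RESTARTER_STATE_NONE, RERR_NONE, NULL);
+ */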
+
+void
+restarter_mark_pending_snapshot(const char *fmri, uint_t flag)
+{
+ restarter_inst_t *inst;
+
+ assert(flag == RINST_RETAKE_RUNNING || flag == RINST_RETAKE_START);
+
+ inst = inst_lookup_by_name(fmri);
+ if (inst == NULL)
+ return;
+
+ inst->ri_flags |= flag;
+
+ MUTEX_UNLOCK(&inst->ri_lock);
+}
+
+static void
+restarter_take_pending_snapshots(scf_handle_t *h)
+{
+ restarter_inst_t *inst;
+ int r;
+
+ MUTEX_LOCK(&instance_list.ril_lock);
+
+ for (inst = uu_list_first(instance_list.ril_instance_list);
+ inst != NULL;
+ inst = uu_list_next(instance_list.ril_instance_list, inst)) {
+ const char *fmri;
+ scf_instance_t *sinst = NULL;
+
+ MUTEX_LOCK(&inst->ri_lock);
+
+ /*
+ * This is where we'd check inst->ri_method_thread and if it
+ * were nonzero we'd wait in anticipation of another thread
+ * executing a method for inst. Doing so with the instance_list
+ * locked, though, leads to deadlock. Since taking a snapshot
+ * during that window won't hurt anything, we'll just continue.
+ */
+
+ fmri = inst->ri_i.i_fmri;
+
+ if (inst->ri_flags & RINST_RETAKE_RUNNING) {
+ scf_snapshot_t *rsnap;
+
+ (void) libscf_fmri_get_instance(h, fmri, &sinst);
+
+ rsnap = libscf_get_or_make_running_snapshot(sinst,
+ fmri, B_FALSE);
+
+ scf_instance_destroy(sinst);
+
+ if (rsnap != NULL)
+ inst->ri_flags &= ~RINST_RETAKE_RUNNING;
+
+ scf_snapshot_destroy(rsnap);
+ }
+
+ if (inst->ri_flags & RINST_RETAKE_START) {
+ switch (r = libscf_snapshots_poststart(h, fmri,
+ B_FALSE)) {
+ case 0:
+ case ENOENT:
+ inst->ri_flags &= ~RINST_RETAKE_START;
+ break;
+
+ case ECONNABORTED:
+ break;
+
+ case EACCES:
+ default:
+ bad_error("libscf_snapshots_poststart", r);
+ }
+ }
+
+ MUTEX_UNLOCK(&inst->ri_lock);
+ }
+
+ MUTEX_UNLOCK(&instance_list.ril_lock);
+}
+
+/* ARGSUSED */
+void *
+restarter_post_fsminimal_thread(void *unused)
+{
+ scf_handle_t *h;
+ int r;
+
+ h = libscf_handle_create_bound_loop();
+
+ for (;;) {
+ r = libscf_create_self(h);
+ if (r == 0)
+ break;
+
+ assert(r == ECONNABORTED);
+ libscf_handle_rebind(h);
+ }
+
+ restarter_take_pending_snapshots(h);
+
+ (void) scf_handle_unbind(h);
+ scf_handle_destroy(h);
+
+ return (NULL);
+}
+
+/*
+ * returns 1 if instance is already started, 0 if not
+ */
+static int
+instance_started(restarter_inst_t *inst)
+{
+ int ret;
+
+ assert(PTHREAD_MUTEX_HELD(&inst->ri_lock));
+
+ if (inst->ri_i.i_state == RESTARTER_STATE_ONLINE ||
+ inst->ri_i.i_state == RESTARTER_STATE_DEGRADED)
+ ret = 1;
+ else
+ ret = 0;
+
+ return (ret);
+}
+
+/*
+ * int stop_instance()
+ *
+ * Stop the instance given as the second argument, for the cause stated.
+ *
+ * Returns
+ * 0 - success
+ * -1 - inst is in transition
+ */
+static int
+stop_instance(scf_handle_t *local_handle, restarter_inst_t *inst,
+ stop_cause_t cause)
+{
+ fork_info_t *info;
+ const char *cp;
+ int err;
+ restarter_error_t re;
+
+ assert(PTHREAD_MUTEX_HELD(&inst->ri_lock));
+ assert(inst->ri_method_thread == 0);
+
+ switch (cause) {
+ case RSTOP_EXIT:
+ re = RERR_RESTART;
+ cp = "all processes in service exited";
+ break;
+ case RSTOP_CORE:
+ re = RERR_FAULT;
+ cp = "process dumped core";
+ break;
+ case RSTOP_SIGNAL:
+ re = RERR_FAULT;
+ cp = "process received fatal signal from outside the service";
+ break;
+ case RSTOP_HWERR:
+ re = RERR_FAULT;
+ cp = "process killed due to uncorrectable hardware error";
+ break;
+ case RSTOP_DEPENDENCY:
+ re = RERR_RESTART;
+ cp = "dependency activity requires stop";
+ break;
+ case RSTOP_DISABLE:
+ re = RERR_RESTART;
+ cp = "service disabled";
+ break;
+ case RSTOP_RESTART:
+ re = RERR_RESTART;
+ cp = "service restarting";
+ break;
+ default:
+#ifndef NDEBUG
+ (void) fprintf(stderr, "Unknown cause %d at %s:%d.\n",
+ cause, __FILE__, __LINE__);
+#endif
+ abort();
+ }
+
+ /* Services in the disabled and maintenance state are ignored */
+ if (inst->ri_i.i_state == RESTARTER_STATE_MAINT ||
+ inst->ri_i.i_state == RESTARTER_STATE_DISABLED) {
+ log_framework(LOG_DEBUG,
+ "%s: stop_instance -> is maint/disabled\n",
+ inst->ri_i.i_fmri);
+ return (0);
+ }
+
+ /* Already stopped instances are left alone */
+ if (instance_started(inst) == 0) {
+ log_framework(LOG_DEBUG, "Restarter: %s is already stopped.\n",
+ inst->ri_i.i_fmri);
+ return (0);
+ }
+
+ if (instance_in_transition(inst)) {
+ /* requeue event by returning -1 */
+ log_framework(LOG_DEBUG,
+ "Restarter: Not stopping %s, in transition.\n",
+ inst->ri_i.i_fmri);
+ return (-1);
+ }
+
+ log_instance(inst, B_TRUE, "Stopping because %s.", cp);
+
+ log_framework(re == RERR_FAULT ? LOG_INFO : LOG_DEBUG,
+ "%s: Instance stopping because %s.\n", inst->ri_i.i_fmri, cp);
+
+ if (instance_is_wait_style(inst) && cause == RSTOP_EXIT) {
+ /*
+ * No need to stop instance, as child has exited; remove
+ * contract and move the instance to the offline state.
+ */
+ switch (err = restarter_instance_update_states(local_handle,
+ inst, inst->ri_i.i_state, RESTARTER_STATE_OFFLINE, re,
+ NULL)) {
+ case 0:
+ case ECONNRESET:
+ break;
+
+ default:
+ bad_error("restarter_instance_update_states", err);
+ }
+
+ (void) update_fault_count(inst, FAULT_COUNT_RESET);
+
+ if (inst->ri_i.i_primary_ctid != 0) {
+ inst->ri_m_inst =
+ safe_scf_instance_create(local_handle);
+ inst->ri_mi_deleted = B_FALSE;
+
+ libscf_reget_instance(inst);
+ method_remove_contract(inst, B_TRUE, B_TRUE);
+
+ scf_instance_destroy(inst->ri_m_inst);
+ inst->ri_m_inst = NULL;
+ }
+
+ switch (err = restarter_instance_update_states(local_handle,
+ inst, inst->ri_i.i_next_state, RESTARTER_STATE_NONE, re,
+ NULL)) {
+ case 0:
+ case ECONNRESET:
+ break;
+
+ default:
+ bad_error("restarter_instance_update_states", err);
+ }
+
+ return (0);
+ }
+
+ switch (err = restarter_instance_update_states(local_handle, inst,
+ inst->ri_i.i_state, inst->ri_i.i_enabled ? RESTARTER_STATE_OFFLINE :
+ RESTARTER_STATE_DISABLED, RERR_NONE, NULL)) {
+ case 0:
+ case ECONNRESET:
+ break;
+
+ default:
+ bad_error("restarter_instance_update_states", err);
+ }
+
+ info = startd_zalloc(sizeof (fork_info_t));
+
+ info->sf_id = inst->ri_id;
+ info->sf_method_type = METHOD_STOP;
+ info->sf_event_type = re;
+ inst->ri_method_thread = startd_thread_create(method_thread, info);
+
+ return (0);
+}
+
+/*
+ * Returns
+ * ENOENT - fmri is not in instance_list
+ * 0 - success
+ * ECONNRESET - success, though handle was rebound
+ * -1 - instance is in transition
+ */
+int
+stop_instance_fmri(scf_handle_t *h, const char *fmri, uint_t flags)
+{
+ restarter_inst_t *rip;
+ int r;
+
+ rip = inst_lookup_by_name(fmri);
+ if (rip == NULL)
+ return (ENOENT);
+
+ r = stop_instance(h, rip, flags);
+
+ MUTEX_UNLOCK(&rip->ri_lock);
+
+ return (r);
+}
+
+static void
+unmaintain_instance(scf_handle_t *h, restarter_inst_t *rip,
+ unmaint_cause_t cause)
+{
+ ctid_t ctid;
+ scf_instance_t *inst;
+ int r;
+ uint_t tries = 0, msecs = ALLOC_DELAY;
+ const char *cp;
+
+ assert(PTHREAD_MUTEX_HELD(&rip->ri_lock));
+
+ if (rip->ri_i.i_state != RESTARTER_STATE_MAINT) {
+ log_error(LOG_DEBUG, "Restarter: "
+ "Ignoring maintenance off command because %s is not in the "
+ "maintenance state.\n", rip->ri_i.i_fmri);
+ return;
+ }
+
+ switch (cause) {
+ case RUNMAINT_CLEAR:
+ cp = "clear requested";
+ break;
+ case RUNMAINT_DISABLE:
+ cp = "disable requested";
+ break;
+ default:
+#ifndef NDEBUG
+ (void) fprintf(stderr, "Uncaught case for %d at %s:%d.\n",
+ cause, __FILE__, __LINE__);
+#endif
+ abort();
+ }
+
+ log_instance(rip, B_TRUE, "Leaving maintenance because %s.",
+ cp);
+ log_framework(LOG_DEBUG, "%s: Instance leaving maintenance because "
+ "%s.\n", rip->ri_i.i_fmri, cp);
+
+ (void) restarter_instance_update_states(h, rip, RESTARTER_STATE_UNINIT,
+ RESTARTER_STATE_NONE, RERR_RESTART, NULL);
+
+ /*
+ * If we did ADMIN_MAINT_ON_IMMEDIATE, then there might still be
+ * a primary contract.
+ */
+ if (rip->ri_i.i_primary_ctid == 0)
+ return;
+
+ ctid = rip->ri_i.i_primary_ctid;
+ contract_abandon(ctid);
+ rip->ri_i.i_primary_ctid = 0;
+
+rep_retry:
+ switch (r = libscf_fmri_get_instance(h, rip->ri_i.i_fmri, &inst)) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(h);
+ goto rep_retry;
+
+ case ENOENT:
+ /* Must have been deleted. */
+ return;
+
+ case EINVAL:
+ case ENOTSUP:
+ default:
+ bad_error("libscf_handle_rebind", r);
+ }
+
+again:
+ r = restarter_remove_contract(inst, ctid, RESTARTER_CONTRACT_PRIMARY);
+ switch (r) {
+ case 0:
+ break;
+
+ case ENOMEM:
+ ++tries;
+ if (tries < ALLOC_RETRY) {
+ (void) poll(NULL, 0, msecs);
+ msecs *= ALLOC_DELAY_MULT;
+ goto again;
+ }
+
+ uu_die("Insufficient memory.\n");
+ /* NOTREACHED */
+
+ case ECONNABORTED:
+ scf_instance_destroy(inst);
+ libscf_handle_rebind(h);
+ goto rep_retry;
+
+ case ECANCELED:
+ break;
+
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ log_error(LOG_INFO,
+ "Could not remove contract id %lu for %s (%s).\n", ctid,
+ rip->ri_i.i_fmri, strerror(r));
+ break;
+
+ case EINVAL:
+ case EBADF:
+ default:
+ bad_error("restarter_remove_contract", r);
+ }
+
+ scf_instance_destroy(inst);
+}
+
+/*
+ * enable_inst()
+ * Set inst->ri_i.i_enabled. Expects 'e' to be _ENABLE, _DISABLE, or
+ * _ADMIN_DISABLE. If the event is _ENABLE and inst is uninitialized or
+ * disabled, move it to offline. If the event is _DISABLE or
+ * _ADMIN_DISABLE, make sure inst will move to disabled.
+ *
+ * Returns
+ * 0 - success
+ * ECONNRESET - h was rebound
+ */
+static int
+enable_inst(scf_handle_t *h, restarter_inst_t *inst, restarter_event_type_t e)
+{
+ restarter_instance_state_t state;
+ int r;
+
+ assert(PTHREAD_MUTEX_HELD(&inst->ri_lock));
+ assert(e == RESTARTER_EVENT_TYPE_ADMIN_DISABLE ||
+ e == RESTARTER_EVENT_TYPE_DISABLE ||
+ e == RESTARTER_EVENT_TYPE_ENABLE);
+ assert(instance_in_transition(inst) == 0);
+
+ state = inst->ri_i.i_state;
+
+ if (e == RESTARTER_EVENT_TYPE_ENABLE) {
+ inst->ri_i.i_enabled = 1;
+
+ if (state == RESTARTER_STATE_UNINIT ||
+ state == RESTARTER_STATE_DISABLED) {
+ /*
+ * B_FALSE: Don't log an error if the log_instance()
+ * fails because it will fail on the miniroot before
+ * install-discovery runs.
+ */
+ log_instance(inst, B_FALSE, "Enabled.");
+ log_framework(LOG_DEBUG, "%s: Instance enabled.\n",
+ inst->ri_i.i_fmri);
+ (void) restarter_instance_update_states(h, inst,
+ RESTARTER_STATE_OFFLINE, RESTARTER_STATE_NONE,
+ RERR_NONE, NULL);
+ } else {
+ log_framework(LOG_DEBUG, "Restarter: "
+ "Not changing state of %s for enable command.\n",
+ inst->ri_i.i_fmri);
+ }
+ } else {
+ inst->ri_i.i_enabled = 0;
+
+ switch (state) {
+ case RESTARTER_STATE_ONLINE:
+ case RESTARTER_STATE_DEGRADED:
+ r = stop_instance(h, inst, RSTOP_DISABLE);
+ return (r == ECONNRESET ? 0 : r);
+
+ case RESTARTER_STATE_OFFLINE:
+ case RESTARTER_STATE_UNINIT:
+ if (inst->ri_i.i_primary_ctid != 0) {
+ inst->ri_m_inst = safe_scf_instance_create(h);
+ inst->ri_mi_deleted = B_FALSE;
+
+ libscf_reget_instance(inst);
+ method_remove_contract(inst, B_TRUE, B_TRUE);
+
+ scf_instance_destroy(inst->ri_m_inst);
+ }
+ /* B_FALSE: See log_instance(..., "Enabled."); above */
+ log_instance(inst, B_FALSE, "Disabled.");
+ log_framework(LOG_DEBUG, "%s: Instance disabled.\n",
+ inst->ri_i.i_fmri);
+ (void) restarter_instance_update_states(h, inst,
+ RESTARTER_STATE_DISABLED, RESTARTER_STATE_NONE,
+ RERR_RESTART, NULL);
+ return (0);
+
+ case RESTARTER_STATE_DISABLED:
+ break;
+
+ case RESTARTER_STATE_MAINT:
+ /*
+ * We only want to pull the instance out of maintenance
+ * if the disable is due to an administrative request. The
+ * graph engine sends _DISABLE events whenever a
+ * service isn't in the disabled state, and we don't
+ * want to pull the service out of maintenance if,
+ * for example, it is there due to a dependency cycle.
+ */
+ if (e == RESTARTER_EVENT_TYPE_ADMIN_DISABLE)
+ unmaintain_instance(h, inst, RUNMAINT_DISABLE);
+ break;
+
+ default:
+#ifndef NDEBUG
+ (void) fprintf(stderr, "Restarter instance %s has "
+ "unknown state %d.\n", inst->ri_i.i_fmri, state);
+#endif
+ abort();
+ }
+ }
+
+ return (0);
+}
+
+static void
+start_instance(scf_handle_t *local_handle, restarter_inst_t *inst)
+{
+ fork_info_t *info;
+
+ assert(PTHREAD_MUTEX_HELD(&inst->ri_lock));
+ assert(instance_in_transition(inst) == 0);
+ assert(inst->ri_method_thread == 0);
+
+ log_framework(LOG_DEBUG, "%s: trying to start instance\n",
+ inst->ri_i.i_fmri);
+
+ /* Services in the disabled and maintenance state are ignored */
+ if (inst->ri_i.i_state == RESTARTER_STATE_MAINT ||
+ inst->ri_i.i_state == RESTARTER_STATE_DISABLED ||
+ inst->ri_i.i_enabled == 0) {
+ log_framework(LOG_DEBUG,
+ "%s: start_instance -> is maint/disabled\n",
+ inst->ri_i.i_fmri);
+ return;
+ }
+
+ /* Already started instances are left alone */
+ if (instance_started(inst) == 1) {
+ log_framework(LOG_DEBUG,
+ "%s: start_instance -> is already started\n",
+ inst->ri_i.i_fmri);
+ return;
+ }
+
+ log_framework(LOG_DEBUG, "%s: starting instance.\n", inst->ri_i.i_fmri);
+
+ (void) restarter_instance_update_states(local_handle, inst,
+ inst->ri_i.i_state, RESTARTER_STATE_ONLINE, RERR_NONE, NULL);
+
+ info = startd_zalloc(sizeof (fork_info_t));
+
+ info->sf_id = inst->ri_id;
+ info->sf_method_type = METHOD_START;
+ info->sf_event_type = RERR_NONE;
+ inst->ri_method_thread = startd_thread_create(method_thread, info);
+}
+
+static void
+maintain_instance(scf_handle_t *h, restarter_inst_t *rip, int immediate,
+ const char *aux)
+{
+ fork_info_t *info;
+
+ assert(PTHREAD_MUTEX_HELD(&rip->ri_lock));
+ assert(aux != NULL);
+ assert(rip->ri_method_thread == 0);
+
+ log_instance(rip, B_TRUE, "Stopping for maintenance due to %s.", aux);
+ log_framework(LOG_DEBUG, "%s: stopping for maintenance due to %s.\n",
+ rip->ri_i.i_fmri, aux);
+
+ /* Services in the maintenance state are ignored */
+ if (rip->ri_i.i_state == RESTARTER_STATE_MAINT) {
+ log_framework(LOG_DEBUG,
+ "%s: maintain_instance -> is already in maintenance\n",
+ rip->ri_i.i_fmri);
+ return;
+ }
+
+ if (immediate || !instance_started(rip)) {
+ if (rip->ri_i.i_primary_ctid != 0) {
+ rip->ri_m_inst = safe_scf_instance_create(h);
+ rip->ri_mi_deleted = B_FALSE;
+
+ libscf_reget_instance(rip);
+ method_remove_contract(rip, B_TRUE, B_TRUE);
+
+ scf_instance_destroy(rip->ri_m_inst);
+ }
+
+ (void) restarter_instance_update_states(h, rip,
+ RESTARTER_STATE_MAINT, RESTARTER_STATE_NONE, RERR_RESTART,
+ (char *)aux);
+ return;
+ }
+
+ (void) restarter_instance_update_states(h, rip, rip->ri_i.i_state,
+ RESTARTER_STATE_MAINT, RERR_NONE, (char *)aux);
+
+ info = startd_zalloc(sizeof (*info));
+ info->sf_id = rip->ri_id;
+ info->sf_method_type = METHOD_STOP;
+ info->sf_event_type = RERR_RESTART;
+ rip->ri_method_thread = startd_thread_create(method_thread, info);
+}
+
+static void
+refresh_instance(scf_handle_t *h, restarter_inst_t *rip)
+{
+ scf_instance_t *inst;
+ scf_snapshot_t *snap;
+ fork_info_t *info;
+ int r;
+
+ assert(PTHREAD_MUTEX_HELD(&rip->ri_lock));
+
+ log_instance(rip, B_TRUE, "Rereading configuration.");
+ log_framework(LOG_DEBUG, "%s: rereading configuration.\n",
+ rip->ri_i.i_fmri);
+
+rep_retry:
+ r = libscf_fmri_get_instance(h, rip->ri_i.i_fmri, &inst);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ libscf_handle_rebind(h);
+ goto rep_retry;
+
+ case ENOENT:
+ /* Must have been deleted. */
+ return;
+
+ case EINVAL:
+ case ENOTSUP:
+ default:
+ bad_error("libscf_fmri_get_instance", r);
+ }
+
+ snap = libscf_get_running_snapshot(inst);
+
+ r = libscf_get_startd_properties(inst, snap, &rip->ri_flags,
+ &rip->ri_utmpx_prefix);
+ switch (r) {
+ case 0:
+ log_framework(LOG_DEBUG, "%s is a %s-style service\n",
+ rip->ri_i.i_fmri, service_style(rip->ri_flags));
+ break;
+
+ case ECONNABORTED:
+ scf_instance_destroy(inst);
+ scf_snapshot_destroy(snap);
+ libscf_handle_rebind(h);
+ goto rep_retry;
+
+ case ECANCELED:
+ case ENOENT:
+ /* Succeed in anticipation of REMOVE_INSTANCE. */
+ break;
+
+ default:
+ bad_error("libscf_get_startd_properties", r);
+ }
+
+ if (instance_started(rip)) {
+ /* Refresh does not change the state. */
+ (void) restarter_instance_update_states(h, rip,
+ rip->ri_i.i_state, rip->ri_i.i_state, RERR_NONE, NULL);
+
+ info = startd_zalloc(sizeof (*info));
+ info->sf_id = rip->ri_id;
+ info->sf_method_type = METHOD_REFRESH;
+ info->sf_event_type = RERR_REFRESH;
+
+ assert(rip->ri_method_thread == 0);
+ rip->ri_method_thread =
+ startd_thread_create(method_thread, info);
+ }
+
+ scf_snapshot_destroy(snap);
+ scf_instance_destroy(inst);
+}
+
+const char *event_names[] = { "INVALID", "ADD_INSTANCE", "REMOVE_INSTANCE",
+ "ENABLE", "DISABLE", "ADMIN_DEGRADED", "ADMIN_REFRESH",
+ "ADMIN_RESTART", "ADMIN_MAINT_OFF", "ADMIN_MAINT_ON",
+ "ADMIN_MAINT_ON_IMMEDIATE", "STOP", "START", "DEPENDENCY_CYCLE",
+ "INVALID_DEPENDENCY", "ADMIN_DISABLE"
+};
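+/*
+ * Note: event_names[] is indexed by restarter_event_type_t values (see the
+ * event_names[e->rpe_type] uses below), so its entries must stay in the
+ * same order as that enumeration.
+ */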
+
+/*
+ * void *restarter_process_events()
+ *
+ * Called in a separate thread to process the events on an instance's
+ * queue. Empties the queue completely, and tries to keep the thread
+ * around for a little while after the queue is empty to save on
+ * startup costs.
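+ *
+ * The argument is the instance fmri, strdup()'d by restarter_event_thread()
+ * and freed here when the thread exits.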
+ */
+static void *
+restarter_process_events(void *arg)
+{
+ scf_handle_t *h;
+ restarter_instance_qentry_t *event;
+ restarter_inst_t *rip;
+ char *fmri = (char *)arg;
+ struct timespec to;
+
+ assert(fmri != NULL);
+
+ h = libscf_handle_create_bound_loop();
+
+ /* grab the queue lock */
+ rip = inst_lookup_queue(fmri);
+ if (rip == NULL)
+ goto out;
+
+again:
+
+ while ((event = uu_list_first(rip->ri_queue)) != NULL) {
+ restarter_inst_t *inst;
+
+ /* drop the queue lock */
+ MUTEX_UNLOCK(&rip->ri_queue_lock);
+
+ /*
+ * Grab the inst lock -- this waits until any outstanding
+ * method finishes running.
+ */
+ inst = inst_lookup_by_name(fmri);
+ if (inst == NULL) {
+ /* Getting deleted in the middle isn't an error. */
+ goto cont;
+ }
+
+ assert(instance_in_transition(inst) == 0);
+
+ /* process the event */
+ switch (event->riq_type) {
+ case RESTARTER_EVENT_TYPE_ENABLE:
+ case RESTARTER_EVENT_TYPE_DISABLE:
+ case RESTARTER_EVENT_TYPE_ADMIN_DISABLE:
+ (void) enable_inst(h, inst, event->riq_type);
+ break;
+
+ case RESTARTER_EVENT_TYPE_REMOVE_INSTANCE:
+ restarter_delete_inst(inst);
+ inst = NULL;
+ goto cont;
+
+ case RESTARTER_EVENT_TYPE_STOP:
+ (void) stop_instance(h, inst, RSTOP_DEPENDENCY);
+ break;
+
+ case RESTARTER_EVENT_TYPE_START:
+ start_instance(h, inst);
+ break;
+
+ case RESTARTER_EVENT_TYPE_DEPENDENCY_CYCLE:
+ maintain_instance(h, inst, 0, "dependency_cycle");
+ break;
+
+ case RESTARTER_EVENT_TYPE_INVALID_DEPENDENCY:
+ maintain_instance(h, inst, 0, "invalid_dependency");
+ break;
+
+ case RESTARTER_EVENT_TYPE_ADMIN_MAINT_ON:
+ maintain_instance(h, inst, 0, "administrative_request");
+ break;
+
+ case RESTARTER_EVENT_TYPE_ADMIN_MAINT_ON_IMMEDIATE:
+ maintain_instance(h, inst, 1, "administrative_request");
+ break;
+
+ case RESTARTER_EVENT_TYPE_ADMIN_MAINT_OFF:
+ unmaintain_instance(h, inst, RUNMAINT_CLEAR);
+ break;
+
+ case RESTARTER_EVENT_TYPE_ADMIN_REFRESH:
+ refresh_instance(h, inst);
+ break;
+
+ case RESTARTER_EVENT_TYPE_ADMIN_DEGRADED:
+ log_framework(LOG_WARNING, "Restarter: "
+ "%s command (for %s) unimplemented.\n",
+ event_names[event->riq_type], inst->ri_i.i_fmri);
+ break;
+
+ case RESTARTER_EVENT_TYPE_ADMIN_RESTART:
+ if (!instance_started(inst)) {
+ log_framework(LOG_DEBUG, "Restarter: "
+ "Not restarting %s; not running.\n",
+ inst->ri_i.i_fmri);
+ } else {
+ /*
+ * Stop the instance. If it can be restarted,
+ * the graph engine will send a new event.
+ */
+ (void) stop_instance(h, inst, RSTOP_RESTART);
+ }
+ break;
+
+ case RESTARTER_EVENT_TYPE_ADD_INSTANCE:
+ default:
+#ifndef NDEBUG
+ uu_warn("%s:%d: Bad restarter event %d. "
+ "Aborting.\n", __FILE__, __LINE__, event->riq_type);
+#endif
+ abort();
+ }
+
+ assert(inst != NULL);
+ MUTEX_UNLOCK(&inst->ri_lock);
+
+cont:
+ /* grab the queue lock */
+ rip = inst_lookup_queue(fmri);
+ if (rip == NULL)
+ goto out;
+
+ /* delete the event */
+ uu_list_remove(rip->ri_queue, event);
+ startd_free(event, sizeof (restarter_instance_qentry_t));
+ }
+
+ assert(rip != NULL);
+
+ /*
+ * Try to preserve the thread for a little while for future use.
+ */
+ to.tv_sec = 3;
+ to.tv_nsec = 0;
+ (void) pthread_cond_reltimedwait_np(&rip->ri_queue_cv,
+ &rip->ri_queue_lock, &to);
+
+ if (uu_list_first(rip->ri_queue) != NULL)
+ goto again;
+
+ rip->ri_queue_thread = 0;
+ MUTEX_UNLOCK(&rip->ri_queue_lock);
+out:
+ (void) scf_handle_unbind(h);
+ scf_handle_destroy(h);
+ free(fmri);
+ return (NULL);
+}
+
+static int
+is_admin_event(restarter_event_type_t t)
+{
+ switch (t) {
+ case RESTARTER_EVENT_TYPE_ADMIN_MAINT_ON:
+ case RESTARTER_EVENT_TYPE_ADMIN_MAINT_ON_IMMEDIATE:
+ case RESTARTER_EVENT_TYPE_ADMIN_MAINT_OFF:
+ case RESTARTER_EVENT_TYPE_ADMIN_REFRESH:
+ case RESTARTER_EVENT_TYPE_ADMIN_DEGRADED:
+ case RESTARTER_EVENT_TYPE_ADMIN_RESTART:
+ return (1);
+ default:
+ return (0);
+ }
+}
+
+static void
+restarter_queue_event(restarter_inst_t *ri, restarter_protocol_event_t *e)
+{
+ restarter_instance_qentry_t *qe;
+ int r;
+
+ assert(PTHREAD_MUTEX_HELD(&ri->ri_queue_lock));
+ assert(!PTHREAD_MUTEX_HELD(&ri->ri_lock));
+
+ qe = startd_zalloc(sizeof (restarter_instance_qentry_t));
+ qe->riq_type = e->rpe_type;
+
+ uu_list_node_init(qe, &qe->riq_link, restarter_queue_pool);
+ r = uu_list_insert_before(ri->ri_queue, NULL, qe);
+ assert(r == 0);
+}
+
+/*
+ * void *restarter_event_thread()
+ *
+ * Handle incoming graph events by placing them on a per-instance
+ * queue. We can't lock the main part of the instance structure, so
+ * just modify the separately locked event queue portion.
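+ * The queued events are drained by restarter_process_events() above.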
+ */
+/*ARGSUSED*/
+static void *
+restarter_event_thread(void *unused)
+{
+ scf_handle_t *h;
+
+ /*
+ * This is a new thread, and thus, gets its own handle
+ * to the repository.
+ */
+ h = libscf_handle_create_bound_loop();
+
+ MUTEX_LOCK(&ru->restarter_update_lock);
+
+ /*CONSTCOND*/
+ while (1) {
+ restarter_protocol_event_t *e;
+
+ while (ru->restarter_update_wakeup == 0)
+ (void) pthread_cond_wait(&ru->restarter_update_cv,
+ &ru->restarter_update_lock);
+
+ ru->restarter_update_wakeup = 0;
+
+ while ((e = restarter_event_dequeue()) != NULL) {
+ restarter_inst_t *rip;
+ char *fmri;
+
+ MUTEX_UNLOCK(&ru->restarter_update_lock);
+
+ /*
+ * ADD_INSTANCE is special: there's likely no
+ * instance structure yet, so we need to handle the
+ * addition synchronously.
+ */
+ switch (e->rpe_type) {
+ case RESTARTER_EVENT_TYPE_ADD_INSTANCE:
+ if (restarter_insert_inst(h, e->rpe_inst) != 0)
+ log_error(LOG_INFO, "Restarter: "
+ "Could not add %s.\n", e->rpe_inst);
+
+ MUTEX_LOCK(&st->st_load_lock);
+ if (--st->st_load_instances == 0)
+ (void) pthread_cond_broadcast(
+ &st->st_load_cv);
+ MUTEX_UNLOCK(&st->st_load_lock);
+
+ goto nolookup;
+ }
+
+ /*
+ * Lookup the instance, locking only the event queue.
+ * Can't grab ri_lock here because it might be held
+ * by a long-running method.
+ */
+ rip = inst_lookup_queue(e->rpe_inst);
+ if (rip == NULL) {
+ log_error(LOG_INFO, "Restarter: "
+ "Ignoring %s command for unknown service "
+ "%s.\n", event_names[e->rpe_type],
+ e->rpe_inst);
+ goto nolookup;
+ }
+
+ /* Keep ADMIN events from filling up the queue. */
+ if (is_admin_event(e->rpe_type) &&
+ uu_list_numnodes(rip->ri_queue) >
+ RINST_QUEUE_THRESHOLD) {
+ MUTEX_UNLOCK(&rip->ri_queue_lock);
+ log_instance(rip, B_TRUE, "Instance event "
+ "queue overflow. Dropping administrative "
+ "request.");
+ log_framework(LOG_DEBUG, "%s: Instance event "
+ "queue overflow. Dropping administrative "
+ "request.\n", rip->ri_i.i_fmri);
+ goto nolookup;
+ }
+
+ /* Now add the event to the instance queue. */
+ restarter_queue_event(rip, e);
+
+ if (rip->ri_queue_thread == 0) {
+ /*
+ * Start a thread if one isn't already
+ * running.
+ */
+ fmri = safe_strdup(e->rpe_inst);
+ rip->ri_queue_thread = startd_thread_create(
+ restarter_process_events, (void *)fmri);
+ } else {
+ /*
+ * Signal the existing thread that there's
+ * a new event.
+ */
+ (void) pthread_cond_broadcast(
+ &rip->ri_queue_cv);
+ }
+
+ MUTEX_UNLOCK(&rip->ri_queue_lock);
+nolookup:
+ restarter_event_release(e);
+
+ MUTEX_LOCK(&ru->restarter_update_lock);
+ }
+ }
+
+ /*
+ * Unreachable for now -- there's currently no graceful cleanup
+ * called on exit().
+ */
+ (void) scf_handle_unbind(h);
+ scf_handle_destroy(h);
+ return (NULL);
+}
+
+static restarter_inst_t *
+contract_to_inst(ctid_t ctid)
+{
+ restarter_inst_t *inst;
+ int id;
+
+ id = lookup_inst_by_contract(ctid);
+ if (id == -1)
+ return (NULL);
+
+ inst = inst_lookup_by_id(id);
+ if (inst != NULL) {
+ /*
+ * Since ri_lock isn't held by the contract id lookup, this
+ * instance may have been restarted and now be in a new
+ * contract, making the old contract no longer valid for this
+ * instance.
+ */
+ if (ctid != inst->ri_i.i_primary_ctid) {
+ MUTEX_UNLOCK(&inst->ri_lock);
+ inst = NULL;
+ }
+ }
+ return (inst);
+}
+
+/*
+ * void contract_action()
+ * Take action on contract events.
+ */
+static void
+contract_action(scf_handle_t *h, restarter_inst_t *inst, ctid_t id,
+ uint32_t type)
+{
+ const char *fmri = inst->ri_i.i_fmri;
+
+ assert(PTHREAD_MUTEX_HELD(&inst->ri_lock));
+
+ /*
+ * If startd has stopped this contract, there is no need to
+ * stop it again.
+ */
+ if (inst->ri_i.i_primary_ctid > 0 &&
+ inst->ri_i.i_primary_ctid_stopped)
+ return;
+
+ if ((type & (CT_PR_EV_EMPTY | CT_PR_EV_CORE | CT_PR_EV_SIGNAL
+ | CT_PR_EV_HWERR)) == 0) {
+ /*
+ * There shouldn't be other events, since that's not how we set
+ * the terms. Thus, just log an error and drive on.
+ */
+ log_framework(LOG_NOTICE,
+ "%s: contract %ld received unexpected critical event "
+ "(%d)\n", fmri, id, type);
+ return;
+ }
+
+ assert(instance_in_transition(inst) == 0);
+
+ if (instance_is_wait_style(inst)) {
+ /*
+ * We ignore all events; if they impact the
+ * process we're monitoring, then the
+ * wait_thread will stop the instance.
+ */
+ log_framework(LOG_DEBUG,
+ "%s: ignoring contract event on wait-style service\n",
+ fmri);
+ } else {
+ /*
+ * A CT_PR_EV_EMPTY event is an RSTOP_EXIT request.
+ */
+ switch (type) {
+ case CT_PR_EV_EMPTY:
+ (void) stop_instance(h, inst, RSTOP_EXIT);
+ break;
+ case CT_PR_EV_CORE:
+ (void) stop_instance(h, inst, RSTOP_CORE);
+ break;
+ case CT_PR_EV_SIGNAL:
+ (void) stop_instance(h, inst, RSTOP_SIGNAL);
+ break;
+ case CT_PR_EV_HWERR:
+ (void) stop_instance(h, inst, RSTOP_HWERR);
+ break;
+ }
+ }
+}
+
+/*
+ * void *restarter_contracts_event_thread(void *)
+ * Listens to the process contract bundle for critical events, taking action
+ * on events from contracts we know we are responsible for.
+ */
+/*ARGSUSED*/
+static void *
+restarter_contracts_event_thread(void *unused)
+{
+ int fd, err;
+ scf_handle_t *local_handle;
+
+ /*
+ * Await graph load completion. That is, stop here, until we've scanned
+ * the repository for contract-instance associations.
+ */
+ MUTEX_LOCK(&st->st_load_lock);
+ while (!(st->st_load_complete && st->st_load_instances == 0))
+ (void) pthread_cond_wait(&st->st_load_cv, &st->st_load_lock);
+ MUTEX_UNLOCK(&st->st_load_lock);
+
+ /*
+ * This is a new thread, and thus, gets its own handle
+ * to the repository.
+ */
+ if ((local_handle = libscf_handle_create_bound(SCF_VERSION)) == NULL)
+ uu_die("Unable to bind a new repository handle: %s\n",
+ scf_strerror(scf_error()));
+
+ fd = open64(CTFS_ROOT "/process/pbundle", O_RDONLY);
+ if (fd == -1)
+ uu_die("process bundle open failed");
+
+ /*
+ * Make sure we get all events (including those generated by configd
+ * before this thread was started).
+ */
+ err = ct_event_reset(fd);
+ assert(err == 0);
+
+ for (;;) {
+ int efd, sfd;
+ ct_evthdl_t ev;
+ uint32_t type;
+ ctevid_t evid;
+ ct_stathdl_t status;
+ ctid_t ctid;
+ restarter_inst_t *inst;
+ uint64_t cookie;
+
+ if (err = ct_event_read_critical(fd, &ev)) {
+ log_error(LOG_WARNING,
+ "Error reading next contract event: %s",
+ strerror(err));
+ continue;
+ }
+
+ evid = ct_event_get_evid(ev);
+ ctid = ct_event_get_ctid(ev);
+ type = ct_event_get_type(ev);
+
+ /* Fetch cookie. */
+ if ((sfd = contract_open(ctid, "process", "status", O_RDONLY))
+ < 0) {
+ ct_event_free(ev);
+ continue;
+ }
+
+ if (err = ct_status_read(sfd, CTD_COMMON, &status)) {
+ log_framework(LOG_WARNING, "Could not get status for "
+ "contract %ld: %s\n", ctid, strerror(err));
+
+ startd_close(sfd);
+ ct_event_free(ev);
+ continue;
+ }
+
+ cookie = ct_status_get_cookie(status);
+
+ ct_status_free(status);
+
+ startd_close(sfd);
+
+ /*
+ * svc.configd(1M) restart handling is performed by
+ * fork_configd_thread(). We don't acknowledge the event here, as that
+ * thread will do so.
+ */
+ if (cookie == CONFIGD_COOKIE) {
+ ct_event_free(ev);
+ continue;
+ }
+
+ inst = contract_to_inst(ctid);
+ if (inst == NULL) {
+ /*
+ * This can happen if we receive an EMPTY
+ * event for an abandoned contract.
+ */
+ log_framework(LOG_DEBUG,
+ "Received event %d for unknown contract id "
+ "%ld\n", type, ctid);
+ } else {
+ log_framework(LOG_DEBUG,
+ "Received event %d for contract id "
+ "%ld (%s)\n", type, ctid,
+ inst->ri_i.i_fmri);
+
+ contract_action(local_handle, inst, ctid, type);
+
+ MUTEX_UNLOCK(&inst->ri_lock);
+ }
+
+ efd = contract_open(ct_event_get_ctid(ev), "process", "ctl",
+ O_WRONLY);
+ if (efd != -1) {
+ (void) ct_ctl_ack(efd, evid);
+ startd_close(efd);
+ }
+
+ ct_event_free(ev);
+
+ }
+
+ /*NOTREACHED*/
+ return (NULL);
+}
+
+/*
+ * Timeout queue, processed by restarter_timeouts_event_thread().
+ */
+timeout_queue_t *timeouts;
+static uu_list_pool_t *timeout_pool;
+
+typedef struct timeout_update {
+ pthread_mutex_t tu_lock;
+ pthread_cond_t tu_cv;
+ int tu_wakeup;
+} timeout_update_t;
+
+timeout_update_t *tu;
+
+static const char *timeout_ovr_svcs[] = {
+ "svc:/system/manifest-import:default",
+ "svc:/network/initial:default",
+ "svc:/network/service:default",
+ "svc:/system/rmtmpfiles:default",
+ "svc:/network/loopback:default",
+ "svc:/network/physical:default",
+ "svc:/system/device/local:default",
+ "svc:/system/metainit:default",
+ "svc:/system/filesystem/usr:default",
+ "svc:/system/filesystem/minimal:default",
+ "svc:/system/filesystem/local:default",
+ NULL
+};
+
+int
+is_timeout_ovr(restarter_inst_t *inst)
+{
+ int i;
+
+ for (i = 0; timeout_ovr_svcs[i] != NULL; ++i) {
+ if (strcmp(inst->ri_i.i_fmri, timeout_ovr_svcs[i]) == 0) {
+ log_instance(inst, B_TRUE, "Timeout override by "
+ "svc.startd. Using infinite timeout");
+ return (1);
+ }
+ }
+
+ return (0);
+}
+
+/*ARGSUSED*/
+static int
+timeout_compare(const void *lc_arg, const void *rc_arg, void *private)
+{
+ hrtime_t t1 = ((const timeout_entry_t *)lc_arg)->te_timeout;
+ hrtime_t t2 = ((const timeout_entry_t *)rc_arg)->te_timeout;
+
+ if (t1 > t2)
+ return (1);
+ else if (t1 < t2)
+ return (-1);
+ return (0);
+}
+
+void
+timeout_init()
+{
+ timeouts = startd_zalloc(sizeof (timeout_queue_t));
+
+ (void) pthread_mutex_init(&timeouts->tq_lock, &mutex_attrs);
+
+ timeout_pool = startd_list_pool_create("timeouts",
+ sizeof (timeout_entry_t), offsetof(timeout_entry_t, te_link),
+ timeout_compare, UU_LIST_POOL_DEBUG);
+ assert(timeout_pool != NULL);
+
+ timeouts->tq_list = startd_list_create(timeout_pool,
+ timeouts, UU_LIST_SORTED);
+ assert(timeouts->tq_list != NULL);
+
+ tu = startd_zalloc(sizeof (timeout_update_t));
+ (void) pthread_cond_init(&tu->tu_cv, NULL);
+ (void) pthread_mutex_init(&tu->tu_lock, &mutex_attrs);
+}
+
+void
+timeout_insert(restarter_inst_t *inst, ctid_t cid, uint64_t timeout_sec)
+{
+ hrtime_t now, timeout;
+ timeout_entry_t *entry;
+ uu_list_index_t idx;
+
+ assert(PTHREAD_MUTEX_HELD(&inst->ri_lock));
+
+ now = gethrtime();
+
+ /*
+ * If we overflow LLONG_MAX, we're never timing out anyway, so
+ * just return.
+ */
+ if (timeout_sec >= (LLONG_MAX - now) / 1000000000LL) {
+ log_instance(inst, B_TRUE, "timeout_seconds too large, "
+ "treating as infinite.");
+ return;
+ }
+
+ /* hrtime is in nanoseconds. Convert timeout_sec. */
+ timeout = now + (timeout_sec * 1000000000LL);
+
+ entry = startd_alloc(sizeof (timeout_entry_t));
+ entry->te_timeout = timeout;
+ entry->te_ctid = cid;
+ entry->te_fmri = safe_strdup(inst->ri_i.i_fmri);
+ entry->te_logstem = safe_strdup(inst->ri_logstem);
+ entry->te_fired = 0;
+ /* Insert the calculated timeout time onto the queue. */
+ MUTEX_LOCK(&timeouts->tq_lock);
+ (void) uu_list_find(timeouts->tq_list, entry, NULL, &idx);
+ uu_list_node_init(entry, &entry->te_link, timeout_pool);
+ uu_list_insert(timeouts->tq_list, entry, idx);
+ MUTEX_UNLOCK(&timeouts->tq_lock);
+
+ assert(inst->ri_timeout == NULL);
+ inst->ri_timeout = entry;
+
+ MUTEX_LOCK(&tu->tu_lock);
+ tu->tu_wakeup = 1;
+ (void) pthread_cond_broadcast(&tu->tu_cv);
+ MUTEX_UNLOCK(&tu->tu_lock);
+}
+
+
+void
+timeout_remove(restarter_inst_t *inst, ctid_t cid)
+{
+ assert(PTHREAD_MUTEX_HELD(&inst->ri_lock));
+
+ if (inst->ri_timeout == NULL)
+ return;
+
+ assert(inst->ri_timeout->te_ctid == cid);
+
+ MUTEX_LOCK(&timeouts->tq_lock);
+ uu_list_remove(timeouts->tq_list, inst->ri_timeout);
+ MUTEX_UNLOCK(&timeouts->tq_lock);
+
+ free(inst->ri_timeout->te_fmri);
+ free(inst->ri_timeout->te_logstem);
+ startd_free(inst->ri_timeout, sizeof (timeout_entry_t));
+ inst->ri_timeout = NULL;
+}
+
+static int
+timeout_now()
+{
+ timeout_entry_t *e;
+ hrtime_t now;
+ int ret;
+
+ now = gethrtime();
+
+ /*
+ * Walk through the (sorted) timeouts list. For every entry whose
+ * timeout is <= the current time, kill the contract running the
+ * method.
+ */
+ MUTEX_LOCK(&timeouts->tq_lock);
+
+ for (e = uu_list_first(timeouts->tq_list);
+ e != NULL && e->te_timeout <= now;
+ e = uu_list_next(timeouts->tq_list, e)) {
+ log_framework(LOG_WARNING, "%s: Method or service exit timed "
+ "out. Killing contract %ld.\n", e->te_fmri, e->te_ctid);
+ log_instance_fmri(e->te_fmri, e->te_logstem, B_TRUE,
+ "Method or service exit timed out. Killing contract %ld",
+ e->te_ctid);
+ e->te_fired = 1;
+ (void) contract_kill(e->te_ctid, SIGKILL, e->te_fmri);
+ }
+
+ if (uu_list_numnodes(timeouts->tq_list) > 0)
+ ret = 0;
+ else
+ ret = -1;
+
+ MUTEX_UNLOCK(&timeouts->tq_lock);
+
+ return (ret);
+}
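+
+/*
+ * Note that fired entries are not removed here; they stay on the queue (and
+ * keep timeout_now() returning 0) until timeout_remove() is called for the
+ * instance, so the contract may be killed again on a later pass.
+ */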
+
+/*
+ * void *restarter_timeouts_event_thread(void *)
+ * Responsible for monitoring the method timeouts. This thread must
+ * be started before any methods are called.
+ */
+/*ARGSUSED*/
+static void *
+restarter_timeouts_event_thread(void *unused)
+{
+ /*
+ * Timeouts are entered on a priority queue, which is processed by
+ * this thread. As timeouts are specified in seconds, we'll do
+ * the necessary processing every second, as long as the queue
+ * is not empty.
+ */
+
+ /*CONSTCOND*/
+ while (1) {
+ /*
+ * As long as the timeout list isn't empty, process it
+ * every second.
+ */
+ if (timeout_now() == 0) {
+ (void) sleep(1);
+ continue;
+ }
+
+ /* The list is empty, wait until we have more timeouts. */
+ MUTEX_LOCK(&tu->tu_lock);
+
+ while (tu->tu_wakeup == 0)
+ (void) pthread_cond_wait(&tu->tu_cv, &tu->tu_lock);
+
+ tu->tu_wakeup = 0;
+ MUTEX_UNLOCK(&tu->tu_lock);
+ }
+
+ return (NULL);
+}
+
+void
+restarter_start()
+{
+ (void) startd_thread_create(restarter_timeouts_event_thread, NULL);
+ (void) startd_thread_create(restarter_event_thread, NULL);
+ (void) startd_thread_create(restarter_contracts_event_thread, NULL);
+ (void) startd_thread_create(wait_thread, NULL);
+}
+
+
+void
+restarter_init()
+{
+ restarter_instance_pool = startd_list_pool_create("restarter_instances",
+ sizeof (restarter_inst_t), offsetof(restarter_inst_t,
+ ri_link), restarter_instance_compare, UU_LIST_POOL_DEBUG);
+ (void) memset(&instance_list, 0, sizeof (instance_list));
+
+ (void) pthread_mutex_init(&instance_list.ril_lock, &mutex_attrs);
+ instance_list.ril_instance_list = startd_list_create(
+ restarter_instance_pool, &instance_list, UU_LIST_SORTED);
+
+ restarter_queue_pool = startd_list_pool_create(
+ "restarter_instance_queue", sizeof (restarter_instance_qentry_t),
+ offsetof(restarter_instance_qentry_t, riq_link), NULL,
+ UU_LIST_POOL_DEBUG);
+
+ contract_list_pool = startd_list_pool_create(
+ "contract_list", sizeof (contract_entry_t),
+ offsetof(contract_entry_t, ce_link), NULL,
+ UU_LIST_POOL_DEBUG);
+ contract_hash_init();
+
+ log_framework(LOG_DEBUG, "Initialized restarter\n");
+}
diff --git a/usr/src/cmd/svc/startd/specials.c b/usr/src/cmd/svc/startd/specials.c
new file mode 100644
index 0000000000..7bbc42ed3f
--- /dev/null
+++ b/usr/src/cmd/svc/startd/specials.c
@@ -0,0 +1,250 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * specials.c - knowledge of special services
+ *
+ * svc.startd(1M) has duties that cannot be carried out without knowledge of the
+ * transition of various services, such as the milestones, to their online
+ * states. Hooks are called with the restarter instance's ri_lock held, so
+ * operations on all instances (or on the graph) should be performed
+ * asynchronously.
+ */
+
+#include <sys/statvfs.h>
+#include <sys/types.h>
+#include <assert.h>
+#include <errno.h>
+#include <libintl.h>
+#include <limits.h>
+#include <locale.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <string.h>
+#include <strings.h>
+#include <time.h>
+#include <zone.h>
+
+#include "startd.h"
+
+void
+special_null_transition()
+{
+}
+
+static void
+special_fsroot_post_online()
+{
+ static int once;
+ char *locale;
+
+ /*
+ * /usr, with timezone and locale data, is now available.
+ */
+ if (!st->st_log_timezone_known) {
+ tzset();
+ st->st_log_timezone_known = 1;
+ }
+
+ if (!st->st_log_locale_known) {
+ if (st->st_locale)
+ locale = st->st_locale;
+
+ (void) setlocale(LC_ALL, "");
+ st->st_locale = setlocale(LC_MESSAGES, "");
+ if (st->st_locale) {
+ st->st_locale = safe_strdup(st->st_locale);
+ xstr_sanitize(st->st_locale);
+ free(locale);
+ } else {
+ st->st_locale = locale;
+ }
+
+ (void) textdomain(TEXT_DOMAIN);
+ st->st_log_locale_known = 1;
+ }
+
+ if (once)
+ return;
+
+ /*
+ * ctime(3C) ends with '\n\0'.
+ */
+ once++;
+ log_framework(LOG_INFO, "system start time was %s",
+ ctime(&st->st_start_time.tv_sec));
+}
+
+static void
+special_fsminimal_post_online()
+{
+ ulong_t rfsid, vfsid;
+ pid_t init_pid;
+
+ log_framework(LOG_DEBUG, "special_fsminimal_post_online hook "
+ "executed\n");
+
+ /*
+ * Are / and /var really writeable?
+ */
+ switch (fs_is_read_only("/", &rfsid)) {
+ case 1:
+ return; /* still read-only: install / ro root */
+ case 0:
+ break;
+ case -1:
+ default:
+ log_error(LOG_WARNING, gettext("couldn't check status of "
+ "root filesystem: %s\n"), strerror(errno));
+ break;
+ }
+
+ switch (fs_is_read_only("/var", &vfsid)) {
+ case 1:
+ if (vfsid != rfsid) {
+ log_framework(LOG_WARNING, "/var filesystem "
+ "read-only after system/filesystem/minimal\n");
+ if (fs_remount("/var"))
+ log_framework(LOG_WARNING, "/var "
+ "filesystem remount failed\n");
+ }
+ break;
+ case 0:
+ break;
+ case -1:
+ default:
+ log_error(LOG_WARNING, gettext("couldn't check status of "
+ "/var filesystem: %s\n"), strerror(errno));
+ break;
+ }
+
+ /*
+ * Clear (dead) entries and record boot time.
+ */
+ utmpx_clear_old();
+ utmpx_write_boottime();
+
+ /*
+ * Reinitialize the logs to point to LOG_PREFIX_NORMAL.
+ */
+ log_init();
+
+ /*
+ * Poke init so it will create /etc/initpipe.
+ */
+ if (zone_getattr(getzoneid(), ZONE_ATTR_INITPID, &init_pid,
+ sizeof (init_pid)) != sizeof (init_pid)) {
+ log_error(LOG_WARNING, "Could not get pid of init: %s.\n",
+ strerror(errno));
+ } else {
+ if (kill(init_pid, SIGHUP) != 0) {
+ switch (errno) {
+ case EPERM:
+ case ESRCH:
+ log_error(LOG_WARNING,
+ "Could not signal init: %s.\n",
+ strerror(errno));
+ break;
+
+ case EINVAL:
+ default:
+ bad_error("kill", errno);
+ }
+ }
+ }
+
+ /*
+ * Take pending snapshots and create svc.startd's own repository instance.
+ */
+ (void) startd_thread_create(restarter_post_fsminimal_thread, NULL);
+}
+
+static void
+special_single_post_online()
+{
+ int r;
+
+ log_framework(LOG_DEBUG, "special_single_post_online hook executed\n");
+
+ /*
+ * Un-set the special reconfig reboot property.
+ */
+ r = libscf_set_reconfig(0);
+ switch (r) {
+ case 0:
+ case ENOENT:
+ break;
+
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ log_error(LOG_WARNING, "Could not clear reconfiguration "
+ "property: %s.\n", strerror(r));
+ break;
+
+ default:
+ bad_error("libscf_set_reconfig", r);
+ }
+
+ if (booting_to_single_user)
+ (void) startd_thread_create(single_user_thread, NULL);
+}
+
+static service_hook_assn_t special_svcs[] = {
+ { "svc:/system/filesystem/root:default",
+ special_null_transition,
+ special_fsroot_post_online,
+ special_null_transition },
+ { "svc:/system/filesystem/minimal:default",
+ special_null_transition,
+ special_fsminimal_post_online,
+ special_null_transition },
+ { "svc:/milestone/single-user:default",
+ special_null_transition,
+ special_single_post_online,
+ special_null_transition },
+};
+
+void
+special_online_hooks_get(const char *fmri, instance_hook_t *pre_onp,
+ instance_hook_t *post_onp, instance_hook_t *post_offp)
+{
+ int i;
+
+ for (i = 0; i < sizeof (special_svcs) / sizeof (service_hook_assn_t);
+ i++)
+ if (strcmp(fmri, special_svcs[i].sh_fmri) == 0) {
+ *pre_onp = special_svcs[i].sh_pre_online_hook;
+ *post_onp = special_svcs[i].sh_post_online_hook;
+ *post_offp = special_svcs[i].sh_post_offline_hook;
+ return;
+ }
+
+ *pre_onp = *post_onp = *post_offp = special_null_transition;
+}
diff --git a/usr/src/cmd/svc/startd/startd.c b/usr/src/cmd/svc/startd/startd.c
new file mode 100644
index 0000000000..863243bbf7
--- /dev/null
+++ b/usr/src/cmd/svc/startd/startd.c
@@ -0,0 +1,925 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * startd.c - the master restarter
+ *
+ * svc.startd comprises two halves. The graph engine is based in graph.c and
+ * maintains the service dependency graph based on the information in the
+ * repository. For each service it also tracks the current state and the
+ * restarter responsible for the service. Based on the graph, events from the
+ * repository (mostly administrative requests from svcadm), and messages from
+ * the restarters, the graph engine makes decisions about how the services
+ * should be manipulated and sends commands to the appropriate restarters.
+ * Communication between the graph engine and the restarters is embodied in
+ * protocol.c.
+ *
+ * The second half of svc.startd is the restarter for services managed by
+ * svc.startd and is primarily contained in restarter.c. It responds to graph
+ * engine commands by executing methods, updating the repository, and sending
+ * feedback (mostly state updates) to the graph engine.
+ *
+ * Error handling
+ *
+ * In general, when svc.startd runs out of memory it retries a few times,
+ * sleeping in between attempts, before giving up and exiting (see
+ * startd_alloc_retry()).
+ * When a repository connection is broken (libscf calls fail with
+ * SCF_ERROR_CONNECTION_BROKEN, librestart and internal functions return
+ * ECONNABORTED), svc.startd calls libscf_handle_rebind(), which coordinates
+ * with the svc.configd-restarting thread, fork_configd_thread(), via
+ * st->st_configd_live_cv, and rebinds the repository handle. Doing so resets
+ * all libscf state associated with that handle, so functions which do this
+ * should communicate the event to their callers (usually by returning
+ * ECONNRESET) so they may reset their state appropriately.
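+ *
+ * restarter_instance_update_states() in restarter.c is a representative
+ * example: on ECONNABORTED it rebinds the handle, notes that it did so,
+ * retries the state commit, and finally returns ECONNRESET to its caller.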
+ */
+
+#include <stdio.h>
+#include <sys/mnttab.h> /* uses FILE * without including stdio.h */
+#include <alloca.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <ftw.h>
+#include <libintl.h>
+#include <libscf.h>
+#include <libscf_priv.h>
+#include <libuutil.h>
+#include <locale.h>
+#include <poll.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+
+#include "startd.h"
+#include "protocol.h"
+
+ssize_t max_scf_name_size;
+ssize_t max_scf_fmri_size;
+ssize_t max_scf_value_size;
+
+mode_t fmask;
+mode_t dmask;
+
+graph_update_t *gu;
+restarter_update_t *ru;
+
+startd_state_t *st;
+
+boolean_t booting_to_single_user = B_FALSE;
+
+const char * const admin_actions[] = {
+ SCF_PROPERTY_DEGRADED,
+ SCF_PROPERTY_MAINT_OFF,
+ SCF_PROPERTY_MAINT_ON,
+ SCF_PROPERTY_MAINT_ON_IMMEDIATE,
+ SCF_PROPERTY_REFRESH,
+ SCF_PROPERTY_RESTART
+};
+
+const int admin_events[NACTIONS] = {
+ RESTARTER_EVENT_TYPE_ADMIN_DEGRADED,
+ RESTARTER_EVENT_TYPE_ADMIN_MAINT_OFF,
+ RESTARTER_EVENT_TYPE_ADMIN_MAINT_ON,
+ RESTARTER_EVENT_TYPE_ADMIN_MAINT_ON_IMMEDIATE,
+ RESTARTER_EVENT_TYPE_ADMIN_REFRESH,
+ RESTARTER_EVENT_TYPE_ADMIN_RESTART
+};
+
+const char * const instance_state_str[] = {
+ "none",
+ "uninitialized",
+ "maintenance",
+ "offline",
+ "disabled",
+ "online",
+ "degraded"
+};
+
+static int finished = 0;
+static int opt_reconfig = 0;
+static uint8_t prop_reconfig = 0;
+
+#define INITIAL_REBIND_ATTEMPTS 5
+#define INITIAL_REBIND_DELAY 3
+
+pthread_mutexattr_t mutex_attrs;
+
+const char *
+_umem_debug_init(void)
+{
+ return ("default,verbose"); /* UMEM_DEBUG setting */
+}
+
+const char *
+_umem_logging_init(void)
+{
+ return ("fail,contents"); /* UMEM_LOGGING setting */
+}
+
+/*
+ * startd_alloc_retry()
+ *   Wrapper for allocation functions. Retries with an increasing delay
+ *   on failure to allocate, and aborts startd if the failure is
+ *   persistent.
+ */
+void *
+startd_alloc_retry(void *f(size_t, int), size_t sz)
+{
+ void *p;
+ uint_t try, msecs;
+
+ p = f(sz, UMEM_DEFAULT);
+ if (p != NULL || sz == 0)
+ return (p);
+
+ msecs = ALLOC_DELAY;
+
+ for (try = 0; p == NULL && try < ALLOC_RETRY; ++try) {
+ (void) poll(NULL, 0, msecs);
+ msecs *= ALLOC_DELAY_MULT;
+ p = f(sz, UMEM_DEFAULT);
+ if (p != NULL)
+ return (p);
+ }
+
+ uu_die("Insufficient memory.\n");
+ /* NOTREACHED */
+}
+
+void *
+safe_realloc(void *p, size_t sz)
+{
+ uint_t try, msecs;
+
+ p = realloc(p, sz);
+ if (p != NULL || sz == 0)
+ return (p);
+
+ msecs = ALLOC_DELAY;
+
+ for (try = 0; errno == EAGAIN && try < ALLOC_RETRY; ++try) {
+ (void) poll(NULL, 0, msecs);
+ p = realloc(p, sz);
+ if (p != NULL)
+ return (p);
+ msecs *= ALLOC_DELAY_MULT;
+ }
+
+ uu_die("Insufficient memory.\n");
+ /* NOTREACHED */
+}
+
+char *
+safe_strdup(const char *s)
+{
+ uint_t try, msecs;
+ char *d;
+
+ d = strdup(s);
+ if (d != NULL)
+ return (d);
+
+ msecs = ALLOC_DELAY;
+
+ for (try = 0;
+ (errno == EAGAIN || errno == ENOMEM) && try < ALLOC_RETRY;
+ ++try) {
+ (void) poll(NULL, 0, msecs);
+ d = strdup(s);
+ if (d != NULL)
+ return (d);
+ msecs *= ALLOC_DELAY_MULT;
+ }
+
+ uu_die("Insufficient memory.\n");
+ /* NOTREACHED */
+}
+
+
+void
+startd_free(void *p, size_t sz)
+{
+ umem_free(p, sz);
+}
+
+/*
+ * Creates a uu_list_pool_t with the same retry policy as startd_alloc().
+ * Only returns NULL for UU_ERROR_UNKNOWN_FLAG and UU_ERROR_NOT_SUPPORTED.
+ */
+uu_list_pool_t *
+startd_list_pool_create(const char *name, size_t e, size_t o,
+ uu_compare_fn_t *f, uint32_t flags)
+{
+ uu_list_pool_t *pool;
+ uint_t try, msecs;
+
+ pool = uu_list_pool_create(name, e, o, f, flags);
+ if (pool != NULL)
+ return (pool);
+
+ msecs = ALLOC_DELAY;
+
+ for (try = 0; uu_error() == UU_ERROR_NO_MEMORY && try < ALLOC_RETRY;
+ ++try) {
+ (void) poll(NULL, 0, msecs);
+ pool = uu_list_pool_create(name, e, o, f, flags);
+ if (pool != NULL)
+ return (pool);
+ msecs *= ALLOC_DELAY_MULT;
+ }
+
+ if (try < ALLOC_RETRY)
+ return (NULL);
+
+ uu_die("Insufficient memory.\n");
+ /* NOTREACHED */
+}
+
+/*
+ * Creates a uu_list_t with the same retry policy as startd_alloc(). Only
+ * returns NULL for UU_ERROR_UNKNOWN_FLAG and UU_ERROR_NOT_SUPPORTED.
+ */
+uu_list_t *
+startd_list_create(uu_list_pool_t *pool, void *parent, uint32_t flags)
+{
+ uu_list_t *list;
+ uint_t try, msecs;
+
+ list = uu_list_create(pool, parent, flags);
+ if (list != NULL)
+ return (list);
+
+ msecs = ALLOC_DELAY;
+
+ for (try = 0; uu_error() == UU_ERROR_NO_MEMORY && try < ALLOC_RETRY;
+ ++try) {
+ (void) poll(NULL, 0, msecs);
+ list = uu_list_create(pool, parent, flags);
+ if (list != NULL)
+ return (list);
+ msecs *= ALLOC_DELAY_MULT;
+ }
+
+ if (try < ALLOC_RETRY)
+ return (NULL);
+
+ uu_die("Insufficient memory.\n");
+ /* NOTREACHED */
+}
+
+pthread_t
+startd_thread_create(void *(*func)(void *), void *ptr)
+{
+ int err;
+ pthread_t tid;
+
+ err = pthread_create(&tid, NULL, func, ptr);
+ if (err != 0) {
+ assert(err == EAGAIN);
+ uu_die("Could not create thread.\n");
+ }
+
+ err = pthread_detach(tid);
+ assert(err == 0);
+
+ return (tid);
+}
+
+
+static int
+read_startd_config(int log_args)
+{
+ scf_handle_t *hndl;
+ scf_instance_t *inst;
+ scf_propertygroup_t *pg;
+ scf_property_t *prop;
+ scf_value_t *val;
+ scf_iter_t *iter, *piter;
+ instance_data_t idata;
+ char *buf, *vbuf;
+ char *startd_options_fmri = uu_msprintf("%s/:properties/options",
+ SCF_SERVICE_STARTD);
+ char *startd_reconfigure_fmri = uu_msprintf(
+ "%s/:properties/system/reconfigure", SCF_SERVICE_STARTD);
+ char *env_opts, *lasts, *cp;
+ int bind_fails = 0;
+ int ret = 0, r;
+ uint_t count = 0, msecs = ALLOC_DELAY;
+ size_t sz;
+ ctid_t ctid;
+ uint64_t uint64;
+
+ buf = startd_alloc(max_scf_fmri_size);
+
+ if (startd_options_fmri == NULL || startd_reconfigure_fmri == NULL)
+ uu_die("Allocation failure\n");
+
+ st->st_log_prefix = LOG_PREFIX_EARLY;
+
+ if ((st->st_log_file = getenv("STARTD_DEFAULT_LOG")) == NULL) {
+ st->st_log_file = startd_alloc(strlen(STARTD_DEFAULT_LOG) + 1);
+
+ (void) strcpy(st->st_log_file, STARTD_DEFAULT_LOG);
+ }
+
+ st->st_door_path = getenv("STARTD_ALT_DOOR");
+
+ /*
+ * Read "options" property group.
+ */
+ for (hndl = libscf_handle_create_bound(SCF_VERSION); hndl == NULL;
+ hndl = libscf_handle_create_bound(SCF_VERSION), bind_fails++) {
+ (void) sleep(INITIAL_REBIND_DELAY);
+
+ if (bind_fails > INITIAL_REBIND_ATTEMPTS) {
+ /*
+ * In the case that we can't bind to the repository
+ * (which should have been started), we need to allow
+ * the user into maintenance mode to determine what's
+ * failed.
+ */
+ log_framework(LOG_INFO, "Couldn't fetch "
+ "default settings: %s\n",
+ scf_strerror(scf_error()));
+
+ ret = -1;
+
+ goto noscfout;
+ }
+ }
+
+ idata.i_fmri = SCF_SERVICE_STARTD;
+ idata.i_state = RESTARTER_STATE_NONE;
+ idata.i_next_state = RESTARTER_STATE_NONE;
+timestamp:
+ switch (r = _restarter_commit_states(hndl, &idata,
+ RESTARTER_STATE_ONLINE, RESTARTER_STATE_NONE, NULL)) {
+ case 0:
+ break;
+
+ case ENOMEM:
+ ++count;
+ if (count < ALLOC_RETRY) {
+ (void) poll(NULL, 0, msecs);
+ msecs *= ALLOC_DELAY_MULT;
+ goto timestamp;
+ }
+
+ uu_die("Insufficient memory.\n");
+ /* NOTREACHED */
+
+ case ECONNABORTED:
+ libscf_handle_rebind(hndl);
+ goto timestamp;
+
+ case ENOENT:
+ case EPERM:
+ case EACCES:
+ case EROFS:
+		log_error(LOG_INFO, "Could not set state of %s: %s.\n",
+ idata.i_fmri, strerror(r));
+ break;
+
+ case EINVAL:
+ default:
+ bad_error("_restarter_commit_states", r);
+ }
+
+ pg = safe_scf_pg_create(hndl);
+ prop = safe_scf_property_create(hndl);
+ val = safe_scf_value_create(hndl);
+ inst = safe_scf_instance_create(hndl);
+
+ /* set startd's restarter properties */
+ if (scf_handle_decode_fmri(hndl, SCF_SERVICE_STARTD, NULL, NULL, inst,
+ NULL, NULL, SCF_DECODE_FMRI_EXACT) == 0) {
+ (void) libscf_write_start_pid(inst, getpid());
+ ctid = proc_get_ctid();
+ if (ctid != -1) {
+ uint64 = (uint64_t)ctid;
+ (void) libscf_inst_set_count_prop(inst,
+ SCF_PG_RESTARTER, SCF_PG_RESTARTER_TYPE,
+ SCF_PG_RESTARTER_FLAGS, SCF_PROPERTY_CONTRACT,
+ uint64);
+ }
+ (void) libscf_note_method_log(inst, LOG_PREFIX_EARLY,
+ STARTD_DEFAULT_LOG);
+ (void) libscf_note_method_log(inst, LOG_PREFIX_NORMAL,
+ STARTD_DEFAULT_LOG);
+ }
+
+ /* Read reconfigure property for recovery. */
+ if (scf_handle_decode_fmri(hndl, startd_reconfigure_fmri, NULL, NULL,
+ NULL, NULL, prop, NULL) != -1 &&
+ scf_property_get_value(prop, val) == 0)
+ (void) scf_value_get_boolean(val, &prop_reconfig);
+
+ if (scf_handle_decode_fmri(hndl, startd_options_fmri, NULL, NULL, NULL,
+ pg, NULL, SCF_DECODE_FMRI_TRUNCATE) == -1) {
+ /*
+ * No configuration options defined.
+ */
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ uu_warn("Couldn't read configuration from 'options' "
+ "group: %s\n", scf_strerror(scf_error()));
+ goto scfout;
+ }
+
+ /*
+ * If there is no "options" group defined, then our defaults are fine.
+ */
+ if (scf_pg_get_name(pg, NULL, 0) < 0)
+ goto scfout;
+
+ /* Iterate through. */
+ iter = safe_scf_iter_create(hndl);
+
+ (void) scf_iter_pg_properties(iter, pg);
+
+ piter = safe_scf_iter_create(hndl);
+ vbuf = startd_alloc(max_scf_value_size);
+
+ while ((scf_iter_next_property(iter, prop) == 1)) {
+ scf_type_t ty;
+
+ if (scf_property_get_name(prop, buf, max_scf_fmri_size) < 0)
+ continue;
+
+ if (strcmp(buf, "logging") != 0 &&
+ strcmp(buf, "boot_messages") != 0)
+ continue;
+
+ if (scf_property_type(prop, &ty) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ libscf_handle_rebind(hndl);
+ continue;
+
+ case SCF_ERROR_DELETED:
+ continue;
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_property_type", scf_error());
+ }
+ }
+
+ if (ty != SCF_TYPE_ASTRING) {
+ uu_warn("property \"options/%s\" is not of type "
+ "astring; ignored.\n", buf);
+ continue;
+ }
+
+ if (scf_property_get_value(prop, val) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_NOT_FOUND:
+ return (0);
+
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ uu_warn("property \"options/%s\" has multiple "
+ "values; ignored.\n", buf);
+ continue;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ bad_error("scf_property_get_value",
+ scf_error());
+ }
+ }
+
+ if (scf_value_get_astring(val, vbuf, max_scf_value_size) < 0)
+ bad_error("scf_value_get_astring", scf_error());
+
+ if (!log_args && strcmp("logging", buf) == 0) {
+ if (strcmp("verbose", vbuf) == 0) {
+ st->st_boot_flags = STARTD_BOOT_VERBOSE;
+ st->st_log_flags = STARTD_LOG_VERBOSE;
+ st->st_log_level_min = LOG_INFO;
+ } else if (strcmp("debug", vbuf) == 0) {
+ st->st_boot_flags = STARTD_BOOT_VERBOSE;
+ st->st_log_flags = STARTD_LOG_DEBUG;
+ st->st_log_level_min = LOG_DEBUG;
+ } else if (strcmp("quiet", vbuf) == 0) {
+ st->st_log_flags = STARTD_LOG_QUIET;
+ st->st_log_level_min = LOG_NOTICE;
+ } else {
+ uu_warn("unknown options/logging "
+ "value '%s' ignored\n", vbuf);
+ }
+
+ } else if (strcmp("boot_messages", buf) == 0) {
+ if (strcmp("quiet", vbuf) == 0) {
+ st->st_boot_flags = STARTD_BOOT_QUIET;
+ } else if (strcmp("verbose", vbuf) == 0) {
+ st->st_boot_flags = STARTD_BOOT_VERBOSE;
+ } else {
+ log_framework(LOG_NOTICE, "unknown "
+ "options/boot_messages value '%s' "
+ "ignored\n", vbuf);
+ }
+
+ }
+ }
+
+ startd_free(vbuf, max_scf_value_size);
+ scf_iter_destroy(piter);
+
+ scf_iter_destroy(iter);
+
+scfout:
+ scf_value_destroy(val);
+ scf_pg_destroy(pg);
+ scf_property_destroy(prop);
+ scf_instance_destroy(inst);
+ (void) scf_handle_unbind(hndl);
+ scf_handle_destroy(hndl);
+
+noscfout:
+ startd_free(buf, max_scf_fmri_size);
+ uu_free(startd_options_fmri);
+ uu_free(startd_reconfigure_fmri);
+
+ if (booting_to_single_user) {
+ st->st_subgraph = startd_alloc(max_scf_fmri_size);
+ sz = strlcpy(st->st_subgraph, "milestone/single-user:default",
+ max_scf_fmri_size);
+ assert(sz < max_scf_fmri_size);
+ }
+
+ /*
+ * Options passed in as boot arguments override repository defaults.
+ */
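+	/*
+	 * For example (a sketch), SMF_OPTIONS="verbose,milestone=single-user"
+	 * selects verbose logging and the single-user milestone subgraph.
+	 */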
+ env_opts = getenv("SMF_OPTIONS");
+ if (env_opts == NULL)
+ return (ret);
+
+ cp = strtok_r(env_opts, ",", &lasts);
+ while (cp != NULL) {
+ if (strcmp(cp, "debug") == 0) {
+ st->st_boot_flags = STARTD_BOOT_VERBOSE;
+ st->st_log_flags = STARTD_LOG_DEBUG;
+ st->st_log_level_min = LOG_DEBUG;
+ } else if (strcmp(cp, "verbose") == 0) {
+ st->st_boot_flags = STARTD_BOOT_VERBOSE;
+ st->st_log_flags = STARTD_LOG_VERBOSE;
+ st->st_log_level_min = LOG_INFO;
+ } else if (strcmp(cp, "seed") == 0) {
+ uu_warn("SMF option \"%s\" unimplemented.\n", cp);
+ } else if (strcmp(cp, "quiet") == 0) {
+ st->st_log_flags = STARTD_LOG_QUIET;
+ st->st_log_level_min = LOG_NOTICE;
+ } else if (strncmp(cp, "milestone=",
+ sizeof ("milestone=") - 1) == 0) {
+ char *mp = cp + sizeof ("milestone=") - 1;
+
+			if (booting_to_single_user) {
+				cp = strtok_r(NULL, ",", &lasts);
+				continue;
+			}
+
+ if (st->st_subgraph == NULL) {
+ st->st_subgraph =
+ startd_alloc(max_scf_fmri_size);
+ st->st_subgraph[0] = '\0';
+ }
+
+ if (mp[0] == '\0' || strcmp(mp, "all") == 0) {
+ (void) strcpy(st->st_subgraph, "all");
+ } else if (strcmp(mp, "su") == 0 ||
+ strcmp(mp, "single-user") == 0) {
+ (void) strcpy(st->st_subgraph,
+ "milestone/single-user:default");
+ } else if (strcmp(mp, "mu") == 0 ||
+ strcmp(mp, "multi-user") == 0) {
+ (void) strcpy(st->st_subgraph,
+ "milestone/multi-user:default");
+ } else if (strcmp(mp, "mus") == 0 ||
+ strcmp(mp, "multi-user-server") == 0) {
+ (void) strcpy(st->st_subgraph,
+ "milestone/multi-user-server:default");
+ } else if (strcmp(mp, "none") == 0) {
+ (void) strcpy(st->st_subgraph, "none");
+ } else {
+ log_framework(LOG_NOTICE,
+ "invalid milestone option value "
+ "'%s' ignored\n", mp);
+ }
+ } else {
+ uu_warn("Unknown SMF option \"%s\".\n", cp);
+ }
+
+ cp = strtok_r(NULL, ",", &lasts);
+ }
+
+ return (ret);
+}
+
+/*
+ * void set_boot_env()
+ *
+ * If -r was passed or /reconfigure exists, this is a reconfig
+ * reboot. We need to make sure that this information is given
+ * to the appropriate services the first time they're started
+ * by setting the system/reconfigure repository property,
+ * as well as pass the _INIT_RECONFIG variable on to the rcS
+ * start method so that legacy services can continue to use it.
+ *
+ * This function must never be called before contract_init(), as
+ * it sets st_initial. read_startd_config() sets prop_reconfig from
+ * pre-existing repository state.
+ */
+static void
+set_boot_env()
+{
+ struct stat sb;
+ int r;
+
+ /*
+	 * Check if the property is still set -- this indicates we didn't
+	 * get far enough previously to unset it. Otherwise, if this isn't
+ * the first startup, don't re-process /reconfigure or the
+ * boot flag.
+ */
+ if (prop_reconfig != 1 && st->st_initial != 1)
+ return;
+
+ /* If /reconfigure exists, also set opt_reconfig. */
+ if (stat("/reconfigure", &sb) != -1)
+ opt_reconfig = 1;
+
+ /* Nothing to do. Just return. */
+ if (opt_reconfig == 0 && prop_reconfig == 0)
+ return;
+
+ /*
+ * Set startd's reconfigure property. This property is
+ * then cleared by successful completion of the single-user
+ * milestone.
+ */
+ if (prop_reconfig != 1) {
+ r = libscf_set_reconfig(1);
+ switch (r) {
+ case 0:
+ break;
+
+ case ENOENT:
+ case EPERM:
+ case EACCES:
+ case EROFS:
+ log_error(LOG_WARNING, "Could not set reconfiguration "
+ "property: %s\n", strerror(r));
+ break;
+
+ default:
+ bad_error("libscf_set_reconfig", r);
+ }
+ }
+}
+
+static void
+startup(int log_args)
+{
+ ctid_t configd_ctid;
+ int err;
+
+ /*
+ * Initialize data structures.
+ */
+ gu = startd_zalloc(sizeof (graph_update_t));
+ ru = startd_zalloc(sizeof (restarter_update_t));
+
+ (void) pthread_cond_init(&st->st_load_cv, NULL);
+ (void) pthread_cond_init(&st->st_configd_live_cv, NULL);
+ (void) pthread_cond_init(&gu->gu_cv, NULL);
+ (void) pthread_cond_init(&gu->gu_freeze_cv, NULL);
+ (void) pthread_cond_init(&ru->restarter_update_cv, NULL);
+ (void) pthread_mutex_init(&st->st_load_lock, &mutex_attrs);
+ (void) pthread_mutex_init(&st->st_configd_live_lock, &mutex_attrs);
+ (void) pthread_mutex_init(&gu->gu_lock, &mutex_attrs);
+ (void) pthread_mutex_init(&gu->gu_freeze_lock, &mutex_attrs);
+ (void) pthread_mutex_init(&ru->restarter_update_lock, &mutex_attrs);
+
+ configd_ctid = contract_init();
+
+ if (configd_ctid != -1)
+ log_framework(LOG_DEBUG, "Existing configd contract %ld; not "
+ "starting svc.configd\n", configd_ctid);
+
+ (void) startd_thread_create(fork_configd_thread, (void *)configd_ctid);
+
+ /*
+ * Await, if necessary, configd's initial arrival.
+ */
+ MUTEX_LOCK(&st->st_configd_live_lock);
+ while (!st->st_configd_lives) {
+ log_framework(LOG_DEBUG, "Awaiting cv signal on "
+ "configd_live_cv\n");
+ err = pthread_cond_wait(&st->st_configd_live_cv,
+ &st->st_configd_live_lock);
+ assert(err == 0);
+ }
+ MUTEX_UNLOCK(&st->st_configd_live_lock);
+
+ utmpx_init();
+ wait_init();
+
+ if (read_startd_config(log_args))
+ log_framework(LOG_INFO, "svc.configd unable to provide startd "
+ "optional settings\n");
+
+ log_init();
+ dict_init();
+ timeout_init();
+ restarter_protocol_init();
+ restarter_init();
+ graph_protocol_init();
+ graph_init();
+
+ init_env();
+
+ set_boot_env();
+ restarter_start();
+ graph_engine_start();
+}
+
+static void
+usage(const char *name)
+{
+ uu_warn(gettext("usage: %s [-dnq]\n"), name);
+ exit(UU_EXIT_USAGE);
+}
+
+static int
+daemonize_start(void)
+{
+ pid_t pid;
+ int fd;
+
+ if ((pid = fork1()) < 0)
+ return (-1);
+
+ if (pid != 0)
+ exit(0);
+
+ (void) close(0);
+
+ if ((fd = open("/dev/null", O_RDONLY)) == -1) {
+ uu_warn(gettext("can't connect stdin to /dev/null"));
+ } else if (fd != 0) {
+ (void) dup2(fd, 0);
+ startd_close(fd);
+ }
+
+ closefrom(3);
+ (void) dup2(2, 1);
+
+ (void) setsid();
+ (void) chdir("/");
+
+ /* Use default umask that init handed us, but 022 to create files. */
+ dmask = umask(022);
+ fmask = umask(dmask);
+
+ return (0);
+}
+
+/*ARGSUSED*/
+static void
+die_handler(int sig, siginfo_t *info, void *data)
+{
+ finished = 1;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int opt;
+ int daemonize = 1;
+ int log_args = 0;
+ struct sigaction act;
+ sigset_t nullset;
+ struct stat sb;
+
+ (void) uu_setpname(argv[0]);
+
+ st = startd_zalloc(sizeof (startd_state_t));
+
+ (void) pthread_mutexattr_init(&mutex_attrs);
+#ifndef NDEBUG
+ (void) pthread_mutexattr_settype(&mutex_attrs,
+ PTHREAD_MUTEX_ERRORCHECK);
+#endif
+
+ max_scf_name_size = scf_limit(SCF_LIMIT_MAX_NAME_LENGTH);
+ max_scf_value_size = scf_limit(SCF_LIMIT_MAX_VALUE_LENGTH);
+ max_scf_fmri_size = scf_limit(SCF_LIMIT_MAX_FMRI_LENGTH);
+
+ if (max_scf_name_size == -1 || max_scf_value_size == -1 ||
+	    max_scf_fmri_size == -1)
+ uu_die("Can't determine repository maximum lengths.\n");
+
+ max_scf_name_size++;
+ max_scf_value_size++;
+ max_scf_fmri_size++;
+
+ st->st_log_flags = STARTD_LOG_FILE;
+ st->st_log_level_min = LOG_INFO;
+
+ while ((opt = getopt(argc, argv, "dnqrs")) != EOF) {
+ switch (opt) {
+ case 'd':
+ st->st_log_flags =
+ STARTD_LOG_FILE | STARTD_LOG_TERMINAL;
+ st->st_log_level_min = LOG_DEBUG;
+ log_args = 1;
+ break;
+ case 'n':
+ daemonize = 0;
+ break;
+ case 'q':
+ st->st_log_flags = 0;
+ st->st_log_level_min = LOG_NOTICE;
+ log_args = 1;
+ break;
+ case 'r': /* reconfiguration boot */
+ opt_reconfig = 1;
+ break;
+ case 's': /* single-user mode */
+ booting_to_single_user = B_TRUE;
+ break;
+ default:
+ usage(argv[0]); /* exits */
+ }
+ }
+
+ if (optind != argc)
+ usage(argv[0]);
+
+ if (daemonize)
+ if (daemonize_start() < 0)
+ uu_die("Can't daemonize\n");
+
+ log_init();
+
+ if (stat("/etc/svc/volatile/resetting", &sb) != -1) {
+ log_framework(LOG_NOTICE, "Restarter quiesced.\n");
+
+ for (;;)
+ (void) pause();
+ }
+
+ act.sa_sigaction = &die_handler;
+ (void) sigfillset(&act.sa_mask);
+ act.sa_flags = SA_SIGINFO;
+ (void) sigaction(SIGINT, &act, NULL);
+ (void) sigaction(SIGTERM, &act, NULL);
+
+ startup(log_args);
+
+ (void) sigemptyset(&nullset);
+ while (!finished) {
+ log_framework(LOG_DEBUG, "Main thread paused\n");
+ (void) sigsuspend(&nullset);
+ }
+
+ (void) log_framework(LOG_DEBUG, "Restarter exiting.\n");
+ return (0);
+}
diff --git a/usr/src/cmd/svc/startd/startd.h b/usr/src/cmd/svc/startd/startd.h
new file mode 100644
index 0000000000..f000b866a9
--- /dev/null
+++ b/usr/src/cmd/svc/startd/startd.h
@@ -0,0 +1,731 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _STARTD_H
+#define _STARTD_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/time.h>
+#include <librestart.h>
+#include <librestart_priv.h>
+#include <libscf.h>
+#include <libsysevent.h>
+#include <libuutil.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <syslog.h>
+#include <umem.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * We want MUTEX_HELD, but we also want pthreads. So we're stuck with this.
+ */
+#define PTHREAD_MUTEX_HELD(m) _mutex_held((struct _lwp_mutex *)(m))
+
+#ifndef NDEBUG
+
+#define MUTEX_LOCK(mp) { \
+ int err; \
+ if ((err = pthread_mutex_lock((mp))) != 0) { \
+ (void) fprintf(stderr, \
+ "pthread_mutex_lock() failed on %s:%d: %s\n", \
+ __FILE__, __LINE__, strerror(err)); \
+ abort(); \
+ } \
+}
+
+#define MUTEX_UNLOCK(mp) { \
+ int err; \
+ if ((err = pthread_mutex_unlock((mp))) != 0) { \
+ (void) fprintf(stderr, \
+ "pthread_mutex_unlock() failed on %s:%d: %s\n", \
+ __FILE__, __LINE__, strerror(err)); \
+ abort(); \
+ } \
+}
+
+#else
+
+#define MUTEX_LOCK(mp) (void) pthread_mutex_lock((mp))
+#define MUTEX_UNLOCK(mp) (void) pthread_mutex_unlock((mp))
+
+#endif
+
+#ifndef NDEBUG
+#define bad_error(func, err) { \
+ (void) fprintf(stderr, "%s:%d: %s() failed with unexpected " \
+ "error %d. Aborting.\n", __FILE__, __LINE__, (func), (err)); \
+ abort(); \
+}
+#else
+#define bad_error(func, err) abort()
+#endif
+
+
+#define min(a, b) (((a) < (b)) ? (a) : (b))
+
+#define FAULT_COUNT_INCR 0
+#define FAULT_COUNT_RESET 1
+
+#define FAULT_THRESHOLD 3
+
+#define MAX_CONFIGD_RETRIES 5
+#define MAX_MOUNT_RETRIES 5
+#define MAX_SULOGIN_RETRIES 5
+
+#define RETURN_SUCCESS 0
+#define RETURN_RETRY -1
+#define RETURN_FATAL -2
+
+#define LIBSCF_SUCCESS 0
+#define LIBSCF_PROPERTY_ABSENT -1
+#define LIBSCF_PGROUP_ABSENT -2
+#define LIBSCF_PROPERTY_ERROR -3
+
+#define METHOD_START 0
+#define METHOD_STOP 1
+#define METHOD_REFRESH 2
+
+#define METHOD_TIMEOUT_INFINITE 0
+
+/*
+ * Contract cookies used by startd.
+ */
+#define CONFIGD_COOKIE 0x10
+#define SULOGIN_COOKIE 0x11
+#define METHOD_START_COOKIE 0x20
+#define METHOD_OTHER_COOKIE 0x21
+#define MONITOR_COOKIE 0x30
+
+
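+/*
+ * Allocation retry policy used by startd_alloc_retry() and related wrappers:
+ * retry up to ALLOC_RETRY times, waiting ALLOC_DELAY milliseconds before the
+ * first retry and multiplying the wait by ALLOC_DELAY_MULT for each
+ * subsequent retry.
+ */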
+#define ALLOC_RETRY 3
+#define ALLOC_DELAY 10
+#define ALLOC_DELAY_MULT 10
+
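+/*
+ * "safe" constructors for libscf objects: each wrapper routes the
+ * corresponding scf_*_create() call through libscf_object_create()
+ * (libscf.c) so that allocation failures are handled in one place.
+ */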
+#define safe_scf_scope_create(h) \
+ libscf_object_create((void *(*)(scf_handle_t *))scf_scope_create, (h))
+#define safe_scf_service_create(h) \
+ libscf_object_create((void *(*)(scf_handle_t *))scf_service_create, (h))
+#define safe_scf_instance_create(h) libscf_object_create( \
+ (void *(*)(scf_handle_t *))scf_instance_create, (h))
+#define safe_scf_snapshot_create(h) libscf_object_create( \
+ (void *(*)(scf_handle_t *))scf_snapshot_create, (h))
+#define safe_scf_snaplevel_create(h) libscf_object_create( \
+ (void *(*)(scf_handle_t *))scf_snaplevel_create, (h))
+#define safe_scf_pg_create(h) \
+ libscf_object_create((void *(*)(scf_handle_t *))scf_pg_create, (h))
+#define safe_scf_property_create(h) libscf_object_create( \
+ (void *(*)(scf_handle_t *))scf_property_create, (h))
+#define safe_scf_value_create(h) \
+ libscf_object_create((void *(*)(scf_handle_t *))scf_value_create, (h))
+#define safe_scf_iter_create(h) \
+ libscf_object_create((void *(*)(scf_handle_t *))scf_iter_create, (h))
+#define safe_scf_transaction_create(h) libscf_object_create( \
+ (void *(*)(scf_handle_t *)) scf_transaction_create, (h))
+#define safe_scf_entry_create(h) \
+ libscf_object_create((void *(*)(scf_handle_t *))scf_entry_create, (h))
+
+#define startd_alloc(sz) \
+ startd_alloc_retry((void *(*)(size_t, int))umem_alloc, (sz))
+#define startd_zalloc(sz) \
+ startd_alloc_retry((void *(*)(size_t, int))umem_zalloc, (sz))
+
+
+extern pthread_mutexattr_t mutex_attrs;
+
+/*
+ * Definitions for administrative actions.
+ * Note that the ordering in admin_action_t, admin_actions, and admin_events
+ * must match. admin_actions and admin_events are defined in startd.c.
+ */
+#define NACTIONS 6
+
+typedef enum {
+ ADMIN_EVENT_DEGRADED = 0x0,
+ ADMIN_EVENT_MAINT_OFF,
+ ADMIN_EVENT_MAINT_ON,
+ ADMIN_EVENT_MAINT_ON_IMMEDIATE,
+ ADMIN_EVENT_REFRESH,
+ ADMIN_EVENT_RESTART
+} admin_action_t;
+
+extern const char * const admin_actions[NACTIONS];
+extern const int admin_events[NACTIONS];
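+
+/*
+ * Both arrays are indexed by admin_action_t: for an action 'a',
+ * admin_actions[a] is the action's property name and admin_events[a] is the
+ * restarter event type delivered for it.
+ */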
+
+#define LOG_DATE_SIZE 32 /* Max size of timestamp in log output */
+
+extern ssize_t max_scf_name_size;
+extern ssize_t max_scf_value_size;
+extern ssize_t max_scf_fmri_size;
+
+extern mode_t fmask;
+extern mode_t dmask;
+
+#define LOG_PREFIX_EARLY "/etc/svc/volatile/"
+#define LOG_PREFIX_NORMAL "/var/svc/log/"
+
+#define LOG_SUFFIX ".log"
+
+#define STARTD_DEFAULT_LOG "svc.startd.log"
+
+extern const char *log_directory; /* Current log directory path */
+
+#define FS_TIMEZONE_DIR "/usr/share/lib/zoneinfo"
+#define FS_LOCALE_DIR "/usr/lib/locale"
+
+/*
+ * Simple dictionary representation.
+ */
+typedef struct dictionary {
+ uu_list_t *dict_list;
+ int dict_new_id;
+ pthread_mutex_t dict_lock;
+} dictionary_t;
+
+typedef struct dict_entry {
+ int de_id;
+ const char *de_name;
+ uu_list_node_t de_link;
+} dict_entry_t;
+
+extern dictionary_t *dictionary;
+
+typedef struct timeout_queue {
+ uu_list_t *tq_list;
+ pthread_mutex_t tq_lock;
+} timeout_queue_t;
+
+typedef struct timeout_entry {
+ hrtime_t te_timeout; /* timeout expiration time */
+ ctid_t te_ctid;
+ char *te_fmri;
+ char *te_logstem;
+ volatile int te_fired;
+ uu_list_node_t te_link;
+} timeout_entry_t;
+
+extern timeout_queue_t *timeouts;
+
+/*
+ * State definitions.
+ */
+typedef enum {
+ STATE_NONE = 0x0,
+ STATE_UNINIT,
+ STATE_MAINT,
+ STATE_OFFLINE,
+ STATE_DISABLED,
+ STATE_ONLINE,
+ STATE_DEGRADED
+} instance_state_t;
+
+#define STATE_MAX (STATE_DEGRADED + 1)
+
+extern const char * const instance_state_str[STATE_MAX];
+
+typedef enum {
+ GVT_UNSUPPORTED = -1,
+ GVT_UNKNOWN = 0,
+ GVT_SVC, /* service */
+ GVT_INST, /* instance */
+ GVT_FILE, /* file: */
+ GVT_GROUP /* dependency group */
+} gv_type_t;
+
+typedef enum {
+ DEPGRP_UNSUPPORTED = -1,
+ DEPGRP_REQUIRE_ANY = 1,
+ DEPGRP_REQUIRE_ALL,
+ DEPGRP_EXCLUDE_ALL,
+ DEPGRP_OPTIONAL_ALL
+} depgroup_type_t;
+
+typedef enum {
+ METHOD_RESTART_UNKNOWN = -1,
+ METHOD_RESTART_ALL = 0,
+ METHOD_RESTART_EXTERNAL_FAULT,
+ METHOD_RESTART_ANY_FAULT,
+ METHOD_RESTART_OTHER
+} method_restart_t;
+
+/*
+ * Graph representation.
+ */
+#define GV_CONFIGURED 0x01 /* Service exists in repository, ready */
+#define GV_ENABLED 0x02 /* Service should be online */
+#define GV_ENBLD_NOOVR 0x04 /* GV_ENABLED, ignoring override */
+#define GV_INSUBGRAPH 0x08 /* Current milestone depends on service */
+
+/* ID must come first to support search */
+typedef struct graph_vertex {
+ int gv_id;
+ char *gv_name;
+ uu_list_node_t gv_link;
+
+ uint_t gv_flags;
+ restarter_instance_state_t gv_state;
+
+ gv_type_t gv_type;
+
+ depgroup_type_t gv_depgroup;
+ restarter_error_t gv_restart;
+
+ void (*gv_start_f)(struct graph_vertex *);
+ void (*gv_post_online_f)(void);
+ void (*gv_post_disable_f)(void);
+
+ int gv_restarter_id;
+ evchan_t *gv_restarter_channel;
+
+ int gv_delegate_initialized;
+ evchan_t *gv_delegate_channel;
+
+ uu_list_t *gv_dependencies;
+ uu_list_t *gv_dependents;
+} graph_vertex_t;
+
+typedef struct graph_edge {
+ graph_vertex_t *ge_vertex;
+ uu_list_node_t ge_link;
+ graph_vertex_t *ge_parent;
+} graph_edge_t;
+
+
+/*
+ * Start method outcomes
+ */
+typedef enum {
+ START_REQUESTED,
+ START_FAILED_REPEATEDLY,
+ START_FAILED_CONFIGURATION,
+ START_FAILED_FATAL,
+ START_FAILED_TIMEOUT_FATAL,
+ START_FAILED_OTHER
+} start_outcome_t;
+
+typedef void (*instance_hook_t)(void);
+
+typedef struct service_hook_assn {
+ char *sh_fmri;
+ instance_hook_t sh_pre_online_hook;
+ instance_hook_t sh_post_online_hook;
+ instance_hook_t sh_post_offline_hook;
+} service_hook_assn_t;
+
+/*
+ * Restarter instance stop reasons.
+ */
+typedef enum {
+ RSTOP_EXIT = 0x0, /* exited or empty */
+ RSTOP_CORE, /* core dumped */
+ RSTOP_SIGNAL, /* external fatal signal received */
+ RSTOP_HWERR, /* uncorrectable hardware error */
+ RSTOP_DEPENDENCY, /* dependency activity caused stop */
+ RSTOP_DISABLE, /* disabled */
+ RSTOP_RESTART /* restart requested */
+} stop_cause_t;
+
+/*
+ * Restarter instance maintenance clear reasons.
+ */
+typedef enum {
+ RUNMAINT_CLEAR = 0x0,
+ RUNMAINT_DISABLE
+} unmaint_cause_t;
+
+/*
+ * Restarter instance flags
+ */
+#define RINST_CONTRACT 0x00000000 /* progeny constitute inst */
+#define RINST_TRANSIENT 0x10000000 /* inst operates momentarily */
+#define RINST_WAIT 0x20000000 /* child constitutes inst */
+#define RINST_STYLE_MASK 0xf0000000
+
+#define RINST_RETAKE_RUNNING 0x01000000 /* pending running snapshot */
+#define RINST_RETAKE_START 0x02000000 /* pending start snapshot */
+
+#define RINST_RETAKE_MASK 0x0f000000
+
+#define RINST_START_TIMES 10 /* failures to consider */
+#define RINST_FAILURE_RATE_NS 1000000000LL /* 1 failure/second */
+
+/* Number of events in the queue when we start dropping ADMIN events. */
+#define RINST_QUEUE_THRESHOLD 100
+
+typedef struct restarter_inst {
+ int ri_id;
+ instance_data_t ri_i;
+ char *ri_common_name; /* template localized name */
+ char *ri_C_common_name; /* C locale name */
+
+ char *ri_logstem; /* logfile name */
+ char *ri_utmpx_prefix;
+ uint_t ri_flags;
+ instance_hook_t ri_pre_online_hook;
+ instance_hook_t ri_post_online_hook;
+ instance_hook_t ri_post_offline_hook;
+
+ hrtime_t ri_start_time[RINST_START_TIMES];
+ uint_t ri_start_index; /* times started */
+
+ uu_list_node_t ri_link;
+ pthread_mutex_t ri_lock;
+
+ /*
+	 * When we start a thread to execute a method for this instance, we
+ * put the thread id in ri_method_thread. Threads with ids other than
+ * this which acquire ri_lock while ri_method_thread is nonzero should
+ * wait on ri_method_cv. ri_method_waiters should be incremented while
+ * waiting so the instance won't be deleted.
+ */
+ pthread_t ri_method_thread;
+ pthread_cond_t ri_method_cv;
+ uint_t ri_method_waiters;
+
+ /*
+ * These fields are provided so functions can operate on this structure
+ * and the repository without worrying about whether the instance has
+ * been deleted from the repository (this is possible because
+ * ri_i.i_fmri names the instance this structure represents -- see
+	 * libscf_reget_instance()). ri_m_inst is the scf_instance_t for the
+ * instance, and ri_mi_deleted is true if the instance has been deleted.
+ */
+ scf_instance_t *ri_m_inst;
+ boolean_t ri_mi_deleted;
+
+ /*
+ * We maintain a pointer to any pending timeout for this instance
+ * for quick reference/deletion.
+ */
+ timeout_entry_t *ri_timeout;
+
+ /*
+ * Instance event queue. Graph events are queued here as a list
+ * of restarter_instance_qentry_t's, and the lock is held separately.
+ * If both ri_lock and ri_queue_lock are grabbed, ri_lock must be
+ * grabbed first. ri_queue_lock protects all ri_queue_* structure
+ * members.
+ */
+ pthread_mutex_t ri_queue_lock;
+ pthread_cond_t ri_queue_cv;
+ uu_list_t *ri_queue;
+ int ri_queue_thread;
+
+} restarter_inst_t;
+
+typedef struct restarter_instance_list {
+ uu_list_t *ril_instance_list;
+ pthread_mutex_t ril_lock;
+} restarter_instance_list_t;
+
+typedef struct restarter_instance_qentry {
+ restarter_event_type_t riq_type;
+ uu_list_node_t riq_link;
+} restarter_instance_qentry_t;
+
+typedef struct fork_info {
+ int sf_id;
+ int sf_method_type;
+ restarter_error_t sf_event_type;
+} fork_info_t;
+
+typedef struct wait_info {
+ uu_list_node_t wi_link;
+
+ int wi_fd; /* psinfo file descriptor */
+ id_t wi_pid; /* process ID */
+ const char *wi_fmri; /* instance FMRI */
+ int wi_parent; /* startd is parent */
+} wait_info_t;
+
+#define STARTD_LOG_FILE 0x1
+#define STARTD_LOG_TERMINAL 0x2
+#define STARTD_LOG_SYSLOG 0x4
+
+#define STARTD_BOOT 0x1
+#define STARTD_DEBUG 0x2
+
+#define STARTD_BOOT_QUIET 0x1
+#define STARTD_BOOT_VERBOSE 0x2
+
+#define STARTD_LOG_QUIET 0x1
+#define STARTD_LOG_VERBOSE 0x2
+#define STARTD_LOG_DEBUG 0x3
+
+typedef struct startd_state {
+ /* Logging configuration */
+ char *st_log_prefix; /* directory prefix */
+ char *st_log_file; /* startd file in above dir */
+ uint_t st_log_flags; /* message destination */
+ int st_log_level_min; /* minimum required to log */
+ int st_log_timezone_known; /* timezone is available */
+ int st_log_locale_known; /* locale is available */
+ int st_log_login_reached; /* login service reached */
+
+ /* Boot configuration */
+ uint_t st_boot_flags; /* serial boot, etc. */
+ uint_t st_initial; /* first startd on system */
+
+ /* System configuration */
+ char *st_subgraph; /* milestone subgraph request */
+
+ uint_t st_load_complete; /* graph load completed */
+ uint_t st_load_instances; /* restarter instances to load */
+ pthread_mutex_t st_load_lock;
+ pthread_cond_t st_load_cv;
+
+ /* Repository configuration */
+ pid_t st_configd_pid; /* PID of our svc.configd */
+ /* instance */
+ int st_configd_lives; /* configd started */
+ pthread_mutex_t st_configd_live_lock;
+ pthread_cond_t st_configd_live_cv;
+
+ char *st_door_path;
+
+ /* General information */
+ uint_t st_flags;
+ struct timeval st_start_time; /* effective system start time */
+ char *st_locale;
+} startd_state_t;
+
+extern startd_state_t *st;
+
+extern boolean_t booting_to_single_user;
+
+extern const char *event_names[];
+
+/*
+ * Structures for contract to instance hash table, implemented in
+ * contract.c and used by restarter.c and method.c
+ */
+typedef struct contract_entry {
+ ctid_t ce_ctid;
+ int ce_instid;
+
+ uu_list_node_t ce_link;
+} contract_entry_t;
+
+uu_list_pool_t *contract_list_pool;
+
+/* contract.c */
+ctid_t contract_init(void);
+void contract_abandon(ctid_t);
+int contract_kill(ctid_t, int, const char *);
+int contract_is_empty(ctid_t);
+void contract_hash_init();
+void contract_hash_store(ctid_t, int);
+void contract_hash_remove(ctid_t);
+int lookup_inst_by_contract(ctid_t);
+
+/* dict.c */
+void dict_init(void);
+int dict_lookup_byname(const char *);
+int dict_insert(const char *);
+
+/* expand.c */
+int expand_method_tokens(const char *, scf_instance_t *,
+ scf_snapshot_t *, int, char **);
+
+/* env.c */
+void init_env(void);
+char **set_smf_env(char **, size_t, const char *,
+ const restarter_inst_t *, const char *);
+
+/* file.c */
+int file_ready(graph_vertex_t *);
+
+/* fork.c */
+int fork_mount(char *, char *);
+void fork_sulogin(boolean_t, const char *, ...);
+void fork_rc_script(char, const char *, boolean_t);
+
+void *fork_configd_thread(void *);
+
+pid_t startd_fork1(int *);
+
+/* graph.c */
+void graph_init(void);
+void *single_user_thread(void *);
+void *graph_thread(void *);
+void *graph_event_thread(void *);
+void *repository_event_thread(void *);
+int dgraph_add_instance(const char *, scf_instance_t *, boolean_t);
+void graph_engine_start(void);
+
+/* libscf.c - common */
+char *inst_fmri_to_svc_fmri(const char *);
+void *libscf_object_create(void *(*)(scf_handle_t *), scf_handle_t *);
+int libscf_instance_get_fmri(scf_instance_t *, char **);
+int libscf_fmri_get_instance(scf_handle_t *, const char *, scf_instance_t **);
+int libscf_lookup_instance(const char *, scf_instance_t *);
+int libscf_set_reconfig(int);
+scf_snapshot_t *libscf_get_or_make_running_snapshot(scf_instance_t *,
+ const char *, boolean_t);
+int libscf_inst_set_count_prop(scf_instance_t *, const char *,
+ const char *pgtype, uint32_t, const char *, uint64_t);
+
+/* libscf.c - used by graph.c */
+int libscf_get_basic_instance_data(scf_handle_t *, scf_instance_t *,
+ const char *, int *, int *, char **);
+int libscf_inst_get_or_add_pg(scf_instance_t *, const char *, const char *,
+ uint32_t, scf_propertygroup_t *);
+int libscf_read_states(const scf_propertygroup_t *,
+ restarter_instance_state_t *, restarter_instance_state_t *);
+int depgroup_empty(scf_handle_t *, scf_propertygroup_t *);
+gv_type_t depgroup_read_scheme(scf_handle_t *, scf_propertygroup_t *);
+depgroup_type_t depgroup_read_grouping(scf_handle_t *, scf_propertygroup_t *);
+restarter_error_t depgroup_read_restart(scf_handle_t *, scf_propertygroup_t *);
+int libscf_set_enable_ovr(scf_instance_t *, int);
+int libscf_inst_delete_prop(scf_instance_t *, const char *, const char *);
+int libscf_delete_enable_ovr(scf_instance_t *);
+int libscf_get_milestone(scf_instance_t *, scf_property_t *, scf_value_t *,
+ char *, size_t);
+int libscf_extract_runlevel(scf_property_t *, char *);
+int libscf_clear_runlevel(scf_propertygroup_t *, const char *milestone);
+
+typedef int (*callback_t)(void *, void *);
+
+int walk_dependency_pgs(scf_instance_t *, callback_t, void *);
+int walk_property_astrings(scf_property_t *, callback_t, void *);
+
+/* libscf.c - used by restarter.c/method.c/expand.c */
+char *libscf_get_method(scf_handle_t *, int, restarter_inst_t *,
+ scf_snapshot_t *, method_restart_t *, uint_t *, uint8_t *, uint64_t *,
+ uint8_t *);
+void libscf_populate_graph(scf_handle_t *h);
+int update_fault_count(restarter_inst_t *, int);
+int libscf_unset_action(scf_handle_t *, scf_propertygroup_t *, admin_action_t,
+ int64_t);
+int libscf_get_startd_properties(scf_instance_t *, scf_snapshot_t *, uint_t *,
+ char **);
+int libscf_get_template_values(scf_instance_t *, scf_snapshot_t *, char **,
+ char **);
+
+int libscf_read_method_ids(scf_handle_t *, scf_instance_t *, const char *,
+ ctid_t *, ctid_t *, pid_t *);
+int libscf_write_start_pid(scf_instance_t *, pid_t);
+int libscf_write_method_status(scf_instance_t *, const char *, int);
+int libscf_note_method_log(scf_instance_t *, const char *, const char *);
+
+scf_handle_t *libscf_handle_create_bound(scf_version_t);
+void libscf_handle_rebind(scf_handle_t *);
+scf_handle_t *libscf_handle_create_bound_loop(void);
+
+scf_snapshot_t *libscf_get_running_snapshot(scf_instance_t *);
+int libscf_snapshots_poststart(scf_handle_t *, const char *, boolean_t);
+int libscf_snapshots_refresh(scf_instance_t *, const char *);
+
+int instance_is_transient_style(restarter_inst_t *);
+int instance_is_wait_style(restarter_inst_t *);
+
+int libscf_create_self(scf_handle_t *);
+
+void libscf_reget_instance(restarter_inst_t *);
+
+/* log.c */
+void log_init();
+void log_error(int, const char *, ...);
+void log_framework(int, const char *, ...);
+void log_console(int, const char *, ...);
+void log_preexec(void);
+void setlog(const char *);
+void log_transition(const restarter_inst_t *, start_outcome_t);
+void log_instance(const restarter_inst_t *, boolean_t, const char *, ...);
+void log_instance_fmri(const char *, const char *, boolean_t,
+ const char *, ...);
+
+/* method.c */
+void *method_thread(void *);
+void method_remove_contract(restarter_inst_t *, boolean_t, boolean_t);
+
+/* misc.c */
+void startd_close(int);
+void startd_fclose(FILE *);
+int fmri_canonify(const char *, char **, boolean_t);
+int fs_is_read_only(char *, ulong_t *);
+int fs_remount(char *);
+void xstr_sanitize(char *);
+
+/* restarter.c */
+void restarter_init(void);
+void restarter_start(void);
+int instance_in_transition(restarter_inst_t *);
+int restarter_instance_update_states(scf_handle_t *, restarter_inst_t *,
+ restarter_instance_state_t, restarter_instance_state_t, restarter_error_t,
+ char *);
+int stop_instance_fmri(scf_handle_t *, const char *, uint_t);
+restarter_inst_t *inst_lookup_by_id(int);
+void restarter_mark_pending_snapshot(const char *, uint_t);
+void *restarter_post_fsminimal_thread(void *);
+void timeout_insert(restarter_inst_t *, ctid_t, uint64_t);
+void timeout_remove(restarter_inst_t *, ctid_t);
+void timeout_init(void);
+int is_timeout_ovr(restarter_inst_t *);
+
+/* startd.c */
+void *safe_realloc(void *, size_t);
+char *safe_strdup(const char *s);
+void *startd_alloc_retry(void *(*)(size_t, int), size_t);
+void startd_free(void *, size_t);
+uu_list_pool_t *startd_list_pool_create(const char *, size_t, size_t,
+ uu_compare_fn_t *, uint32_t);
+uu_list_t *startd_list_create(uu_list_pool_t *, void *, uint32_t);
+pthread_t startd_thread_create(void *(*)(void *), void *);
+
+/* special.c */
+void special_null_transition(void);
+void special_online_hooks_get(const char *, instance_hook_t *,
+ instance_hook_t *, instance_hook_t *);
+
+/* utmpx.c */
+void utmpx_init(void);
+void utmpx_clear_old(void);
+int utmpx_mark_init(pid_t, char *);
+void utmpx_mark_dead(pid_t, int, boolean_t);
+char utmpx_get_runlevel(void);
+void utmpx_set_runlevel(char, char, boolean_t);
+void utmpx_write_boottime(void);
+
+/* wait.c */
+void wait_init(void);
+void wait_prefork(void);
+void wait_postfork(pid_t);
+int wait_register(pid_t, const char *, int, int);
+void *wait_thread(void *);
+
+/* proc.c */
+ctid_t proc_get_ctid();
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _STARTD_H */
diff --git a/usr/src/cmd/svc/startd/utmpx.c b/usr/src/cmd/svc/startd/utmpx.c
new file mode 100644
index 0000000000..83b9380271
--- /dev/null
+++ b/usr/src/cmd/svc/startd/utmpx.c
@@ -0,0 +1,420 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * utmpx.c - utmpx utility routines
+ *
+ * Since svc.startd(1M) places utmpx records for the instances it launches,
+ * it must also mark those records as dead once the instances have exited.
+ */
+
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <pthread.h>
+#include <sac.h>
+#include <string.h>
+#include <strings.h>
+#include <time.h>
+#include <unistd.h>
+#include <utmpx.h>
+#include <fcntl.h>
+
+#include "startd.h"
+
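+/*
+ * rlevels lists the run levels we track. n_prev[i] is a per-level counter
+ * maintained by utmpx_set_runlevel(): the counter for the level being
+ * entered is recorded in the ut_pid field of the RUN_LVL record, and the
+ * counter for the level being left is incremented when do_bump is set.
+ */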
+static const char rlevels[] = { 'S', '0', '1', '2', '3', '4', '5', '6', 0 };
+static int n_prev[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+
+static pthread_mutex_t utmpx_lock;
+static int utmpx_truncated = 0;
+
+#define USEC_PER_MSEC 1000
+
+int
+utmpx_mark_init(pid_t pid, char *prefix)
+{
+ struct utmpx ut, *oldu;
+ int tmplen;
+ int ret;
+
+ while (st->st_initial && !utmpx_truncated)
+ (void) usleep(200 * USEC_PER_MSEC);
+
+ /*
+ * Clean out any preexisting records for this PID, as they must be
+ * inaccurate.
+ */
+ utmpx_mark_dead(pid, 0, B_TRUE);
+
+ /*
+ * Construct a new record with the appropriate prefix.
+ */
+ (void) memset(&ut, 0, sizeof (ut));
+ (void) strncpy(ut.ut_user, ".startd", sizeof (ut.ut_user));
+ ut.ut_pid = pid;
+
+ ut.ut_id[0] = ut.ut_id[1] = ut.ut_id[2] = ut.ut_id[3] = (char)SC_WILDC;
+
+ for (ret = 0; ret < strlen(prefix); ret++)
+ ut.ut_id[ret] = prefix[ret];
+
+ ut.ut_type = INIT_PROCESS;
+ (void) time(&ut.ut_tv.tv_sec);
+
+ for (;;) {
+ MUTEX_LOCK(&utmpx_lock);
+ setutxent();
+
+ if ((oldu = getutxid(&ut)) != NULL) {
+ /*
+ * Copy in the old "line" and "host" fields.
+ */
+ bcopy(oldu->ut_line, ut.ut_line, sizeof (ut.ut_line));
+ bcopy(oldu->ut_host, ut.ut_host, sizeof (ut.ut_host));
+ ut.ut_syslen = (tmplen = strlen(ut.ut_host)) ?
+ min(tmplen + 1, sizeof (ut.ut_host)) : 0;
+ }
+
+ if (makeutx(&ut) != NULL)
+ break;
+
+ if (errno != EROFS)
+ log_framework(LOG_WARNING,
+ "makeutx failed, retrying: %s\n", strerror(errno));
+
+ MUTEX_UNLOCK(&utmpx_lock);
+
+ (void) sleep(1);
+ }
+
+ updwtmpx(WTMPX_FILE, &ut);
+
+ endutxent();
+ MUTEX_UNLOCK(&utmpx_lock);
+
+ return (ret);
+}
+
+void
+utmpx_mark_dead(pid_t pid, int status, boolean_t blocking)
+{
+ struct utmpx *up;
+ int logged = 0;
+
+ for (;;) {
+ int found = 0;
+
+ MUTEX_LOCK(&utmpx_lock);
+ setutxent();
+
+ while (up = getutxent()) {
+ if (up->ut_pid == pid) {
+ found = 1;
+
+ if (up->ut_type == DEAD_PROCESS) {
+ /*
+ * Cleaned up elsewhere.
+ */
+ endutxent();
+ MUTEX_UNLOCK(&utmpx_lock);
+ return;
+ }
+
+ up->ut_type = DEAD_PROCESS;
+ up->ut_exit.e_termination = WTERMSIG(status);
+ up->ut_exit.e_exit = WEXITSTATUS(status);
+ (void) time(&up->ut_tv.tv_sec);
+
+ if (pututxline(up) != NULL) {
+ /*
+ * Now attempt to add to the end of the
+ * wtmp and wtmpx files. Do not create
+ * if they don't already exist.
+ */
+ updwtmpx(WTMPX_FILE, up);
+ endutxent();
+ MUTEX_UNLOCK(&utmpx_lock);
+
+ return;
+ }
+ }
+ }
+
+ endutxent();
+ MUTEX_UNLOCK(&utmpx_lock);
+
+ if (!found || !blocking)
+ return;
+
+ if (!logged) {
+ log_framework(LOG_INFO, "retrying utmpx_dead on PID "
+ "%ld\n", pid);
+ logged++;
+ }
+
+ (void) sleep(1);
+ }
+}
+
+static void
+utmpx_check()
+{
+ struct stat sb;
+
+ if (stat(_UTMPX_FILE, &sb) == 0 &&
+ sb.st_mode != (S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH))
+ (void) chmod(_UTMPX_FILE, S_IRUSR | S_IWUSR | S_IRGRP |
+ S_IROTH);
+
+ if (stat(_WTMPX_FILE, &sb) == 0 &&
+ sb.st_mode != (S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH))
+ (void) chmod(_WTMPX_FILE, S_IRUSR | S_IWUSR | S_IRGRP |
+ S_IROTH);
+}
+
+/*
+ * Retrieve the runlevel utmpx entry if there is one; used to recover
+ * state when svc.startd is restarted.
+ */
+char
+utmpx_get_runlevel(void)
+{
+ struct utmpx *up;
+ char rl = '\0';
+
+ MUTEX_LOCK(&utmpx_lock);
+ setutxent();
+
+ while (up = getutxent()) {
+ if (up->ut_type == RUN_LVL &&
+ sscanf(up->ut_line, RUNLVL_MSG, &rl) == 1)
+ break;
+ }
+ endutxent();
+ MUTEX_UNLOCK(&utmpx_lock);
+
+ return (rl);
+}
+
+void
+utmpx_set_runlevel(char runlevel, char oldrl, boolean_t do_bump)
+{
+ struct utmpx u;
+ struct utmpx *oup;
+ size_t tmplen;
+ int i;
+
+ if (runlevel == 's')
+ runlevel = 'S';
+ if (oldrl == 's')
+ oldrl = 'S';
+
+ bzero(&u, sizeof (struct utmpx));
+
+ u.ut_id[0] = u.ut_id[1] = u.ut_id[2] = u.ut_id[3] = '\0';
+ u.ut_pid = 0;
+ u.ut_type = RUN_LVL;
+
+ (void) time(&u.ut_tv.tv_sec);
+
+ MUTEX_LOCK(&utmpx_lock);
+ setutxent();
+
+ if ((oup = getutxid(&u)) != NULL) {
+ bcopy(oup->ut_host, u.ut_host, sizeof (u.ut_host));
+ bcopy(oup->ut_line, u.ut_line, sizeof (u.ut_line));
+ bcopy(oup->ut_user, u.ut_user, sizeof (u.ut_user));
+
+ tmplen = strlen(u.ut_host);
+ if (tmplen)
+ u.ut_syslen = min(tmplen + 1, sizeof (u.ut_host));
+ else
+ u.ut_syslen = 0;
+ }
+
+ if (oldrl != '\0')
+ u.ut_exit.e_exit = oldrl;
+ else if (oup != NULL)
+ u.ut_exit.e_exit = oup->ut_exit.e_termination;
+ else
+ u.ut_exit.e_exit = '0';
+
+ u.ut_exit.e_termination = runlevel;
+
+ for (i = 0; rlevels[i] != '\0'; ++i) {
+ if (rlevels[i] == runlevel)
+ break;
+ }
+
+ u.ut_pid = n_prev[i];
+
+ if (do_bump) {
+ for (i = 0; rlevels[i] != '\0'; ++i) {
+ if (rlevels[i] == u.ut_exit.e_exit)
+ break;
+ }
+
+ ++n_prev[i];
+ }
+
+ (void) sprintf(u.ut_line, RUNLVL_MSG, runlevel);
+
+ if (pututxline(&u) == NULL) {
+ endutxent();
+ MUTEX_UNLOCK(&utmpx_lock);
+
+ return;
+ }
+
+ updwtmpx(WTMPX_FILE, &u);
+
+ endutxent();
+ MUTEX_UNLOCK(&utmpx_lock);
+
+ utmpx_check();
+}
+
+static void
+utmpx_write_entry(short type, const char *msg, time_t tstamp)
+{
+ struct utmpx u;
+ struct utmpx *oup;
+ size_t tmplen;
+
+ bzero(&u, sizeof (struct utmpx));
+
+ u.ut_id[0] = u.ut_id[1] = u.ut_id[2] = u.ut_id[3] = '\0';
+ u.ut_pid = 0;
+
+ u.ut_exit.e_termination = WTERMSIG(0);
+ u.ut_exit.e_exit = WEXITSTATUS(0);
+ u.ut_type = type;
+ u.ut_tv.tv_sec = tstamp;
+
+ MUTEX_LOCK(&utmpx_lock);
+ setutxent();
+
+ if ((oup = getutxid(&u)) != NULL) {
+ bcopy(oup->ut_user, u.ut_user, sizeof (u.ut_user));
+ bcopy(oup->ut_line, u.ut_line, sizeof (u.ut_line));
+ bcopy(oup->ut_host, u.ut_host, sizeof (u.ut_host));
+
+ tmplen = strlen(u.ut_host);
+ if (tmplen)
+ u.ut_syslen = min(tmplen + 1, sizeof (u.ut_host));
+ else
+ u.ut_syslen = 0;
+ }
+
+ (void) sprintf(u.ut_line, "%.12s", msg);
+
+ if (pututxline(&u) == NULL) {
+ endutxent();
+ MUTEX_UNLOCK(&utmpx_lock);
+
+ return;
+ }
+
+ updwtmpx(WTMPX_FILE, &u);
+
+ endutxent();
+ MUTEX_UNLOCK(&utmpx_lock);
+
+ utmpx_check();
+}
+
+void
+utmpx_write_boottime(void)
+{
+ time_t tstamp;
+ struct stat stbuf;
+
+ /*
+ * The DOWN_TIME record tracks when the OS became unavailable
+ * during the previous boot. We stat(2) WTMPX and check its
+ * attributes to determine when (and how) the OS became
+ * unavailable. If the file is empty, skip writing a DOWN_TIME
+ * record. Otherwise, check the access and modify times and
+ * use whichever is latest as the time that the OS became
+ * unavailable. If st_atime is latest, the instance crashed or
+ * the machine lost power. If st_mtime is latest, the shutdown
+ * was controlled.
+ */
+ if (stat(WTMPX_FILE, &stbuf) == 0 && stbuf.st_size != 0) {
+ tstamp = (stbuf.st_atime >= stbuf.st_mtime) ?
+ stbuf.st_atime : stbuf.st_mtime;
+ utmpx_write_entry(DOWN_TIME, DOWN_MSG, tstamp);
+ }
+
+ /*
+ * The boot time (or start time, for a non-global zone) is retrieved in
+ * log_init().
+ */
+ tstamp = st->st_start_time.tv_sec;
+
+ utmpx_write_entry(BOOT_TIME, BOOT_MSG, tstamp);
+}
+
+/*
+ * void utmpx_clear_old(void)
+ * At boot and only at boot, truncate the utmpx file.
+ */
+void
+utmpx_clear_old(void)
+{
+ int fd;
+ mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
+
+ if (!st->st_initial || utmpx_truncated)
+ return;
+
+ MUTEX_LOCK(&utmpx_lock);
+
+ if ((fd = open(_UTMPX_FILE,
+ O_WRONLY | O_CREAT | O_TRUNC, mode)) != -1) {
+ (void) fchmod(fd, mode); /* force mode regardless of umask() */
+ (void) fchown(fd, 0, 2); /* force owner to root/bin */
+ (void) close(fd);
+ } else {
+ log_framework(LOG_NOTICE, "Unable to create %s: %s\n",
+ _UTMPX_FILE, strerror(errno));
+ }
+
+ utmpx_truncated = 1;
+
+ MUTEX_UNLOCK(&utmpx_lock);
+}
+
+void
+utmpx_init()
+{
+ (void) pthread_mutex_init(&utmpx_lock, &mutex_attrs);
+}
diff --git a/usr/src/cmd/svc/startd/wait.c b/usr/src/cmd/svc/startd/wait.c
new file mode 100644
index 0000000000..09419a8959
--- /dev/null
+++ b/usr/src/cmd/svc/startd/wait.c
@@ -0,0 +1,343 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * wait.c - asynchronous monitoring of "wait registered" start methods
+ *
+ * Use event ports to poll on the set of fds representing the /proc/[pid]/psinfo
+ * files. If one of these fds returns an event, then we inform the restarter
+ * that the corresponding instance has stopped.
+ *
+ * The wait_info_list holds the series of processes currently being monitored
+ * for exit. The wi_fd member, which contains the file descriptor of the psinfo
+ * file being polled upon ("event ported upon"), will be set to -1 if the file
+ * descriptor is inactive (already closed or not yet opened).
+ */
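+
+/*
+ * In outline (a sketch, not a verbatim excerpt), the per-process lifecycle
+ * is:
+ *
+ *	fd = open("/proc/<pid>/psinfo", O_RDONLY);		(wait_register())
+ *	(void) port_associate(port_fd, PORT_SOURCE_FD, fd, 0, wi);
+ *	...
+ *	(void) port_get(port_fd, &pe, &ts);			(wait_thread())
+ *
+ * A POLLHUP event prompts a re-read of the psinfo data to decide whether the
+ * process has really exited or should simply be re-associated.
+ */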
+
+#ifdef _FILE_OFFSET_BITS
+#undef _FILE_OFFSET_BITS
+#endif /* _FILE_OFFSET_BITS */
+
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/uio.h>
+#include <sys/wait.h>
+
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <libuutil.h>
+#include <poll.h>
+#include <port.h>
+#include <pthread.h>
+#include <procfs.h>
+#include <string.h>
+#include <stropts.h>
+#include <unistd.h>
+
+#include "startd.h"
+
+#define WAIT_FILES 262144 /* reasonably high maximum */
+
+static int port_fd;
+static scf_handle_t *wait_hndl;
+static struct rlimit init_fd_rlimit;
+
+static uu_list_pool_t *wait_info_pool;
+static uu_list_t *wait_info_list;
+
+static pthread_mutex_t wait_info_lock;
+
+/*
+ * void wait_remove(wait_info_t *, int)
+ * Remove the given wait_info structure from our list, performing various
+ * cleanup operations along the way. If the direct flag is false (meaning
+ * that we are being called from restarter instance list context), then
+ * notify the restarter that the associated instance has exited.
+ *
+ * Since we may no longer be the startd that started this process, we are only
+ * concerned with a waitpid(3C) failure if the wi_parent field is non-zero.
+ */
+static void
+wait_remove(wait_info_t *wi, int direct)
+{
+ int status;
+
+ if (waitpid(wi->wi_pid, &status, 0) == -1) {
+ if (wi->wi_parent)
+ log_framework(LOG_INFO,
+ "instance %s waitpid failure: %s\n", wi->wi_fmri,
+ strerror(errno));
+ } else {
+ if (WEXITSTATUS(status) != 0) {
+ log_framework(LOG_NOTICE,
+ "instance %s exited with status %d\n", wi->wi_fmri,
+ WEXITSTATUS(status));
+ }
+ }
+
+ MUTEX_LOCK(&wait_info_lock);
+ uu_list_remove(wait_info_list, wi);
+ MUTEX_UNLOCK(&wait_info_lock);
+
+ /*
+ * Make an attempt to clear out any utmpx record associated with this
+ * PID.
+ */
+ utmpx_mark_dead(wi->wi_pid, status, B_FALSE);
+
+ if (!direct) {
+ /*
+ * Bind wait_hndl lazily.
+ */
+ if (wait_hndl == NULL) {
+ for (wait_hndl =
+ libscf_handle_create_bound(SCF_VERSION);
+ wait_hndl == NULL;
+ wait_hndl =
+ libscf_handle_create_bound(SCF_VERSION)) {
+ log_error(LOG_INFO, "[wait_remove] Unable to "
+ "bind a new repository handle: %s\n",
+ scf_strerror(scf_error()));
+ (void) sleep(2);
+ }
+ }
+
+ log_framework(LOG_DEBUG,
+ "wait_remove requesting stop of %s\n", wi->wi_fmri);
+ (void) stop_instance_fmri(wait_hndl, wi->wi_fmri, RSTOP_EXIT);
+ }
+
+ uu_list_node_fini(wi, &wi->wi_link, wait_info_pool);
+ startd_free(wi, sizeof (wait_info_t));
+}
+
+/*
+ * int wait_register(pid_t, char *, int, int)
+ * wait_register is called after we have called fork(2), and know which pid we
+ * wish to monitor. However, since the child may have already exited by the
+ * time we are called, we must handle the error cases from open(2)
+ * appropriately. The am_parent flag is recorded to handle waitpid(2)
+ * behaviour on removal; similarly, the direct flag is passed through to a
+ * potential call to wait_remove() to govern its behaviour in different
+ * contexts.
+ *
+ * Returns 0 if registration successful, 1 if child pid did not exist, and -1
+ * if a different error occurred.
+ */
+int
+wait_register(pid_t pid, const char *inst_fmri, int am_parent, int direct)
+{
+ char *fname = uu_msprintf("/proc/%ld/psinfo", pid);
+ int fd;
+ wait_info_t *wi;
+
+ assert(pid != 0);
+
+ if (fname == NULL)
+ return (-1);
+
+ wi = startd_alloc(sizeof (wait_info_t));
+
+ uu_list_node_init(wi, &wi->wi_link, wait_info_pool);
+
+ wi->wi_fd = -1;
+ wi->wi_pid = pid;
+ wi->wi_fmri = inst_fmri;
+ wi->wi_parent = am_parent;
+
+ MUTEX_LOCK(&wait_info_lock);
+ (void) uu_list_insert_before(wait_info_list, NULL, wi);
+ MUTEX_UNLOCK(&wait_info_lock);
+
+ if ((fd = open(fname, O_RDONLY)) == -1) {
+ if (errno == ENOENT) {
+ /*
+ * Child has already exited.
+ */
+ wait_remove(wi, direct);
+ uu_free(fname);
+ return (1);
+ } else {
+ log_error(LOG_WARNING,
+ "open %s failed; not monitoring %s: %s\n", fname,
+ inst_fmri, strerror(errno));
+ uu_free(fname);
+ return (-1);
+ }
+ }
+
+ uu_free(fname);
+
+ wi->wi_fd = fd;
+
+ if (port_associate(port_fd, PORT_SOURCE_FD, fd, 0, wi)) {
+ log_error(LOG_WARNING,
+ "initial port_association of %d / %s failed: %s\n", fd,
+ inst_fmri, strerror(errno));
+ return (-1);
+ }
+
+ log_framework(LOG_DEBUG, "monitoring PID %ld on fd %d (%s)\n", pid, fd,
+ inst_fmri);
+
+ return (0);
+}
+
+/*ARGSUSED*/
+void *
+wait_thread(void *args)
+{
+ for (;;) {
+ port_event_t pe;
+ int fd;
+ wait_info_t *wi;
+ struct timespec ts;
+
+ ts.tv_sec = 1;
+ ts.tv_nsec = 0;
+
+ if (port_get(port_fd, &pe, &ts) == -1) {
+ if (errno == EINTR || errno == ETIME)
+ continue;
+
+ log_error(LOG_WARNING,
+ "port_get returned %s\n", strerror(errno));
+ continue;
+ }
+
+ fd = pe.portev_object;
+ wi = pe.portev_user;
+
+ if ((pe.portev_events & POLLHUP) == POLLHUP) {
+ psinfo_t psi;
+
+ if (lseek(fd, 0, SEEK_SET) != 0 ||
+ read(fd, &psi, sizeof (psinfo_t)) !=
+ sizeof (psinfo_t)) {
+ log_framework(LOG_WARNING,
+ "couldn't get psinfo data for %s (%s); "
+ "assuming failed\n", wi->wi_fmri,
+ strerror(errno));
+ goto err_remove;
+ }
+
+ if (psi.pr_nlwp != 0 ||
+ psi.pr_nzomb != 0 ||
+ psi.pr_lwp.pr_lwpid != 0) {
+ /*
+ * We have determined, in accordance with the
+ * definition in proc(4), that this process is not
+ * a zombie. Reassociate.
+ */
+ if (port_associate(port_fd, PORT_SOURCE_FD, fd,
+ 0, wi))
+ log_error(LOG_WARNING,
+ "port_association of %d / %s "
+ "failed\n", fd, wi->wi_fmri);
+ continue;
+ }
+ } else if ((pe.portev_events & POLLERR) == 0) {
+ if (port_associate(port_fd, PORT_SOURCE_FD, fd, 0, wi))
+ log_error(LOG_WARNING,
+ "port_association of %d / %s "
+ "failed\n", fd, wi->wi_fmri);
+ continue;
+ }
+
+err_remove:
+ startd_close(fd);
+ wi->wi_fd = -1;
+
+ wait_remove(wi, 0);
+ }
+
+ /*LINTED E_FUNC_HAS_NO_RETURN_STMT*/
+}
+
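+/*
+ * A condensed, illustrative sketch of the monitoring technique used by
+ * wait_thread() above: associate a /proc psinfo file descriptor with an
+ * event port, wait for POLLHUP, then confirm via psinfo_t that the process
+ * is really a zombie.  The pid below is a placeholder and error handling is
+ * omitted.
+ *
+ *	char path[64];
+ *	psinfo_t psi;
+ *	port_event_t pe;
+ *	int port = port_create();
+ *	int fd;
+ *
+ *	(void) snprintf(path, sizeof (path), "/proc/%ld/psinfo", (long)pid);
+ *	fd = open(path, O_RDONLY);
+ *	(void) port_associate(port, PORT_SOURCE_FD, fd, 0, NULL);
+ *	(void) port_get(port, &pe, NULL);	// blocks until POLLHUP/POLLERR
+ *	if (pread(fd, &psi, sizeof (psi), 0) == sizeof (psi) &&
+ *	    psi.pr_nlwp == 0 && psi.pr_nzomb == 0)
+ *		;	// no lwps remain: reap the zombie with waitpid()
+ */
+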
+void
+wait_prefork()
+{
+ MUTEX_LOCK(&wait_info_lock);
+}
+
+void
+wait_postfork(pid_t pid)
+{
+ wait_info_t *wi;
+
+ MUTEX_UNLOCK(&wait_info_lock);
+
+ if (pid != 0)
+ return;
+
+ /*
+ * Close all of the child's wait-related fds. The wait_thread() is
+ * gone, so no need to worry about returning events. We always exec(2)
+ * after a fork request, so we needn't free the list elements
+ * themselves.
+ */
+
+ for (wi = uu_list_first(wait_info_list);
+ wi != NULL;
+ wi = uu_list_next(wait_info_list, wi)) {
+ if (wi->wi_fd != -1)
+ startd_close(wi->wi_fd);
+ }
+
+ startd_close(port_fd);
+
+ (void) setrlimit(RLIMIT_NOFILE, &init_fd_rlimit);
+}
+
+void
+wait_init()
+{
+ struct rlimit fd_new;
+
+ (void) getrlimit(RLIMIT_NOFILE, &init_fd_rlimit);
+ (void) getrlimit(RLIMIT_NOFILE, &fd_new);
+
+ fd_new.rlim_max = fd_new.rlim_cur = WAIT_FILES;
+
+ (void) setrlimit(RLIMIT_NOFILE, &fd_new);
+
+ if ((port_fd = port_create()) == -1)
+ uu_die("wait_init couldn't port_create");
+
+ wait_info_pool = uu_list_pool_create("wait_info", sizeof (wait_info_t),
+ offsetof(wait_info_t, wi_link), NULL, UU_LIST_POOL_DEBUG);
+ if (wait_info_pool == NULL)
+ uu_die("wait_init couldn't create wait_info_pool");
+
+ wait_info_list = uu_list_create(wait_info_pool, wait_info_list, 0);
+ if (wait_info_list == NULL)
+ uu_die("wait_init couldn't create wait_info_list");
+
+ (void) pthread_mutex_init(&wait_info_lock, &mutex_attrs);
+}
diff --git a/usr/src/cmd/svc/svcadm/Makefile b/usr/src/cmd/svc/svcadm/Makefile
new file mode 100644
index 0000000000..a1a1299dda
--- /dev/null
+++ b/usr/src/cmd/svc/svcadm/Makefile
@@ -0,0 +1,58 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+
+PROG = svcadm
+OBJS = svcadm.o synch.o
+SRCS = $(OBJS:%.o=%.c)
+POFILES = $(OBJS:.o=.po)
+
+include ../../Makefile.cmd
+
+POFILE = $(PROG)_all.po
+LDLIBS += -lscf -luutil
+
+lint := LINTFLAGS = -ux
+
+.KEEP_STATE:
+
+all: $(PROG)
+
+$(PROG): $(OBJS)
+ $(LINK.c) -o $@ $(OBJS) $(LDLIBS)
+ $(POST_PROCESS)
+
+$(POFILE): $(POFILES)
+ cat $(POFILES) > $(POFILE)
+
+install: all $(ROOTUSRSBINPROG)
+
+clean:
+ $(RM) $(OBJS)
+
+lint: lint_SRCS
+
+include ../../Makefile.targ
diff --git a/usr/src/cmd/svc/svcadm/svcadm.c b/usr/src/cmd/svc/svcadm/svcadm.c
new file mode 100644
index 0000000000..796f43965c
--- /dev/null
+++ b/usr/src/cmd/svc/svcadm/svcadm.c
@@ -0,0 +1,2370 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * svcadm - request administrative actions for service instances
+ */
+
+#include <locale.h>
+#include <libintl.h>
+#include <libscf.h>
+#include <libscf_priv.h>
+#include <libuutil.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+
+#ifndef TEXT_DOMAIN
+#define TEXT_DOMAIN "SUNW_OST_OSCMD"
+#endif /* TEXT_DOMAIN */
+
+/* Must be a power of two */
+#define HT_BUCKETS 64
+
+/*
+ * Exit codes for enable and disable -s.
+ */
+#define EXIT_SVC_FAILURE 3
+#define EXIT_DEP_FAILURE 4
+
+/*
+ * How long we will wait (in seconds) for a service to change state
+ * before re-checking its dependencies.
+ */
+#define WAIT_INTERVAL 3
+
+#ifndef NDEBUG
+#define bad_error(func, err) { \
+ uu_warn("%s:%d: %s() failed with unexpected error %d.\n", \
+ __FILE__, __LINE__, (func), (err)); \
+ abort(); \
+}
+#else
+#define bad_error(func, err) abort()
+#endif
+
+
+struct ht_elt {
+ struct ht_elt *next;
+ boolean_t active;
+ char str[1];
+};
+
+
+scf_handle_t *h;
+ssize_t max_scf_fmri_sz;
+static const char *emsg_permission_denied;
+static const char *emsg_nomem;
+static const char *emsg_create_pg_perm_denied;
+static const char *emsg_pg_perm_denied;
+static const char *emsg_prop_perm_denied;
+static const char *emsg_no_service;
+
+static int exit_status = 0;
+static int verbose = 0;
+static char *scratch_fmri;
+
+static struct ht_elt **visited;
+
+/*
+ * Visitors from synch.c, needed for enable -s and disable -s.
+ */
+extern int is_enabled(scf_instance_t *);
+extern int has_potential(scf_instance_t *, int);
+
+void
+do_scfdie(int lineno)
+{
+ scf_error_t err;
+
+ switch (err = scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ uu_die(gettext("Connection to repository server broken. "
+ "Exiting.\n"));
+ /* NOTREACHED */
+
+ case SCF_ERROR_BACKEND_READONLY:
+ uu_die(gettext("Repository is read-only. Exiting.\n"));
+ /* NOTREACHED */
+
+ default:
+#ifdef NDEBUG
+ uu_die(gettext("Unexpected libscf error: %s. Exiting.\n"),
+ scf_strerror(err));
+#else
+ uu_die("Unexpected libscf error on line %d: %s.\n", lineno,
+ scf_strerror(err));
+#endif
+ }
+}
+
+#define scfdie() do_scfdie(__LINE__)
+
+static void
+usage()
+{
+ (void) fprintf(stderr, gettext(
+ "Usage: %1$s [-v] [cmd [args ... ]]\n\n"
+ "\t%1$s enable [-rst] <service> ...\t- enable and online service(s)\n"
+ "\t%1$s disable [-st] <service> ...\t- disable and offline service(s)\n"
+ "\t%1$s restart <service> ...\t\t- restart specified service(s)\n"
+ "\t%1$s refresh <service> ...\t\t- re-read service configuration\n"
+ "\t%1$s mark [-It] <state> <service> ...\t- set maintenance state\n"
+ "\t%1$s clear <service> ...\t\t- clear maintenance state\n"
+ "\t%1$s milestone [-d] <milestone>\t- advance to a service milestone\n"
+ "\n\t"
+ "Services can be specified using an FMRI, abbreviation, or fnmatch(5)\n"
+ "\tpattern, as shown in these examples for svc:/network/smtp:sendmail\n"
+ "\n"
+ "\t%1$s <cmd> svc:/network/smtp:sendmail\n"
+ "\t%1$s <cmd> network/smtp:sendmail\n"
+ "\t%1$s <cmd> network/*mail\n"
+ "\t%1$s <cmd> network/smtp\n"
+ "\t%1$s <cmd> smtp:sendmail\n"
+ "\t%1$s <cmd> smtp\n"
+ "\t%1$s <cmd> sendmail\n"), uu_getpname());
+
+ exit(UU_EXIT_USAGE);
+}
+
+
+/*
+ * FMRI hash table for recursive enable.
+ */
+
+static uint32_t
+hash_fmri(const char *str)
+{
+ uint32_t h = 0, g;
+ const char *p;
+
+ /* Generic hash function from uts/common/os/modhash.c . */
+ for (p = str; *p != '\0'; ++p) {
+ h = (h << 4) + *p;
+ if ((g = (h & 0xf0000000)) != 0) {
+ h ^= (g >> 24);
+ h ^= g;
+ }
+ }
+
+ return (h);
+}
+
+/*
+ * Return 1 if str has been visited, 0 if it has not, and -1 if memory could not
+ * be allocated.
+ */
+static int
+visited_find_or_add(const char *str, struct ht_elt **hep)
+{
+ uint32_t h;
+ uint_t i;
+ struct ht_elt *he;
+
+ h = hash_fmri(str);
+ i = h & (HT_BUCKETS - 1);
+
+ for (he = visited[i]; he != NULL; he = he->next) {
+ if (strcmp(he->str, str) == 0) {
+ if (hep)
+ *hep = he;
+ return (1);
+ }
+ }
+
+ he = malloc(offsetof(struct ht_elt, str) + strlen(str) + 1);
+ if (he == NULL)
+ return (-1);
+
+ (void) strcpy(he->str, str);
+
+ he->next = visited[i];
+ visited[i] = he;
+
+ if (hep)
+ *hep = he;
+ return (0);
+}
+
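+/*
+ * Sketch of how the visited table is used for cycle detection during
+ * recursive enable (see enable_fmri_rec() below); condensed from that
+ * function, for illustration only.
+ *
+ *	struct ht_elt *he;
+ *
+ *	switch (visited_find_or_add(fmri, &he)) {
+ *	case 0:			// first visit: mark active and recurse
+ *		he->active = B_TRUE;
+ *		break;
+ *	case 1:			// already seen
+ *		return (he->active ? ELOOP : 0);
+ *	case -1:
+ *		uu_die(emsg_nomem);
+ *	}
+ *	... enable the instance, recurse on its dependencies ...
+ *	he->active = B_FALSE;	// finished with this FMRI's subtree
+ */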
+
+/*
+ * Returns 0, ECANCELED if pg is deleted, ENOENT if propname doesn't exist,
+ * EINVAL if the property is not of boolean type or has no values, and E2BIG
+ * if it has more than one value. *bp is set if 0 or E2BIG is returned.
+ */
+int
+get_bool_prop(scf_propertygroup_t *pg, const char *propname, uint8_t *bp)
+{
+ scf_property_t *prop;
+ scf_value_t *val;
+ int ret;
+
+ if ((prop = scf_property_create(h)) == NULL ||
+ (val = scf_value_create(h)) == NULL)
+ scfdie();
+
+ if (scf_pg_get_property(pg, propname, prop) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ ret = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_NOT_FOUND:
+ ret = ENOENT;
+ goto out;
+
+ case SCF_ERROR_NOT_SET:
+ assert(0);
+ abort();
+ /* NOTREACHED */
+
+ default:
+ scfdie();
+ }
+ }
+
+ if (scf_property_get_value(prop, val) == 0) {
+ ret = 0;
+ } else {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ ret = ENOENT;
+ goto out;
+
+ case SCF_ERROR_NOT_FOUND:
+ ret = EINVAL;
+ goto out;
+
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ ret = E2BIG;
+ break;
+
+ case SCF_ERROR_NOT_SET:
+ assert(0);
+ abort();
+ /* NOTREACHED */
+
+ default:
+ scfdie();
+ }
+ }
+
+ if (scf_value_get_boolean(val, bp) != 0) {
+ if (scf_error() != SCF_ERROR_TYPE_MISMATCH)
+ scfdie();
+
+ ret = EINVAL;
+ goto out;
+ }
+
+out:
+ scf_value_destroy(val);
+ scf_property_destroy(prop);
+ return (ret);
+}
+
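+/*
+ * Illustrative example of consuming the errno-style return values above:
+ *
+ *	uint8_t enabled;
+ *
+ *	switch (get_bool_prop(pg, SCF_PROPERTY_ENABLED, &enabled)) {
+ *	case 0:
+ *	case E2BIG:	// enabled was still set from the first value
+ *		break;
+ *	default:	// ENOENT, ECANCELED, EINVAL: pick a safe default
+ *		enabled = 0;
+ *		break;
+ *	}
+ */
+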
+/*
+ * Returns 0, EPERM, or EROFS.
+ */
+static int
+set_bool_prop(scf_propertygroup_t *pg, const char *propname, boolean_t b)
+{
+ scf_value_t *v;
+ scf_transaction_t *tx;
+ scf_transaction_entry_t *ent;
+ int ret = 0, r;
+
+ if ((tx = scf_transaction_create(h)) == NULL ||
+ (ent = scf_entry_create(h)) == NULL ||
+ (v = scf_value_create(h)) == NULL)
+ scfdie();
+
+ scf_value_set_boolean(v, b);
+
+ for (;;) {
+ if (scf_transaction_start(tx, pg) == -1) {
+ switch (scf_error()) {
+ case SCF_ERROR_PERMISSION_DENIED:
+ ret = EPERM;
+ goto out;
+
+ case SCF_ERROR_BACKEND_READONLY:
+ ret = EROFS;
+ goto out;
+
+ default:
+ scfdie();
+ }
+ }
+
+ if (scf_transaction_property_change_type(tx, ent, propname,
+ SCF_TYPE_BOOLEAN) != 0) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ if (scf_transaction_property_new(tx, ent, propname,
+ SCF_TYPE_BOOLEAN) != 0)
+ scfdie();
+ }
+
+ r = scf_entry_add_value(ent, v);
+ assert(r == 0);
+
+ r = scf_transaction_commit(tx);
+ if (r == 1)
+ break;
+
+ scf_transaction_reset(tx);
+
+ if (r != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_PERMISSION_DENIED:
+ ret = EPERM;
+ goto out;
+
+ case SCF_ERROR_BACKEND_READONLY:
+ ret = EROFS;
+ goto out;
+
+ default:
+ scfdie();
+ }
+ }
+
+ if (scf_pg_update(pg) == -1)
+ scfdie();
+ }
+
+out:
+ scf_transaction_destroy(tx);
+ scf_entry_destroy(ent);
+ scf_value_destroy(v);
+ return (ret);
+}
+
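+/*
+ * The loop above is the standard libscf transaction retry idiom, shown here
+ * in condensed form (error handling elided) for reference:
+ *
+ *	for (;;) {
+ *		scf_transaction_start(tx, pg);
+ *		... stage property changes on tx ...
+ *		r = scf_transaction_commit(tx);
+ *		if (r == 1)
+ *			break;			// committed
+ *		scf_transaction_reset(tx);
+ *		if (r == 0)
+ *			scf_pg_update(pg);	// lost the race; refetch, retry
+ *		else
+ *			... inspect scf_error() and bail ...
+ *	}
+ */
+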
+/*
+ * Gets the single astring value of the propname property of pg. prop & v are
+ * scratch space. Returns the length of the string on success or
+ * -ENOENT - pg has no property named propname
+ * -E2BIG - property has no values or multiple values
+ * -EINVAL - property type is not compatible with astring
+ */
+ssize_t
+get_astring_prop(const scf_propertygroup_t *pg, const char *propname,
+ scf_property_t *prop, scf_value_t *v, char *buf, size_t bufsz)
+{
+ ssize_t sz;
+
+ if (scf_pg_get_property(pg, propname, prop) != 0) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ return (-ENOENT);
+ }
+
+ if (scf_property_get_value(prop, v) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ return (-E2BIG);
+
+ default:
+ scfdie();
+ }
+ }
+
+ sz = scf_value_get_astring(v, buf, bufsz);
+ if (sz < 0) {
+ if (scf_error() != SCF_ERROR_TYPE_MISMATCH)
+ scfdie();
+
+ return (-EINVAL);
+ }
+
+ return (sz);
+}
+
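+/*
+ * Illustrative use of the negative-errno convention above (restarter is a
+ * hypothetical buffer of max_scf_fmri_sz bytes; prop and v are scratch
+ * objects):
+ *
+ *	ssize_t sz;
+ *
+ *	sz = get_astring_prop(pg, SCF_PROPERTY_RESTARTER, prop, v,
+ *	    restarter, max_scf_fmri_sz);
+ *	if (sz == -ENOENT)
+ *		;	// no restarter property; the default restarter applies
+ *	else if (sz < 0)
+ *		;	// -E2BIG or -EINVAL: misconfigured property
+ *	else
+ *		;	// restarter holds the restarter FMRI, length sz
+ */
+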
+/*
+ * Returns
+ * 0 - success
+ * ECANCELED - pg was deleted
+ * EPERM - permission denied
+ * EACCES - access denied
+ * EROFS - readonly
+ */
+static int
+delete_prop(scf_propertygroup_t *pg, const char *propname)
+{
+ scf_transaction_t *tx;
+ scf_transaction_entry_t *ent;
+ int ret = 0, r;
+
+ if ((tx = scf_transaction_create(h)) == NULL ||
+ (ent = scf_entry_create(h)) == NULL)
+ scfdie();
+
+ for (;;) {
+ if (scf_transaction_start(tx, pg) == -1) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ ret = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ ret = EPERM;
+ goto out;
+
+ case SCF_ERROR_BACKEND_ACCESS:
+ ret = EACCES;
+ goto out;
+
+ case SCF_ERROR_BACKEND_READONLY:
+ ret = EROFS;
+ goto out;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_IN_USE:
+ default:
+ scfdie();
+ }
+ }
+
+ if (scf_transaction_property_delete(tx, ent, propname) == -1)
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ ret = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_NOT_FOUND:
+ ret = 0;
+ goto out;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ default:
+ scfdie();
+ }
+
+ r = scf_transaction_commit(tx);
+ if (r == 1)
+ break;
+
+ scf_transaction_reset(tx);
+
+ if (r != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ ret = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ ret = EPERM;
+ goto out;
+
+ case SCF_ERROR_BACKEND_ACCESS:
+ ret = EACCES;
+ goto out;
+
+ case SCF_ERROR_BACKEND_READONLY:
+ ret = EROFS;
+ goto out;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ scfdie();
+ }
+ }
+
+ if (scf_pg_update(pg) == -1) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+
+ ret = ECANCELED;
+ goto out;
+ }
+ }
+
+out:
+ scf_transaction_destroy(tx);
+ scf_entry_destroy(ent);
+ return (ret);
+}
+
+/*
+ * Returns 0 or EPERM.
+ */
+static int
+pg_get_or_add(scf_instance_t *inst, const char *pgname, const char *pgtype,
+ uint32_t pgflags, scf_propertygroup_t *pg)
+{
+again:
+ if (scf_instance_get_pg(inst, pgname, pg) == 0)
+ return (0);
+
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ if (scf_instance_add_pg(inst, pgname, pgtype, pgflags, pg) == 0)
+ return (0);
+
+ switch (scf_error()) {
+ case SCF_ERROR_EXISTS:
+ goto again;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ return (EPERM);
+
+ default:
+ scfdie();
+ /* NOTREACHED */
+ }
+}
+
+/*
+ * Enable or disable inst, per enable. If temp is true, set
+ * general_ovr/enabled. Otherwise set general/enabled and delete
+ * general_ovr/enabled if it exists (order is important here: we don't want the
+ * enabled status to glitch).
+ */
+static void
+set_inst_enabled(const char *fmri, scf_instance_t *inst, boolean_t temp,
+ boolean_t enable)
+{
+ scf_propertygroup_t *pg;
+ uint8_t b;
+ const char *pgname = NULL; /* For emsg_pg_perm_denied */
+ int r;
+
+ pg = scf_pg_create(h);
+ if (pg == NULL)
+ scfdie();
+
+ if (temp) {
+ /* Set general_ovr/enabled */
+ pgname = SCF_PG_GENERAL_OVR;
+ if (pg_get_or_add(inst, pgname, SCF_PG_GENERAL_OVR_TYPE,
+ SCF_PG_GENERAL_OVR_FLAGS, pg) != 0)
+ goto eperm;
+
+ switch (set_bool_prop(pg, SCF_PROPERTY_ENABLED, enable)) {
+ case 0:
+ break;
+
+ case EPERM:
+ goto eperm;
+
+ case EROFS:
+ /* Shouldn't happen, but it can. */
+ if (!verbose)
+ uu_warn(gettext("%s: Repository read-only.\n"),
+ fmri);
+ else
+ uu_warn(gettext("%s: Could not set %s/%s "
+ "(repository read-only).\n"), fmri,
+ SCF_PG_GENERAL_OVR, SCF_PROPERTY_ENABLED);
+ goto out;
+
+ default:
+ assert(0);
+ abort();
+ }
+
+ if (verbose)
+ (void) printf(enable ?
+ gettext("%s temporarily enabled.\n") :
+ gettext("%s temporarily disabled.\n"), fmri);
+ } else {
+again:
+ pgname = SCF_PG_GENERAL;
+ if (pg_get_or_add(inst, pgname, SCF_PG_GENERAL_TYPE,
+ SCF_PG_GENERAL_FLAGS, pg) != 0)
+ goto eperm;
+
+ switch (set_bool_prop(pg, SCF_PROPERTY_ENABLED, enable)) {
+ case 0:
+ break;
+
+ case EPERM:
+ goto eperm;
+
+ case EROFS:
+ /*
+ * If general/enabled is already set the way we want,
+ * proceed.
+ */
+ switch (get_bool_prop(pg, SCF_PROPERTY_ENABLED, &b)) {
+ case 0:
+ if ((b != 0) == (enable != B_FALSE))
+ break;
+ /* FALLTHROUGH */
+
+ case ENOENT:
+ case EINVAL:
+ case E2BIG:
+ if (!verbose)
+ uu_warn(gettext("%s: Repository "
+ "read-only.\n"), fmri);
+ else
+ uu_warn(gettext("%s: Could not set "
+ "%s/%s (repository read-only).\n"),
+ fmri, SCF_PG_GENERAL,
+ SCF_PROPERTY_ENABLED);
+ goto out;
+
+ case ECANCELED:
+ goto again;
+
+ default:
+ assert(0);
+ abort();
+ }
+ break;
+
+ default:
+ assert(0);
+ abort();
+ }
+
+ pgname = SCF_PG_GENERAL_OVR;
+ if (scf_instance_get_pg(inst, pgname, pg) == 0) {
+ r = delete_prop(pg, SCF_PROPERTY_ENABLED);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ uu_warn(emsg_no_service, fmri);
+ goto out;
+
+ case EPERM:
+ goto eperm;
+
+ case EACCES:
+ uu_warn(gettext("Could not delete %s/%s "
+ "property of %s: backend access denied.\n"),
+ pgname, SCF_PROPERTY_ENABLED, fmri);
+ goto out;
+
+ case EROFS:
+ uu_warn(gettext("Could not delete %s/%s "
+ "property of %s: backend is read-only.\n"),
+ pgname, SCF_PROPERTY_ENABLED, fmri);
+ goto out;
+
+ default:
+ bad_error("delete_prop", r);
+ }
+ } else {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ /* Print something? */
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ assert(0);
+ abort();
+ /* NOTREACHED */
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ default:
+ scfdie();
+ }
+ }
+
+ if (verbose)
+ (void) printf(enable ? gettext("%s enabled.\n") :
+ gettext("%s disabled.\n"), fmri);
+ }
+
+ scf_pg_destroy(pg);
+ return;
+
+eperm:
+ assert(pgname != NULL);
+ if (!verbose)
+ uu_warn(emsg_permission_denied, fmri);
+ else
+ uu_warn(emsg_pg_perm_denied, fmri, pgname);
+
+out:
+ scf_pg_destroy(pg);
+ exit_status = 1;
+}
+
+/*
+ * Set inst to the instance which corresponds to fmri. If fmri identifies
+ * a service with a single instance, get that instance.
+ *
+ * Fails with
+ * ENOTSUP - fmri has an unsupported scheme
+ * EINVAL - fmri is invalid
+ * ENOTDIR - fmri does not identify a service or instance
+ * ENOENT - could not locate instance
+ * E2BIG - fmri is a service with multiple instances (warning not printed)
+ */
+static int
+get_inst_mult(const char *fmri, scf_instance_t *inst)
+{
+ char *cfmri;
+ const char *svc_name, *inst_name, *pg_name;
+ scf_service_t *svc;
+ scf_instance_t *inst2;
+ scf_iter_t *iter;
+ int ret;
+
+ if (strncmp(fmri, "lrc:", sizeof ("lrc:") - 1) == 0) {
+ uu_warn(gettext("FMRI \"%s\" is a legacy service.\n"), fmri);
+ exit_status = 1;
+ return (ENOTSUP);
+ }
+
+ cfmri = strdup(fmri);
+ if (cfmri == NULL)
+ uu_die(emsg_nomem);
+
+ if (scf_parse_svc_fmri(cfmri, NULL, &svc_name, &inst_name, &pg_name,
+ NULL) != SCF_SUCCESS) {
+ free(cfmri);
+ uu_warn(gettext("FMRI \"%s\" is invalid.\n"), fmri);
+ exit_status = 1;
+ return (EINVAL);
+ }
+
+ free(cfmri);
+
+ if (svc_name == NULL || pg_name != NULL) {
+ uu_warn(gettext(
+ "FMRI \"%s\" does not designate a service or instance.\n"),
+ fmri);
+ exit_status = 1;
+ return (ENOTDIR);
+ }
+
+ if (inst_name != NULL) {
+ if (scf_handle_decode_fmri(h, fmri, NULL, NULL, inst, NULL,
+ NULL, SCF_DECODE_FMRI_EXACT) == 0)
+ return (0);
+
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ uu_warn(gettext("No such instance \"%s\".\n"), fmri);
+ exit_status = 1;
+
+ return (ENOENT);
+ }
+
+ if ((svc = scf_service_create(h)) == NULL ||
+ (inst2 = scf_instance_create(h)) == NULL ||
+ (iter = scf_iter_create(h)) == NULL)
+ scfdie();
+
+ if (scf_handle_decode_fmri(h, fmri, NULL, svc, NULL, NULL, NULL,
+ SCF_DECODE_FMRI_EXACT) != SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ uu_warn(emsg_no_service, fmri);
+ exit_status = 1;
+
+ ret = ENOENT;
+ goto out;
+ }
+
+ /* If the service has only one child, use it. */
+ if (scf_iter_service_instances(iter, svc) != SCF_SUCCESS)
+ scfdie();
+
+ ret = scf_iter_next_instance(iter, inst);
+ if (ret < 0)
+ scfdie();
+ if (ret != 1) {
+ uu_warn(gettext("Service \"%s\" has no instances.\n"),
+ fmri);
+ exit_status = 1;
+ ret = ENOENT;
+ goto out;
+ }
+
+ ret = scf_iter_next_instance(iter, inst2);
+ if (ret < 0)
+ scfdie();
+
+ if (ret != 0) {
+ ret = E2BIG;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ scf_iter_destroy(iter);
+ scf_instance_destroy(inst2);
+ scf_service_destroy(svc);
+ return (ret);
+}
+
+/*
+ * Same as get_inst_mult(), but on E2BIG prints a warning and returns ENOENT.
+ */
+static int
+get_inst(const char *fmri, scf_instance_t *inst)
+{
+ int r;
+
+ r = get_inst_mult(fmri, inst);
+ if (r != E2BIG)
+ return (r);
+
+ uu_warn(gettext("operation on service %s is ambiguous; "
+ "instance specification needed.\n"), fmri);
+ return (ENOENT);
+}
+
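+/*
+ * Worked example (hypothetical repository contents): if svc:/network/smtp has
+ * the single instance "sendmail", get_inst("svc:/network/smtp", inst) resolves
+ * to that instance.  If the service gained a second instance, get_inst_mult()
+ * would return E2BIG and get_inst() would instead warn that the operation is
+ * ambiguous and return ENOENT.
+ */
+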
+static char *
+inst_get_fmri(const scf_instance_t *inst)
+{
+ ssize_t sz;
+
+ sz = scf_instance_to_fmri(inst, scratch_fmri, max_scf_fmri_sz);
+ if (sz < 0)
+ scfdie();
+ if (sz >= max_scf_fmri_sz)
+ uu_die(gettext("scf_instance_to_fmri() returned unexpectedly "
+ "long value.\n"));
+
+ return (scratch_fmri);
+}
+
+static ssize_t
+dep_get_astring(const char *fmri, const char *pgname,
+ const scf_propertygroup_t *pg, const char *propname, scf_property_t *prop,
+ scf_value_t *v, char *buf, size_t bufsz)
+{
+ ssize_t sz;
+
+ sz = get_astring_prop(pg, propname, prop, v, buf, bufsz);
+ if (sz >= 0)
+ return (sz);
+
+ switch (-sz) {
+ case ENOENT:
+ uu_warn(gettext("\"%s\" is misconfigured (\"%s\" dependency "
+ "lacks \"%s\" property.)\n"), fmri, pgname, propname);
+ return (-1);
+
+ case E2BIG:
+ uu_warn(gettext("\"%s\" is misconfigured (\"%s/%s\" property "
+ "is not single-valued.)\n"), fmri, pgname, propname);
+ return (-1);
+
+ case EINVAL:
+ uu_warn(gettext("\"%s\" is misconfigured (\"%s/%s\" property "
+ "is not of astring type.)\n"), fmri, pgname, propname);
+ return (-1);
+
+ default:
+ assert(0);
+ abort();
+ /* NOTREACHED */
+ }
+}
+
+static boolean_t
+multiple_instances(scf_iter_t *iter, scf_value_t *v, char *buf)
+{
+ int count = 0, r;
+ boolean_t ret;
+ scf_instance_t *inst;
+
+ inst = scf_instance_create(h);
+ if (inst == NULL)
+ scfdie();
+
+ for (;;) {
+ r = scf_iter_next_value(iter, v);
+ if (r == 0) {
+ ret = B_FALSE;
+ goto out;
+ }
+ if (r != 1)
+ scfdie();
+
+ if (scf_value_get_astring(v, buf, max_scf_fmri_sz) < 0)
+ scfdie();
+
+ switch (get_inst_mult(buf, inst)) {
+ case 0:
+ ++count;
+ if (count > 1) {
+ ret = B_TRUE;
+ goto out;
+ }
+ break;
+
+ case ENOTSUP:
+ case EINVAL:
+ case ENOTDIR:
+ case ENOENT:
+ continue;
+
+ case E2BIG:
+ ret = B_TRUE;
+ goto out;
+
+ default:
+ assert(0);
+ abort();
+ }
+ }
+
+out:
+ scf_instance_destroy(inst);
+ return (ret);
+}
+
+/*
+ * Enable the service or instance identified by fmri and its dependencies,
+ * recursively. Specifically, call get_inst(fmri), enable the result, and
+ * recurse on its restarter and the dependencies. To avoid duplication of
+ * effort or looping around a dependency cycle, each FMRI is entered into the
+ * "visited" hash table. While recursing, the hash table entry is marked
+ * "active", so that if we come upon it again, we know we've hit a cycle.
+ * exclude_all and optional_all dependencies are ignored. require_any
+ * dependencies are followed only if they resolve to a single instance;
+ * otherwise the user is warned.
+ *
+ * fmri must point to a writable max_scf_fmri_sz buffer. Returns EINVAL if fmri
+ * is invalid, E2BIG if fmri identifies a service with multiple instances, ELOOP
+ * on cycle detection, or 0 on success.
+ */
+static int
+enable_fmri_rec(char *fmri, boolean_t temp)
+{
+ scf_instance_t *inst;
+ scf_snapshot_t *snap;
+ scf_propertygroup_t *pg;
+ scf_property_t *prop;
+ scf_value_t *v;
+ scf_iter_t *pg_iter, *val_iter;
+ scf_type_t ty;
+ char *buf, *pgname;
+ ssize_t name_sz, len, sz;
+ int ret;
+ struct ht_elt *he;
+
+ len = scf_canonify_fmri(fmri, fmri, max_scf_fmri_sz);
+ if (len < 0) {
+ assert(scf_error() == SCF_ERROR_INVALID_ARGUMENT);
+ return (EINVAL);
+ }
+ assert(len < max_scf_fmri_sz);
+
+ switch (visited_find_or_add(fmri, &he)) {
+ case 0:
+ he->active = B_TRUE;
+ break;
+
+ case 1:
+ return (he->active ? ELOOP : 0);
+
+ case -1:
+ uu_die(emsg_nomem);
+
+ default:
+ assert(0);
+ abort();
+ }
+
+ inst = scf_instance_create(h);
+ if (inst == NULL)
+ scfdie();
+
+ switch (get_inst_mult(fmri, inst)) {
+ case 0:
+ break;
+
+ case E2BIG:
+ he->active = B_FALSE;
+ return (E2BIG);
+
+ default:
+ he->active = B_FALSE;
+ return (0);
+ }
+
+ set_inst_enabled(fmri, inst, temp, B_TRUE);
+
+ if ((snap = scf_snapshot_create(h)) == NULL ||
+ (pg = scf_pg_create(h)) == NULL ||
+ (prop = scf_property_create(h)) == NULL ||
+ (v = scf_value_create(h)) == NULL ||
+ (pg_iter = scf_iter_create(h)) == NULL ||
+ (val_iter = scf_iter_create(h)) == NULL)
+ scfdie();
+
+ buf = malloc(max_scf_fmri_sz);
+ if (buf == NULL)
+ uu_die(emsg_nomem);
+
+ name_sz = scf_limit(SCF_LIMIT_MAX_NAME_LENGTH);
+ if (name_sz < 0)
+ scfdie();
+ ++name_sz;
+ pgname = malloc(name_sz);
+ if (pgname == NULL)
+ uu_die(emsg_nomem);
+
+ if (scf_instance_get_snapshot(inst, "running", snap) != 0) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ scf_snapshot_destroy(snap);
+ snap = NULL;
+ }
+
+ /* Enable restarter */
+ if (scf_instance_get_pg_composed(inst, snap, SCF_PG_GENERAL, pg) != 0) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ uu_warn(gettext("\"%s\" is misconfigured (lacks \"%s\" "
+ "property group).\n"), fmri, SCF_PG_GENERAL);
+ ret = 0;
+ goto out;
+ }
+
+ sz = get_astring_prop(pg, SCF_PROPERTY_RESTARTER, prop, v, buf,
+ max_scf_fmri_sz);
+ if (sz > max_scf_fmri_sz) {
+ uu_warn(gettext("\"%s\" is misconfigured (the value of "
+ "\"%s/%s\" is too long).\n"), fmri, SCF_PG_GENERAL,
+ SCF_PROPERTY_RESTARTER);
+ ret = 0;
+ goto out;
+ } else if (sz >= 0) {
+ switch (enable_fmri_rec(buf, temp)) {
+ case 0:
+ break;
+
+ case EINVAL:
+ uu_warn(gettext("Restarter FMRI for \"%s\" is "
+ "invalid.\n"), fmri);
+ break;
+
+ case E2BIG:
+ uu_warn(gettext("Restarter FMRI for \"%s\" identifies "
+ "a service with multiple instances.\n"), fmri);
+ break;
+
+ case ELOOP:
+ ret = ELOOP;
+ goto out;
+
+ default:
+ assert(0);
+ abort();
+ }
+ } else if (sz < 0) {
+ switch (-sz) {
+ case ENOENT:
+ break;
+
+ case E2BIG:
+ uu_warn(gettext("\"%s\" is misconfigured (\"%s/%s\" "
+ "property is not single-valued).\n"), fmri,
+ SCF_PG_GENERAL, SCF_PROPERTY_RESTARTER);
+ ret = 0;
+ goto out;
+
+ case EINVAL:
+ uu_warn(gettext("\"%s\" is misconfigured (\"%s/%s\" "
+ "property is not of astring type).\n"), fmri,
+ SCF_PG_GENERAL, SCF_PROPERTY_RESTARTER);
+ ret = 0;
+ goto out;
+
+ default:
+ assert(0);
+ abort();
+ }
+ }
+
+ if (scf_iter_instance_pgs_typed_composed(pg_iter, inst, snap,
+ SCF_GROUP_DEPENDENCY) == -1)
+ scfdie();
+
+ while (scf_iter_next_pg(pg_iter, pg) > 0) {
+ len = scf_pg_get_name(pg, pgname, name_sz);
+ if (len < 0)
+ scfdie();
+ assert(len < name_sz);
+
+ if (dep_get_astring(fmri, pgname, pg, SCF_PROPERTY_TYPE, prop,
+ v, buf, max_scf_fmri_sz) < 0)
+ continue;
+
+ if (strcmp(buf, "service") != 0)
+ continue;
+
+ if (dep_get_astring(fmri, pgname, pg, SCF_PROPERTY_GROUPING,
+ prop, v, buf, max_scf_fmri_sz) < 0)
+ continue;
+
+ if (strcmp(buf, SCF_DEP_EXCLUDE_ALL) == 0 ||
+ strcmp(buf, SCF_DEP_OPTIONAL_ALL) == 0)
+ continue;
+
+ if (strcmp(buf, SCF_DEP_REQUIRE_ALL) != 0 &&
+ strcmp(buf, SCF_DEP_REQUIRE_ANY) != 0) {
+ uu_warn(gettext("Dependency \"%s\" of \"%s\" has "
+ "unknown type \"%s\".\n"), pgname, fmri, buf);
+ continue;
+ }
+
+ if (scf_pg_get_property(pg, SCF_PROPERTY_ENTITIES, prop) ==
+ -1) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ uu_warn(gettext("\"%s\" is misconfigured (\"%s\" "
+ "dependency lacks \"%s\" property.)\n"), fmri,
+ pgname, SCF_PROPERTY_ENTITIES);
+ continue;
+ }
+
+ if (scf_property_type(prop, &ty) != SCF_SUCCESS)
+ scfdie();
+
+ if (ty != SCF_TYPE_FMRI) {
+ uu_warn(gettext("\"%s\" is misconfigured (property "
+ "\"%s/%s\" is not of fmri type).\n"), fmri, pgname,
+ SCF_PROPERTY_ENTITIES);
+ continue;
+ }
+
+ if (scf_iter_property_values(val_iter, prop) == -1)
+ scfdie();
+
+ if (strcmp(buf, SCF_DEP_REQUIRE_ANY) == 0) {
+ if (multiple_instances(val_iter, v, buf)) {
+ (void) printf(gettext("%s requires one of:\n"),
+ fmri);
+
+ if (scf_iter_property_values(val_iter, prop) !=
+ 0)
+ scfdie();
+
+ for (;;) {
+ int r;
+
+ r = scf_iter_next_value(val_iter, v);
+ if (r == 0)
+ break;
+ if (r != 1)
+ scfdie();
+
+ if (scf_value_get_astring(v, buf,
+ max_scf_fmri_sz) < 0)
+ scfdie();
+
+ (void) fputs(" ", stdout);
+ (void) puts(buf);
+ }
+
+ continue;
+ }
+
+ /*
+ * Since there's only one instance, we can enable it.
+ * Reset val_iter and continue.
+ */
+ if (scf_iter_property_values(val_iter, prop) != 0)
+ scfdie();
+ }
+
+ for (;;) {
+ ret = scf_iter_next_value(val_iter, v);
+ if (ret == 0)
+ break;
+ if (ret != 1)
+ scfdie();
+
+ if (scf_value_get_astring(v, buf, max_scf_fmri_sz) ==
+ -1)
+ scfdie();
+
+ switch (enable_fmri_rec(buf, temp)) {
+ case 0:
+ break;
+
+ case EINVAL:
+ uu_warn(gettext("\"%s\" dependency of \"%s\" "
+ "has invalid FMRI \"%s\".\n"), pgname,
+ fmri, buf);
+ break;
+
+ case E2BIG:
+ uu_warn(gettext("%s depends on %s, which has "
+ "multiple instances.\n"), fmri, buf);
+ break;
+
+ case ELOOP:
+ ret = ELOOP;
+ goto out;
+
+ default:
+ assert(0);
+ abort();
+ }
+ }
+ }
+
+ ret = 0;
+
+out:
+ he->active = B_FALSE;
+
+ free(buf);
+ free(pgname);
+
+ (void) scf_value_destroy(v);
+ scf_property_destroy(prop);
+ scf_pg_destroy(pg);
+ scf_snapshot_destroy(snap);
+ scf_iter_destroy(pg_iter);
+ scf_iter_destroy(val_iter);
+
+ return (ret);
+}
+
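+/*
+ * Hypothetical illustration of the cycle handling above: if svc:/a:default
+ * has a require_all dependency on svc:/b:default and vice versa, enabling a
+ * recurses into b, which recurses back into a.  The second visit finds a's
+ * hash entry still marked active, so ELOOP is returned and propagated out,
+ * and set_fmri_enabled() reports a dependency cycle.
+ */
+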
+/*
+ * fmri here is only used for verbose messages.
+ */
+static void
+set_inst_action(const char *fmri, const scf_instance_t *inst,
+ const char *action)
+{
+ scf_transaction_t *tx;
+ scf_transaction_entry_t *ent;
+ scf_propertygroup_t *pg;
+ scf_property_t *prop;
+ scf_value_t *v;
+ int ret;
+ int64_t t;
+ hrtime_t timestamp;
+
+ const char * const scf_pg_restarter_actions = SCF_PG_RESTARTER_ACTIONS;
+
+ if ((pg = scf_pg_create(h)) == NULL ||
+ (prop = scf_property_create(h)) == NULL ||
+ (v = scf_value_create(h)) == NULL ||
+ (tx = scf_transaction_create(h)) == NULL ||
+ (ent = scf_entry_create(h)) == NULL)
+ scfdie();
+
+ if (scf_instance_get_pg(inst, scf_pg_restarter_actions, pg) == -1) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ /* Try creating the restarter_actions property group. */
+ if (scf_instance_add_pg(inst, scf_pg_restarter_actions,
+ SCF_PG_RESTARTER_ACTIONS_TYPE,
+ SCF_PG_RESTARTER_ACTIONS_FLAGS, pg) == -1) {
+ switch (scf_error()) {
+ case SCF_ERROR_EXISTS:
+ /* Someone must have added it. */
+ break;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ if (!verbose)
+ uu_warn(emsg_permission_denied, fmri);
+ else
+ uu_warn(emsg_create_pg_perm_denied,
+ fmri, scf_pg_restarter_actions);
+ goto out;
+
+ default:
+ scfdie();
+ }
+ }
+ }
+
+ /*
+ * If we lose the transaction race and need to retry, there are 2
+ * potential other winners:
+ * - another process setting actions
+ * - the restarter marking the action complete
+ * Therefore, re-read the property every time through the loop before
+ * making any decisions based on their values.
+ */
+ do {
+ timestamp = gethrtime();
+
+ if (scf_transaction_start(tx, pg) == -1) {
+ if (scf_error() != SCF_ERROR_PERMISSION_DENIED)
+ scfdie();
+
+ if (!verbose)
+ uu_warn(emsg_permission_denied, fmri);
+ else
+ uu_warn(emsg_pg_perm_denied, fmri,
+ scf_pg_restarter_actions);
+ goto out;
+ }
+
+ if (scf_pg_get_property(pg, action, prop) == -1) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+ if (scf_transaction_property_new(tx, ent,
+ action, SCF_TYPE_INTEGER) == -1)
+ scfdie();
+ goto action_set;
+ } else {
+ if (scf_transaction_property_change_type(tx, ent,
+ action, SCF_TYPE_INTEGER) == -1)
+ scfdie();
+ }
+
+ if (scf_property_get_value(prop, v) == -1) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ case SCF_ERROR_NOT_FOUND:
+ /* Misconfigured, so set anyway. */
+ goto action_set;
+
+ default:
+ scfdie();
+ }
+ } else {
+ if (scf_value_get_integer(v, &t) == -1) {
+ assert(scf_error() == SCF_ERROR_TYPE_MISMATCH);
+ goto action_set;
+ }
+ if (t > timestamp)
+ break;
+ }
+
+action_set:
+ scf_value_set_integer(v, timestamp);
+ if (scf_entry_add_value(ent, v) == -1)
+ scfdie();
+
+ ret = scf_transaction_commit(tx);
+ if (ret == -1) {
+ if (scf_error() != SCF_ERROR_PERMISSION_DENIED)
+ scfdie();
+
+ if (!verbose)
+ uu_warn(emsg_permission_denied, fmri);
+ else
+ uu_warn(emsg_prop_perm_denied, fmri,
+ scf_pg_restarter_actions, action);
+ scf_transaction_reset(tx);
+ goto out;
+ }
+
+ scf_transaction_reset(tx);
+
+ if (ret == 0) {
+ if (scf_pg_update(pg) == -1)
+ scfdie();
+ }
+ } while (ret == 0);
+
+ if (verbose)
+ (void) printf(gettext("Action %s set for %s.\n"), action, fmri);
+
+out:
+ scf_value_destroy(v);
+ scf_entry_destroy(ent);
+ scf_transaction_destroy(tx);
+ scf_property_destroy(prop);
+ scf_pg_destroy(pg);
+}
+
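+/*
+ * Condensed sketch of the convention implemented above: an administrative
+ * action is requested by committing the current gethrtime() value to the
+ * matching integer property under restarter_actions, e.g. for restart:
+ *
+ *	timestamp = gethrtime();
+ *	scf_value_set_integer(v, timestamp);
+ *	// stage v as restarter_actions/restart and commit; if a newer
+ *	// timestamp is already present another requester won the race,
+ *	// so the value is left alone (the t > timestamp check above)
+ */
+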
+/*
+ * Get the state of inst. state should point to a buffer of
+ * MAX_SCF_STATE_STRING_SZ bytes. Returns 0 on success or -1 if
+ * no restarter property group
+ * no state property
+ * state property is misconfigured (wrong type, not single-valued)
+ * state value is too long
+ * In these cases, fmri is used to print a warning.
+ *
+ * If pgp is non-NULL, a successful call to inst_get_state will store
+ * the SCF_PG_RESTARTER property group in *pgp, and the caller will be
+ * responsible for calling scf_pg_destroy on the property group.
+ */
+int
+inst_get_state(scf_instance_t *inst, char *state, const char *fmri,
+ scf_propertygroup_t **pgp)
+{
+ scf_propertygroup_t *pg;
+ scf_property_t *prop;
+ scf_value_t *val;
+ int ret = -1;
+ ssize_t szret;
+
+ if ((pg = scf_pg_create(h)) == NULL ||
+ (prop = scf_property_create(h)) == NULL ||
+ (val = scf_value_create(h)) == NULL)
+ scfdie();
+
+ if (scf_instance_get_pg(inst, SCF_PG_RESTARTER, pg) != SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ uu_warn(gettext("%s is misconfigured (lacks \"%s\" property "
+ "group).\n"), fmri ? fmri : inst_get_fmri(inst),
+ SCF_PG_RESTARTER);
+ goto out;
+ }
+
+ szret = get_astring_prop(pg, SCF_PROPERTY_STATE, prop, val, state,
+ MAX_SCF_STATE_STRING_SZ);
+ if (szret < 0) {
+ switch (-szret) {
+ case ENOENT:
+ uu_warn(gettext("%s is misconfigured (\"%s\" property "
+ "group lacks \"%s\" property).\n"),
+ fmri ? fmri : inst_get_fmri(inst), SCF_PG_RESTARTER,
+ SCF_PROPERTY_STATE);
+ goto out;
+
+ case E2BIG:
+ uu_warn(gettext("%s is misconfigured (\"%s/%s\" "
+ "property is not single-valued).\n"),
+ fmri ? fmri : inst_get_fmri(inst), SCF_PG_RESTARTER,
+ SCF_PROPERTY_STATE);
+ goto out;
+
+ case EINVAL:
+ uu_warn(gettext("%s is misconfigured (\"%s/%s\" "
+ "property is not of type astring).\n"),
+ fmri ? fmri : inst_get_fmri(inst), SCF_PG_RESTARTER,
+ SCF_PROPERTY_STATE);
+ goto out;
+
+ default:
+ assert(0);
+ abort();
+ }
+ }
+ if (szret >= MAX_SCF_STATE_STRING_SZ) {
+ uu_warn(gettext("%s is misconfigured (\"%s/%s\" property value "
+ "is too long).\n"), fmri ? fmri : inst_get_fmri(inst),
+ SCF_PG_RESTARTER, SCF_PROPERTY_STATE);
+ goto out;
+ }
+
+ ret = 0;
+ if (pgp)
+ *pgp = pg;
+
+out:
+ (void) scf_value_destroy(val);
+ scf_property_destroy(prop);
+ if (ret || pgp == NULL)
+ scf_pg_destroy(pg);
+ return (ret);
+}
+
+static void
+set_astring_prop(const char *fmri, const char *pgname, const char *pgtype,
+ uint32_t pgflags, const char *propname, const char *str)
+{
+ scf_instance_t *inst;
+ scf_propertygroup_t *pg;
+ scf_property_t *prop;
+ scf_value_t *val;
+ scf_transaction_t *tx;
+ scf_transaction_entry_t *txent;
+ int ret;
+
+ inst = scf_instance_create(h);
+ if (inst == NULL)
+ scfdie();
+
+ if (get_inst(fmri, inst) != 0)
+ return;
+
+ if ((pg = scf_pg_create(h)) == NULL ||
+ (prop = scf_property_create(h)) == NULL ||
+ (val = scf_value_create(h)) == NULL ||
+ (tx = scf_transaction_create(h)) == NULL ||
+ (txent = scf_entry_create(h)) == NULL)
+ scfdie();
+
+ if (scf_instance_get_pg(inst, pgname, pg) != SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ if (scf_instance_add_pg(inst, pgname, pgtype, pgflags, pg) !=
+ SCF_SUCCESS) {
+ switch (scf_error()) {
+ case SCF_ERROR_EXISTS:
+ if (scf_instance_get_pg(inst, pgname, pg) !=
+ SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ uu_warn(gettext("Repository write "
+ "contention.\n"));
+ goto out;
+ }
+ break;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ if (!verbose)
+ uu_warn(emsg_permission_denied, fmri);
+ else
+ uu_warn(emsg_create_pg_perm_denied,
+ fmri, pgname);
+ goto out;
+
+ default:
+ scfdie();
+ }
+ }
+ }
+
+ do {
+ if (scf_transaction_start(tx, pg) != SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_PERMISSION_DENIED)
+ scfdie();
+
+ if (!verbose)
+ uu_warn(emsg_permission_denied, fmri);
+ else
+ uu_warn(emsg_pg_perm_denied, fmri, pgname);
+ goto out;
+ }
+
+ if (scf_transaction_property_change_type(tx, txent, propname,
+ SCF_TYPE_ASTRING) != 0) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ if (scf_transaction_property_new(tx, txent, propname,
+ SCF_TYPE_ASTRING) != 0)
+ scfdie();
+ }
+
+ if (scf_value_set_astring(val, str) != SCF_SUCCESS)
+ scfdie();
+
+ if (scf_entry_add_value(txent, val) != SCF_SUCCESS)
+ scfdie();
+
+ ret = scf_transaction_commit(tx);
+ if (ret == -1) {
+ if (scf_error() != SCF_ERROR_PERMISSION_DENIED)
+ scfdie();
+
+ if (!verbose)
+ uu_warn(emsg_permission_denied, fmri);
+ else
+ uu_warn(emsg_prop_perm_denied, fmri, pgname,
+ propname);
+ goto out;
+ }
+
+ if (ret == 0) {
+ scf_transaction_reset(tx);
+
+ if (scf_pg_update(pg) != SCF_SUCCESS)
+ scfdie();
+ }
+ } while (ret == 0);
+
+out:
+ scf_transaction_destroy(tx);
+ scf_entry_destroy(txent);
+ scf_value_destroy(val);
+ scf_property_destroy(prop);
+ scf_pg_destroy(pg);
+ scf_instance_destroy(inst);
+}
+
+
+/*
+ * Flags to control enable and disable actions.
+ */
+#define SET_ENABLED 0x1
+#define SET_TEMPORARY 0x2
+#define SET_RECURSIVE 0x4
+
+static int
+set_fmri_enabled(void *data, scf_walkinfo_t *wip)
+{
+ int flags = (int)data;
+
+ assert(wip->inst != NULL);
+ assert(wip->pg == NULL);
+
+ if (flags & SET_RECURSIVE) {
+ char *fmri_buf = malloc(max_scf_fmri_sz);
+ if (fmri_buf == NULL)
+ uu_die(emsg_nomem);
+
+ visited = calloc(HT_BUCKETS, sizeof (*visited));
+ if (visited == NULL)
+ uu_die(emsg_nomem);
+
+ /* scf_walk_fmri() guarantees that fmri isn't too long */
+ assert(strlen(wip->fmri) <= max_scf_fmri_sz);
+ (void) strlcpy(fmri_buf, wip->fmri, max_scf_fmri_sz);
+
+ switch (enable_fmri_rec(fmri_buf, (flags & SET_TEMPORARY))) {
+ case E2BIG:
+ uu_warn(gettext("operation on service %s is ambiguous; "
+ "instance specification needed.\n"), fmri_buf);
+ break;
+
+ case ELOOP:
+ uu_warn(gettext("%s: Dependency cycle detected.\n"),
+ fmri_buf);
+ }
+
+ free(visited);
+ free(fmri_buf);
+
+ } else {
+ set_inst_enabled(wip->fmri, wip->inst,
+ (flags & SET_TEMPORARY) != 0, (flags & SET_ENABLED) != 0);
+ }
+
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+wait_fmri_enabled(void *data, scf_walkinfo_t *wip)
+{
+ scf_propertygroup_t *pg = NULL;
+ char state[MAX_SCF_STATE_STRING_SZ];
+
+ assert(wip->inst != NULL);
+ assert(wip->pg == NULL);
+
+ do {
+ if (pg)
+ scf_pg_destroy(pg);
+ if (inst_get_state(wip->inst, state, wip->fmri, &pg) != 0) {
+ exit_status = EXIT_SVC_FAILURE;
+ return (0);
+ }
+
+ if (strcmp(state, SCF_STATE_STRING_ONLINE) == 0 ||
+ strcmp(state, SCF_STATE_STRING_DEGRADED) == 0) {
+ /*
+ * We're done.
+ */
+ goto out;
+ }
+
+ if (strcmp(state, SCF_STATE_STRING_MAINT) == 0) {
+ /*
+ * The service is ill.
+ */
+ uu_warn(gettext("Instance \"%s\" is in maintenance"
+ " state.\n"), wip->fmri);
+ exit_status = EXIT_SVC_FAILURE;
+ goto out;
+ }
+
+ if (!is_enabled(wip->inst)) {
+ /*
+ * Someone stepped in and disabled the service.
+ */
+ uu_warn(gettext("Instance \"%s\" has been disabled"
+ " by another entity.\n"), wip->fmri);
+ exit_status = EXIT_SVC_FAILURE;
+ goto out;
+ }
+
+ if (!has_potential(wip->inst, B_FALSE)) {
+ /*
+ * Our dependencies aren't met. We'll never
+ * amount to anything.
+ */
+ uu_warn(gettext("Instance \"%s\" has unsatisfied"
+ " dependencies.\n"), wip->fmri);
+ /*
+ * EXIT_SVC_FAILURE takes precedence over
+ * EXIT_DEP_FAILURE
+ */
+ if (exit_status == 0)
+ exit_status = EXIT_DEP_FAILURE;
+ goto out;
+ }
+ } while (_scf_pg_wait(pg, WAIT_INTERVAL) >= 0);
+ scfdie();
+ /* NOTREACHED */
+
+out:
+ scf_pg_destroy(pg);
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+wait_fmri_disabled(void *data, scf_walkinfo_t *wip)
+{
+ scf_propertygroup_t *pg = NULL;
+ char state[MAX_SCF_STATE_STRING_SZ];
+
+ assert(wip->inst != NULL);
+ assert(wip->pg == NULL);
+
+ do {
+ if (pg)
+ scf_pg_destroy(pg);
+ if (inst_get_state(wip->inst, state, wip->fmri, &pg) != 0) {
+ exit_status = EXIT_SVC_FAILURE;
+ return (0);
+ }
+
+ if (strcmp(state, SCF_STATE_STRING_DISABLED) == 0) {
+ /*
+ * We're done.
+ */
+ goto out;
+ }
+
+ if (strcmp(state, SCF_STATE_STRING_MAINT) == 0) {
+ /*
+ * The service is ill.
+ */
+ uu_warn(gettext("Instance \"%s\" is in maintenance"
+ " state.\n"), wip->fmri);
+ exit_status = EXIT_SVC_FAILURE;
+ goto out;
+ }
+
+ if (is_enabled(wip->inst)) {
+ /*
+ * Someone stepped in and enabled the service.
+ */
+ uu_warn(gettext("Instance \"%s\" has been enabled"
+ " by another entity.\n"), wip->fmri);
+ exit_status = EXIT_SVC_FAILURE;
+ goto out;
+ }
+
+ if (!has_potential(wip->inst, B_TRUE)) {
+ /*
+ * Our restarter is hopeless.
+ */
+ uu_warn(gettext("Restarter for instance \"%s\" is"
+ " unavailable.\n"), wip->fmri);
+ /*
+ * EXIT_SVC_FAILURE takes precedence over
+ * EXIT_DEP_FAILURE
+ */
+ if (exit_status == 0)
+ exit_status = EXIT_DEP_FAILURE;
+ goto out;
+ }
+
+ } while (_scf_pg_wait(pg, WAIT_INTERVAL) >= 0);
+ scfdie();
+ /* NOTREACHED */
+
+out:
+ scf_pg_destroy(pg);
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+clear_instance(void *data, scf_walkinfo_t *wip)
+{
+ char state[MAX_SCF_STATE_STRING_SZ];
+
+ assert(wip->inst != NULL);
+ assert(wip->pg == NULL);
+
+ if (inst_get_state(wip->inst, state, wip->fmri, NULL) != 0)
+ return (0);
+
+ if (strcmp(state, SCF_STATE_STRING_MAINT) == 0) {
+ set_inst_action(wip->fmri, wip->inst, SCF_PROPERTY_MAINT_OFF);
+ } else if (strcmp(state, SCF_STATE_STRING_DEGRADED) == 0) {
+ set_inst_action(wip->fmri, wip->inst, SCF_PROPERTY_RESTORE);
+ } else {
+ uu_warn(gettext("Instance \"%s\" is not in a "
+ "maintenance or degraded state.\n"), wip->fmri);
+
+ exit_status = 1;
+ }
+
+ return (0);
+}
+
+static int
+set_fmri_action(void *action, scf_walkinfo_t *wip)
+{
+ assert(wip->inst != NULL && wip->pg == NULL);
+
+ set_inst_action(wip->fmri, wip->inst, action);
+
+ return (0);
+}
+
+/*
+ * Flags to control 'mark' action.
+ */
+#define MARK_IMMEDIATE 0x1
+#define MARK_TEMPORARY 0x2
+
+static int
+force_degraded(void *data, scf_walkinfo_t *wip)
+{
+ int flags = (int)data;
+ char state[MAX_SCF_STATE_STRING_SZ];
+
+ if (inst_get_state(wip->inst, state, wip->fmri, NULL) != 0) {
+ exit_status = 1;
+ return (0);
+ }
+
+ if (strcmp(state, SCF_STATE_STRING_ONLINE) != 0) {
+ uu_warn(gettext("Instance \"%s\" is not online.\n"), wip->fmri);
+ exit_status = 1;
+ return (0);
+ }
+
+ set_inst_action(wip->fmri, wip->inst, (flags & MARK_IMMEDIATE) ?
+ SCF_PROPERTY_DEGRADE_IMMEDIATE : SCF_PROPERTY_DEGRADED);
+
+ return (0);
+}
+
+static int
+force_maintenance(void *data, scf_walkinfo_t *wip)
+{
+ int flags = (int)data;
+ const char *prop;
+
+ if (flags & MARK_IMMEDIATE) {
+ prop = (flags & MARK_TEMPORARY) ?
+ SCF_PROPERTY_MAINT_ON_IMMTEMP :
+ SCF_PROPERTY_MAINT_ON_IMMEDIATE;
+ } else {
+ prop = (flags & MARK_TEMPORARY) ?
+ SCF_PROPERTY_MAINT_ON_TEMPORARY :
+ SCF_PROPERTY_MAINT_ON;
+ }
+
+ set_inst_action(wip->fmri, wip->inst, prop);
+
+ return (0);
+}
+
+static void
+set_milestone(const char *fmri, boolean_t temporary)
+{
+ scf_instance_t *inst;
+ scf_propertygroup_t *pg;
+ int r;
+
+ if (temporary) {
+ set_astring_prop(SCF_SERVICE_STARTD, SCF_PG_OPTIONS_OVR,
+ SCF_PG_OPTIONS_OVR_TYPE, SCF_PG_OPTIONS_OVR_FLAGS,
+ SCF_PROPERTY_MILESTONE, fmri);
+ return;
+ }
+
+ if ((inst = scf_instance_create(h)) == NULL ||
+ (pg = scf_pg_create(h)) == NULL)
+ scfdie();
+
+ if (get_inst(SCF_SERVICE_STARTD, inst) != 0) {
+ scf_instance_destroy(inst);
+ return;
+ }
+
+ /*
+ * Set the persistent milestone before deleting the override so we don't
+ * glitch.
+ */
+ set_astring_prop(SCF_SERVICE_STARTD, SCF_PG_OPTIONS,
+ SCF_PG_OPTIONS_TYPE, SCF_PG_OPTIONS_FLAGS, SCF_PROPERTY_MILESTONE,
+ fmri);
+
+ if (scf_instance_get_pg(inst, SCF_PG_OPTIONS_OVR, pg) == 0) {
+ r = delete_prop(pg, SCF_PROPERTY_MILESTONE);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ uu_warn(emsg_no_service, fmri);
+ exit_status = 1;
+ goto out;
+
+ case EPERM:
+ uu_warn(gettext("Could not delete %s/%s property of "
+ "%s: permission denied.\n"), SCF_PG_OPTIONS_OVR,
+ SCF_PROPERTY_MILESTONE, SCF_SERVICE_STARTD);
+ exit_status = 1;
+ goto out;
+
+ case EACCES:
+ uu_warn(gettext("Could not delete %s/%s property of "
+ "%s: access denied.\n"), SCF_PG_OPTIONS_OVR,
+ SCF_PROPERTY_MILESTONE, SCF_SERVICE_STARTD);
+ exit_status = 1;
+ goto out;
+
+ case EROFS:
+ uu_warn(gettext("Could not delete %s/%s property of "
+ "%s: backend read-only.\n"), SCF_PG_OPTIONS_OVR,
+ SCF_PROPERTY_MILESTONE, SCF_SERVICE_STARTD);
+ exit_status = 1;
+ goto out;
+
+ default:
+ bad_error("delete_prop", r);
+ }
+ } else {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_DELETED:
+ uu_warn(emsg_no_service, fmri);
+ exit_status = 1;
+ goto out;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ default:
+ scfdie();
+ }
+ }
+
+out:
+ scf_pg_destroy(pg);
+ scf_instance_destroy(inst);
+}
+
+static char const *milestones[] = {
+ SCF_MILESTONE_SINGLE_USER,
+ SCF_MILESTONE_MULTI_USER,
+ SCF_MILESTONE_MULTI_USER_SERVER,
+ NULL
+};
+
+static void
+usage_milestone()
+{
+ const char **ms;
+
+ (void) fprintf(stderr, gettext(
+ "Usage: svcadm milestone [-d] <milestone>\n\n"
+ "\t-d\tmake the specified milestone the default for system boot\n\n"
+ "\tMilestones can be specified using an FMRI or abbreviation.\n"
+ "\tThe major milestones are as follows:\n\n"
+ "\tall\n"
+ "\tnone\n"));
+
+ for (ms = milestones; *ms != NULL; ms++)
+ (void) fprintf(stderr, "\t%s\n", *ms);
+
+ exit(UU_EXIT_USAGE);
+}
+
+static const char *
+validate_milestone(const char *milestone)
+{
+ const char **ms;
+ const char *tmp;
+ size_t len;
+
+ if (strcmp(milestone, "all") == 0)
+ return (milestone);
+
+ if (strcmp(milestone, "none") == 0)
+ return (milestone);
+
+ /*
+ * Determine if this is a full or partial milestone
+ */
+ for (ms = milestones; *ms != NULL; ms++) {
+ if ((tmp = strstr(*ms, milestone)) != NULL) {
+ len = strlen(milestone);
+
+ /*
+ * The beginning of the string must align with the start
+ * of a milestone fmri, or on the boundary between
+ * elements. The end of the string must align with the
+ * end of the milestone, or at the instance boundary.
+ */
+ if ((tmp == *ms || tmp[-1] == '/') &&
+ (tmp[len] == '\0' || tmp[len] == ':'))
+ return (*ms);
+ }
+ }
+
+ (void) fprintf(stderr,
+ gettext("\"%s\" is not a valid major milestone.\n"), milestone);
+
+ usage_milestone();
+ /* NOTREACHED */
+}
+
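+/*
+ * Worked examples of the matching rules above, assuming the usual
+ * svc:/milestone/multi-user:default milestone FMRI: "multi-user" and
+ * "milestone/multi-user" are both accepted, since each starts on an element
+ * boundary and ends at an element or instance boundary, while "user" is
+ * rejected because it begins in the middle of a path element.
+ */
+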
+/*ARGSUSED*/
+static void
+quiet(const char *fmt, ...)
+{
+ /* Do nothing */
+}
+
+int
+main(int argc, char *argv[])
+{
+ int o;
+ int err;
+
+ (void) setlocale(LC_ALL, "");
+ (void) textdomain(TEXT_DOMAIN);
+
+ (void) uu_setpname(argv[0]);
+
+ if (argc < 2)
+ usage();
+
+ max_scf_fmri_sz = scf_limit(SCF_LIMIT_MAX_FMRI_LENGTH);
+ if (max_scf_fmri_sz < 0)
+ scfdie();
+ ++max_scf_fmri_sz;
+
+ scratch_fmri = malloc(max_scf_fmri_sz);
+ if (scratch_fmri == NULL)
+ uu_die(emsg_nomem);
+
+ h = scf_handle_create(SCF_VERSION);
+ if (h == NULL)
+ scfdie();
+
+ if (scf_handle_bind(h) == -1)
+ uu_die(gettext("Couldn't bind to svc.configd.\n"));
+
+ while ((o = getopt(argc, argv, "v")) != -1) {
+ if (o == 'v')
+ verbose = 1;
+ else
+ usage();
+ }
+
+ if (optind >= argc)
+ usage();
+
+ emsg_permission_denied = gettext("%s: Permission denied.\n");
+ emsg_nomem = gettext("Out of memory.\n");
+ emsg_create_pg_perm_denied = gettext("%s: Couldn't create \"%s\" "
+ "property group (permission denied).\n");
+ emsg_pg_perm_denied = gettext("%s: Couldn't modify \"%s\" property "
+ "group (permission denied).\n");
+ emsg_prop_perm_denied = gettext("%s: Couldn't modify \"%s/%s\" "
+ "property (permission denied).\n");
+ emsg_no_service = gettext("No such service \"%s\".\n");
+
+ if (strcmp(argv[optind], "enable") == 0) {
+ int flags = SET_ENABLED;
+ int wait = 0;
+ int error = 0;
+
+ ++optind;
+
+ while ((o = getopt(argc, argv, "rst")) != -1) {
+ if (o == 'r')
+ flags |= SET_RECURSIVE;
+ else if (o == 't')
+ flags |= SET_TEMPORARY;
+ else if (o == 's')
+ wait = 1;
+ else if (o == '?')
+ usage();
+ else {
+ assert(0);
+ abort();
+ }
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc <= 0)
+ usage();
+
+ /*
+ * We want to continue with -s processing if we had
+ * invalid options, but not if an enable failed. We
+ * squelch output the second time we walk fmris; we saw
+ * the errors the first time.
+ */
+ if ((err = scf_walk_fmri(h, argc, argv, 0, set_fmri_enabled,
+ (void *)flags, &error, uu_warn)) != 0) {
+
+ uu_warn(gettext("failed to iterate over "
+ "instances: %s\n"), scf_strerror(err));
+ exit_status = UU_EXIT_FATAL;
+
+ } else if (wait && exit_status == 0 &&
+ (err = scf_walk_fmri(h, argc, argv, 0, wait_fmri_enabled,
+ (void *)flags, &error, quiet)) != 0) {
+
+ uu_warn(gettext("failed to iterate over "
+ "instances: %s\n"), scf_strerror(err));
+ exit_status = UU_EXIT_FATAL;
+ }
+
+ if (error > 0)
+ exit_status = error;
+
+ } else if (strcmp(argv[optind], "disable") == 0) {
+ int flags = 0;
+ int wait = 0;
+ int error = 0;
+
+ ++optind;
+
+ while ((o = getopt(argc, argv, "st")) != -1) {
+ if (o == 't')
+ flags |= SET_TEMPORARY;
+ else if (o == 's')
+ wait = 1;
+ else if (o == '?')
+ usage();
+ else {
+ assert(0);
+ abort();
+ }
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc <= 0)
+ usage();
+
+ /*
+ * We want to continue with -s processing if we had
+ * invalid options, but not if a disable failed. We
+ * squelch output the second time we walk fmris; we saw
+ * the errors the first time.
+ */
+ if ((err = scf_walk_fmri(h, argc, argv, 0, set_fmri_enabled,
+ (void *)flags, &exit_status, uu_warn)) != 0) {
+
+ uu_warn(gettext("failed to iterate over "
+ "instances: %s\n"), scf_strerror(err));
+ exit_status = UU_EXIT_FATAL;
+
+ } else if (wait && exit_status == 0 &&
+ (err = scf_walk_fmri(h, argc, argv, 0, wait_fmri_disabled,
+ (void *)flags, &error, quiet)) != 0) {
+
+ uu_warn(gettext("failed to iterate over "
+ "instances: %s\n"), scf_strerror(err));
+ exit_status = UU_EXIT_FATAL;
+ }
+
+ if (error > 0)
+ exit_status = error;
+
+ } else if (strcmp(argv[optind], "restart") == 0) {
+ ++optind;
+
+ if (optind >= argc)
+ usage();
+
+ if ((err = scf_walk_fmri(h, argc - optind, argv + optind, 0,
+ set_fmri_action, (void *)SCF_PROPERTY_RESTART,
+ &exit_status, uu_warn)) != 0) {
+ uu_warn(gettext("failed to iterate over "
+ "instances: %s\n"), scf_strerror(err));
+ exit_status = UU_EXIT_FATAL;
+ }
+
+ } else if (strcmp(argv[optind], "refresh") == 0) {
+ ++optind;
+
+ if (optind >= argc)
+ usage();
+
+ if ((err = scf_walk_fmri(h, argc - optind, argv + optind, 0,
+ set_fmri_action, (void *)SCF_PROPERTY_REFRESH,
+ &exit_status, uu_warn)) != 0) {
+ uu_warn(gettext("failed to iterate over "
+ "instances: %s\n"), scf_strerror(scf_error()));
+ exit_status = UU_EXIT_FATAL;
+ }
+
+ } else if (strcmp(argv[optind], "mark") == 0) {
+ int flags = 0;
+ scf_walk_callback callback;
+
+ ++optind;
+
+ while ((o = getopt(argc, argv, "It")) != -1) {
+ if (o == 'I')
+ flags |= MARK_IMMEDIATE;
+ else if (o == 't')
+ flags |= MARK_TEMPORARY;
+ else if (o == '?')
+ usage();
+ else {
+ assert(0);
+ abort();
+ }
+ }
+
+ if (argc - optind < 2)
+ usage();
+
+ if (strcmp(argv[optind], "degraded") == 0) {
+ if (flags & MARK_TEMPORARY)
+ uu_xdie(UU_EXIT_USAGE, gettext("-t may not be "
+ "used with degraded.\n"));
+ callback = force_degraded;
+
+ } else if (strcmp(argv[optind], "maintenance") == 0) {
+ callback = force_maintenance;
+ } else {
+ usage();
+ }
+
+ if ((err = scf_walk_fmri(h, argc - optind - 1,
+ argv + optind + 1, 0, callback, NULL, &exit_status,
+ uu_warn)) != 0) {
+ uu_warn(gettext("failed to iterate over "
+ "instances: %s\n"),
+ scf_strerror(err));
+ exit_status = UU_EXIT_FATAL;
+ }
+
+ } else if (strcmp(argv[optind], "clear") == 0) {
+ ++optind;
+
+ if (optind >= argc)
+ usage();
+
+ if ((err = scf_walk_fmri(h, argc - optind, argv + optind, 0,
+ clear_instance, NULL, &exit_status, uu_warn)) != 0) {
+ uu_warn(gettext("failed to iterate over "
+ "instances: %s\n"), scf_strerror(err));
+ exit_status = UU_EXIT_FATAL;
+ }
+
+ } else if (strcmp(argv[optind], "milestone") == 0) {
+ boolean_t temporary = B_TRUE;
+ const char *milestone;
+
+ ++optind;
+
+ while ((o = getopt(argc, argv, "d")) != -1) {
+ if (o == 'd')
+ temporary = B_FALSE;
+ else if (o == '?')
+ usage_milestone();
+ else {
+ assert(0);
+ abort();
+ }
+ }
+
+ if (optind >= argc)
+ usage_milestone();
+
+ milestone = validate_milestone(argv[optind]);
+
+ set_milestone(milestone, temporary);
+ } else if (strcmp(argv[optind], "_smf_backup") == 0) {
+ const char *reason = NULL;
+
+ ++optind;
+
+ if (optind != argc - 1)
+ usage();
+
+ if ((err = _scf_request_backup(h, argv[optind])) !=
+ SCF_SUCCESS) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_BACKEND_READONLY:
+ scfdie();
+ break;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ reason = scf_strerror(scf_error());
+ break;
+
+ case SCF_ERROR_INTERNAL:
+ reason =
+ "unknown error (see console for details)";
+ break;
+ }
+ uu_warn("failed to backup repository: %s\n", reason);
+ exit_status = UU_EXIT_FATAL;
+ }
+ } else {
+ usage();
+ }
+
+ if (scf_handle_unbind(h) == -1)
+ scfdie();
+ scf_handle_destroy(h);
+
+ return (exit_status);
+}
diff --git a/usr/src/cmd/svc/svcadm/synch.c b/usr/src/cmd/svc/svcadm/synch.c
new file mode 100644
index 0000000000..74cf81fbb3
--- /dev/null
+++ b/usr/src/cmd/svc/svcadm/synch.c
@@ -0,0 +1,588 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * synchronous svcadm logic
+ */
+
+#include <locale.h>
+#include <libintl.h>
+#include <libscf.h>
+#include <libscf_priv.h>
+#include <libuutil.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+#include <sys/stat.h>
+
+
+/*
+ * Definitions from svcadm.c.
+ */
+extern scf_handle_t *h;
+extern ssize_t max_scf_fmri_sz;
+
+extern void do_scfdie(int);
+extern int inst_get_state(scf_instance_t *, char *, const char *,
+ scf_propertygroup_t **);
+extern ssize_t get_astring_prop(const scf_propertygroup_t *, const char *,
+ scf_property_t *, scf_value_t *, char *, size_t);
+extern int get_bool_prop(scf_propertygroup_t *, const char *, uint8_t *);
+
+#define scfdie() do_scfdie(__LINE__)
+
+int has_potential(scf_instance_t *, int);
+
+/*
+ * Determines if the specified instance is enabled, composing the
+ * general and general_ovr property groups. For simplicity, we map
+ * most errors to "not enabled".
+ */
+int
+is_enabled(scf_instance_t *inst)
+{
+ scf_propertygroup_t *pg;
+ uint8_t bp;
+
+ if ((pg = scf_pg_create(h)) == NULL)
+ scfdie();
+
+ if (scf_instance_get_pg(inst, SCF_PG_GENERAL_OVR, pg) == 0 &&
+ get_bool_prop(pg, SCF_PROPERTY_ENABLED, &bp) == 0) {
+ scf_pg_destroy(pg);
+ return (bp);
+ }
+
+ if (scf_instance_get_pg(inst, SCF_PG_GENERAL, pg) == 0 &&
+ get_bool_prop(pg, SCF_PROPERTY_ENABLED, &bp) == 0) {
+ scf_pg_destroy(pg);
+ return (bp);
+ }
+
+ scf_pg_destroy(pg);
+ return (B_FALSE);
+}
+
+/*
+ * Reads an astring property from a property group. If the named
+ * property doesn't exist, returns NULL. The result of a successful
+ * call should be freed.
+ */
+static char *
+read_astring_prop(scf_propertygroup_t *pg, scf_value_t *val,
+ scf_property_t *prop, const char *name)
+{
+ char *value;
+ size_t value_sz;
+
+ if (scf_pg_get_property(pg, name, prop) == -1) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ case SCF_ERROR_DELETED:
+ return (NULL);
+ default:
+ scfdie();
+ }
+ }
+
+ if (scf_property_get_value(prop, val) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_NOT_FOUND:
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ return (NULL);
+ default:
+ scfdie();
+ }
+ }
+
+ value_sz = scf_limit(SCF_LIMIT_MAX_VALUE_LENGTH);
+ if ((value = malloc(value_sz)) == NULL)
+ scfdie();
+
+ if (scf_value_get_astring(val, value, value_sz) <= 0) {
+ free(value);
+ return (NULL);
+ }
+
+ return (value);
+}
+
+/*
+ * Creates and returns an scf_iter for the values of the named
+ * multi-value property. Returns NULL on failure.
+ */
+static scf_iter_t *
+prop_walk_init(scf_propertygroup_t *pg, const char *name)
+{
+ scf_iter_t *iter;
+ scf_property_t *prop;
+
+ if ((iter = scf_iter_create(h)) == NULL ||
+ (prop = scf_property_create(h)) == NULL)
+ scfdie();
+
+ if (scf_pg_get_property(pg, name, prop) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ case SCF_ERROR_DELETED:
+ goto error;
+ default:
+ scfdie();
+ }
+ }
+
+ if (scf_iter_property_values(iter, prop) != 0) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+ goto error;
+ }
+
+ scf_property_destroy(prop);
+ return (iter);
+error:
+ scf_property_destroy(prop);
+ scf_iter_destroy(iter);
+ return (NULL);
+}
+
+/*
+ * Reads the next value from the multi-value property using the
+ * scf_iter obtained by prop_walk_init, and places it in the buffer
+ * pointed to by fmri. Returns -1 on failure, 0 when the values are
+ * exhausted, and 1 when a value has been placed in the buffer.
+ */
+static int
+prop_walk_step(scf_iter_t *iter, char *fmri, size_t len)
+{
+ int r;
+ scf_value_t *val;
+
+ if ((val = scf_value_create(h)) == NULL)
+ scfdie();
+
+ r = scf_iter_next_value(iter, val);
+ if (r == 0)
+ goto out;
+ if (r == -1) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+ goto out;
+ }
+ if (scf_value_get_astring(val, fmri, len) <= 0) {
+ r = -1;
+ goto out;
+ }
+
+out:
+ scf_value_destroy(val);
+ return (r);
+}
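+
+/*
+ * The two helpers above are used together; a minimal sketch of the pattern
+ * (examine() stands in for whatever is done with each value):
+ *
+ *	if ((iter = prop_walk_init(pg, SCF_PROPERTY_ENTITIES)) != NULL) {
+ *		while (prop_walk_step(iter, fmri, len) > 0)
+ *			examine(fmri);
+ *		scf_iter_destroy(iter);
+ *	}
+ */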
+
+/*
+ * Determines if a file dependency is satisfied, taking into account
+ * whether it is an exclusion dependency or not. If we can't access
+ * the file, we err on the side of caution and assume the dependency
+ * isn't satisfied.
+ */
+static int
+file_has_potential(char *fmri, int exclude)
+{
+ const char *path;
+ struct stat st;
+
+ int good = exclude ? B_FALSE : B_TRUE;
+
+ if (scf_parse_file_fmri(fmri, NULL, &path) != 0)
+ return (good);
+
+ if (stat(path, &st) == 0)
+ return (good);
+
+ if (errno == EACCES) {
+ uu_warn(gettext("Unable to access \"%s\".\n"), path);
+ return (B_FALSE);
+ }
+
+ return (!good);
+}
+
+/*
+ * Determines if a dependency on a service instance is satisfiable.
+ * Returns 0 if not, 1 if it is, or 2 if it is an optional or exclude
+ * dependency and the instance only "weakly" satisfies it (i.e., it is
+ * disabled or in the maintenance state).
+ */
+static int
+inst_has_potential(scf_instance_t *inst, int enabled, int optional, int exclude)
+{
+ char state[MAX_SCF_STATE_STRING_SZ];
+
+ if (!enabled)
+ return ((optional || exclude) ? 2 : 0);
+
+ /*
+ * Normally we would return a positive value on failure, relying
+ * on startd to place the service in maintenance. But
+ * if we can't read a service's state, we have to assume it is
+ * out to lunch.
+ */
+ if (inst_get_state(inst, state, NULL, NULL) != 0)
+ return (0);
+
+ /*
+ * Optional dependencies which are offline always have a possibility of
+ * coming online.
+ */
+ if (optional && strcmp(state, SCF_STATE_STRING_OFFLINE) == 0)
+ return (2);
+
+ if (strcmp(state, SCF_STATE_STRING_MAINT) == 0) {
+ /*
+ * Enabled services in maintenance state satisfy
+ * optional-all dependencies.
+ */
+ return ((optional || exclude) ? 2 : 0);
+ }
+
+ /*
+ * We're enabled and not in maintenance.
+ */
+ if (exclude)
+ return (0);
+
+ if (strcmp(state, SCF_STATE_STRING_ONLINE) == 0 ||
+ strcmp(state, SCF_STATE_STRING_DEGRADED) == 0)
+ return (1);
+
+ return (has_potential(inst, B_FALSE));
+}
+
+/*
+ * Determines if a dependency on an fmri is satisfiable, handling the
+ * separate cases for file, service, and instance fmris. Returns false
+ * if not, or true if it is. Takes into account whether the dependency
+ * is an optional or exclusive one.
+ */
+static int
+fmri_has_potential(char *fmri, int isfile, int optional, int exclude,
+ int restarter)
+{
+ scf_instance_t *inst;
+ scf_service_t *svc;
+ scf_iter_t *iter;
+ int good = exclude ? B_FALSE : B_TRUE;
+ int enabled;
+ int r, result;
+ int optbad;
+
+ assert(!optional || !exclude);
+
+ if (isfile)
+ return (file_has_potential(fmri, exclude));
+
+ if ((inst = scf_instance_create(h)) == NULL ||
+ (svc = scf_service_create(h)) == NULL ||
+ (iter = scf_iter_create(h)) == NULL)
+ scfdie();
+
+ if (scf_handle_decode_fmri(h, fmri, NULL, NULL, inst, NULL, NULL,
+ SCF_DECODE_FMRI_EXACT) == 0) {
+ enabled = is_enabled(inst);
+ result =
+ (inst_has_potential(inst, enabled, optional, exclude) != 0);
+ goto out;
+ }
+
+ if (scf_handle_decode_fmri(h, fmri, NULL, svc, NULL, NULL, NULL,
+ SCF_DECODE_FMRI_EXACT) != 0) {
+ /*
+ * If we are checking a restarter dependency, a bad
+ * or nonexistent service will never be noticed.
+ */
+ result = restarter ? B_FALSE : good;
+ goto out;
+ }
+
+ if (scf_iter_service_instances(iter, svc) != 0) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+ result = good;
+ goto out;
+ }
+
+ optbad = 0;
+ for (;;) {
+ r = scf_iter_next_instance(iter, inst);
+ if (r == 0) {
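+ /*
+ * All instances examined: an exclusion group is satisfied
+ * (nothing above blocked it), an optional group is satisfied
+ * unless some instance was unsatisfiable, and a plain
+ * requirement on the service cannot be satisfied.
+ */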
+ result = exclude || (optional && !optbad);
+ goto out;
+ }
+ if (r == -1) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+ result = good;
+ goto out;
+ }
+
+ enabled = is_enabled(inst);
+ r = inst_has_potential(inst, enabled, optional, exclude);
+
+ /*
+ * Exclusion dependencies over services map to
+ * require-none for their instances.
+ */
+ if (exclude)
+ r = (r == 0);
+
+ if (r == 1) {
+ /*
+ * Remember, if this is an exclusion dependency
+ * (which means we are here because there
+ * exists an instance which wasn't satisfiable
+ * in that regard), good means bad.
+ */
+ result = good;
+ goto out;
+ }
+
+ if (optional && r == 0)
+ optbad = 1;
+ }
+
+out:
+ scf_instance_destroy(inst);
+ scf_service_destroy(svc);
+ scf_iter_destroy(iter);
+ return (result);
+}
+
+static int
+eval_require_any(scf_iter_t *iter, char *value, size_t value_sz, int isfile)
+{
+ int r, empty = B_TRUE;
+
+ for (;;) {
+ /*
+ * For reasons unknown, an empty require_any dependency
+ * group is considered by startd to be satisfied.
+ * This insanity fortunately doesn't extend to
+ * dependencies on services with no instances.
+ */
+ if ((r = prop_walk_step(iter, value, value_sz)) <= 0)
+ return ((r == 0 && empty) ? B_TRUE : r);
+ if (fmri_has_potential(value, isfile, B_FALSE, B_FALSE,
+ B_FALSE))
+ return (1);
+ empty = B_FALSE;
+ }
+}
+
+static int
+eval_all(scf_iter_t *iter, char *value, size_t value_sz,
+ int isfile, int optional, int exclude)
+{
+ int r;
+
+ for (;;) {
+ if ((r = prop_walk_step(iter, value, value_sz)) <= 0)
+ return ((r == 0) ? 1 : r);
+ if (!fmri_has_potential(value, isfile, optional, exclude,
+ B_FALSE))
+ return (0);
+ }
+}
+
+static int
+eval_require_all(scf_iter_t *iter, char *value, size_t value_sz, int isfile)
+{
+ return (eval_all(iter, value, value_sz, isfile, B_FALSE, B_FALSE));
+}
+
+static int
+eval_optional_all(scf_iter_t *iter, char *value, size_t value_sz, int isfile)
+{
+ return (eval_all(iter, value, value_sz, isfile, B_TRUE, B_FALSE));
+}
+
+static int
+eval_exclude_all(scf_iter_t *iter, char *value, size_t value_sz, int isfile)
+{
+ return (eval_all(iter, value, value_sz, isfile, B_FALSE, B_TRUE));
+}
+
+/*
+ * Examines the state and health of an instance's restarter and
+ * dependencies, and determines the impact of both on the instance's
+ * ability to be brought on line. A true return value indicates that
+ * instance appears to be a likely candidate for the online club.
+ * False indicates that there is no hope for the instance.
+ */
+int
+has_potential(scf_instance_t *inst, int restarter_only)
+{
+ scf_snapshot_t *snap;
+ scf_iter_t *iter, *viter = NULL;
+ scf_propertygroup_t *pg;
+ scf_property_t *prop;
+ scf_value_t *val;
+ char *type = NULL, *grouping = NULL;
+ char *value;
+ size_t value_sz;
+ int result = B_TRUE, r;
+ int isfile;
+
+ value_sz = scf_limit(SCF_LIMIT_MAX_VALUE_LENGTH);
+ if ((iter = scf_iter_create(h)) == NULL ||
+ (snap = scf_snapshot_create(h)) == NULL ||
+ (pg = scf_pg_create(h)) == NULL ||
+ (val = scf_value_create(h)) == NULL ||
+ (prop = scf_property_create(h)) == NULL ||
+ (value = malloc(value_sz)) == NULL)
+ scfdie();
+
+ /*
+ * First we check our restarter as an implicit dependency.
+ */
+ if (scf_instance_get_pg_composed(inst, NULL, SCF_PG_GENERAL, pg) != 0)
+ scfdie();
+
+ r = get_astring_prop(pg, SCF_PROPERTY_RESTARTER, prop, val, value,
+ value_sz);
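+ /*
+ * An absent restarter property means the instance is managed by the
+ * default restarter, svc.startd.
+ */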
+ if (r == -ENOENT) {
+ (void) strlcpy(value, SCF_SERVICE_STARTD, value_sz);
+ } else if (r < 0 || r > max_scf_fmri_sz) {
+ /*
+ * Normally we would return true and let the restarter
+ * tell our caller there is a problem by changing the
+ * instance's state, but that's not going to happen if
+ * the restarter is invalid.
+ */
+ result = B_FALSE;
+ goto out;
+ }
+
+ if (!fmri_has_potential(value, B_FALSE, B_FALSE, B_FALSE, B_TRUE)) {
+ result = B_FALSE;
+ goto out;
+ }
+
+ if (restarter_only)
+ goto out;
+
+ /*
+ * Now we check explicit dependencies.
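+ * We prefer the "running" snapshot, which is what the restarter itself
+ * evaluates; if it doesn't exist yet, we fall back to the instance's
+ * current properties.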
+ */
+ if (scf_instance_get_snapshot(inst, "running", snap) != 0) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+ scf_snapshot_destroy(snap);
+ snap = NULL;
+ }
+
+ if (scf_iter_instance_pgs_typed_composed(iter, inst, snap,
+ SCF_GROUP_DEPENDENCY) != 0) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+ goto out;
+ }
+
+ for (;;) {
+ r = scf_iter_next_pg(iter, pg);
+ if (r == 0)
+ break;
+ if (r == -1) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+ goto out;
+ }
+
+ if ((grouping = read_astring_prop(pg, val, prop,
+ SCF_PROPERTY_GROUPING)) == NULL)
+ goto out;
+
+ if ((type = read_astring_prop(pg, val, prop,
+ SCF_PROPERTY_TYPE)) == NULL)
+ goto out;
+
+ if (strcmp(type, "path") == 0) {
+ isfile = B_TRUE;
+ } else if (strcmp(type, "service") == 0) {
+ isfile = B_FALSE;
+ } else {
+ free(type);
+ goto out;
+ }
+ free(type);
+
+ if ((viter = prop_walk_init(pg, SCF_PROPERTY_ENTITIES)) == NULL)
+ goto out;
+
+ if (strcmp(grouping, SCF_DEP_REQUIRE_ALL) == 0) {
+ r = eval_require_all(viter, value, value_sz, isfile);
+ } else if (strcmp(grouping, SCF_DEP_REQUIRE_ANY) == 0) {
+ r = eval_require_any(viter, value, value_sz, isfile);
+ } else if (strcmp(grouping, SCF_DEP_EXCLUDE_ALL) == 0) {
+ r = eval_exclude_all(viter, value, value_sz, isfile);
+ } else if (strcmp(grouping, SCF_DEP_OPTIONAL_ALL) == 0) {
+ r = eval_optional_all(viter, value, value_sz, isfile);
+ } else {
+ scf_iter_destroy(viter);
+ free(grouping);
+ grouping = NULL;
+ goto out;
+ }
+
+ scf_iter_destroy(viter);
+ free(grouping);
+ grouping = NULL;
+
+ if (r == 0) {
+ result = B_FALSE;
+ goto out;
+ } else if (r == -1) {
+ goto out;
+ }
+ }
+
+out:
+ free(value);
+ scf_property_destroy(prop);
+ scf_value_destroy(val);
+ scf_pg_destroy(pg);
+ if (snap != NULL)
+ scf_snapshot_destroy(snap);
+ if (grouping != NULL)
+ free(grouping);
+ scf_iter_destroy(iter);
+ return (result);
+}
diff --git a/usr/src/cmd/svc/svccfg/Makefile b/usr/src/cmd/svc/svccfg/Makefile
new file mode 100644
index 0000000000..69e838e9b8
--- /dev/null
+++ b/usr/src/cmd/svc/svccfg/Makefile
@@ -0,0 +1,159 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+
+MYPROG = svccfg
+PROG = $(MYPROG)
+
+SRCS = svccfg_main.c \
+ svccfg_engine.c \
+ svccfg_internal.c \
+ svccfg_libscf.c \
+ svccfg_xml.c \
+ svccfg_help.c
+
+LNTS = $(SRCS:%.c=%.ln) \
+ manifest_hash.ln
+
+MYOBJS = $(SRCS:%.c=%.o) \
+ svccfg_grammar.o \
+ svccfg_lex.o \
+ manifest_hash.o
+OBJS = $(MYOBJS)
+
+POFILES = $(SRCS:%.c=%.po) \
+ svccfg_grammar.po \
+ svccfg_lex.po \
+ ../common/manifest_hash.po
+
+include ../../Makefile.cmd
+include ../Makefile.ctf
+
+POFILE = $(PROG)_all.po
+
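+# NATIVE_BUILD expands to a pound sign by default, commenting out the
+# native-specific overrides below.  The "native" target reinvokes make with
+# NATIVE_BUILD set to the empty string, enabling those overrides to build
+# svccfg-native against the native libuutil and libscf.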
+NATIVE_BUILD=$(POUND_SIGN)
+$(NATIVE_BUILD)NOT_NATIVE=$(POUND_SIGN)
+
+$(NATIVE_BUILD)PROG = $(MYPROG:%=%-native)
+$(NATIVE_BUILD)OBJS = $(MYOBJS:%.o=%-native.o)
+
+MYCPPFLAGS = -I ../common -I/usr/include/libxml2
+CPPFLAGS += $(MYCPPFLAGS)
+
+LFLAGS = -t
+YFLAGS = -d
+
+CLOBBERFILES += svccfg_lex.c svccfg_grammar.c svccfg_grammar.h \
+ $(MYPROG:%=%-native)
+
+LAZYLIBS = -z lazyload -lxml2 -z nolazyload
+SVCCFG_EXTRA_LIBS = $(LAZYLIBS) -lscf -ll -luutil -lumem -lmd5
+$(NOT_NATIVE)SVCCFG_EXTRA_LIBS += -ltecla
+
+LIBSCF = $(SRC)/lib/libscf
+LIBTECLA = $(SRC)/lib/libtecla # just for the header
+LIBUUTIL = $(SRC)/lib/libuutil
+
+debug := COPTFLAG = -g
+
+lint := LINTFLAGS = -mux
+lint := SVCCFG_EXTRA_LIBS = -lscf -ll -luutil -lumem -lmd5
+
+LDLIBS += $(SVCCFG_EXTRA_LIBS)
+
+$(NATIVE_BUILD)CC = $(NATIVECC)
+$(NATIVE_BUILD)LD = $(NATIVELD)
+$(NATIVE_BUILD)CFLAGS = $(NATIVE_CFLAGS)
+$(NATIVE_BUILD)CPPFLAGS = \
+ -DNATIVE_BUILD \
+ $(MYCPPFLAGS) \
+ -I$(LIBSCF)/inc \
+ -I$(LIBTECLA) \
+ -I$(LIBUUTIL)/common
+$(NATIVE_BUILD)LDFLAGS =
+$(NATIVE_BUILD)LDLIBS = \
+ -L$(LIBUUTIL)/native -R $(LIBUUTIL)/native \
+ -L$(LIBSCF)/native -R $(LIBSCF)/native \
+ $(SVCCFG_EXTRA_LIBS) -ldoor
+
+svccfg_lex.o svccfg_grammar.o := CCVERBOSE =
+
+svccfg_help.po := XGETFLAGS = -a
+
+.KEEP_STATE:
+.PARALLEL: $(OBJS) $(LNTS)
+
+all debug: $(PROG)
+
+native: FRC
+ @cd $(LIBUUTIL)/native; pwd; $(MAKE) $(MFLAGS) install
+ @cd $(LIBSCF)/native; pwd; $(MAKE) $(MFLAGS) install
+ @NATIVE_BUILD= $(MAKE) $(MFLAGS) all
+
+$(PROG): $(OBJS)
+ $(LINK.c) -o $@ $(OBJS) $(LDLIBS) $(CTFMERGE_HOOK)
+ $(POST_PROCESS)
+
+$(POFILE): $(POFILES)
+ cat $(POFILES) > $(POFILE)
+
+install: all $(ROOTUSRSBINPROG)
+
+svccfg_lex.c: svccfg.l svccfg_grammar.h
+ $(LEX) $(LFLAGS) svccfg.l > $@
+
+svccfg_help.o: svccfg_grammar.h
+svccfg_help-native.o: svccfg_grammar.h
+
+svccfg_grammar.h svccfg_grammar.c: svccfg.y
+ $(YACC) $(YFLAGS) svccfg.y
+ @$(MV) y.tab.h svccfg_grammar.h
+ @$(MV) y.tab.c svccfg_grammar.c
+
+clean: FRC
+ $(RM) $(MYOBJS) $(MYOBJS:%.o=%-native.o) $(LNTS)
+
+lint: $(LNTS)
+ $(LINT.c) $(LINTFLAGS) $(LNTS) $(LDLIBS)
+
+%-native.o: %.c
+ $(COMPILE.c) -o $@ $< $(CTFCONVERT_HOOK)
+ $(POST_PROCESS_O)
+
+%-native.o: ../common/%.c
+ $(COMPILE.c) -o $@ $< $(CTFCONVERT_HOOK)
+ $(POST_PROCESS_O)
+
+%.o: ../common/%.c
+ $(COMPILE.c) $(OUTPUT_OPTION) $< $(CTFCONVERT_HOOK)
+ $(POST_PROCESS_O)
+
+%.ln: ../common/%.c
+ $(LINT.c) $(OUTPUT_OPTION) -c $<
+
+include ../../Makefile.targ
+
+FRC:
diff --git a/usr/src/cmd/svc/svccfg/svccfg.h b/usr/src/cmd/svc/svccfg/svccfg.h
new file mode 100644
index 0000000000..2ed85ecf66
--- /dev/null
+++ b/usr/src/cmd/svc/svccfg/svccfg.h
@@ -0,0 +1,362 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _CMD_SVCCFG_H
+#define _CMD_SVCCFG_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+
+#include <libxml/tree.h>
+
+#include <libscf.h>
+#include <libtecla.h>
+#include <libuutil.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Command scope bits for command tab completion */
+#define CS_SCOPE 0x01
+#define CS_SVC 0x02
+#define CS_INST 0x04
+#define CS_SNAP 0x08
+#define CS_GLOBAL 0x0f
+
+/* Flags for lscf_bundle_import() & co. */
+#define SCI_NOREFRESH 0x01 /* Don't refresh instances */
+#define SCI_GENERALLAST 0x04 /* Add general property group last */
+#define SCI_NOENABLED 0x08 /* Don't import general/enabled. */
+#define SCI_FRESH 0x10 /* Freshly imported service */
+#define SCI_FORCE 0x20 /* Override-import. */
+#define SCI_KEEP 0x40 /* Don't delete when SCI_FORCEing */
+
+#ifdef lint
+extern int yyerror(const char *);
+extern int yyparse(void);
+#endif /* lint */
+
+extern int lex_lineno;
+
+#define MANIFEST_DTD_PATH "/usr/share/lib/xml/dtd/service_bundle.dtd.1"
+/*
+ * The following list must be kept in the same order as that of
+ * lxml_prop_types[].
+ */
+typedef enum element {
+ SC_ASTRING = 0x0, SC_BOOLEAN, SC_COMMON_NAME, SC_COUNT,
+ SC_INSTANCE_CREATE_DEFAULT, SC_DEPENDENCY, SC_DEPENDENT, SC_DESCRIPTION,
+ SC_DOC_LINK, SC_DOCUMENTATION, SC_ENABLED, SC_EXEC_METHOD, SC_FMRI,
+ SC_HOST, SC_HOSTNAME, SC_INSTANCE, SC_INTEGER, SC_LOCTEXT, SC_MANPAGE,
+ SC_METHOD_CONTEXT, SC_METHOD_CREDENTIAL, SC_METHOD_PROFILE,
+ SC_METHOD_ENVIRONMENT, SC_METHOD_ENVVAR, SC_NET_ADDR_V4, SC_NET_ADDR_V6,
+ SC_OPAQUE, SC_PROPERTY, SC_PROPERTY_GROUP, SC_PROPVAL, SC_RESTARTER,
+ SC_SERVICE, SC_SERVICE_BUNDLE, SC_SERVICE_FMRI, SC_INSTANCE_SINGLE,
+ SC_STABILITY, SC_TEMPLATE, SC_TIME, SC_URI, SC_USTRING, SC_VALUE_NODE,
+ SC_XI_FALLBACK, SC_XI_INCLUDE
+} element_t;
+
+typedef enum bundle_type {
+ SVCCFG_UNKNOWN_BUNDLE, SVCCFG_MANIFEST, SVCCFG_PROFILE, SVCCFG_ARCHIVE
+} bundle_type_t;
+
+typedef struct bundle {
+ uu_list_t *sc_bundle_services;
+
+ xmlChar *sc_bundle_name;
+ bundle_type_t sc_bundle_type;
+} bundle_t;
+
+typedef enum service_type {
+ SVCCFG_UNKNOWN_SERVICE = 0x0, SVCCFG_SERVICE, SVCCFG_RESTARTER,
+ SVCCFG_MILESTONE
+} service_type_t;
+
+typedef enum entity_type {
+ SVCCFG_SERVICE_OBJECT = 0x0, SVCCFG_INSTANCE_OBJECT,
+ SVCCFG_TEMPLATE_OBJECT
+} entity_type_t;
+
+enum import_state {
+ IMPORT_NONE = 0,
+ IMPORT_PREVIOUS,
+ IMPORT_PROP_BEGUN,
+ IMPORT_PROP_DONE,
+ IMPORT_COMPLETE,
+ IMPORT_REFRESHED
+};
+
+typedef struct entity {
+ uu_list_node_t sc_node;
+ entity_type_t sc_etype;
+
+ /* Common fields to all entities. */
+ const char *sc_name;
+ const char *sc_fmri;
+ uu_list_t *sc_pgroups;
+ uu_list_t *sc_dependents;
+ struct entity *sc_parent;
+ enum import_state sc_import_state;
+ int sc_seen;
+
+ union {
+ struct {
+ uu_list_t *sc_service_instances;
+ service_type_t sc_service_type;
+ uint_t sc_service_version;
+
+ struct entity *sc_service_template;
+ } sc_service;
+ struct {
+ uint_t sc_instance_dummy;
+ } sc_instance;
+ struct {
+ uint_t sc_template_dummy;
+ } sc_template;
+ } sc_u;
+} entity_t;
+
+typedef struct pgroup {
+ uu_list_node_t sc_node;
+ uu_list_t *sc_pgroup_props;
+
+ const char *sc_pgroup_name;
+ const char *sc_pgroup_type;
+ uint_t sc_pgroup_flags;
+ struct entity *sc_parent;
+
+ int sc_pgroup_delete;
+ int sc_pgroup_override;
+ const char *sc_pgroup_fmri; /* Used for dependents */
+
+ int sc_pgroup_seen;
+} pgroup_t;
+
+typedef struct property {
+ uu_list_node_t sc_node;
+ uu_list_t *sc_property_values;
+
+ char *sc_property_name;
+ scf_type_t sc_value_type;
+
+ int sc_property_override;
+ int sc_seen;
+} property_t;
+
+typedef struct value {
+ uu_list_node_t sc_node;
+
+ scf_type_t sc_type;
+
+ void (*sc_free)(struct value *);
+
+ union {
+ uint64_t sc_count;
+ int64_t sc_integer;
+ char *sc_string;
+ } sc_u;
+} value_t;
+
+typedef struct scf_callback {
+ scf_handle_t *sc_handle;
+ void *sc_parent; /* immediate parent: scope, service, */
+ /* instance, property group, property */
+ scf_transaction_t *sc_trans;
+ int sc_service; /* True if sc_parent is a service. */
+ uint_t sc_flags;
+ pgroup_t *sc_general; /* pointer to general property group */
+
+ const char *sc_source_fmri;
+ const char *sc_target_fmri;
+ int sc_err;
+} scf_callback_t;
+
+#ifndef NDEBUG
+#define bad_error(func, err) { \
+ (void) fprintf(stderr, "%s:%d: %s() failed with unexpected " \
+ "error %d. Aborting.\n", __FILE__, __LINE__, (func), (err)); \
+ abort(); \
+}
+#else
+#define bad_error(func, err) abort()
+#endif
+
+#define SC_CMD_LINE 0x0
+#define SC_CMD_FILE 0x1
+#define SC_CMD_EOF 0x2
+#define SC_CMD_IACTIVE 0x4
+#define SC_CMD_DONT_EXIT 0x8
+
+typedef struct engine_state {
+ uint_t sc_cmd_flags;
+ FILE *sc_cmd_file;
+ uint_t sc_cmd_lineno;
+ const char *sc_cmd_filename;
+ char *sc_cmd_buf;
+ size_t sc_cmd_bufsz;
+ off_t sc_cmd_bufoff;
+ GetLine *sc_gl;
+
+ pid_t sc_repo_pid;
+ const char *sc_repo_filename;
+ const char *sc_repo_doordir;
+ const char *sc_repo_doorname;
+ const char *sc_repo_server;
+} engine_state_t;
+
+extern engine_state_t *est;
+
+typedef struct string_list {
+ uu_list_node_t node;
+ char *str;
+} string_list_t;
+
+extern uu_list_pool_t *string_pool;
+
+struct help_message {
+ int token;
+ const char *message;
+};
+
+extern struct help_message help_messages[];
+
+extern scf_handle_t *g_hndl; /* global repcached connection handle */
+extern int g_exitcode;
+extern int g_verbose;
+
+extern ssize_t max_scf_fmri_len;
+extern ssize_t max_scf_name_len;
+extern ssize_t max_scf_value_len;
+extern ssize_t max_scf_pg_type_len;
+
+/* Common strings */
+extern const char * const name_attr;
+extern const char * const type_attr;
+extern const char * const value_attr;
+extern const char * const enabled_attr;
+extern const char * const scf_pg_general;
+extern const char * const scf_group_framework;
+extern const char * const true;
+extern const char * const false;
+
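+/*
+ * In libuutil, inserting a node before NULL places it at the end of the list
+ * and inserting after NULL places it at the beginning, hence these aliases.
+ */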
+#define uu_list_append(list, elem) uu_list_insert_before(list, NULL, elem)
+#define uu_list_prepend(list, elem) uu_list_insert_after(list, NULL, elem)
+
+void *safe_malloc(size_t);
+char *safe_strdup(const char *);
+void warn(const char *, ...);
+void synerr(int);
+void semerr(const char *, ...);
+
+void internal_init(void);
+void internal_dump(bundle_t *);
+
+int value_cmp(const void *, const void *, void *);
+
+bundle_t *internal_bundle_new(void);
+void internal_bundle_free(bundle_t *);
+entity_t *internal_service_new(const char *);
+void internal_service_free(entity_t *);
+entity_t *internal_instance_new(const char *);
+void internal_instance_free(entity_t *);
+entity_t *internal_template_new(void);
+pgroup_t *internal_pgroup_new(void);
+void internal_pgroup_free(pgroup_t *);
+pgroup_t *internal_pgroup_find(entity_t *, const char *, const char *);
+pgroup_t *internal_dependent_find(entity_t *, const char *);
+pgroup_t *internal_pgroup_find_or_create(entity_t *, const char *,
+ const char *);
+property_t *internal_property_new(void);
+void internal_property_free(property_t *);
+property_t *internal_property_find(pgroup_t *, const char *);
+property_t *internal_property_create(const char *, scf_type_t, uint_t, ...);
+value_t *internal_value_new(void);
+
+int internal_attach_service(bundle_t *, entity_t *);
+int internal_attach_entity(entity_t *, entity_t *);
+int internal_attach_pgroup(entity_t *, pgroup_t *);
+int internal_attach_dependent(entity_t *, pgroup_t *);
+int internal_attach_property(pgroup_t *, property_t *);
+void internal_attach_value(property_t *, value_t *);
+
+int load_init(void);
+void load_fini(void);
+int load_pg_attrs(const scf_propertygroup_t *, pgroup_t **);
+int load_pg(const scf_propertygroup_t *, pgroup_t **, const char *,
+ const char *);
+int prop_equal(property_t *, property_t *, const char *, const char *, int);
+int pg_attrs_equal(pgroup_t *, pgroup_t *, const char *, int);
+int pg_equal(pgroup_t *, pgroup_t *);
+
+void lscf_cleanup(void);
+void lscf_prep_hndl(void);
+void lscf_init(void);
+int lscf_bundle_import(bundle_t *, const char *, uint_t);
+int lscf_bundle_apply(bundle_t *);
+void lscf_delete(const char *, int);
+void lscf_list(const char *);
+void lscf_select(const char *);
+void lscf_unselect();
+void lscf_get_selection_str(char *, size_t);
+void lscf_add(const char *);
+void lscf_listpg(const char *);
+void lscf_addpg(const char *, const char *, const char *);
+void lscf_delpg(char *);
+void lscf_listprop(const char *);
+void lscf_addprop(char *, const char *, const uu_list_t *);
+void lscf_delprop(char *);
+void lscf_listsnap();
+void lscf_selectsnap(const char *);
+void lscf_revert(const char *);
+char *filename_to_propname(const char *);
+int lscf_retrieve_hash(const char *, unsigned char *);
+int lscf_store_hash(const char *, unsigned char *);
+CPL_MATCH_FN(complete_select);
+CPL_MATCH_FN(complete_command);
+
+int lxml_init(void);
+int lxml_get_bundle_file(bundle_t *, const char *, int);
+
+void engine_init(void);
+int engine_exec_cmd(void);
+int engine_exec(char *);
+int add_cmd_matches(WordCompletion *, const char *, int, uint32_t);
+int engine_interp(void);
+int engine_source(const char *, boolean_t);
+int engine_import(uu_list_t *);
+void help(int);
+
+int engine_cmd_getc(engine_state_t *);
+int engine_cmd_ungetc(engine_state_t *, char);
+void engine_cmd_nputs(engine_state_t *, char *, size_t);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _CMD_SVCCFG_H */
diff --git a/usr/src/cmd/svc/svccfg/svccfg.l b/usr/src/cmd/svc/svccfg/svccfg.l
new file mode 100644
index 0000000000..6d06f48a8b
--- /dev/null
+++ b/usr/src/cmd/svc/svccfg/svccfg.l
@@ -0,0 +1,229 @@
+%{
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#pragma error_messages(off, E_BLOCK_DECL_UNUSED)
+#pragma error_messages(off, E_EQUALITY_NOT_ASSIGNMENT)
+#pragma error_messages(off, E_FUNC_RET_MAYBE_IGNORED2)
+#pragma error_messages(off, E_STMT_NOT_REACHED)
+
+#include <libintl.h>
+#include <string.h>
+
+#include "svccfg.h"
+#include "svccfg_grammar.h"
+
+/*
+ * We need to undefine lex's input, unput, and output macros so that references
+ * to these call the functions we provide at the end of this source file,
+ * instead of the default versions based on libc's stdio.
+ */
+#ifdef input
+#undef input
+#endif
+
+#ifdef unput
+#undef unput
+#endif
+
+#ifdef output
+#undef output
+#endif
+
+static int input(void);
+static void unput(int);
+static void output(int);
+
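+/*
+ * Parenthesis nesting depth.  engine_cmd_getc() consults this to decide
+ * whether an interactive command (e.g. a multi-line setprop value list) still
+ * needs more input.
+ */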
+int parens = 0;
+
+extern int yyerror(const char *);
+
+%}
+
+/*
+ * Since command tokens are only valid at the beginning of the command (or
+ * after help), we'll only return them in the INITIAL state, and report them
+ * as SCV_WORDs afterwards.
+ */
+%Start WORD
+
+%%
+
+#.*$ ; /* comments */
+
+<INITIAL>validate { BEGIN WORD; return (SCC_VALIDATE); }
+<INITIAL>import { BEGIN WORD; return (SCC_IMPORT); }
+<INITIAL>export { BEGIN WORD; return (SCC_EXPORT); }
+<INITIAL>archive { BEGIN WORD; return (SCC_ARCHIVE); }
+<INITIAL>apply { BEGIN WORD; return (SCC_APPLY); }
+<INITIAL>extract { BEGIN WORD; return (SCC_EXTRACT); }
+<INITIAL>repository { BEGIN WORD; return (SCC_REPOSITORY); }
+<INITIAL>inventory { BEGIN WORD; return (SCC_INVENTORY); }
+<INITIAL>set { BEGIN WORD; return (SCC_SET); }
+<INITIAL>end { BEGIN WORD; return (SCC_END); }
+<INITIAL>exit { BEGIN WORD; return (SCC_END); }
+<INITIAL>quit { BEGIN WORD; return (SCC_END); }
+<INITIAL>help { return (SCC_HELP); }
+
+<INITIAL>list { BEGIN WORD; return (SCC_LIST); }
+<INITIAL>add { BEGIN WORD; return (SCC_ADD); }
+<INITIAL>delete { BEGIN WORD; return (SCC_DELETE); }
+<INITIAL>select { BEGIN WORD; return (SCC_SELECT); }
+<INITIAL>unselect { BEGIN WORD; return (SCC_UNSELECT); }
+
+<INITIAL>listpg { BEGIN WORD; return (SCC_LISTPG); }
+<INITIAL>addpg { BEGIN WORD; return (SCC_ADDPG); }
+<INITIAL>delpg { BEGIN WORD; return (SCC_DELPG); }
+<INITIAL>listprop { BEGIN WORD; return (SCC_LISTPROP); }
+<INITIAL>setprop { BEGIN WORD; return (SCC_SETPROP); }
+<INITIAL>delprop { BEGIN WORD; return (SCC_DELPROP); }
+<INITIAL>editprop { BEGIN WORD; return (SCC_EDITPROP); }
+<INITIAL>addpropvalue { BEGIN WORD; return (SCC_ADDPROPVALUE); }
+<INITIAL>delpropvalue { BEGIN WORD; return (SCC_DELPROPVALUE); }
+<INITIAL>setenv { BEGIN WORD; return (SCC_SETENV); }
+<INITIAL>unsetenv { BEGIN WORD; return (SCC_UNSETENV); }
+
+<INITIAL>listsnap { BEGIN WORD; return (SCC_LISTSNAP); }
+<INITIAL>selectsnap { BEGIN WORD; return (SCC_SELECTSNAP); }
+<INITIAL>revert { BEGIN WORD; return (SCC_REVERT); }
+
+[^ \t\n">=()]+ {
+ if ((yylval.str = strdup(yytext)) == NULL) {
+ yyerror(gettext("Out of memory"));
+ exit(UU_EXIT_FATAL);
+ }
+
+ return SCV_WORD;
+ }
+
+\"([^"\\]|\\.)*\" {
+ /*
+ * A double-quoted string starts at a
+ * double-quote, contains characters other
+ * than double-quote and backslash as well
+ * as backslash-escaped characters, and
+ * ends with a double-quote.
+ */
+
+ char *str, *cp;
+ int shift;
+
+ if ((str = strdup(yytext)) == NULL) {
+ yyerror(gettext("Out of memory"));
+ exit(UU_EXIT_FATAL);
+ }
+
+ /* Strip out the backslashes. */
+ for (cp = str, shift = 0; *cp != '\0'; ++cp) {
+ if (*cp == '\\') {
+ ++cp;
+
+ /*
+ * This can't be the terminating
+ * null byte, because the string
+ * always ends with a double-quote.
+ */
+
+ ++shift;
+ *(cp - shift) = *cp;
+ } else if (shift != 0)
+ *(cp - shift) = *cp;
+ }
+
+ /* Nullify everything after trailing quote */
+ *(cp - shift) = '\0';
+
+ yylval.str = str;
+ return SCV_STRING;
+ }
+
+\n {
+ est->sc_cmd_lineno++;
+ BEGIN INITIAL;
+ return (SCS_NEWLINE);
+ }
+
+[ \t]+ ;
+
+">" { return SCS_REDIRECT; }
+"=" { return SCS_EQUALS; }
+"(" { ++parens; return SCS_LPAREN; }
+")" { --parens; return SCS_RPAREN; }
+
+. {
+ uu_die(gettext("unrecognized character %s\n"),
+ yytext);
+ }
+
+%%
+
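+/*
+ * Syntax errors are reported through the grammar's error productions
+ * (synerr()/semerr()), so yyerror() itself does nothing.
+ */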
+int
+yyerror(const char *s)
+{
+ return (0);
+}
+
+static int
+input(void)
+{
+ static int saw_eof = 0;
+
+ int c = engine_cmd_getc(est);
+
+ /*
+ * To ensure input is terminated, slip in a newline on EOF.
+ */
+ if (c == EOF) {
+ if (saw_eof)
+ return (0);
+
+ saw_eof = 1;
+ return ('\n');
+ } else
+ saw_eof = 0;
+
+ if (c == '\n')
+ yylineno++;
+
+ return (c);
+}
+
+static void
+unput(int c)
+{
+ if (c == '\n')
+ yylineno--;
+
+ (void) engine_cmd_ungetc(est, c == 0 ? EOF : c);
+}
+
+static void
+output(int c)
+{
+ char ch = c;
+ engine_cmd_nputs(est, &ch, sizeof (ch));
+}
diff --git a/usr/src/cmd/svc/svccfg/svccfg.y b/usr/src/cmd/svc/svccfg/svccfg.y
new file mode 100644
index 0000000000..364a4cd4a9
--- /dev/null
+++ b/usr/src/cmd/svc/svccfg/svccfg.y
@@ -0,0 +1,452 @@
+%{
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <libintl.h>
+
+#include "svccfg.h"
+
+uu_list_pool_t *string_pool;
+
+%}
+
+%union {
+ int tok;
+ char *str;
+ uu_list_t *uul;
+}
+
+%start commands
+
+%token SCC_VALIDATE SCC_IMPORT SCC_EXPORT SCC_ARCHIVE SCC_APPLY SCC_EXTRACT
+%token SCC_REPOSITORY SCC_INVENTORY SCC_SET SCC_END SCC_HELP
+%token SCC_LIST SCC_ADD SCC_DELETE SCC_SELECT SCC_UNSELECT
+%token SCC_LISTPG SCC_ADDPG SCC_DELPG
+%token SCC_LISTPROP SCC_SETPROP SCC_DELPROP SCC_EDITPROP
+%token SCC_ADDPROPVALUE SCC_DELPROPVALUE SCC_SETENV SCC_UNSETENV
+%token SCC_LISTSNAP SCC_SELECTSNAP SCC_REVERT
+%token SCS_REDIRECT SCS_NEWLINE SCS_EQUALS SCS_LPAREN SCS_RPAREN
+%token SCV_WORD SCV_STRING
+
+%type <tok> command_token
+%type <str> SCV_WORD SCV_STRING
+%type <str> string opt_word
+%type <uul> string_list multiline_string_list
+
+%%
+
+/*
+ * We could hoist the command terminator for all the rules up here, but then
+ * the parser would reduce before shifting the terminator, which would require
+ * an additional error rule (per command) to catch extra arguments.
+ * This way requires all input to be terminated, which is done by input() in
+ * svccfg.l.
+ */
+
+commands : command
+ | commands command
+
+command : terminator
+ | validate_cmd
+ | import_cmd
+ | export_cmd
+ | archive_cmd
+ | apply_cmd
+ | extract_cmd
+ | repository_cmd
+ | inventory_cmd
+ | set_cmd
+ | end_cmd
+ | help_cmd
+ | list_cmd
+ | add_cmd
+ | delete_cmd
+ | select_cmd
+ | unselect_cmd
+ | listpg_cmd
+ | addpg_cmd
+ | delpg_cmd
+ | listprop_cmd
+ | setprop_cmd
+ | delprop_cmd
+ | editprop_cmd
+ | addpropvalue_cmd
+ | delpropvalue_cmd
+ | setenv_cmd
+ | unsetenv_cmd
+ | listsnap_cmd
+ | selectsnap_cmd
+ | revert_cmd
+ | unknown_cmd
+ | error terminator { semerr(gettext("Syntax error.\n")); }
+
+unknown_cmd : SCV_WORD terminator
+ {
+ semerr(gettext("Unknown command \"%s\".\n"), $1);
+ free($1);
+ }
+ | SCV_WORD string_list terminator
+ {
+ string_list_t *slp;
+ void *cookie = NULL;
+
+ semerr(gettext("Unknown command \"%s\".\n"), $1);
+
+ while ((slp = uu_list_teardown($2, &cookie)) != NULL) {
+ free(slp->str);
+ free(slp);
+ }
+
+ uu_list_destroy($2);
+ free($1);
+ }
+
+validate_cmd : SCC_VALIDATE SCV_WORD terminator
+ {
+ bundle_t *b = internal_bundle_new();
+ lxml_get_bundle_file(b, $2, 0);
+ (void) internal_bundle_free(b);
+ free($2);
+ }
+ | SCC_VALIDATE error terminator { synerr(SCC_VALIDATE); }
+
+import_cmd : SCC_IMPORT string_list terminator
+ {
+ string_list_t *slp;
+ void *cookie = NULL;
+
+ if (engine_import($2) == -2)
+ synerr(SCC_IMPORT);
+
+ while ((slp = uu_list_teardown($2, &cookie)) != NULL) {
+ free(slp->str);
+ free(slp);
+ }
+
+ uu_list_destroy($2);
+ }
+ | SCC_IMPORT error terminator { synerr(SCC_IMPORT); }
+
+export_cmd : SCC_EXPORT SCV_WORD terminator
+ {
+ lscf_service_export($2, NULL);
+ free($2);
+ }
+ | SCC_EXPORT SCV_WORD SCS_REDIRECT SCV_WORD terminator
+ {
+ lscf_service_export($2, $4);
+ free($2);
+ free($4);
+ }
+ | SCC_EXPORT error terminator { synerr(SCC_EXPORT); }
+
+archive_cmd : SCC_ARCHIVE terminator
+ {
+ lscf_archive(NULL);
+ }
+ | SCC_ARCHIVE SCS_REDIRECT SCV_WORD terminator
+ {
+ lscf_archive($3);
+ free($3);
+ }
+ | SCC_ARCHIVE error terminator { synerr(SCC_ARCHIVE); }
+
+apply_cmd : SCC_APPLY SCV_WORD terminator
+ { (void) engine_apply($2); free($2); }
+ | SCC_APPLY error terminator { synerr(SCC_APPLY); }
+
+extract_cmd: SCC_EXTRACT terminator { lscf_profile_extract(NULL); }
+ | SCC_EXTRACT SCS_REDIRECT SCV_WORD terminator
+ {
+ lscf_profile_extract($3);
+ free($3);
+ }
+ | SCC_EXTRACT error terminator { synerr(SCC_EXTRACT); }
+
+repository_cmd : SCC_REPOSITORY SCV_WORD terminator
+ {
+ lscf_set_repository($2);
+ free($2);
+ }
+ | SCC_REPOSITORY error terminator { synerr(SCC_REPOSITORY); }
+
+inventory_cmd : SCC_INVENTORY SCV_WORD terminator
+ { lxml_inventory($2); free($2); }
+ | SCC_INVENTORY error terminator { synerr(SCC_INVENTORY); }
+
+set_cmd : SCC_SET string_list terminator
+ {
+ string_list_t *slp;
+ void *cookie = NULL;
+
+ (void) engine_set($2);
+
+ while ((slp = uu_list_teardown($2, &cookie)) != NULL) {
+ free(slp->str);
+ free(slp);
+ }
+
+ uu_list_destroy($2);
+ }
+ | SCC_SET error terminator { synerr(SCC_SET); }
+
+end_cmd : SCC_END terminator { exit(0); }
+ | SCC_END error terminator { synerr(SCC_END); }
+
+help_cmd : SCC_HELP terminator { help(0); }
+ | SCC_HELP command_token terminator { help($2); }
+ | SCC_HELP error terminator { synerr(SCC_HELP); }
+
+list_cmd : SCC_LIST opt_word terminator { lscf_list($2); free($2); }
+ | SCC_LIST error terminator { synerr(SCC_LIST); }
+
+add_cmd : SCC_ADD SCV_WORD terminator { lscf_add($2); free($2); }
+ | SCC_ADD error terminator { synerr(SCC_ADD); }
+
+delete_cmd : SCC_DELETE SCV_WORD terminator
+ { lscf_delete($2, 0); free($2); }
+ | SCC_DELETE SCV_WORD SCV_WORD terminator
+ {
+ if (strcmp($2, "-f") == 0) {
+ lscf_delete($3, 1);
+ free($2);
+ free($3);
+ } else {
+ synerr(SCC_DELETE);
+ }
+ }
+ | SCC_DELETE error terminator { synerr(SCC_DELETE); }
+
+select_cmd : SCC_SELECT SCV_WORD terminator { lscf_select($2); free($2); }
+ | SCC_SELECT error terminator { synerr(SCC_SELECT); }
+
+unselect_cmd : SCC_UNSELECT terminator { lscf_unselect(); }
+ | SCC_UNSELECT error terminator { synerr(SCC_UNSELECT); }
+
+listpg_cmd : SCC_LISTPG opt_word terminator
+ { lscf_listpg($2); free($2); }
+ | SCC_LISTPG error terminator { synerr(SCC_LISTPG); }
+
+addpg_cmd : SCC_ADDPG SCV_WORD SCV_WORD opt_word terminator
+ {
+ (void) lscf_addpg($2, $3, $4);
+ free($2);
+ free($3);
+ free($4);
+ }
+ | SCC_ADDPG error terminator { synerr(SCC_ADDPG); }
+
+delpg_cmd : SCC_DELPG SCV_WORD terminator
+ { lscf_delpg($2); free($2); }
+ | SCC_DELPG error terminator { synerr(SCC_DELPG); }
+
+listprop_cmd : SCC_LISTPROP opt_word terminator
+ { lscf_listprop($2); free($2); }
+ | SCC_LISTPROP error terminator { synerr(SCC_LISTPROP); }
+
+setprop_cmd : SCC_SETPROP SCV_WORD SCS_EQUALS string terminator
+ {
+ lscf_setprop($2, NULL, $4, NULL);
+ free($2);
+ free($4);
+ }
+ | SCC_SETPROP SCV_WORD SCS_EQUALS SCV_WORD string terminator
+ {
+ (void) lscf_setprop($2, $4, $5, NULL);
+ free($2);
+ free($4);
+ free($5);
+ }
+ | SCC_SETPROP SCV_WORD SCS_EQUALS opt_word SCS_LPAREN
+ multiline_string_list SCS_RPAREN terminator
+ {
+ string_list_t *slp;
+ void *cookie = NULL;
+
+ (void) lscf_setprop($2, $4, NULL, $6);
+
+ free($2);
+ free($4);
+
+ while ((slp = uu_list_teardown($6, &cookie)) != NULL) {
+ free(slp->str);
+ free(slp);
+ }
+
+ uu_list_destroy($6);
+ }
+ | SCC_SETPROP error terminator { synerr(SCC_SETPROP); }
+ | SCC_SETPROP error { synerr(SCC_SETPROP); }
+
+delprop_cmd : SCC_DELPROP SCV_WORD terminator
+ { lscf_delprop($2); free($2); }
+ | SCC_DELPROP error terminator { synerr(SCC_DELPROP); }
+
+editprop_cmd : SCC_EDITPROP terminator { lscf_editprop(); }
+ | SCC_EDITPROP error terminator { synerr(SCC_EDITPROP); }
+
+addpropvalue_cmd : SCC_ADDPROPVALUE SCV_WORD string terminator
+ {
+ lscf_addpropvalue($2, NULL, $3);
+ free($2);
+ free($3);
+ }
+ | SCC_ADDPROPVALUE SCV_WORD string string terminator
+ {
+ (void) lscf_addpropvalue($2, $3, $4);
+ free($2);
+ free($3);
+ free($4);
+ }
+ | SCC_ADDPROPVALUE error terminator { synerr(SCC_ADDPROPVALUE); }
+
+delpropvalue_cmd : SCC_DELPROPVALUE SCV_WORD string terminator
+ {
+ lscf_delpropvalue($2, $3, 0);
+ free($2);
+ free($3);
+ }
+ | SCC_DELPROPVALUE error terminator { synerr(SCC_DELPROPVALUE); }
+
+setenv_cmd : SCC_SETENV string_list terminator
+ {
+ string_list_t *slp;
+ void *cookie = NULL;
+
+ if (lscf_setenv($2, 0) == -2)
+ synerr(SCC_SETENV);
+
+ while ((slp = uu_list_teardown($2, &cookie)) != NULL) {
+ free(slp->str);
+ free(slp);
+ }
+
+ uu_list_destroy($2);
+ }
+ | SCC_SETENV error terminator { synerr(SCC_SETENV); }
+
+unsetenv_cmd : SCC_UNSETENV string_list terminator
+ {
+ string_list_t *slp;
+ void *cookie = NULL;
+
+ if (lscf_setenv($2, 1) == -2)
+ synerr(SCC_UNSETENV);
+
+ while ((slp = uu_list_teardown($2, &cookie)) != NULL) {
+ free(slp->str);
+ free(slp);
+ }
+
+ uu_list_destroy($2);
+ }
+ | SCC_UNSETENV error terminator { synerr(SCC_UNSETENV); }
+
+listsnap_cmd : SCC_LISTSNAP terminator { lscf_listsnap(); }
+ | SCC_LISTSNAP error terminator { synerr(SCC_LISTSNAP); }
+
+selectsnap_cmd : SCC_SELECTSNAP opt_word terminator
+ { lscf_selectsnap($2); free($2); }
+ | SCC_SELECTSNAP error terminator
+ { synerr(SCC_SELECTSNAP); }
+
+revert_cmd: SCC_REVERT opt_word terminator { lscf_revert($2); free($2); }
+ | SCC_REVERT error terminator { synerr(SCC_REVERT); }
+
+
+terminator : SCS_NEWLINE
+
+string_list :
+ {
+ $$ = uu_list_create(string_pool, NULL, 0);
+ if ($$ == NULL)
+ uu_die(gettext("Out of memory\n"));
+ }
+ | string_list string
+ {
+ string_list_t *slp;
+
+ slp = safe_malloc(sizeof (*slp));
+
+ slp->str = $2;
+ uu_list_node_init(slp, &slp->node, string_pool);
+ uu_list_append($1, slp);
+ $$ = $1;
+ }
+
+multiline_string_list : string_list
+ {
+ $$ = $1;
+ }
+ | multiline_string_list SCS_NEWLINE string_list
+ {
+ void *cookie = NULL;
+ string_list_t *slp;
+
+ /* Append $3 to $1. */
+ while ((slp = uu_list_teardown($3, &cookie)) != NULL)
+ uu_list_append($1, slp);
+
+ uu_list_destroy($3);
+ }
+
+string : SCV_WORD { $$ = $1; }
+ | SCV_STRING { $$ = $1; }
+
+opt_word : { $$ = NULL; }
+ | SCV_WORD { $$ = $1; }
+
+command_token : SCC_VALIDATE { $$ = SCC_VALIDATE; }
+ | SCC_IMPORT { $$ = SCC_IMPORT; }
+ | SCC_EXPORT { $$ = SCC_EXPORT; }
+ | SCC_APPLY { $$ = SCC_APPLY; }
+ | SCC_EXTRACT { $$ = SCC_EXTRACT; }
+ | SCC_REPOSITORY { $$ = SCC_REPOSITORY; }
+ | SCC_ARCHIVE { $$ = SCC_ARCHIVE; }
+ | SCC_INVENTORY { $$ = SCC_INVENTORY; }
+ | SCC_SET { $$ = SCC_SET; }
+ | SCC_END { $$ = SCC_END; }
+ | SCC_HELP { $$ = SCC_HELP; }
+ | SCC_LIST { $$ = SCC_LIST; }
+ | SCC_ADD { $$ = SCC_ADD; }
+ | SCC_DELETE { $$ = SCC_DELETE; }
+ | SCC_SELECT { $$ = SCC_SELECT; }
+ | SCC_UNSELECT { $$ = SCC_UNSELECT; }
+ | SCC_LISTPG { $$ = SCC_LISTPG; }
+ | SCC_ADDPG { $$ = SCC_ADDPG; }
+ | SCC_DELPG { $$ = SCC_DELPG; }
+ | SCC_LISTPROP { $$ = SCC_LISTPROP; }
+ | SCC_SETPROP { $$ = SCC_SETPROP; }
+ | SCC_DELPROP { $$ = SCC_DELPROP; }
+ | SCC_EDITPROP { $$ = SCC_EDITPROP; }
+ | SCC_ADDPROPVALUE { $$ = SCC_ADDPROPVALUE; }
+ | SCC_DELPROPVALUE { $$ = SCC_DELPROPVALUE; }
+ | SCC_SETENV { $$ = SCC_SETENV; }
+ | SCC_UNSETENV { $$ = SCC_UNSETENV; }
+ | SCC_LISTSNAP { $$ = SCC_LISTSNAP; }
+ | SCC_SELECTSNAP { $$ = SCC_SELECTSNAP; }
+ | SCC_REVERT { $$ = SCC_REVERT; }
diff --git a/usr/src/cmd/svc/svccfg/svccfg_engine.c b/usr/src/cmd/svc/svccfg/svccfg_engine.c
new file mode 100644
index 0000000000..76b6c8fc05
--- /dev/null
+++ b/usr/src/cmd/svc/svccfg/svccfg_engine.c
@@ -0,0 +1,725 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * svccfg(1) interpreter and command execution engine.
+ */
+
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <assert.h>
+#include <errno.h>
+#include <libintl.h>
+#include <libtecla.h>
+#include <md5.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "manifest_hash.h"
+#include "svccfg.h"
+
+#define MS_PER_US 1000
+
+engine_state_t *est;
+
+/*
+ * Replacement lex(1) character retrieval routines.
+ */
+int
+engine_cmd_getc(engine_state_t *E)
+{
+ if (E->sc_cmd_file != NULL)
+ return (getc(E->sc_cmd_file));
+
+ if (E->sc_cmd_flags & SC_CMD_EOF)
+ return (EOF);
+
+ if (E->sc_cmd_bufoff < E->sc_cmd_bufsz)
+ return (*(E->sc_cmd_buf + E->sc_cmd_bufoff++));
+
+ if (!(E->sc_cmd_flags & SC_CMD_IACTIVE)) {
+ E->sc_cmd_flags |= SC_CMD_EOF;
+
+ return (EOF);
+ } else {
+#ifdef NATIVE_BUILD
+ return (EOF);
+#else
+ extern int parens;
+
+ if (parens <= 0) {
+ E->sc_cmd_flags |= SC_CMD_EOF;
+ return (EOF);
+ }
+
+ for (;;) {
+ E->sc_cmd_buf = gl_get_line(E->sc_gl, "> ", NULL, -1);
+ if (E->sc_cmd_buf != NULL)
+ break;
+
+ switch (gl_return_status(E->sc_gl)) {
+ case GLR_SIGNAL:
+ gl_abandon_line(E->sc_gl);
+ continue;
+
+ case GLR_EOF:
+ E->sc_cmd_flags |= SC_CMD_EOF;
+ return (EOF);
+
+ case GLR_ERROR:
+ uu_die(gettext("Error reading terminal: %s.\n"),
+ gl_error_message(E->sc_gl, NULL, 0));
+ /* NOTREACHED */
+
+ default:
+#ifndef NDEBUG
+ (void) fprintf(stderr, "%s:%d: gl_get_line() "
+ "returned unexpected value %d.\n", __FILE__,
+ __LINE__, gl_return_status(E->sc_gl));
+#endif
+ abort();
+ }
+ }
+
+ E->sc_cmd_bufsz = strlen(E->sc_cmd_buf);
+ E->sc_cmd_bufoff = 1;
+
+ return (E->sc_cmd_buf[0]);
+#endif /* NATIVE_BUILD */
+ }
+}
+
+int
+engine_cmd_ungetc(engine_state_t *E, char c)
+{
+ if (E->sc_cmd_file != NULL)
+ return (ungetc(c, E->sc_cmd_file));
+
+ if (E->sc_cmd_buf != NULL)
+ *(E->sc_cmd_buf + --E->sc_cmd_bufoff) = c;
+
+ return (c);
+}
+
+/*ARGSUSED*/
+void
+engine_cmd_nputs(engine_state_t *E, char *c, size_t n)
+{
+ /* our lexer shouldn't need this state */
+ exit(11);
+}
+
+int
+engine_exec(char *cmd)
+{
+ est->sc_cmd_buf = cmd;
+ est->sc_cmd_bufsz = strlen(cmd) + 1;
+ est->sc_cmd_bufoff = 0;
+
+ (void) yyparse();
+
+ return (0);
+}
+
+#ifndef NATIVE_BUILD
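+/*
+ * File-completion filter: accept only pathnames ending in ".xml".
+ */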
+/* ARGSUSED */
+static
+CPL_CHECK_FN(check_xml)
+{
+ const char *ext;
+
+ if (strlen(pathname) < 4)
+ return (0);
+
+ ext = pathname + strlen(pathname) - 4;
+
+ return (strcmp(ext, ".xml") == 0 ? 1 : 0);
+}
+
+static const char * const whitespace = " \t";
+
+static
+CPL_MATCH_FN(complete_single_xml_file_arg)
+{
+ const char *arg1 = data;
+ int arg1end_i, ret;
+ CplFileConf *cfc;
+
+ arg1end_i = arg1 + strcspn(arg1, whitespace) - line;
+ if (arg1end_i < word_end)
+ return (0);
+
+ cfc = new_CplFileConf();
+ if (cfc == NULL) {
+ cpl_record_error(cpl, "Out of memory.");
+ return (1);
+ }
+
+ cfc_set_check_fn(cfc, check_xml, NULL);
+
+ ret = cpl_file_completions(cpl, cfc, line, word_end);
+
+ (void) del_CplFileConf(cfc);
+ return (ret);
+}
+
+static struct cmd_info {
+ const char *name;
+ uint32_t flags;
+ CplMatchFn *complete_args_f;
+} cmds[] = {
+ { "validate", CS_GLOBAL, complete_single_xml_file_arg },
+ { "import", CS_GLOBAL, complete_single_xml_file_arg },
+ { "export", CS_GLOBAL, NULL },
+ { "archive", CS_GLOBAL, NULL },
+ { "apply", CS_GLOBAL, complete_single_xml_file_arg },
+ { "extract", CS_GLOBAL, NULL },
+ { "repository", CS_GLOBAL, NULL },
+ { "inventory", CS_GLOBAL, complete_single_xml_file_arg },
+ { "set", CS_GLOBAL, NULL },
+ { "end", CS_GLOBAL, NULL },
+ { "exit", CS_GLOBAL, NULL },
+ { "quit", CS_GLOBAL, NULL },
+ { "help", CS_GLOBAL, NULL },
+ { "delete", CS_GLOBAL, NULL },
+ { "select", CS_GLOBAL, complete_select },
+ { "unselect", CS_SVC | CS_INST | CS_SNAP, NULL },
+ { "list", CS_SCOPE | CS_SVC | CS_SNAP, NULL },
+ { "add", CS_SCOPE | CS_SVC, NULL },
+ { "listpg", CS_SVC | CS_INST | CS_SNAP, NULL },
+ { "addpg", CS_SVC | CS_INST, NULL },
+ { "delpg", CS_SVC | CS_INST, NULL },
+ { "listprop", CS_SVC | CS_INST | CS_SNAP, NULL },
+ { "setprop", CS_SVC | CS_INST, NULL },
+ { "delprop", CS_SVC | CS_INST, NULL },
+ { "editprop", CS_SVC | CS_INST, NULL },
+ { "listsnap", CS_INST | CS_SNAP, NULL },
+ { "selectsnap", CS_INST | CS_SNAP, NULL },
+ { "revert", CS_INST | CS_SNAP, NULL },
+ { NULL }
+};
+
+int
+add_cmd_matches(WordCompletion *cpl, const char *line, int word_end,
+ uint32_t scope)
+{
+ int word_start, err;
+ size_t len;
+ const char *bol;
+ struct cmd_info *cip;
+
+ word_start = strspn(line, whitespace);
+ len = word_end - word_start;
+ bol = line + word_end - len;
+
+ for (cip = cmds; cip->name != NULL; ++cip) {
+ if ((cip->flags & scope) == 0)
+ continue;
+
+ if (strncmp(cip->name, bol, len) == 0) {
+ err = cpl_add_completion(cpl, line, word_start,
+ word_end, cip->name + len, "", " ");
+ if (err != 0)
+ return (err);
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * Suggest completions. We must first determine if the cursor is in command
+ * position or in argument position. If the former, complete_command() finds
+ * matching commands. If the latter, we tail-call the command-specific
+ * argument-completion routine in the cmds table.
+ */
+/* ARGSUSED */
+static
+CPL_MATCH_FN(complete)
+{
+ const char *arg0, *arg1;
+ size_t arg0len;
+ struct cmd_info *cip;
+
+ arg0 = line + strspn(line, whitespace);
+ arg0len = strcspn(arg0, whitespace);
+ if ((arg0 + arg0len) - line >= word_end ||
+ (arg0[arg0len] != ' ' && arg0[arg0len] != '\t'))
+ return (complete_command(cpl, (void *)arg0, line, word_end));
+
+ arg1 = arg0 + arg0len;
+ arg1 += strspn(arg1, whitespace);
+
+ for (cip = cmds; cip->name != NULL; ++cip) {
+ if (strlen(cip->name) != arg0len)
+ continue;
+
+ if (strncmp(cip->name, arg0, arg0len) != 0)
+ continue;
+
+ if (cip->complete_args_f == NULL)
+ break;
+
+ return (cip->complete_args_f(cpl, (void *)arg1, line,
+ word_end));
+ }
+
+ return (0);
+}
+#endif /* NATIVE_BUILD */
+
+int
+engine_interp()
+{
+#ifdef NATIVE_BUILD
+ uu_die("native build does not support interactive mode.");
+#else
+ char *selfmri;
+ size_t sfsz;
+ int r;
+
+ extern int parens;
+
+ (void) sigset(SIGINT, SIG_IGN);
+
+ est->sc_gl = new_GetLine(512, 8000);
+ if (est->sc_gl == NULL)
+ uu_die(gettext("Out of memory.\n"));
+
+ /* The longest string is "[snapname]fmri[:instname]> ". */
+ sfsz = 1 + max_scf_name_len + 1 + max_scf_fmri_len + 2 +
+ max_scf_name_len + 1 + 2 + 1;
+ selfmri = safe_malloc(sfsz);
+
+ r = gl_customize_completion(est->sc_gl, NULL, complete);
+ assert(r == 0);
+
+ for (;;) {
+ lscf_get_selection_str(selfmri, sfsz - 2);
+ (void) strcat(selfmri, "> ");
+ est->sc_cmd_buf = gl_get_line(est->sc_gl, selfmri, NULL, -1);
+
+ if (est->sc_cmd_buf == NULL) {
+ switch (gl_return_status(est->sc_gl)) {
+ case GLR_SIGNAL:
+ gl_abandon_line(est->sc_gl);
+ continue;
+
+ case GLR_EOF:
+ break;
+
+ case GLR_ERROR:
+ uu_die(gettext("Error reading terminal: %s.\n"),
+ gl_error_message(est->sc_gl, NULL, 0));
+ /* NOTREACHED */
+
+ default:
+#ifndef NDEBUG
+ (void) fprintf(stderr, "%s:%d: gl_get_line() "
+ "returned unexpected value %d.\n", __FILE__,
+ __LINE__, gl_return_status(est->sc_gl));
+#endif
+ abort();
+ }
+
+ break;
+ }
+
+ parens = 0;
+ est->sc_cmd_bufsz = strlen(est->sc_cmd_buf);
+ est->sc_cmd_bufoff = 0;
+ est->sc_cmd_flags = SC_CMD_IACTIVE;
+
+ (void) yyparse();
+ }
+
+ free(selfmri);
+ est->sc_gl = del_GetLine(est->sc_gl); /* returns NULL */
+
+#endif /* NATIVE_BUILD */
+ return (0);
+}
+
+int
+engine_source(const char *name, boolean_t dont_exit)
+{
+ engine_state_t *old = est;
+ struct stat st;
+ int ret;
+
+ est = uu_zalloc(sizeof (engine_state_t));
+
+ /* first, copy the stuff set up in engine_init */
+ est->sc_repo_pid = old->sc_repo_pid;
+ if (old->sc_repo_filename != NULL)
+ est->sc_repo_filename = safe_strdup(old->sc_repo_filename);
+ if (old->sc_repo_doordir != NULL)
+ est->sc_repo_doordir = safe_strdup(old->sc_repo_doordir);
+ if (old->sc_repo_doorname != NULL)
+ est->sc_repo_doorname = safe_strdup(old->sc_repo_doorname);
+ if (old->sc_repo_server != NULL)
+ est->sc_repo_server = safe_strdup(old->sc_repo_server);
+
+ /* set up the new guy */
+ est->sc_cmd_lineno = 1;
+
+ if (dont_exit)
+ est->sc_cmd_flags |= SC_CMD_DONT_EXIT;
+
+ if (strcmp(name, "-") == 0) {
+ est->sc_cmd_file = stdin;
+ est->sc_cmd_filename = "<stdin>";
+ } else {
+ errno = 0;
+ est->sc_cmd_filename = name;
+ est->sc_cmd_file = fopen(name, "r");
+ if (est->sc_cmd_file == NULL) {
+ if (errno == 0)
+ semerr(gettext("No free stdio streams.\n"));
+ else
+ semerr(gettext("Could not open %s"), name);
+
+ ret = -1;
+ goto fail;
+ }
+
+ do
+ ret = fstat(fileno(est->sc_cmd_file), &st);
+ while (ret != 0 && errno == EINTR);
+ if (ret != 0) {
+ (void) fclose(est->sc_cmd_file);
+ est->sc_cmd_file = NULL; /* for semerr() */
+
+ semerr(gettext("Could not stat %s"), name);
+
+ ret = -1;
+ goto fail;
+ }
+
+ if (!S_ISREG(st.st_mode)) {
+ (void) fclose(est->sc_cmd_file);
+ est->sc_cmd_file = NULL; /* for semerr() */
+
+ semerr(gettext("%s is not a regular file.\n"), name);
+
+ ret = -1;
+ goto fail;
+ }
+ }
+
+ (void) yyparse();
+
+ if (est->sc_cmd_file != stdin)
+ (void) fclose(est->sc_cmd_file);
+
+ ret = 0;
+
+fail:
+ if (est->sc_repo_pid != old->sc_repo_pid)
+ lscf_cleanup(); /* clean up any new repository */
+
+ if (est->sc_repo_filename != NULL)
+ free((void *)est->sc_repo_filename);
+ if (est->sc_repo_doordir != NULL)
+ free((void *)est->sc_repo_doordir);
+ if (est->sc_repo_doorname != NULL)
+ free((void *)est->sc_repo_doorname);
+ if (est->sc_repo_server != NULL)
+ free((void *)est->sc_repo_server);
+ free(est);
+
+ est = old;
+
+ return (ret);
+}
+
+/*
+ * Initialize svccfg state. We recognize four environment variables:
+ *
+ * SVCCFG_REPOSITORY Create a private instance of svc.configd(1M) to answer
+ * requests for the specified repository file.
+ *
+ * SVCCFG_DOOR_PATH Directory for door creation.
+ *
+ * SVCCFG_DOOR Rendezvous via an alternative repository door.
+ *
+ * SVCCFG_CONFIGD_PATH Resolvable path to alternative svc.configd(1M) binary.
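+ *
+ * For example, an interactive session against a private copy of the
+ * repository might be started with (illustrative):
+ *
+ *	SVCCFG_REPOSITORY=/tmp/repository.db svccfg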
+ */
+void
+engine_init()
+{
+ const char *cp;
+
+ est = uu_zalloc(sizeof (engine_state_t));
+
+ est->sc_cmd_lineno = 1;
+ est->sc_repo_pid = -1;
+
+ cp = getenv("SVCCFG_REPOSITORY");
+ est->sc_repo_filename = cp ? safe_strdup(cp) : NULL;
+
+ cp = getenv("SVCCFG_DOOR_PATH");
+ est->sc_repo_doordir = cp ? cp : "/var/run";
+
+ cp = getenv("SVCCFG_DOOR");
+ if (cp != NULL) {
+ if (est->sc_repo_filename != NULL) {
+ uu_warn(gettext("SVCCFG_DOOR unused when "
+ "SVCCFG_REPOSITORY specified\n"));
+ } else {
+ est->sc_repo_doorname = safe_strdup(cp);
+ }
+ }
+
+ cp = getenv("SVCCFG_CONFIGD_PATH");
+ est->sc_repo_server = cp ? cp : "/lib/svc/bin/svc.configd";
+}
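+
+/*
+ * Illustrative example (file names are hypothetical): running
+ *
+ *     SVCCFG_REPOSITORY=/tmp/repo.db svccfg import foo.xml
+ *
+ * causes lscf_prep_hndl() to start a private svc.configd(1M) serving
+ * /tmp/repo.db. When SVCCFG_REPOSITORY is set, SVCCFG_DOOR is ignored, as
+ * warned above.
+ */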
+
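+/*
+ * Import a manifest. args is the list of command-line tokens (with no
+ * argv[0]); -n requests no refresh (SCI_NOREFRESH) and -V requests
+ * verification (not yet implemented). Returns 0 on success, -1 on failure,
+ * and -2 for a usage error.
+ */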
+int
+engine_import(uu_list_t *args)
+{
+ int ret, argc, i, o;
+ bundle_t *b;
+ char *file, *pname;
+ uchar_t hash[16];
+ char **argv;
+ string_list_t *slp;
+ boolean_t verify = B_FALSE;
+ uint_t flags = SCI_GENERALLAST;
+
+ argc = uu_list_numnodes(args);
+ if (argc < 1)
+ return (-2);
+
+ argv = calloc(argc + 1, sizeof (char *));
+ if (argv == NULL)
+ uu_die(gettext("Out of memory.\n"));
+
+ for (slp = uu_list_first(args), i = 0;
+ slp != NULL;
+ slp = uu_list_next(args, slp), ++i)
+ argv[i] = slp->str;
+
+ argv[i] = NULL;
+
+ opterr = 0;
+ optind = 0; /* Remember, no argv[0]. */
+ for (;;) {
+ o = getopt(argc, argv, "nV");
+ if (o == -1)
+ break;
+
+ switch (o) {
+ case 'n':
+ flags |= SCI_NOREFRESH;
+ break;
+
+ case 'V':
+ verify = B_TRUE;
+ break;
+
+ case '?':
+ free(argv);
+ return (-2);
+
+ default:
+ bad_error("getopt", o);
+ }
+ }
+
+ argc -= optind;
+ if (argc != 1) {
+ free(argv);
+ return (-2);
+ }
+
+ file = argv[optind];
+ free(argv);
+
+ lscf_prep_hndl();
+
+ if ((ret = mhash_test_file(g_hndl, file, 0, &pname, hash)) != 0)
+ return (ret);
+
+ /* Load */
+ b = internal_bundle_new();
+
+ if (lxml_get_bundle_file(b, file, 0) != 0) {
+ internal_bundle_free(b);
+ return (-1);
+ }
+
+ /* Import */
+ if (lscf_bundle_import(b, file, flags) != 0) {
+ internal_bundle_free(b);
+ return (-1);
+ }
+
+ internal_bundle_free(b);
+
+ if (g_verbose)
+ warn(gettext("Successful import.\n"));
+
+ if (pname) {
+ char *errstr;
+
+ if (mhash_store_entry(g_hndl, pname, hash, &errstr)) {
+ if (errstr)
+ semerr(errstr);
+ else
+ semerr(gettext("Unknown error from "
+ "mhash_store_entry()\n"));
+ }
+
+ free(pname);
+ }
+
+ /* Verify */
+ if (verify)
+ warn(gettext("import -V not implemented.\n"));
+
+ return (0);
+}
+
+int
+engine_apply(const char *file)
+{
+ int ret;
+ bundle_t *b;
+ char *pname;
+ uchar_t hash[16];
+
+ lscf_prep_hndl();
+
+ if ((ret = mhash_test_file(g_hndl, file, 1, &pname, hash)) != 0)
+ return (ret);
+
+ b = internal_bundle_new();
+
+ if (lxml_get_bundle_file(b, file, 1) != 0) {
+ internal_bundle_free(b);
+ return (-1);
+ }
+
+ if (lscf_bundle_apply(b) != 0) {
+ internal_bundle_free(b);
+ return (-1);
+ }
+
+ internal_bundle_free(b);
+
+ if (pname) {
+ char *errstr;
+
+ if (mhash_store_entry(g_hndl, pname, hash, &errstr)) {
+ if (errstr)
+ semerr(errstr);
+ else
+ semerr(gettext("Unknown error from "
+ "mhash_store_entry()\n"));
+ }
+
+ free(pname);
+ }
+
+ return (0);
+}
+
+int
+engine_set(uu_list_t *args)
+{
+ uu_list_walk_t *walk;
+ string_list_t *slp;
+
+ if (uu_list_first(args) == NULL) {
+ /* Display current options. */
+ if (!g_verbose)
+ (void) fputs("no", stdout);
+ (void) puts("verbose");
+
+ return (0);
+ }
+
+ walk = uu_list_walk_start(args, UU_DEFAULT);
+ if (walk == NULL)
+ uu_die(gettext("Couldn't read arguments"));
+
+ /* Use getopt? */
+ for (slp = uu_list_walk_next(walk);
+ slp != NULL;
+ slp = uu_list_walk_next(walk)) {
+ if (slp->str[0] == '-') {
+ char *op;
+
+ for (op = &slp->str[1]; *op != '\0'; ++op) {
+ switch (*op) {
+ case 'v':
+ g_verbose = 1;
+ break;
+
+ case 'V':
+ g_verbose = 0;
+ break;
+
+ default:
+ warn(gettext("Unknown option -%c.\n"),
+ *op);
+ }
+ }
+ } else {
+ warn(gettext("No non-flag arguments defined.\n"));
+ }
+ }
+
+ return (0);
+}
+
+void
+help(int com)
+{
+ int i;
+
+ if (com == 0) {
+ warn(gettext("General commands: help set repository end\n"
+ "Manifest commands: inventory validate import export "
+ "archive\n"
+ "Profile commands: apply extract\n"
+ "Entity commands: list select unselect add delete\n"
+ "Snapshot commands: listsnap selectsnap revert\n"
+ "Property group commands: listpg addpg delpg\n"
+ "Property commands: listprop setprop delprop editprop\n"
+ "Property value commands: addpropvalue delpropvalue "
+ "setenv unsetenv\n"));
+ return;
+ }
+
+ for (i = 0; help_messages[i].message != NULL; ++i) {
+ if (help_messages[i].token == com) {
+ warn(gettext("Usage: %s\n"),
+ gettext(help_messages[i].message));
+ return;
+ }
+ }
+
+ warn(gettext("Unknown command.\n"));
+}
diff --git a/usr/src/cmd/svc/svccfg/svccfg_help.c b/usr/src/cmd/svc/svccfg/svccfg_help.c
new file mode 100644
index 0000000000..2c431e65dc
--- /dev/null
+++ b/usr/src/cmd/svc/svccfg/svccfg_help.c
@@ -0,0 +1,121 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "svccfg.h"
+#include "svccfg_grammar.h"
+
+struct help_message help_messages[] = {
+ { SCC_VALIDATE, "validate file\n\n"
+ "Process a manifest file without changing the repository."
+ },
+ { SCC_IMPORT, "import file\n\nImport a manifest into the repository." },
+ { SCC_EXPORT, "export {service | pattern} [> file]\n\n"
+"Print a manifest for service to file, or standard output if not specified."
+ },
+ { SCC_ARCHIVE, "archive [> file]\n\n"
+"Print an archive to file, or standard output if not specified."
+ },
+ { SCC_APPLY, "apply file\n\nApply a profile." },
+ { SCC_EXTRACT, "extract [> file]\n\n"
+"Print a profile to file, or standard output if not specified." },
+ { SCC_REPOSITORY, "repository file\n\nSet the repository to modify." },
+ { SCC_INVENTORY, "inventory file\n\n"
+ "Print the services and instances contained in a manifest."
+ },
+ { SCC_SET, "set [-vV]\n\n"
+"Without arguments, display current options. Otherwise set the given options."
+ },
+ { SCC_END, "end\n\nStop processing and exit." },
+ { SCC_HELP, "help [command]\n\nDisplay help." },
+ { SCC_LIST, "list [glob_pattern]\n\n"
+ "List children of the currently selected entity."
+ },
+ { SCC_ADD, "add name\n\n"
+ "Add a new child entity to the currently selected entity."
+ },
+ { SCC_DELETE, "delete [-f] {name | fmri | pattern}\n\n"
+"Delete the named child entity or the one indicated by fmri. With -f, delete\n"
+"running services.\n"
+ },
+ { SCC_SELECT, "select {name | fmri | pattern}\n\n"
+ "Select the named child entity or the one indicated by fmri."
+ },
+ { SCC_UNSELECT, "unselect\n\n"
+ "Select the parent of the currently selected entity."
+ },
+ { SCC_LISTPG, "listpg [glob_pattern]\n\n"
+ "List property groups of the currently selected entity."
+ },
+ { SCC_ADDPG, "addpg name type [P]\n\n"
+ "Add a new property group to the currently selected entity."
+ },
+ { SCC_DELPG, "delpg name\n\n"
+"Delete the named property group from the currently selected entity."
+ },
+ { SCC_LISTPROP, "listprop [glob_pattern]\n\n"
+"List property groups and properties of the currently selected entity."
+ },
+ { SCC_SETPROP,
+ "\tsetprop pg/name = [type:] value\n"
+ "\tsetprop pg/name = [type:] ([value...])\n\n"
+"Set the pg/name property of the currently selected entity. Values may be\n"
+"enclosed in double-quotes. Value lists may span multiple lines."
+ },
+ { SCC_DELPROP, "delprop pg/name\n\n"
+ "Delete the pg/name property of the currently selected entity."
+ },
+ { SCC_EDITPROP, "editprop\n\n"
+"Invoke $EDITOR to edit the properties of the currently selected entity."
+ },
+ { SCC_ADDPROPVALUE, "addpropvalue pg/name [type:] value\n\n"
+"Add the given value to the named property."
+ },
+ { SCC_DELPROPVALUE, "delpropvalue pg/name glob_pattern\n\n"
+"Delete all values matching the glob pattern fron the given property."
+ },
+ { SCC_SETENV, "setenv [-s | -i | -m method] NAME value\n\n"
+"Set an environment variable for the given service, instance, or method "
+"context."
+ },
+ { SCC_UNSETENV, "unsetenv [-s | -i | -m method] NAME value\n\n"
+"Unset an environment variable for the given service, instance, or method "
+"context."
+ },
+ { SCC_LISTSNAP, "listsnap\n\n"
+ "List snapshots of the currently selected instance."
+ },
+ { SCC_SELECTSNAP, "selectsnap [snapshot]\n\n"
+"Select a snapshot of the currently selected instance, or the Editing\n"
+"snapshot by default."
+ },
+ { SCC_REVERT, "revert [snapshot]\n\n"
+"Change the properties of the currently selected instance and its ancestors\n"
+"to those in a snapshot, or the currently selected snapshot by default."
+ },
+ { 0, NULL }
+};
diff --git a/usr/src/cmd/svc/svccfg/svccfg_internal.c b/usr/src/cmd/svc/svccfg/svccfg_internal.c
new file mode 100644
index 0000000000..4950c40669
--- /dev/null
+++ b/usr/src/cmd/svc/svccfg/svccfg_internal.c
@@ -0,0 +1,1231 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <assert.h>
+#include <errno.h>
+#include <libintl.h>
+#include <libuutil.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <string.h>
+
+#include "svccfg.h"
+
+/*
+ * Internal representation manipulation routines for svccfg(1)
+ */
+
+static uu_list_pool_t *entity_pool;
+static uu_list_pool_t *pgroup_pool;
+static uu_list_pool_t *property_pool;
+static uu_list_pool_t *value_pool;
+
+/* ARGSUSED */
+static int
+entity_cmp(const void *a, const void *b, void *p)
+{
+ entity_t *A = (entity_t *)a;
+ entity_t *B = (entity_t *)b;
+
+ return (strcmp(A->sc_name, B->sc_name));
+}
+
+/*ARGSUSED*/
+static int
+pgroup_cmp(const void *a, const void *b, void *p)
+{
+ pgroup_t *A = (pgroup_t *)a;
+ pgroup_t *B = (pgroup_t *)b;
+
+ return (strcmp(A->sc_pgroup_name, B->sc_pgroup_name));
+}
+
+/* ARGSUSED */
+static int
+property_cmp(const void *a, const void *b, void *p)
+{
+ property_t *A = (property_t *)a;
+ property_t *B = (property_t *)b;
+
+ return (strcmp(A->sc_property_name, B->sc_property_name));
+}
+
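+/*
+ * Comparator for property value lists, which are created UU_LIST_SORTED
+ * (see internal_property_new()). prop_equal() also calls it directly to
+ * compare individual values.
+ */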
+/* ARGSUSED */
+int
+value_cmp(const void *a, const void *b, void *p)
+{
+ const value_t *A = a;
+ const value_t *B = b;
+
+ if (A->sc_type != B->sc_type)
+ return (B->sc_type - A->sc_type);
+
+ switch (A->sc_type) {
+ case SCF_TYPE_BOOLEAN:
+ case SCF_TYPE_COUNT:
+ return (B->sc_u.sc_count - A->sc_u.sc_count);
+
+ case SCF_TYPE_INTEGER:
+ return (B->sc_u.sc_integer - A->sc_u.sc_integer);
+
+ default:
+ return (strcmp(A->sc_u.sc_string, B->sc_u.sc_string));
+ }
+}
+
+void
+internal_init()
+{
+ if ((entity_pool = uu_list_pool_create("entities", sizeof (entity_t),
+ offsetof(entity_t, sc_node), entity_cmp, 0)) == NULL)
+ uu_die(gettext("entity list pool creation failed: %s\n"),
+ uu_strerror(uu_error()));
+
+ if ((pgroup_pool = uu_list_pool_create("property_groups",
+ sizeof (pgroup_t), offsetof(pgroup_t, sc_node), pgroup_cmp, 0)) ==
+ NULL)
+ uu_die(
+ gettext("property group list pool creation failed: %s\n"),
+ uu_strerror(uu_error()));
+
+ if ((property_pool = uu_list_pool_create("properties",
+ sizeof (property_t), offsetof(property_t, sc_node), property_cmp,
+ 0)) == NULL)
+ uu_die(gettext("property list pool creation failed: %s\n"),
+ uu_strerror(uu_error()));
+
+ if ((value_pool = uu_list_pool_create("property_values",
+ sizeof (value_t), offsetof(value_t, sc_node), value_cmp, 0)) ==
+ NULL)
+ uu_die(
+ gettext("property value list pool creation failed: %s\n"),
+ uu_strerror(uu_error()));
+}
+
+/*ARGSUSED*/
+static int
+internal_value_dump(void *v, void *pvt)
+{
+ value_t *val = v;
+
+ switch (val->sc_type) {
+ case SCF_TYPE_BOOLEAN:
+ (void) printf(" value = %s\n",
+ val->sc_u.sc_count ? "true" : "false");
+ break;
+ case SCF_TYPE_COUNT:
+ (void) printf(" value = %llu\n", val->sc_u.sc_count);
+ break;
+ case SCF_TYPE_INTEGER:
+ (void) printf(" value = %lld\n", val->sc_u.sc_integer);
+ break;
+ case SCF_TYPE_ASTRING:
+ case SCF_TYPE_FMRI:
+ case SCF_TYPE_HOST:
+ case SCF_TYPE_HOSTNAME:
+ case SCF_TYPE_NET_ADDR_V4:
+ case SCF_TYPE_NET_ADDR_V6:
+ case SCF_TYPE_OPAQUE:
+ case SCF_TYPE_TIME:
+ case SCF_TYPE_URI:
+ case SCF_TYPE_USTRING:
+ (void) printf(" value = %s\n",
+ val->sc_u.sc_string ? val->sc_u.sc_string : "(nil)");
+ break;
+ default:
+ uu_die(gettext("unknown value type (%d)\n"), val->sc_type);
+ break;
+ }
+
+ return (UU_WALK_NEXT);
+}
+
+/*ARGSUSED*/
+static int
+internal_property_dump(void *v, void *pvt)
+{
+ property_t *p = v;
+
+ (void) printf("property\n name = %s\n", p->sc_property_name);
+ (void) printf(" type = %d\n", p->sc_value_type);
+
+ (void) uu_list_walk(p->sc_property_values, internal_value_dump,
+ NULL, UU_DEFAULT);
+
+ return (UU_WALK_NEXT);
+}
+
+/*ARGSUSED*/
+static int
+internal_pgroup_dump(void *v, void *pvt)
+{
+ pgroup_t *pg = v;
+
+ (void) printf("pgroup name = %s\n", pg->sc_pgroup_name);
+ (void) printf(" type = %s\n", pg->sc_pgroup_type);
+
+ (void) uu_list_walk(pg->sc_pgroup_props, internal_property_dump,
+ NULL, UU_DEFAULT);
+
+ return (UU_WALK_NEXT);
+}
+
+/*ARGSUSED*/
+static int
+internal_instance_dump(void *v, void *pvt)
+{
+ entity_t *i = v;
+
+ (void) printf("instance name = %s\n", i->sc_name);
+
+ (void) uu_list_walk(i->sc_pgroups, internal_pgroup_dump, NULL,
+ UU_DEFAULT);
+
+ return (UU_WALK_NEXT);
+}
+
+/*ARGSUSED*/
+static int
+internal_service_dump(void *v, void *pvt)
+{
+ entity_t *s = v;
+
+ (void) printf("service name = %s\n", s->sc_name);
+ (void) printf(" type = %x\n", s->sc_u.sc_service.sc_service_type);
+ (void) printf(" version = %u\n", s->sc_u.sc_service.sc_service_version);
+
+ (void) uu_list_walk(s->sc_pgroups, internal_pgroup_dump, NULL,
+ UU_DEFAULT);
+
+ (void) uu_list_walk(s->sc_u.sc_service.sc_service_instances,
+ internal_instance_dump, NULL, UU_DEFAULT);
+
+ return (UU_WALK_NEXT);
+}
+
+void
+internal_dump(bundle_t *b)
+{
+ (void) printf("bundle name = %s\n", b->sc_bundle_name);
+ (void) printf(" type = %x\n", b->sc_bundle_type);
+
+ (void) uu_list_walk(b->sc_bundle_services, internal_service_dump,
+ NULL, UU_DEFAULT);
+}
+
+bundle_t *
+internal_bundle_new()
+{
+ bundle_t *b;
+
+ if ((b = uu_zalloc(sizeof (bundle_t))) == NULL)
+ uu_die(gettext("couldn't allocate memory"));
+
+ b->sc_bundle_type = SVCCFG_UNKNOWN_BUNDLE;
+ b->sc_bundle_services = uu_list_create(entity_pool, b, 0);
+
+ return (b);
+}
+
+void
+internal_bundle_free(bundle_t *b)
+{
+ void *cookie = NULL;
+ entity_t *service;
+
+ while ((service = uu_list_teardown(b->sc_bundle_services, &cookie)) !=
+ NULL)
+ internal_service_free(service);
+
+ free(b);
+}
+
+entity_t *
+internal_service_new(const char *name)
+{
+ entity_t *s;
+
+ if ((s = uu_zalloc(sizeof (entity_t))) == NULL)
+ uu_die(gettext("couldn't allocate memory"));
+
+ uu_list_node_init(s, &s->sc_node, entity_pool);
+
+ s->sc_name = name;
+ s->sc_fmri = uu_msprintf("svc:/%s", name);
+ if (s->sc_fmri == NULL)
+ uu_die(gettext("couldn't allocate memory"));
+
+ s->sc_etype = SVCCFG_SERVICE_OBJECT;
+ s->sc_pgroups = uu_list_create(pgroup_pool, s, 0);
+ s->sc_dependents = uu_list_create(pgroup_pool, s, 0);
+
+ s->sc_u.sc_service.sc_service_type = SVCCFG_UNKNOWN_SERVICE;
+ s->sc_u.sc_service.sc_service_instances = uu_list_create(entity_pool, s,
+ 0);
+
+ return (s);
+}
+
+void
+internal_service_free(entity_t *s)
+{
+ entity_t *inst;
+ pgroup_t *pg;
+ void *cookie;
+
+ cookie = NULL;
+ while ((pg = uu_list_teardown(s->sc_pgroups, &cookie)) != NULL)
+ internal_pgroup_free(pg);
+
+ cookie = NULL;
+ while ((pg = uu_list_teardown(s->sc_dependents, &cookie)) != NULL)
+ internal_pgroup_free(pg);
+
+ cookie = NULL;
+ while ((inst = uu_list_teardown(s->sc_u.sc_service.sc_service_instances,
+ &cookie)) != NULL)
+ internal_instance_free(inst);
+
+ free(s);
+}
+
+entity_t *
+internal_instance_new(const char *name)
+{
+ entity_t *i;
+
+ if ((i = uu_zalloc(sizeof (entity_t))) == NULL)
+ uu_die(gettext("couldn't allocate memory"));
+
+ uu_list_node_init(i, &i->sc_node, entity_pool);
+
+ i->sc_name = name;
+ /* Can't set i->sc_fmri until we're attached to a service. */
+ i->sc_etype = SVCCFG_INSTANCE_OBJECT;
+ i->sc_pgroups = uu_list_create(pgroup_pool, i, 0);
+ i->sc_dependents = uu_list_create(pgroup_pool, i, 0);
+
+ return (i);
+}
+
+void
+internal_instance_free(entity_t *i)
+{
+ pgroup_t *pg;
+ void *cookie = NULL;
+
+ while ((pg = uu_list_teardown(i->sc_pgroups, &cookie)) != NULL)
+ internal_pgroup_free(pg);
+
+ cookie = NULL;
+ while ((pg = uu_list_teardown(i->sc_dependents, &cookie)) != NULL)
+ internal_pgroup_free(pg);
+
+ free(i);
+}
+
+entity_t *
+internal_template_new()
+{
+ entity_t *t;
+
+ if ((t = uu_zalloc(sizeof (entity_t))) == NULL)
+ uu_die(gettext("couldn't allocate memory"));
+
+ uu_list_node_init(t, &t->sc_node, entity_pool);
+
+ t->sc_etype = SVCCFG_TEMPLATE_OBJECT;
+ t->sc_pgroups = uu_list_create(pgroup_pool, t, 0);
+
+ return (t);
+}
+
+pgroup_t *
+internal_pgroup_new()
+{
+ pgroup_t *p;
+
+ if ((p = uu_zalloc(sizeof (pgroup_t))) == NULL)
+ uu_die(gettext("couldn't allocate memory"));
+
+ uu_list_node_init(p, &p->sc_node, pgroup_pool);
+
+ p->sc_pgroup_props = uu_list_create(property_pool, p, UU_LIST_SORTED);
+ p->sc_pgroup_name = "<unset>";
+ p->sc_pgroup_type = "<unset>";
+
+ return (p);
+}
+
+void
+internal_pgroup_free(pgroup_t *pg)
+{
+ property_t *prop;
+ void *cookie = NULL;
+
+ while ((prop = uu_list_teardown(pg->sc_pgroup_props, &cookie)) != NULL)
+ internal_property_free(prop);
+
+ free(pg);
+}
+
+static pgroup_t *
+find_pgroup(uu_list_t *list, const char *name, const char *type)
+{
+ pgroup_t *pg;
+
+ for (pg = uu_list_first(list);
+ pg != NULL;
+ pg = uu_list_next(list, pg)) {
+ if (strcmp(pg->sc_pgroup_name, name) != 0)
+ continue;
+
+ if (type == NULL)
+ return (pg);
+
+ if (strcmp(pg->sc_pgroup_type, type) == 0)
+ return (pg);
+ }
+
+ return (NULL);
+}
+
+pgroup_t *
+internal_dependent_find(entity_t *e, const char *name)
+{
+ return (find_pgroup(e->sc_dependents, name, NULL));
+}
+
+pgroup_t *
+internal_pgroup_find(entity_t *e, const char *name, const char *type)
+{
+ return (find_pgroup(e->sc_pgroups, name, type));
+}
+
+pgroup_t *
+internal_pgroup_find_or_create(entity_t *e, const char *name, const char *type)
+{
+ pgroup_t *pg;
+
+ pg = internal_pgroup_find(e, name, type);
+ if (pg != NULL)
+ return (pg);
+
+ pg = internal_pgroup_new();
+ (void) internal_attach_pgroup(e, pg);
+ pg->sc_pgroup_name = strdup(name);
+ pg->sc_pgroup_type = strdup(type);
+ pg->sc_pgroup_flags = 0;
+
+ if (pg->sc_pgroup_name == NULL || pg->sc_pgroup_type == NULL)
+ uu_die(gettext("Could not duplicate string"));
+
+ return (pg);
+}
+
+property_t *
+internal_property_new()
+{
+ property_t *p;
+
+ if ((p = uu_zalloc(sizeof (property_t))) == NULL)
+ uu_die(gettext("couldn't allocate memory"));
+
+ uu_list_node_init(p, &p->sc_node, property_pool);
+
+ p->sc_property_values = uu_list_create(value_pool, p, UU_LIST_SORTED);
+ p->sc_property_name = "<unset>";
+
+ return (p);
+}
+
+void
+internal_property_free(property_t *p)
+{
+ value_t *val;
+ void *cookie = NULL;
+
+ while ((val = uu_list_teardown(p->sc_property_values, &cookie)) !=
+ NULL) {
+ if (val->sc_free != NULL)
+ val->sc_free(val);
+ free(val);
+ }
+
+ free(p);
+}
+
+property_t *
+internal_property_find(pgroup_t *pg, const char *name)
+{
+ property_t *p;
+
+ for (p = uu_list_first(pg->sc_pgroup_props);
+ p != NULL;
+ p = uu_list_next(pg->sc_pgroup_props, p))
+ if (strcmp(p->sc_property_name, name) == 0)
+ return (p);
+
+ return (NULL);
+}
+
+value_t *
+internal_value_new()
+{
+ value_t *v;
+
+ if ((v = uu_zalloc(sizeof (value_t))) == NULL)
+ uu_die(gettext("couldn't allocate memory"));
+
+ uu_list_node_init(v, &v->sc_node, value_pool);
+
+ return (v);
+}
+
+static void
+internal_value_free_str(value_t *v)
+{
+ free(v->sc_u.sc_string);
+}
+
+property_t *
+internal_property_create(const char *name, scf_type_t vtype, uint_t nvals, ...)
+{
+ va_list args;
+ property_t *p;
+ value_t *v;
+
+ p = internal_property_new();
+
+ p->sc_property_name = (char *)name;
+ p->sc_value_type = vtype;
+
+ va_start(args, nvals);
+ for (; nvals > 0; nvals--) {
+
+ v = internal_value_new();
+ v->sc_type = vtype;
+
+ switch (vtype) {
+ case SCF_TYPE_BOOLEAN:
+ case SCF_TYPE_COUNT:
+ v->sc_u.sc_count = va_arg(args, uint64_t);
+ break;
+ case SCF_TYPE_INTEGER:
+ v->sc_u.sc_integer = va_arg(args, int64_t);
+ break;
+ case SCF_TYPE_ASTRING:
+ case SCF_TYPE_FMRI:
+ case SCF_TYPE_HOST:
+ case SCF_TYPE_HOSTNAME:
+ case SCF_TYPE_NET_ADDR_V4:
+ case SCF_TYPE_NET_ADDR_V6:
+ case SCF_TYPE_OPAQUE:
+ case SCF_TYPE_TIME:
+ case SCF_TYPE_URI:
+ case SCF_TYPE_USTRING:
+ v->sc_u.sc_string = (char *)va_arg(args, uchar_t *);
+ break;
+ default:
+ va_end(args);
+ uu_die(gettext("unknown property type (%d)\n"), vtype);
+ break;
+ }
+
+ internal_attach_value(p, v);
+ }
+ va_end(args);
+
+ return (p);
+}
+
+/*
+ * Some of these attach functions use uu_list_append() to maintain the
+ * same order across import/export, whereas others are always sorted
+ * anyway, or the order is irrelevant.
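+ *
+ * For example, internal_attach_pgroup() appends to the entity's sc_pgroups
+ * list (preserving manifest order), while internal_attach_property() inserts
+ * into the sorted sc_pgroup_props list.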
+ */
+
+int
+internal_attach_service(bundle_t *bndl, entity_t *svc)
+{
+ if (uu_list_find(bndl->sc_bundle_services, svc, NULL, NULL) != NULL) {
+ semerr(gettext("Multiple definitions for service %s in "
+ "bundle %s.\n"), svc->sc_name, bndl->sc_bundle_name);
+ return (-1);
+ }
+
+ (void) uu_list_append(bndl->sc_bundle_services, svc);
+
+ return (0);
+}
+
+int
+internal_attach_entity(entity_t *svc, entity_t *ent)
+{
+ if (ent->sc_etype == SVCCFG_TEMPLATE_OBJECT) {
+ svc->sc_u.sc_service.sc_service_template = ent;
+ return (0);
+ }
+
+ if (svc->sc_etype != SVCCFG_SERVICE_OBJECT)
+ uu_die(gettext("bad entity attach: %s is not a service\n"),
+ svc->sc_name);
+
+ if (uu_list_find(svc->sc_u.sc_service.sc_service_instances, ent, NULL,
+ NULL) != NULL) {
+ semerr(gettext("Multiple definitions of entity %s in service "
+ "%s.\n"), ent->sc_name, svc->sc_name);
+ return (-1);
+ }
+
+ (void) uu_list_prepend(svc->sc_u.sc_service.sc_service_instances, ent);
+ ent->sc_parent = svc;
+ ent->sc_fmri = uu_msprintf("%s:%s", svc->sc_fmri, ent->sc_name);
+ if (ent->sc_fmri == NULL)
+ uu_die(gettext("couldn't allocate memory"));
+
+ return (0);
+}
+
+int
+internal_attach_pgroup(entity_t *ent, pgroup_t *pgrp)
+{
+ if (uu_list_find(ent->sc_pgroups, pgrp, NULL, NULL) != NULL) {
+ semerr(gettext("Multiple definitions of property group %s in "
+ "entity %s.\n"), pgrp->sc_pgroup_name, ent->sc_name);
+ return (-1);
+ }
+
+ (void) uu_list_append(ent->sc_pgroups, pgrp);
+
+ pgrp->sc_parent = ent;
+
+ return (0);
+}
+
+int
+internal_attach_dependent(entity_t *ent, pgroup_t *pg)
+{
+ if (uu_list_find(ent->sc_dependents, pg, NULL, NULL) != NULL) {
+ semerr(gettext("Multiple definitions of dependent %s in "
+ "entity %s.\n"), pg->sc_pgroup_name, ent->sc_name);
+ return (-1);
+ }
+
+ (void) uu_list_append(ent->sc_dependents, pg);
+
+ pg->sc_parent = ent;
+
+ return (0);
+}
+
+/*
+ * Returns
+ * 0 - success
+ * -1 - prop already exists in pgrp
+ */
+int
+internal_attach_property(pgroup_t *pgrp, property_t *prop)
+{
+ uu_list_index_t idx;
+
+ if (uu_list_find(pgrp->sc_pgroup_props, prop, NULL, &idx) != NULL) {
+ semerr(gettext("Multiple definitions for property %s in "
+ "property group %s.\n"), prop->sc_property_name,
+ pgrp->sc_pgroup_name);
+ return (-1);
+ }
+
+ uu_list_insert(pgrp->sc_pgroup_props, prop, idx);
+
+ return (0);
+}
+
+void
+internal_attach_value(property_t *prop, value_t *val)
+{
+ uu_list_index_t idx;
+
+ (void) uu_list_find(prop->sc_property_values, val, NULL, &idx);
+ uu_list_insert(prop->sc_property_values, val, idx);
+}
+
+/*
+ * These functions create an internal representation of a property group
+ * (pgroup_t) from the repository (scf_propertygroup_t). They are used by the
+ * import functions in svccfg_libscf.c .
+ *
+ * load_init() must be called first to initialize these globals, and
+ * load_fini() should be called afterwards to destroy them.
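+ *
+ * A minimal usage sketch (illustrative only; error handling abbreviated):
+ *
+ *     if (load_init() != 0)
+ *             uu_die(gettext("Out of memory.\n"));
+ *     if (load_pg(pg, &ipg, fmri, NULL) == 0)
+ *             (void) internal_attach_pgroup(ent, ipg);
+ *     load_fini();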
+ */
+
+static char *loadbuf = NULL;
+static size_t loadbuf_sz;
+static scf_property_t *load_prop = NULL;
+static scf_value_t *load_val = NULL;
+static scf_iter_t *load_propiter = NULL, *load_valiter = NULL;
+
+/*
+ * Initialize the global state for the load_*() routines.
+ * Returns
+ * 0 - success
+ * ENOMEM - out of memory
+ */
+int
+load_init(void)
+{
+ loadbuf_sz = ((max_scf_value_len > max_scf_pg_type_len) ?
+ max_scf_value_len : max_scf_pg_type_len) + 1;
+
+ loadbuf = malloc(loadbuf_sz);
+ if (loadbuf == NULL)
+ return (ENOMEM);
+
+ if ((load_prop = scf_property_create(g_hndl)) == NULL ||
+ (load_val = scf_value_create(g_hndl)) == NULL ||
+ (load_propiter = scf_iter_create(g_hndl)) == NULL ||
+ (load_valiter = scf_iter_create(g_hndl)) == NULL) {
+ load_fini();
+ return (ENOMEM);
+ }
+
+ return (0);
+}
+
+void
+load_fini(void)
+{
+ scf_iter_destroy(load_propiter);
+ load_propiter = NULL;
+ scf_iter_destroy(load_valiter);
+ load_valiter = NULL;
+ scf_value_destroy(load_val);
+ load_val = NULL;
+ scf_property_destroy(load_prop);
+ load_prop = NULL;
+ free(loadbuf);
+ loadbuf = NULL;
+}
+
+/*
+ * Create a property_t which represents an scf_property_t. Returns
+ * 0 - success
+ * ECANCELED - prop's pg was deleted
+ * ECONNABORTED - repository disconnected
+ * ENOMEM - out of memory
+ */
+static int
+load_property(scf_property_t *prop, property_t **ipp)
+{
+ property_t *iprop;
+ int r;
+ ssize_t ssz;
+
+ /* get name */
+ if (scf_property_get_name(prop, loadbuf, loadbuf_sz) < 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ return (ECANCELED);
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_property_get_name", scf_error());
+ }
+ }
+
+ iprop = internal_property_new();
+ iprop->sc_property_name = strdup(loadbuf);
+ if (iprop->sc_property_name == NULL) {
+ internal_property_free(iprop);
+ return (ENOMEM);
+ }
+
+ /* get type */
+ if (scf_property_type(prop, &iprop->sc_value_type) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ r = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ r = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_property_type", scf_error());
+ }
+ }
+
+ /* get values */
+ if (scf_iter_property_values(load_valiter, prop) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ r = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ r = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_iter_property_values", scf_error());
+ }
+ }
+
+ for (;;) {
+ value_t *ival;
+
+ r = scf_iter_next_value(load_valiter, load_val);
+ if (r == 0)
+ break;
+ if (r != 1) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ r = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ r = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ default:
+ bad_error("scf_iter_next_value", scf_error());
+ }
+ }
+
+ ival = internal_value_new();
+ ival->sc_type = scf_value_type(load_val);
+ assert(ival->sc_type != SCF_TYPE_INVALID);
+
+ switch (ival->sc_type) {
+ case SCF_TYPE_BOOLEAN: {
+ uint8_t b;
+
+ r = scf_value_get_boolean(load_val, &b);
+ if (r != 0)
+ bad_error("scf_value_get_boolean", scf_error());
+ ival->sc_u.sc_count = b;
+ break;
+ }
+
+ case SCF_TYPE_COUNT:
+ r = scf_value_get_count(load_val, &ival->sc_u.sc_count);
+ if (r != 0)
+ bad_error("scf_value_get_count", scf_error());
+ break;
+
+ case SCF_TYPE_INTEGER:
+ r = scf_value_get_integer(load_val,
+ &ival->sc_u.sc_integer);
+ if (r != 0)
+ bad_error("scf_value_get_integer", scf_error());
+ break;
+
+ default:
+ ssz = scf_value_get_as_string(load_val, loadbuf,
+ loadbuf_sz);
+ if (ssz < 0)
+ bad_error("scf_value_get_as_string",
+ scf_error());
+
+ ival->sc_u.sc_string = strdup(loadbuf);
+ if (ival->sc_u.sc_string == NULL) {
+ r = ENOMEM;
+ goto out;
+ }
+
+ ival->sc_free = internal_value_free_str;
+ }
+
+ internal_attach_value(iprop, ival);
+ }
+
+ *ipp = iprop;
+ return (0);
+
+out:
+ free(iprop->sc_property_name);
+ internal_property_free(iprop);
+ return (r);
+}
+
+/*
+ * Returns
+ * 0 - success
+ * ECANCELED - pg was deleted
+ * ECONNABORTED - repository disconnected
+ * ENOMEM - out of memory
+ */
+int
+load_pg_attrs(const scf_propertygroup_t *pg, pgroup_t **ipgp)
+{
+ pgroup_t *ipg;
+
+ ipg = internal_pgroup_new();
+
+ if (scf_pg_get_flags(pg, &ipg->sc_pgroup_flags) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ internal_pgroup_free(ipg);
+ return (ECANCELED);
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ internal_pgroup_free(ipg);
+ return (ECONNABORTED);
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("scf_pg_get_name", scf_error());
+ }
+ }
+
+ if (scf_pg_get_name(pg, loadbuf, loadbuf_sz) < 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ internal_pgroup_free(ipg);
+ return (ECANCELED);
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ internal_pgroup_free(ipg);
+ return (ECONNABORTED);
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("scf_pg_get_name", scf_error());
+ }
+ }
+
+ ipg->sc_pgroup_name = strdup(loadbuf);
+ if (ipg->sc_pgroup_name == NULL) {
+ internal_pgroup_free(ipg);
+ return (ENOMEM);
+ }
+
+ if (scf_pg_get_type(pg, loadbuf, loadbuf_sz) < 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ free((char *)ipg->sc_pgroup_name);
+ internal_pgroup_free(ipg);
+ return (ECANCELED);
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ free((char *)ipg->sc_pgroup_name);
+ internal_pgroup_free(ipg);
+ return (ECONNABORTED);
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("scf_pg_get_name", scf_error());
+ }
+ }
+
+ ipg->sc_pgroup_type = strdup(loadbuf);
+ if (ipg->sc_pgroup_type == NULL) {
+ free((char *)ipg->sc_pgroup_name);
+ internal_pgroup_free(ipg);
+ return (ENOMEM);
+ }
+
+ *ipgp = ipg;
+ return (0);
+}
+
+/*
+ * Load a property group into a pgroup_t. Returns
+ * 0 - success
+ * ECANCELED - pg was deleted
+ * ECONNABORTED - repository disconnected
+ * EBADF - pg is corrupt (error printed if fmri is given)
+ * ENOMEM - out of memory
+ */
+int
+load_pg(const scf_propertygroup_t *pg, pgroup_t **ipgp, const char *fmri,
+ const char *snapname)
+{
+ pgroup_t *ipg;
+ int r;
+
+ if (scf_iter_pg_properties(load_propiter, pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ return (ECANCELED);
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("scf_iter_pg_properties", scf_error());
+ }
+ }
+
+ r = load_pg_attrs(pg, &ipg);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ case ECONNABORTED:
+ case ENOMEM:
+ return (r);
+
+ default:
+ bad_error("load_pg_attrs", r);
+ }
+
+ for (;;) {
+ property_t *iprop;
+
+ r = scf_iter_next_property(load_propiter, load_prop);
+ if (r == 0)
+ break;
+ if (r != 1) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ r = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ r = ECONNABORTED;
+ goto out;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ default:
+ bad_error("scf_iter_next_property",
+ scf_error());
+ }
+ }
+
+ r = load_property(load_prop, &iprop);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ case ECONNABORTED:
+ case ENOMEM:
+ goto out;
+
+ default:
+ bad_error("load_property", r);
+ }
+
+ r = internal_attach_property(ipg, iprop);
+ if (r != 0) {
+ if (fmri != NULL) {
+ if (snapname == NULL)
+ warn(gettext("Property group \"%s\" of "
+ "%s has multiple definitions of "
+ "property \"%s\".\n"),
+ ipg->sc_pgroup_name, fmri,
+ iprop->sc_property_name);
+ else
+ warn(gettext("Property group \"%s\" of "
+ "the \"%s\" snapshot of %s has "
+ "multiple definitions of property "
+ "\"%s\".\n"),
+ ipg->sc_pgroup_name, snapname, fmri,
+ iprop->sc_property_name);
+ }
+ r = EBADF;
+ goto out;
+ }
+ }
+
+ *ipgp = ipg;
+ return (0);
+
+out:
+ internal_pgroup_free(ipg);
+ return (r);
+}
+
+/*
+ * These functions compare internal property groups and properties (pgroup_t
+ * & property_t). They return 1 if the given structures are equal and
+ * 0 otherwise. Some will report the differences between the two structures.
+ * They are used by the import functions in svccfg_libscf.c .
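+ *
+ * For example, prop_equal(p1, p2, NULL, NULL, 0) is a quiet equality test,
+ * while passing an fmri and pgname causes any differences to be reported as
+ * upgrade conflicts.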
+ */
+
+int
+prop_equal(property_t *p1, property_t *p2, const char *fmri, const char *pgname,
+ int new)
+{
+ value_t *v1, *v2;
+
+ const char * const values_diff = gettext("Conflict upgrading %s "
+ "(property \"%s/%s\" has different values).\n");
+ const char * const values_diff_new = gettext("Conflict upgrading %s "
+ "(new property \"%s/%s\" has different values).\n");
+
+ assert((fmri == NULL) == (pgname == NULL));
+
+ if (fmri != NULL) {
+ /*
+ * If we find any differences, we'll report conflicts. But
+ * conflict messages won't make any sense if the names don't
+ * match. If the caller supplied fmri, assert that the names
+ * match.
+ */
+ assert(strcmp(p1->sc_property_name, p2->sc_property_name) == 0);
+ } else {
+ if (strcmp(p1->sc_property_name, p2->sc_property_name) != 0)
+ return (0);
+ }
+
+ if (p1->sc_value_type != p2->sc_value_type) {
+ if (fmri != NULL) {
+ if (new)
+ warn(gettext("Conflict upgrading %s "
+ "(new property \"%s/%s\" has different "
+ "type).\n"), fmri, pgname,
+ p1->sc_property_name);
+ else
+ warn(gettext("Conflict upgrading %s "
+ "(property \"%s/%s\" has different "
+ "type).\n"), fmri, pgname,
+ p1->sc_property_name);
+ }
+ return (0);
+ }
+
+ if (uu_list_numnodes(p1->sc_property_values) !=
+ uu_list_numnodes(p2->sc_property_values)) {
+ if (fmri != NULL)
+ warn(new ? values_diff_new : values_diff, fmri,
+ pgname, p1->sc_property_name);
+ return (0);
+ }
+
+ v1 = uu_list_first(p1->sc_property_values);
+ v2 = uu_list_first(p2->sc_property_values);
+
+ while (v1 != NULL) {
+ assert(v2 != NULL);
+
+ if (value_cmp(v1, v2, NULL) != 0) {
+ if (fmri != NULL)
+ warn(new ? values_diff_new : values_diff,
+ fmri, pgname, p1->sc_property_name);
+ return (0);
+ }
+
+ v1 = uu_list_next(p1->sc_property_values, v1);
+ v2 = uu_list_next(p2->sc_property_values, v2);
+ }
+
+ return (1);
+}
+
+int
+pg_attrs_equal(pgroup_t *pg1, pgroup_t *pg2, const char *fmri, int new)
+{
+ if (strcmp(pg1->sc_pgroup_name, pg2->sc_pgroup_name) != 0) {
+ assert(fmri == NULL);
+ return (0);
+ }
+
+ if (pg1->sc_pgroup_flags != pg2->sc_pgroup_flags) {
+ if (fmri) {
+ if (new)
+ warn(gettext("Conflict upgrading %s "
+ "(new property group \"%s\" has different "
+ "flags).\n"), fmri, pg1->sc_pgroup_name);
+ else
+ warn(gettext("Conflict upgrading %s "
+ "(property group \"%s\" has different "
+ "flags).\n"), fmri, pg1->sc_pgroup_name);
+ }
+ return (0);
+ }
+
+ if (strcmp(pg1->sc_pgroup_type, pg2->sc_pgroup_type) != 0) {
+ if (fmri) {
+ if (new)
+ warn(gettext("Conflict upgrading %s "
+ "(new property group \"%s\" has different "
+ "type).\n"), fmri, pg1->sc_pgroup_name);
+ else
+ warn(gettext("Conflict upgrading %s "
+ "(property group \"%s\" has different "
+ "type).\n"), fmri, pg1->sc_pgroup_name);
+ }
+ return (0);
+ }
+
+ return (1);
+}
+
+int
+pg_equal(pgroup_t *pg1, pgroup_t *pg2)
+{
+ property_t *p1, *p2;
+
+ if (!pg_attrs_equal(pg1, pg2, NULL, 0))
+ return (0);
+
+ if (uu_list_numnodes(pg1->sc_pgroup_props) !=
+ uu_list_numnodes(pg2->sc_pgroup_props))
+ return (0);
+
+ p1 = uu_list_first(pg1->sc_pgroup_props);
+ p2 = uu_list_first(pg2->sc_pgroup_props);
+
+ while (p1 != NULL) {
+ assert(p2 != NULL);
+
+ if (!prop_equal(p1, p2, NULL, NULL, 0))
+ return (0);
+
+ p1 = uu_list_next(pg1->sc_pgroup_props, p1);
+ p2 = uu_list_next(pg2->sc_pgroup_props, p2);
+ }
+
+ return (1);
+}
diff --git a/usr/src/cmd/svc/svccfg/svccfg_libscf.c b/usr/src/cmd/svc/svccfg/svccfg_libscf.c
new file mode 100644
index 0000000000..6880faf14b
--- /dev/null
+++ b/usr/src/cmd/svc/svccfg/svccfg_libscf.c
@@ -0,0 +1,12018 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <alloca.h>
+#include <assert.h>
+#include <ctype.h>
+#include <door.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <fnmatch.h>
+#include <libintl.h>
+#include <libscf.h>
+#include <libscf_priv.h>
+#include <libtecla.h>
+#include <libuutil.h>
+#include <limits.h>
+#include <stdarg.h>
+#include <string.h>
+#include <unistd.h>
+#include <wait.h>
+
+#include <libxml/tree.h>
+
+#include "svccfg.h"
+
+/* The colon namespaces in each entity (each followed by a newline). */
+#define COLON_NAMESPACES ":properties\n"
+
+#define TEMP_FILE_PATTERN "/tmp/svccfg-XXXXXX"
+
+/* These are characters which the lexer requires to be in double-quotes. */
+#define CHARS_TO_QUOTE " \t\n>=\"()"
+
+#define HASH_SIZE 16
+#define HASH_SVC "smf/manifest"
+#define HASH_PG_TYPE "framework"
+#define HASH_PG_FLAGS 0
+#define HASH_PROP "md5sum"
+
+
+/*
+ * These are the classes of elements which may appear as children of service
+ * or instance elements in XML manifests.
+ */
+struct entity_elts {
+ xmlNodePtr create_default_instance;
+ xmlNodePtr single_instance;
+ xmlNodePtr restarter;
+ xmlNodePtr dependencies;
+ xmlNodePtr dependents;
+ xmlNodePtr method_context;
+ xmlNodePtr exec_methods;
+ xmlNodePtr property_groups;
+ xmlNodePtr instances;
+ xmlNodePtr stability;
+ xmlNodePtr template;
+};
+
+/*
+ * Likewise for property_group elements.
+ */
+struct pg_elts {
+ xmlNodePtr stability;
+ xmlNodePtr propvals;
+ xmlNodePtr properties;
+};
+
+/*
+ * Likewise for template elements.
+ */
+struct template_elts {
+ xmlNodePtr common_name;
+ xmlNodePtr description;
+ xmlNodePtr documentation;
+};
+
+/*
+ * This structure is for snaplevel lists. They are convenient because libscf
+ * only allows traversing snaplevels in one direction.
+ */
+struct snaplevel {
+ uu_list_node_t list_node;
+ scf_snaplevel_t *sl;
+};
+
+
+const char * const scf_pg_general = SCF_PG_GENERAL;
+const char * const scf_group_framework = SCF_GROUP_FRAMEWORK;
+const char * const scf_property_enabled = SCF_PROPERTY_ENABLED;
+const char * const scf_property_external = "external";
+
+const char * const snap_initial = "initial";
+const char * const snap_lastimport = "last-import";
+const char * const snap_previous = "previous";
+const char * const snap_running = "running";
+
+
+scf_handle_t *g_hndl = NULL; /* only valid after lscf_prep_hndl() */
+
+ssize_t max_scf_fmri_len;
+ssize_t max_scf_name_len;
+ssize_t max_scf_pg_type_len;
+ssize_t max_scf_value_len;
+static size_t max_scf_len;
+
+static scf_scope_t *cur_scope;
+static scf_service_t *cur_svc = NULL;
+static scf_instance_t *cur_inst = NULL;
+static scf_snapshot_t *cur_snap = NULL;
+static scf_snaplevel_t *cur_level = NULL;
+
+static uu_list_pool_t *snaplevel_pool;
+/* cur_levels is the snaplevels of cur_snap, from least specific to most. */
+static uu_list_t *cur_levels;
+static struct snaplevel *cur_elt; /* cur_elt->sl == cur_level */
+
+static FILE *tempfile = NULL;
+static char tempfilename[sizeof (TEMP_FILE_PATTERN)] = "";
+
+static const char *emsg_entity_not_selected;
+static const char *emsg_permission_denied;
+static const char *emsg_create_xml;
+static const char *emsg_cant_modify_snapshots;
+static const char *emsg_read_only;
+static const char *emsg_deleted;
+static const char *emsg_invalid_pg_name;
+static const char *emsg_invalid_prop_name;
+static const char *emsg_no_such_pg;
+static const char *emsg_fmri_invalid_pg_name;
+static const char *emsg_pg_added;
+static const char *emsg_pg_changed;
+static const char *emsg_pg_deleted;
+static const char *emsg_pg_mod_perm;
+static const char *emsg_pg_add_perm;
+static const char *emsg_pg_del_perm;
+static const char *emsg_snap_perm;
+
+static int li_only;
+static int no_refresh = 0;
+
+/* import globals, to minimize allocations */
+static scf_scope_t *imp_scope = NULL;
+static scf_service_t *imp_svc = NULL, *imp_tsvc = NULL;
+static scf_instance_t *imp_inst = NULL, *imp_tinst = NULL;
+static scf_snapshot_t *imp_snap = NULL, *imp_lisnap = NULL, *imp_tlisnap = NULL;
+static scf_snapshot_t *imp_rsnap = NULL;
+static scf_snaplevel_t *imp_snpl = NULL, *imp_rsnpl = NULL;
+static scf_propertygroup_t *imp_pg = NULL, *imp_pg2 = NULL;
+static scf_property_t *imp_prop = NULL;
+static scf_iter_t *imp_iter = NULL;
+static scf_iter_t *imp_rpg_iter = NULL;
+static scf_iter_t *imp_up_iter = NULL;
+static scf_transaction_t *imp_tx = NULL; /* always reset this */
+static scf_transaction_t *imp_tx2 = NULL;
+static char *imp_str = NULL;
+static size_t imp_str_sz;
+static char *imp_tsname = NULL;
+static char *imp_fe1 = NULL; /* for fmri_equal() */
+static char *imp_fe2 = NULL;
+
+/* upgrade_dependents() globals */
+static scf_instance_t *ud_inst = NULL;
+static scf_snaplevel_t *ud_snpl = NULL;
+static scf_propertygroup_t *ud_pg = NULL;
+static scf_propertygroup_t *ud_cur_depts_pg = NULL;
+static int ud_cur_depts_pg_set = 0;
+static scf_property_t *ud_prop = NULL;
+static scf_property_t *ud_dpt_prop = NULL;
+static scf_value_t *ud_val = NULL;
+static scf_iter_t *ud_iter = NULL, *ud_iter2 = NULL;
+static char *ud_ctarg = NULL;
+static char *ud_oldtarg = NULL;
+static char *ud_name = NULL;
+
+/* export globals */
+static scf_instance_t *exp_inst;
+static scf_propertygroup_t *exp_pg;
+static scf_property_t *exp_prop;
+static scf_value_t *exp_val;
+static scf_iter_t *exp_inst_iter, *exp_pg_iter, *exp_prop_iter, *exp_val_iter;
+static char *exp_str;
+static size_t exp_str_sz;
+
+static char *start_method_names[] = {
+ "start",
+ "inetd_start",
+ NULL
+};
+
+static void
+safe_printf(const char *fmt, ...)
+{
+ va_list va;
+
+ va_start(va, fmt);
+ if (vprintf(fmt, va) < 0)
+ uu_die(gettext("Error writing to stdout"));
+ va_end(va);
+}
+
+/*
+ * For unexpected libscf errors.
+ */
+#ifdef NDEBUG
+
+static void
+scfdie(void)
+{
+ scf_error_t err = scf_error();
+
+ if (err == SCF_ERROR_CONNECTION_BROKEN)
+ uu_die(gettext("Repository connection broken. Exiting.\n"));
+
+ uu_die(gettext("Unexpected fatal libscf error: %s. Exiting.\n"),
+ scf_strerror(err));
+}
+
+#else
+
+#define scfdie() scfdie_lineno(__LINE__)
+
+static void
+scfdie_lineno(int lineno)
+{
+ scf_error_t err = scf_error();
+
+ if (err == SCF_ERROR_CONNECTION_BROKEN)
+ uu_die(gettext("Repository connection broken. Exiting.\n"));
+
+ uu_die(gettext("Unexpected libscf error on line %d of " __FILE__
+ ": %s.\n"), lineno, scf_strerror(err));
+}
+
+#endif
+
+static void
+scfwarn(void)
+{
+ warn(gettext("Unexpected libscf error: %s.\n"),
+ scf_strerror(scf_error()));
+}
+
+/*
+ * Clear the int field at byte offset b of the structure a. (Written as a
+ * walk callback; returns UU_WALK_NEXT.)
+ */
+static int
+clear_int(void *a, void *b)
+{
+ /* LINTED */
+ *(int *)((char *)a + (size_t)b) = 0;
+
+ return (UU_WALK_NEXT);
+}
+
+static int
+scferror2errno(scf_error_t err)
+{
+ switch (err) {
+ case SCF_ERROR_BACKEND_ACCESS:
+ return (EACCES);
+
+ case SCF_ERROR_BACKEND_READONLY:
+ return (EROFS);
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ return (EINVAL);
+
+ case SCF_ERROR_DELETED:
+ return (ECANCELED);
+
+ case SCF_ERROR_EXISTS:
+ return (EEXIST);
+
+ case SCF_ERROR_NO_MEMORY:
+ return (ENOMEM);
+
+ case SCF_ERROR_NO_RESOURCES:
+ return (ENOSPC);
+
+ case SCF_ERROR_NOT_FOUND:
+ return (ENOENT);
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ return (EPERM);
+
+ default:
+#ifndef NDEBUG
+ (void) fprintf(stderr, "%s:%d: Unknown libscf error %d.\n",
+ __FILE__, __LINE__, err);
+#else
+ (void) fprintf(stderr, "Unknown libscf error %d.\n", err);
+#endif
+ abort();
+ /* NOTREACHED */
+ }
+}
+
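+/*
+ * Wrappers which dispatch on issvc: ent is treated as an scf_service_t *
+ * when issvc is nonzero and as an scf_instance_t * otherwise.
+ */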
+static int
+entity_get_pg(void *ent, int issvc, const char *name,
+ scf_propertygroup_t *pg)
+{
+ if (issvc)
+ return (scf_service_get_pg(ent, name, pg));
+ else
+ return (scf_instance_get_pg(ent, name, pg));
+}
+
+static void
+entity_destroy(void *ent, int issvc)
+{
+ if (issvc)
+ scf_service_destroy(ent);
+ else
+ scf_instance_destroy(ent);
+}
+
+/*
+ * Find a snaplevel in a snapshot. If get_svc is true, find the service
+ * snaplevel. Otherwise find the instance snaplevel.
+ *
+ * Returns
+ * 0 - success
+ * ECONNABORTED - repository connection broken
+ * ECANCELED - instance containing snap was deleted
+ * ENOENT - snap has no snaplevels
+ * - requested snaplevel not found
+ */
+static int
+get_snaplevel(scf_snapshot_t *snap, int get_svc, scf_snaplevel_t *snpl)
+{
+ if (scf_snapshot_get_base_snaplevel(snap, snpl) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_NOT_FOUND:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_snapshot_get_base_snaplevel",
+ scf_error());
+ }
+ }
+
+ for (;;) {
+ ssize_t ssz;
+
+ ssz = scf_snaplevel_get_instance_name(snpl, NULL, 0);
+ if (ssz >= 0) {
+ if (!get_svc)
+ return (0);
+ } else {
+ switch (scf_error()) {
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ if (get_svc)
+ return (0);
+ break;
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("scf_snaplevel_get_instance_name",
+ scf_error());
+ }
+ }
+
+ if (scf_snaplevel_get_next_snaplevel(snpl, snpl) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_DELETED:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ default:
+ bad_error("scf_snaplevel_get_next_snaplevel",
+ scf_error());
+ }
+ }
+ }
+}
+
+/*
+ * If issvc is 0, take ent to be a pointer to an scf_instance_t. If it has
+ * a running snapshot, and that snapshot has an instance snaplevel, set pg to
+ * the property group named name in it. If it doesn't have a running
+ * snapshot, set pg to the instance's current property group named name.
+ *
+ * If issvc is nonzero, take ent to be a pointer to an scf_service_t, and walk
+ * its instances. If one has a running snapshot with a service snaplevel, set
+ * pg to the property group named name in it. If no such snaplevel could be
+ * found, set pg to the service's current property group named name.
+ *
+ * Returns
+ * 0 - success
+ * ECONNABORTED - repository connection broken
+ * ECANCELED - ent was deleted
+ * ENOENT - no such property group
+ * EINVAL - name is an invalid property group name
+ * EBADF - snapshot is missing a snaplevel
+ */
+static int
+entity_get_running_pg(void *ent, int issvc, const char *name,
+ scf_propertygroup_t *pg, scf_iter_t *iter, scf_instance_t *inst,
+ scf_snapshot_t *snap, scf_snaplevel_t *snpl)
+{
+ int r;
+
+ if (issvc) {
+ /* Search for an instance with a running snapshot. */
+ if (scf_iter_service_instances(iter, ent) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ default:
+ bad_error("scf_iter_service_instances",
+ scf_error());
+ }
+ }
+
+ for (;;) {
+ r = scf_iter_next_instance(iter, inst);
+ if (r == 0) {
+ if (scf_service_get_pg(ent, name, pg) == 0)
+ return (0);
+
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_NOT_FOUND:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_service_get_pg",
+ scf_error());
+ }
+ }
+ if (r != 1) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ default:
+ bad_error("scf_iter_next_instance",
+ scf_error());
+ }
+ }
+
+ if (scf_instance_get_snapshot(inst, snap_running,
+ snap) == 0)
+ break;
+
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ case SCF_ERROR_DELETED:
+ continue;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("scf_instance_get_snapshot",
+ scf_error());
+ }
+ }
+ } else {
+ if (scf_instance_get_snapshot(ent, snap_running, snap) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_instance_get_snapshot",
+ scf_error());
+ }
+
+ if (scf_instance_get_pg(ent, name, pg) == 0)
+ return (0);
+
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_NOT_FOUND:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_instance_get_pg", scf_error());
+ }
+ }
+ }
+
+ r = get_snaplevel(snap, issvc, snpl);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ case ECANCELED:
+ return (r);
+
+ case ENOENT:
+ return (EBADF);
+
+ default:
+ bad_error("get_snaplevel", r);
+ }
+
+ if (scf_snaplevel_get_pg(snpl, name, pg) == 0)
+ return (0);
+
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_NOT_FOUND:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_snaplevel_get_pg", scf_error());
+ /* NOTREACHED */
+ }
+}
+
+
+/*
+ * To be registered with atexit().
+ */
+static void
+remove_tempfile(void)
+{
+ int ret;
+
+ if (tempfile != NULL) {
+ if (fclose(tempfile) == EOF)
+ warn(gettext("Could not close temporary file"));
+ tempfile = NULL;
+ }
+
+ if (tempfilename[0] != '\0') {
+ do
+ ret = remove(tempfilename);
+ while (ret == -1 && errno == EINTR);
+ if (ret == -1)
+ warn(gettext("Could not remove temporary file"));
+ tempfilename[0] = '\0';
+ }
+}
+
+/*
+ * Launch private svc.configd(1M) for manipulating alternate repositories.
+ */
+static void
+start_private_repository(engine_state_t *est)
+{
+ int fd, stat;
+ struct door_info info;
+ pid_t pid;
+
+ /*
+ * 1. Create a temporary file for the door.
+ */
+ if (est->sc_repo_doorname != NULL)
+ free((void *)est->sc_repo_doorname);
+
+ est->sc_repo_doorname = tempnam(est->sc_repo_doordir, "scfdr");
+ if (est->sc_repo_doorname == NULL)
+ uu_die(gettext("Could not acquire temporary filename"));
+
+ fd = open(est->sc_repo_doorname, O_CREAT | O_EXCL | O_RDWR, 0600);
+ if (fd < 0)
+ uu_die(gettext("Could not create temporary file for "
+ "repository server"));
+
+ (void) close(fd);
+
+ /*
+ * 2. Launch a configd with that door, using the specified
+ * repository.
+ */
+ if ((est->sc_repo_pid = fork()) == 0) {
+ (void) execlp(est->sc_repo_server, est->sc_repo_server, "-p",
+ "-d", est->sc_repo_doorname, "-r", est->sc_repo_filename,
+ NULL);
+ uu_die(gettext("Could not execute %s"), est->sc_repo_server);
+ } else if (est->sc_repo_pid == -1)
+ uu_die(gettext("Attempt to fork failed"));
+
+ do
+ pid = waitpid(est->sc_repo_pid, &stat, 0);
+ while (pid == -1 && errno == EINTR);
+
+ if (pid == -1)
+ uu_die(gettext("Could not waitpid() for repository server"));
+
+ if (!WIFEXITED(stat)) {
+ uu_die(gettext("Repository server failed (status %d).\n"),
+ stat);
+ } else if (WEXITSTATUS(stat) != 0) {
+ uu_die(gettext("Repository server failed (exit %d).\n"),
+ WEXITSTATUS(stat));
+ }
+
+ /*
+ * See if it was successful by checking if the door is a door.
+ */
+
+ fd = open(est->sc_repo_doorname, O_RDWR);
+ if (fd < 0)
+ uu_die(gettext("Could not open door \"%s\""),
+ est->sc_repo_doorname);
+
+ if (door_info(fd, &info) < 0)
+ uu_die(gettext("Unexpected door_info() error"));
+
+ if (close(fd) == -1)
+ warn(gettext("Could not close repository door"),
+ strerror(errno));
+
+ est->sc_repo_pid = info.di_target;
+}
+
+void
+lscf_cleanup(void)
+{
+ /*
+ * In the case where we've launched a private svc.configd(1M)
+ * instance, we must terminate our child and remove the temporary
+ * rendezvous point.
+ */
+ if (est->sc_repo_pid > 0) {
+ (void) kill(est->sc_repo_pid, SIGTERM);
+ (void) waitpid(est->sc_repo_pid, NULL, 0);
+ (void) unlink(est->sc_repo_doorname);
+
+ est->sc_repo_pid = 0;
+ }
+}
+
+void
+unselect_cursnap(void)
+{
+ void *cookie;
+
+ cur_level = NULL;
+
+ cookie = NULL;
+ while ((cur_elt = uu_list_teardown(cur_levels, &cookie)) != NULL) {
+ scf_snaplevel_destroy(cur_elt->sl);
+ free(cur_elt);
+ }
+
+ scf_snapshot_destroy(cur_snap);
+ cur_snap = NULL;
+}
+
+void
+lscf_prep_hndl(void)
+{
+ if (g_hndl != NULL)
+ return;
+
+ g_hndl = scf_handle_create(SCF_VERSION);
+ if (g_hndl == NULL)
+ scfdie();
+
+ if (est->sc_repo_filename != NULL)
+ start_private_repository(est);
+
+ if (est->sc_repo_doorname != NULL) {
+ scf_value_t *repo_value;
+ int ret;
+
+ repo_value = scf_value_create(g_hndl);
+ if (repo_value == NULL)
+ scfdie();
+
+ ret = scf_value_set_astring(repo_value, est->sc_repo_doorname);
+ assert(ret == SCF_SUCCESS);
+
+ if (scf_handle_decorate(g_hndl, "door_path", repo_value) !=
+ SCF_SUCCESS)
+ scfdie();
+
+ scf_value_destroy(repo_value);
+ }
+
+ if (scf_handle_bind(g_hndl) != 0)
+ uu_die(gettext("Could not connect to repository server: %s.\n"),
+ scf_strerror(scf_error()));
+
+ cur_scope = scf_scope_create(g_hndl);
+ if (cur_scope == NULL)
+ scfdie();
+
+ if (scf_handle_get_local_scope(g_hndl, cur_scope) != 0)
+ scfdie();
+}
+
+static void
+repository_teardown(void)
+{
+ if (g_hndl != NULL) {
+ if (cur_snap != NULL)
+ unselect_cursnap();
+ scf_instance_destroy(cur_inst);
+ scf_service_destroy(cur_svc);
+ scf_scope_destroy(cur_scope);
+ scf_handle_destroy(g_hndl);
+ g_hndl = NULL;
+
+ lscf_cleanup();
+ }
+}
+
+void
+lscf_set_repository(const char *repfile)
+{
+ repository_teardown();
+
+ if (est->sc_repo_filename != NULL)
+ free((void *)est->sc_repo_filename);
+
+ est->sc_repo_filename = safe_strdup(repfile);
+
+ lscf_prep_hndl();
+}
+
+void
+lscf_init()
+{
+ if ((max_scf_fmri_len = scf_limit(SCF_LIMIT_MAX_FMRI_LENGTH)) < 0 ||
+ (max_scf_name_len = scf_limit(SCF_LIMIT_MAX_NAME_LENGTH)) < 0 ||
+ (max_scf_pg_type_len = scf_limit(SCF_LIMIT_MAX_PG_TYPE_LENGTH)) <
+ 0 ||
+ (max_scf_value_len = scf_limit(SCF_LIMIT_MAX_VALUE_LENGTH)) < 0)
+ scfdie();
+
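+	/*
+	 * A buffer of max_scf_len + 1 bytes is large enough for any of the
+	 * strings bounded by the limits fetched above.
+	 */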
+ max_scf_len = max_scf_fmri_len;
+ if (max_scf_name_len > max_scf_len)
+ max_scf_len = max_scf_name_len;
+ if (max_scf_pg_type_len > max_scf_len)
+ max_scf_len = max_scf_pg_type_len;
+ if (max_scf_value_len > max_scf_len)
+ max_scf_len = max_scf_value_len;
+
+ if (atexit(remove_tempfile) != 0)
+ uu_die(gettext("Could not register atexit() function"));
+
+ emsg_entity_not_selected = gettext("An entity is not selected.\n");
+ emsg_permission_denied = gettext("Permission denied.\n");
+ emsg_create_xml = gettext("Could not create XML node.\n");
+ emsg_cant_modify_snapshots = gettext("Cannot modify snapshots.\n");
+ emsg_read_only = gettext("Backend read-only.\n");
+ emsg_deleted = gettext("Current selection has been deleted.\n");
+ emsg_invalid_pg_name =
+ gettext("Invalid property group name \"%s\".\n");
+ emsg_invalid_prop_name = gettext("Invalid property name \"%s\".\n");
+ emsg_no_such_pg = gettext("No such property group \"%s\".\n");
+ emsg_fmri_invalid_pg_name = gettext("Service %s has property group "
+ "with invalid name \"%s\".\n");
+ emsg_pg_added = gettext("%s changed unexpectedly "
+ "(property group \"%s\" added).\n");
+ emsg_pg_changed = gettext("%s changed unexpectedly "
+ "(property group \"%s\" changed).\n");
+ emsg_pg_deleted = gettext("%s changed unexpectedly "
+ "(property group \"%s\" deleted).\n");
+ emsg_pg_mod_perm = gettext("Could not modify property group \"%s\" "
+ "in %s (permission denied).\n");
+ emsg_pg_add_perm = gettext("Could not create property group \"%s\" "
+ "in %s (permission denied).\n");
+ emsg_pg_del_perm = gettext("Could not delete property group \"%s\" "
+ "in %s (permission denied).\n");
+ emsg_snap_perm = gettext("Could not take \"%s\" snapshot of %s "
+ "(permission denied).\n");
+
+ string_pool = uu_list_pool_create("strings", sizeof (string_list_t),
+ offsetof(string_list_t, node), NULL, 0);
+ snaplevel_pool = uu_list_pool_create("snaplevels",
+ sizeof (struct snaplevel), offsetof(struct snaplevel, list_node),
+ NULL, 0);
+}
+
+
+static const char *
+prop_to_typestr(const scf_property_t *prop)
+{
+ scf_type_t ty;
+
+ if (scf_property_type(prop, &ty) != SCF_SUCCESS)
+ scfdie();
+
+ return (scf_type_to_string(ty));
+}
+
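+/*
+ * Convert a type token of the form "name:" (e.g. "astring:") into the
+ * corresponding scf_type_t.  Tokens without the trailing colon are rejected
+ * as SCF_TYPE_INVALID.
+ */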
+static scf_type_t
+string_to_type(const char *type)
+{
+ size_t len = strlen(type);
+ char *buf;
+
+ if (len == 0 || type[len - 1] != ':')
+ return (SCF_TYPE_INVALID);
+
+ buf = (char *)alloca(len + 1);
+ (void) strlcpy(buf, type, len + 1);
+ buf[len - 1] = 0;
+
+ return (scf_string_to_type(buf));
+}
+
+static scf_value_t *
+string_to_value(const char *str, scf_type_t ty, boolean_t require_quotes)
+{
+ scf_value_t *v;
+ char *dup, *nstr;
+ size_t len;
+
+ v = scf_value_create(g_hndl);
+ if (v == NULL)
+ scfdie();
+
+ len = strlen(str);
+ if (require_quotes &&
+ (len < 2 || str[0] != '\"' || str[len - 1] != '\"')) {
+ semerr(gettext("Multiple string values or string values "
+ "with spaces must be quoted with '\"'.\n"));
+ scf_value_destroy(v);
+ return (NULL);
+ }
+
+ nstr = dup = safe_strdup(str);
+ if (dup[0] == '\"') {
+ /*
+ * Strip out the first and the last quote.
+ */
+ dup[len - 1] = '\0';
+ nstr = dup + 1;
+ }
+
+ if (scf_value_set_from_string(v, ty, (const char *)nstr) != 0) {
+ assert(scf_error() == SCF_ERROR_INVALID_ARGUMENT);
+ semerr(gettext("Invalid \"%s\" value \"%s\".\n"),
+ scf_type_to_string(ty), nstr);
+ scf_value_destroy(v);
+ v = NULL;
+ }
+ free(dup);
+ return (v);
+}
+
+/*
+ * Print str to strm, quoting double-quotes and backslashes with backslashes.
+ */
+static int
+quote_and_print(const char *str, FILE *strm)
+{
+ const char *cp;
+
+ for (cp = str; *cp != '\0'; ++cp) {
+ if (*cp == '"' || *cp == '\\')
+ (void) putc('\\', strm);
+
+ (void) putc(*cp, strm);
+ }
+
+ return (ferror(strm));
+}
+
+/*
+ * These wrappers around lowlevel functions provide consistent error checking
+ * and warnings.
+ */
+static int
+pg_get_prop(scf_propertygroup_t *pg, const char *propname, scf_property_t *prop)
+{
+ if (scf_pg_get_property(pg, propname, prop) == SCF_SUCCESS)
+ return (0);
+
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ if (g_verbose) {
+ ssize_t len;
+ char *fmri;
+
+ len = scf_pg_to_fmri(pg, NULL, 0);
+ if (len < 0)
+ scfdie();
+
+ fmri = safe_malloc(len + 1);
+
+ if (scf_pg_to_fmri(pg, fmri, len + 1) < 0)
+ scfdie();
+
+ warn(gettext("Expected property %s of property group %s is "
+ "missing.\n"), propname, fmri);
+
+ free(fmri);
+ }
+
+ return (-1);
+}
+
+static int
+prop_check_type(scf_property_t *prop, scf_type_t ty)
+{
+ scf_type_t pty;
+
+ if (scf_property_type(prop, &pty) != SCF_SUCCESS)
+ scfdie();
+
+ if (ty == pty)
+ return (0);
+
+ if (g_verbose) {
+ ssize_t len;
+ char *fmri;
+ const char *tystr;
+
+ len = scf_property_to_fmri(prop, NULL, 0);
+ if (len < 0)
+ scfdie();
+
+ fmri = safe_malloc(len + 1);
+
+ if (scf_property_to_fmri(prop, fmri, len + 1) < 0)
+ scfdie();
+
+ tystr = scf_type_to_string(ty);
+ if (tystr == NULL)
+ tystr = "?";
+
+ warn(gettext("Property %s is not of expected type %s.\n"),
+ fmri, tystr);
+
+ free(fmri);
+ }
+
+ return (-1);
+}
+
+static int
+prop_get_val(scf_property_t *prop, scf_value_t *val)
+{
+ scf_error_t err;
+
+ if (scf_property_get_value(prop, val) == SCF_SUCCESS)
+ return (0);
+
+ err = scf_error();
+
+ if (err != SCF_ERROR_NOT_FOUND && err != SCF_ERROR_CONSTRAINT_VIOLATED)
+ scfdie();
+
+ if (g_verbose) {
+ ssize_t len;
+ char *fmri, *emsg;
+
+ len = scf_property_to_fmri(prop, NULL, 0);
+ if (len < 0)
+ scfdie();
+
+ fmri = safe_malloc(len + 1);
+
+ if (scf_property_to_fmri(prop, fmri, len + 1) < 0)
+ scfdie();
+
+ if (err == SCF_ERROR_NOT_FOUND)
+ emsg = gettext("Property %s has no values; expected "
+ "one.\n");
+ else
+ emsg = gettext("Property %s has multiple values; "
+ "expected one.\n");
+
+ warn(emsg, fmri);
+
+ free(fmri);
+ }
+
+ return (-1);
+}
+
+
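+/*
+ * Returns true if the snaplevel names an instance.  A constraint violation
+ * from libscf is treated as a service-level snaplevel.
+ */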
+static boolean_t
+snaplevel_is_instance(const scf_snaplevel_t *level)
+{
+ if (scf_snaplevel_get_instance_name(level, NULL, 0) < 0) {
+ if (scf_error() != SCF_ERROR_CONSTRAINT_VIOLATED)
+ scfdie();
+ return (0);
+ } else {
+ return (1);
+ }
+}
+
+/*
+ * Decode FMRI into a service or instance, and put the result in *ep. If
+ * memory cannot be allocated, return SCF_ERROR_NO_MEMORY. If the FMRI is
+ * invalid, return SCF_ERROR_INVALID_ARGUMENT. If the FMRI does not specify
+ * an entity, return SCF_ERROR_CONSTRAINT_VIOLATED. If the entity cannot be
+ * found, return SCF_ERROR_NOT_FOUND. Otherwise return SCF_ERROR_NONE, point
+ * *ep to a valid scf_service_t or scf_instance_t, and set *isservice to
+ * whether *ep is a service.
+ */
+static scf_error_t
+fmri_to_entity(scf_handle_t *h, const char *fmri, void **ep, int *isservice)
+{
+ char *fmri_copy;
+ const char *sstr, *istr, *pgstr;
+ scf_service_t *svc;
+ scf_instance_t *inst;
+
+ fmri_copy = strdup(fmri);
+ if (fmri_copy == NULL)
+ return (SCF_ERROR_NO_MEMORY);
+
+ if (scf_parse_svc_fmri(fmri_copy, NULL, &sstr, &istr, &pgstr, NULL) !=
+ SCF_SUCCESS) {
+ free(fmri_copy);
+ return (SCF_ERROR_INVALID_ARGUMENT);
+ }
+
+ free(fmri_copy);
+
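+	/*
+	 * sstr, istr, and pgstr point into fmri_copy, which has just been
+	 * freed, but below they are only compared against NULL, never
+	 * dereferenced.
+	 */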
+ if (sstr == NULL || pgstr != NULL)
+ return (SCF_ERROR_CONSTRAINT_VIOLATED);
+
+ if (istr == NULL) {
+ svc = scf_service_create(h);
+ if (svc == NULL)
+ return (SCF_ERROR_NO_MEMORY);
+
+ if (scf_handle_decode_fmri(h, fmri, NULL, svc, NULL, NULL, NULL,
+ SCF_DECODE_FMRI_EXACT) != SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ return (SCF_ERROR_NOT_FOUND);
+ }
+
+ *ep = svc;
+ *isservice = 1;
+ } else {
+ inst = scf_instance_create(h);
+ if (inst == NULL)
+ return (SCF_ERROR_NO_MEMORY);
+
+ if (scf_handle_decode_fmri(h, fmri, NULL, NULL, inst, NULL,
+ NULL, SCF_DECODE_FMRI_EXACT) != SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ return (SCF_ERROR_NOT_FOUND);
+ }
+
+ *ep = inst;
+ *isservice = 0;
+ }
+
+ return (SCF_ERROR_NONE);
+}
+
+/*
+ * Create the entity named by fmri.  Place a pointer to its libscf handle in
+ * *ep, and set *isservicep to 1 if it is a service or to 0 if it is an
+ * instance.
+ * Returns
+ * SCF_ERROR_NONE - success
+ * SCF_ERROR_NO_MEMORY - scf_*_create() failed
+ * SCF_ERROR_INVALID_ARGUMENT - fmri is invalid
+ * SCF_ERROR_CONSTRAINT_VIOLATED - fmri is not a service or instance
+ * SCF_ERROR_NOT_FOUND - no such scope
+ * SCF_ERROR_PERMISSION_DENIED
+ * SCF_ERROR_BACKEND_READONLY
+ * SCF_ERROR_BACKEND_ACCESS
+ */
+static scf_error_t
+create_entity(scf_handle_t *h, const char *fmri, void **ep, int *isservicep)
+{
+ char *fmri_copy;
+ const char *scstr, *sstr, *istr, *pgstr;
+ scf_scope_t *scope = NULL;
+ scf_service_t *svc = NULL;
+ scf_instance_t *inst = NULL;
+ scf_error_t scfe;
+
+ fmri_copy = safe_strdup(fmri);
+
+ if (scf_parse_svc_fmri(fmri_copy, &scstr, &sstr, &istr, &pgstr, NULL) !=
+ 0) {
+ free(fmri_copy);
+ return (SCF_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (scstr == NULL || sstr == NULL || pgstr != NULL) {
+ free(fmri_copy);
+ return (SCF_ERROR_CONSTRAINT_VIOLATED);
+ }
+
+ *ep = NULL;
+
+ if ((scope = scf_scope_create(h)) == NULL ||
+ (svc = scf_service_create(h)) == NULL ||
+ (inst = scf_instance_create(h)) == NULL) {
+ scfe = SCF_ERROR_NO_MEMORY;
+ goto out;
+ }
+
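+	/*
+	 * The get/add sequences below restart from the parent level if an
+	 * entity is deleted out from under us (SCF_ERROR_DELETED).
+	 */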
+get_scope:
+ if (scf_handle_get_scope(h, scstr, scope) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ scfdie();
+ /* NOTREACHED */
+
+ case SCF_ERROR_NOT_FOUND:
+ scfe = SCF_ERROR_NOT_FOUND;
+ goto out;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ default:
+ bad_error("scf_handle_get_scope", scf_error());
+ }
+ }
+
+get_svc:
+ if (scf_scope_get_service(scope, sstr, svc) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ scfdie();
+ /* NOTREACHED */
+
+ case SCF_ERROR_DELETED:
+ goto get_scope;
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_scope_get_service", scf_error());
+ }
+
+ if (scf_scope_add_service(scope, sstr, svc) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ scfdie();
+ /* NOTREACHED */
+
+ case SCF_ERROR_DELETED:
+ goto get_scope;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ case SCF_ERROR_BACKEND_READONLY:
+ case SCF_ERROR_BACKEND_ACCESS:
+ scfe = scf_error();
+ goto out;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+				bad_error("scf_scope_add_service", scf_error());
+ }
+ }
+ }
+
+ if (istr == NULL) {
+ scfe = SCF_ERROR_NONE;
+ *ep = svc;
+ *isservicep = 1;
+ goto out;
+ }
+
+get_inst:
+ if (scf_service_get_instance(svc, istr, inst) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ scfdie();
+ /* NOTREACHED */
+
+ case SCF_ERROR_DELETED:
+ goto get_svc;
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_service_get_instance", scf_error());
+ }
+
+ if (scf_service_add_instance(svc, istr, inst) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ scfdie();
+ /* NOTREACHED */
+
+ case SCF_ERROR_DELETED:
+ goto get_svc;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ case SCF_ERROR_BACKEND_READONLY:
+ case SCF_ERROR_BACKEND_ACCESS:
+ scfe = scf_error();
+ goto out;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_service_add_instance",
+ scf_error());
+ }
+ }
+ }
+
+ scfe = SCF_ERROR_NONE;
+ *ep = inst;
+ *isservicep = 0;
+
+out:
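+	/*
+	 * On success *ep aliases svc or inst; destroy only the handles we
+	 * are not returning to the caller.
+	 */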
+ if (*ep != inst)
+ scf_instance_destroy(inst);
+ if (*ep != svc)
+ scf_service_destroy(svc);
+ scf_scope_destroy(scope);
+ free(fmri_copy);
+ return (scfe);
+}
+
+/*
+ * Refresh entity. If isservice is zero, take entity to be an scf_instance_t *.
+ * Otherwise take entity to be an scf_service_t * and refresh all of its child
+ * instances. fmri is used for messages. inst, iter, and name_buf are used
+ * for scratch space. Returns
+ * 0 - success
+ * ECONNABORTED - repository connection broken
+ * ECANCELED - entity was deleted
+ * EACCES - backend denied access
+ * EPERM - permission denied
+ * -1 - _smf_refresh_instance_i() failed. scf_error() should be set.
+ */
+static int
+refresh_entity(int isservice, void *entity, const char *fmri,
+ scf_instance_t *inst, scf_iter_t *iter, char *name_buf)
+{
+ scf_error_t scfe;
+ int r;
+
+ if (!isservice) {
+ if (_smf_refresh_instance_i(entity) == 0) {
+ if (g_verbose)
+ warn(gettext("Refreshed %s.\n"), fmri);
+ return (0);
+ }
+
+ switch (scf_error()) {
+ case SCF_ERROR_BACKEND_ACCESS:
+ return (EACCES);
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ return (EPERM);
+
+ default:
+ return (-1);
+ }
+ }
+
+ if (scf_iter_service_instances(iter, entity) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ return (ECANCELED);
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_iter_service_instances", scf_error());
+ }
+ }
+
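+	/*
+	 * Refresh each instance of the service.  Individual failures are
+	 * reported (backend-access failures only in verbose mode) but do
+	 * not abort the walk.
+	 */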
+ for (;;) {
+ r = scf_iter_next_instance(iter, inst);
+ if (r == 0)
+ break;
+ if (r != 1) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ return (ECANCELED);
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ default:
+ bad_error("scf_iter_next_instance",
+ scf_error());
+ }
+ }
+
+ if (_smf_refresh_instance_i(inst) == 0) {
+ if (g_verbose) {
+ if (scf_instance_get_name(inst, name_buf,
+ max_scf_name_len + 1) < 0)
+ (void) strcpy(name_buf, "?");
+
+ warn(gettext("Refreshed %s:%s.\n"),
+ fmri, name_buf);
+ }
+ } else {
+ if (scf_error() != SCF_ERROR_BACKEND_ACCESS ||
+ g_verbose) {
+ scfe = scf_error();
+
+ if (scf_instance_to_fmri(inst, name_buf,
+ max_scf_name_len + 1) < 0)
+ (void) strcpy(name_buf, "?");
+
+ warn(gettext(
+ "Refresh of %s:%s failed: %s.\n"), fmri,
+ name_buf, scf_strerror(scfe));
+ }
+ }
+ }
+
+ return (0);
+}
+
+static int
+stash_scferror_err(scf_callback_t *cbp, scf_error_t err)
+{
+ cbp->sc_err = scferror2errno(err);
+ return (UU_WALK_ERROR);
+}
+
+static int
+stash_scferror(scf_callback_t *cbp)
+{
+ return (stash_scferror_err(cbp, scf_error()));
+}
+
+
+/*
+ * Import. These functions import a bundle into the repository.
+ */
+
+/*
+ * Add a transaction entry to lcbdata->sc_trans for this property_t. Uses
+ * sc_handle, sc_trans, and sc_flags (SCI_NOENABLED) in lcbdata. On success,
+ * returns UU_WALK_NEXT. On error returns UU_WALK_ERROR and sets
+ * lcbdata->sc_err to
+ * ENOMEM - out of memory
+ * ECONNABORTED - repository connection broken
+ * ECANCELED - sc_trans's property group was deleted
+ * EINVAL - p's name is invalid (error printed)
+ * - p has an invalid value (error printed)
+ */
+static int
+lscf_property_import(void *v, void *pvt)
+{
+ property_t *p = v;
+ scf_callback_t *lcbdata = pvt;
+ value_t *vp;
+ scf_transaction_t *trans = lcbdata->sc_trans;
+ scf_transaction_entry_t *entr;
+ scf_value_t *val;
+ scf_type_t tp;
+
+ if (lcbdata->sc_flags & SCI_NOENABLED &&
+ strcmp(p->sc_property_name, SCF_PROPERTY_ENABLED) == 0)
+ return (UU_WALK_NEXT);
+
+ entr = scf_entry_create(lcbdata->sc_handle);
+ if (entr == NULL) {
+ switch (scf_error()) {
+ case SCF_ERROR_NO_MEMORY:
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ default:
+ bad_error("scf_entry_create", scf_error());
+ }
+ }
+
+ tp = p->sc_value_type;
+
+ if (scf_transaction_property_new(trans, entr,
+ p->sc_property_name, tp) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_INVALID_ARGUMENT:
+ semerr(emsg_invalid_prop_name, p->sc_property_name);
+ scf_entry_destroy(entr);
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_EXISTS:
+ break;
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ scf_entry_destroy(entr);
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_transaction_property_new", scf_error());
+ }
+
+ if (scf_transaction_property_change_type(trans, entr,
+ p->sc_property_name, tp) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ scf_entry_destroy(entr);
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ semerr(emsg_invalid_prop_name,
+ p->sc_property_name);
+ scf_entry_destroy(entr);
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_NOT_FOUND:
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error(
+ "scf_transaction_property_change_type",
+ scf_error());
+ }
+ }
+ }
+
+ for (vp = uu_list_first(p->sc_property_values);
+ vp != NULL;
+ vp = uu_list_next(p->sc_property_values, vp)) {
+ val = scf_value_create(g_hndl);
+ if (val == NULL) {
+ switch (scf_error()) {
+ case SCF_ERROR_NO_MEMORY:
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ default:
+ bad_error("scf_value_create", scf_error());
+ }
+ }
+
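+		/*
+		 * Booleans, counts, and integers carry native values; other
+		 * types are set from their string form.
+		 */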
+ switch (tp) {
+ case SCF_TYPE_BOOLEAN:
+ scf_value_set_boolean(val, vp->sc_u.sc_count);
+ break;
+ case SCF_TYPE_COUNT:
+ scf_value_set_count(val, vp->sc_u.sc_count);
+ break;
+ case SCF_TYPE_INTEGER:
+ scf_value_set_integer(val, vp->sc_u.sc_integer);
+ break;
+ default:
+ assert(vp->sc_u.sc_string != NULL);
+ if (scf_value_set_from_string(val, tp,
+ vp->sc_u.sc_string) != 0) {
+ if (scf_error() != SCF_ERROR_INVALID_ARGUMENT)
+ bad_error("scf_value_set_from_string",
+ scf_error());
+
+ warn(gettext("Value \"%s\" is not a valid "
+ "%s.\n"), vp->sc_u.sc_string,
+ scf_type_to_string(tp));
+ scf_value_destroy(val);
+ return (stash_scferror(lcbdata));
+ }
+ break;
+ }
+
+ if (scf_entry_add_value(entr, val) != 0)
+ bad_error("scf_entry_add_value", scf_error());
+ }
+
+ return (UU_WALK_NEXT);
+}
+
+/*
+ * Import a pgroup_t into the repository. Uses sc_handle, sc_parent,
+ * sc_service, sc_flags (SCI_GENERALLAST, SCI_FORCE, & SCI_KEEP),
+ * sc_source_fmri, and sc_target_fmri in lcbdata, and uses imp_pg and imp_tx.
+ * On success, returns UU_WALK_NEXT. On error returns UU_WALK_ERROR and sets
+ * lcbdata->sc_err to
+ * ECONNABORTED - repository connection broken
+ * ENOMEM - out of memory
+ * ENOSPC - svc.configd is out of resources
+ * ECANCELED - sc_parent was deleted
+ * EPERM - could not create property group (permission denied) (error printed)
+ * - could not modify property group (permission denied) (error printed)
+ * - could not delete property group (permission denied) (error printed)
+ * EROFS - could not create property group (repository is read-only)
+ * - could not delete property group (repository is read-only)
+ * EACCES - could not create property group (backend access denied)
+ * - could not delete property group (backend access denied)
+ * EEXIST - could not create property group (already exists)
+ * EINVAL - invalid property group name (error printed)
+ * - invalid property name (error printed)
+ * - invalid value (error printed)
+ * EBUSY - new property group deleted (error printed)
+ * - new property group changed (error printed)
+ * - property group added (error printed)
+ * - property group deleted (error printed)
+ */
+static int
+entity_pgroup_import(void *v, void *pvt)
+{
+ pgroup_t *p = v;
+ scf_callback_t cbdata;
+ scf_callback_t *lcbdata = pvt;
+ void *ent = lcbdata->sc_parent;
+ int issvc = lcbdata->sc_service;
+ int r;
+
+ const char * const pg_changed = gettext("%s changed unexpectedly "
+ "(new property group \"%s\" changed).\n");
+
+ /* Never import deleted property groups. */
+ if (p->sc_pgroup_delete)
+ return (UU_WALK_NEXT);
+
+ if (!issvc && (lcbdata->sc_flags & SCI_GENERALLAST) &&
+ strcmp(p->sc_pgroup_name, SCF_PG_GENERAL) == 0) {
+ lcbdata->sc_general = p;
+ return (UU_WALK_NEXT);
+ }
+
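+	/*
+	 * Create the property group.  If it already exists and SCI_FORCE is
+	 * set, fetch the existing group; unless SCI_KEEP is also set, delete
+	 * it and retry the creation.
+	 */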
+add_pg:
+ if (issvc)
+ r = scf_service_add_pg(ent, p->sc_pgroup_name,
+ p->sc_pgroup_type, p->sc_pgroup_flags, imp_pg);
+ else
+ r = scf_instance_add_pg(ent, p->sc_pgroup_name,
+ p->sc_pgroup_type, p->sc_pgroup_flags, imp_pg);
+ if (r != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_BACKEND_READONLY:
+ case SCF_ERROR_BACKEND_ACCESS:
+ case SCF_ERROR_NO_RESOURCES:
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_EXISTS:
+ if (lcbdata->sc_flags & SCI_FORCE)
+ break;
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ warn(emsg_fmri_invalid_pg_name, lcbdata->sc_source_fmri,
+ p->sc_pgroup_name);
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ warn(emsg_pg_add_perm, p->sc_pgroup_name,
+ lcbdata->sc_target_fmri);
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_service_add_pg", scf_error());
+ }
+
+ if (entity_get_pg(ent, issvc, p->sc_pgroup_name, imp_pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_DELETED:
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ warn(emsg_fmri_invalid_pg_name,
+ lcbdata->sc_source_fmri,
+ p->sc_pgroup_name);
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_NOT_FOUND:
+ warn(gettext("%s changed unexpectedly "
+ "(property group \"%s\" added).\n"),
+ lcbdata->sc_target_fmri, p->sc_pgroup_name);
+ lcbdata->sc_err = EBUSY;
+ return (UU_WALK_ERROR);
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("entity_get_pg", scf_error());
+ }
+ }
+
+ if (lcbdata->sc_flags & SCI_KEEP)
+ goto props;
+
+ if (scf_pg_delete(imp_pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ warn(gettext("%s changed unexpectedly "
+ "(property group \"%s\" deleted).\n"),
+ lcbdata->sc_target_fmri, p->sc_pgroup_name);
+ lcbdata->sc_err = EBUSY;
+ return (UU_WALK_ERROR);
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ warn(emsg_pg_del_perm, p->sc_pgroup_name,
+ lcbdata->sc_target_fmri);
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_BACKEND_READONLY:
+ case SCF_ERROR_BACKEND_ACCESS:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_pg_delete", scf_error());
+ }
+ }
+
+ goto add_pg;
+ }
+
+props:
+
+ /*
+ * Add properties to property group, if any.
+ */
+ cbdata.sc_handle = lcbdata->sc_handle;
+ cbdata.sc_parent = imp_pg;
+ cbdata.sc_flags = lcbdata->sc_flags;
+ cbdata.sc_trans = imp_tx;
+
+ if (scf_transaction_start(imp_tx, imp_pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_BACKEND_ACCESS:
+ case SCF_ERROR_BACKEND_READONLY:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_DELETED:
+ warn(pg_changed, lcbdata->sc_target_fmri,
+ p->sc_pgroup_name);
+ lcbdata->sc_err = EBUSY;
+ return (UU_WALK_ERROR);
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ warn(emsg_pg_mod_perm, p->sc_pgroup_name,
+ lcbdata->sc_target_fmri);
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_IN_USE:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ default:
+ bad_error("scf_transaction_start", scf_error());
+ }
+ }
+
+ if (uu_list_walk(p->sc_pgroup_props, lscf_property_import, &cbdata,
+ UU_DEFAULT) != 0) {
+ if (uu_error() != UU_ERROR_CALLBACK_FAILED)
+ bad_error("uu_list_walk", uu_error());
+ scf_transaction_reset(imp_tx);
+
+ lcbdata->sc_err = cbdata.sc_err;
+ if (cbdata.sc_err == ECANCELED) {
+ warn(pg_changed, lcbdata->sc_target_fmri,
+ p->sc_pgroup_name);
+ lcbdata->sc_err = EBUSY;
+ }
+ return (UU_WALK_ERROR);
+ }
+
+ r = scf_transaction_commit(imp_tx);
+ switch (r) {
+ case 1:
+ r = UU_WALK_NEXT;
+ break;
+
+ case 0:
+ warn(pg_changed, lcbdata->sc_target_fmri, p->sc_pgroup_name);
+ lcbdata->sc_err = EBUSY;
+ r = UU_WALK_ERROR;
+ break;
+
+ case -1:
+ switch (scf_error()) {
+ case SCF_ERROR_BACKEND_READONLY:
+ case SCF_ERROR_BACKEND_ACCESS:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_NO_RESOURCES:
+ r = stash_scferror(lcbdata);
+ break;
+
+ case SCF_ERROR_DELETED:
+ warn(emsg_pg_deleted, lcbdata->sc_target_fmri,
+ p->sc_pgroup_name);
+ lcbdata->sc_err = EBUSY;
+ r = UU_WALK_ERROR;
+ break;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ warn(emsg_pg_mod_perm, p->sc_pgroup_name,
+ lcbdata->sc_target_fmri);
+ r = stash_scferror(lcbdata);
+ break;
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("scf_transaction_commit", scf_error());
+		}
+		break;
+
+ default:
+ bad_error("scf_transaction_commit", r);
+ }
+
+ scf_transaction_destroy_children(imp_tx);
+
+ return (r);
+}
+
+/*
+ * Returns
+ * 0 - success
+ * ECONNABORTED - repository connection broken
+ * ENOMEM - out of memory
+ * ENOSPC - svc.configd is out of resources
+ * ECANCELED - inst was deleted
+ * EPERM - could not create property group (permission denied) (error printed)
+ * - could not modify property group (permission denied) (error printed)
+ * EROFS - could not create property group (repository is read-only)
+ * EACCES - could not create property group (backend access denied)
+ * EEXIST - could not create property group (already exists)
+ * EINVAL - invalid property group name (error printed)
+ * - invalid property name (error printed)
+ * - invalid value (error printed)
+ * EBUSY - new property group changed (error printed)
+ */
+static int
+lscf_import_instance_pgs(scf_instance_t *inst, const char *target_fmri,
+ const entity_t *iinst, int flags)
+{
+ scf_callback_t cbdata;
+
+ cbdata.sc_handle = scf_instance_handle(inst);
+ cbdata.sc_parent = inst;
+ cbdata.sc_service = 0;
+ cbdata.sc_general = 0;
+ cbdata.sc_flags = flags;
+ cbdata.sc_source_fmri = iinst->sc_fmri;
+ cbdata.sc_target_fmri = target_fmri;
+
+ if (uu_list_walk(iinst->sc_pgroups, entity_pgroup_import, &cbdata,
+ UU_DEFAULT) != 0) {
+ if (uu_error() != UU_ERROR_CALLBACK_FAILED)
+ bad_error("uu_list_walk", uu_error());
+
+ return (cbdata.sc_err);
+ }
+
+ if ((flags & SCI_GENERALLAST) && cbdata.sc_general) {
+ cbdata.sc_flags = flags & (~SCI_GENERALLAST);
+ if (entity_pgroup_import(cbdata.sc_general, &cbdata)
+ != UU_WALK_NEXT)
+ return (cbdata.sc_err);
+ }
+
+ return (0);
+}
+
+/*
+ * Report the reasons why we can't upgrade pg2 to pg1.
+ */
+static void
+report_pg_diffs(pgroup_t *pg1, pgroup_t *pg2, const char *fmri, int new)
+{
+ property_t *p1, *p2;
+
+ assert(strcmp(pg1->sc_pgroup_name, pg2->sc_pgroup_name) == 0);
+
+ if (!pg_attrs_equal(pg1, pg2, fmri, new))
+ return;
+
+ for (p1 = uu_list_first(pg1->sc_pgroup_props);
+ p1 != NULL;
+ p1 = uu_list_next(pg1->sc_pgroup_props, p1)) {
+ p2 = uu_list_find(pg2->sc_pgroup_props, p1, NULL, NULL);
+ if (p2 != NULL) {
+ (void) prop_equal(p1, p2, fmri, pg1->sc_pgroup_name,
+ new);
+ continue;
+ }
+
+ if (new)
+ warn(gettext("Conflict upgrading %s (new property "
+ "group \"%s\" is missing property \"%s\").\n"),
+ fmri, pg1->sc_pgroup_name, p1->sc_property_name);
+ else
+ warn(gettext("Conflict upgrading %s (property "
+ "\"%s/%s\" is missing).\n"), fmri,
+ pg1->sc_pgroup_name, p1->sc_property_name);
+ }
+
+ /*
+ * Since pg1 should be from the manifest, any properties in pg2 which
+ * aren't in pg1 shouldn't be reported as conflicts.
+ */
+}
+
+/*
+ * Add transaction entries to tx which will upgrade cur's pg according to old
+ * & new.
+ *
+ * Returns
+ * 0 - success
+ * EINVAL - new has a property with an invalid name or value (message emitted)
+ * ENOMEM - out of memory
+ */
+static int
+add_upgrade_entries(scf_transaction_t *tx, pgroup_t *old, pgroup_t *new,
+ pgroup_t *cur, int speak, const char *fmri)
+{
+ property_t *p, *new_p, *cur_p;
+ scf_transaction_entry_t *e;
+ int r;
+ int is_general;
+ int is_protected;
+
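+	/*
+	 * Clear the "seen" flags so the second pass below can identify
+	 * properties which are new in the manifest.
+	 */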
+ if (uu_list_walk(new->sc_pgroup_props, clear_int,
+ (void *)offsetof(property_t, sc_seen), UU_DEFAULT) != 0)
+ bad_error("uu_list_walk", uu_error());
+
+ is_general = strcmp(old->sc_pgroup_name, SCF_PG_GENERAL) == 0;
+
+ for (p = uu_list_first(old->sc_pgroup_props);
+ p != NULL;
+ p = uu_list_next(old->sc_pgroup_props, p)) {
+ /* p is a property in the old property group. */
+
+ /* Protect live properties. */
+ is_protected = 0;
+ if (is_general) {
+ if (strcmp(p->sc_property_name, SCF_PROPERTY_ENABLED) ==
+ 0 ||
+ strcmp(p->sc_property_name,
+ SCF_PROPERTY_RESTARTER) == 0)
+ is_protected = 1;
+ }
+
+ /* Look for the same property in the new properties. */
+ new_p = uu_list_find(new->sc_pgroup_props, p, NULL, NULL);
+ if (new_p != NULL) {
+ new_p->sc_seen = 1;
+
+ /*
+ * If the new property is the same as the old, don't do
+ * anything (leave any user customizations).
+ */
+ if (prop_equal(p, new_p, NULL, NULL, 0))
+ continue;
+
+ if (new_p->sc_property_override)
+ goto upgrade;
+ }
+
+ cur_p = uu_list_find(cur->sc_pgroup_props, p, NULL, NULL);
+ if (cur_p == NULL) {
+ /*
+ * p has been deleted from the repository. If we were
+ * going to delete it anyway, do nothing. Otherwise
+ * report a conflict.
+ */
+ if (new_p == NULL)
+ continue;
+
+ if (is_protected)
+ continue;
+
+ warn(gettext("Conflict upgrading %s "
+ "(property \"%s/%s\" is missing).\n"), fmri,
+ old->sc_pgroup_name, p->sc_property_name);
+ continue;
+ }
+
+ if (!prop_equal(p, cur_p, NULL, NULL, 0)) {
+ /*
+ * Conflict. Don't warn if the property is already the
+ * way we want it, though.
+ */
+ if (is_protected)
+ continue;
+
+ if (new_p == NULL)
+ (void) prop_equal(p, cur_p, fmri,
+ old->sc_pgroup_name, 0);
+ else
+ (void) prop_equal(cur_p, new_p, fmri,
+ old->sc_pgroup_name, 0);
+ continue;
+ }
+
+ if (is_protected) {
+ if (speak)
+ warn(gettext("%s: Refusing to upgrade "
+ "\"%s/%s\" (live property).\n"), fmri,
+ old->sc_pgroup_name, p->sc_property_name);
+ continue;
+ }
+
+upgrade:
+ /* p hasn't been customized in the repository. Upgrade it. */
+ if (new_p == NULL) {
+ /* p was deleted. Delete from cur if unchanged. */
+ if (speak)
+ warn(gettext(
+ "%s: Deleting property \"%s/%s\".\n"),
+ fmri, old->sc_pgroup_name,
+ p->sc_property_name);
+
+ e = scf_entry_create(g_hndl);
+ if (e == NULL)
+ return (ENOMEM);
+
+ if (scf_transaction_property_delete(tx, e,
+ p->sc_property_name) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ scf_entry_destroy(e);
+ return (ECANCELED);
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ scf_entry_destroy(e);
+ return (ECONNABORTED);
+
+ case SCF_ERROR_NOT_FOUND:
+ /*
+ * This can happen if cur is from the
+ * running snapshot (and it differs
+ * from the live properties).
+ */
+ scf_entry_destroy(e);
+ break;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ default:
+ bad_error(
+ "scf_transaction_property_delete",
+ scf_error());
+ }
+ }
+ } else {
+ scf_callback_t ctx;
+
+ if (speak)
+ warn(gettext(
+ "%s: Upgrading property \"%s/%s\".\n"),
+ fmri, old->sc_pgroup_name,
+ p->sc_property_name);
+
+ ctx.sc_handle = g_hndl;
+ ctx.sc_trans = tx;
+ ctx.sc_flags = 0;
+
+ r = lscf_property_import(new_p, &ctx);
+ if (r != UU_WALK_NEXT) {
+ if (r != UU_WALK_ERROR)
+ bad_error("lscf_property_import", r);
+ return (EINVAL);
+ }
+ }
+ }
+
+ /* Go over the properties which were added. */
+ for (new_p = uu_list_first(new->sc_pgroup_props);
+ new_p != NULL;
+ new_p = uu_list_next(new->sc_pgroup_props, new_p)) {
+ if (new_p->sc_seen)
+ continue;
+
+ /* This is a new property. */
+ cur_p = uu_list_find(cur->sc_pgroup_props, new_p, NULL, NULL);
+ if (cur_p == NULL) {
+ scf_callback_t ctx;
+
+ ctx.sc_handle = g_hndl;
+ ctx.sc_trans = tx;
+ ctx.sc_flags = 0;
+
+ r = lscf_property_import(new_p, &ctx);
+ if (r != UU_WALK_NEXT) {
+ if (r != UU_WALK_ERROR)
+ bad_error("lscf_property_import", r);
+ return (EINVAL);
+ }
+ continue;
+ }
+
+ /*
+ * Report a conflict if the new property differs from the
+ * current one. Unless it's general/enabled, since that's
+ * never in the last-import snapshot.
+ */
+ if (strcmp(new_p->sc_property_name, SCF_PROPERTY_ENABLED) ==
+ 0 &&
+ strcmp(cur->sc_pgroup_name, SCF_PG_GENERAL) == 0)
+ continue;
+
+ (void) prop_equal(cur_p, new_p, fmri, old->sc_pgroup_name, 1);
+ }
+
+ return (0);
+}
+
+/*
+ * Upgrade pg according to old & new.
+ *
+ * Returns
+ * 0 - success
+ * ECONNABORTED - repository connection broken
+ * ENOMEM - out of memory
+ * ENOSPC - svc.configd is out of resources
+ * ECANCELED - pg was deleted
+ * EPERM - couldn't modify pg (permission denied)
+ * EROFS - couldn't modify pg (backend read-only)
+ * EACCES - couldn't modify pg (backend access denied)
+ * EINVAL - new has a property with invalid name or value (error printed)
+ * EBUSY - pg changed unexpectedly
+ */
+static int
+upgrade_pg(scf_propertygroup_t *pg, pgroup_t *cur, pgroup_t *old,
+ pgroup_t *new, int speak, const char *fmri)
+{
+ int r;
+
+ if (scf_transaction_start(imp_tx, pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_PERMISSION_DENIED:
+ case SCF_ERROR_BACKEND_READONLY:
+ case SCF_ERROR_BACKEND_ACCESS:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_IN_USE:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_transaction_start", scf_error());
+ }
+ }
+
+ r = add_upgrade_entries(imp_tx, old, new, cur, speak, fmri);
+ switch (r) {
+ case 0:
+ break;
+
+ case EINVAL:
+ case ENOMEM:
+ scf_transaction_destroy_children(imp_tx);
+ return (r);
+
+ default:
+ bad_error("add_upgrade_entries", r);
+ }
+
+ r = scf_transaction_commit(imp_tx);
+
+ scf_transaction_destroy_children(imp_tx);
+
+ switch (r) {
+ case 1:
+ break;
+
+ case 0:
+ return (EBUSY);
+
+ case -1:
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_NO_RESOURCES:
+ case SCF_ERROR_PERMISSION_DENIED:
+ case SCF_ERROR_BACKEND_READONLY:
+ case SCF_ERROR_BACKEND_ACCESS:
+ case SCF_ERROR_DELETED:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_transaction_commit", scf_error());
+ }
+
+ default:
+ bad_error("scf_transaction_commit", r);
+ }
+
+ return (0);
+}
+
+/*
+ * Compares two entity FMRIs. Returns
+ *
+ * 1 - equal
+ * 0 - not equal
+ * -1 - f1 is invalid or not an entity
+ * -2 - f2 is invalid or not an entity
+ */
+static int
+fmri_equal(const char *f1, const char *f2)
+{
+ int r;
+ const char *s1, *i1, *pg1;
+ const char *s2, *i2, *pg2;
+
+ if (strlcpy(imp_fe1, f1, max_scf_fmri_len + 1) >= max_scf_fmri_len + 1)
+ return (-1);
+ if (scf_parse_svc_fmri(imp_fe1, NULL, &s1, &i1, &pg1, NULL) != 0)
+ return (-1);
+
+ if (s1 == NULL || pg1 != NULL)
+ return (-1);
+
+ if (strlcpy(imp_fe2, f2, max_scf_fmri_len + 1) >= max_scf_fmri_len + 1)
+ return (-2);
+ if (scf_parse_svc_fmri(imp_fe2, NULL, &s2, &i2, &pg2, NULL) != 0)
+ return (-2);
+
+ if (s2 == NULL || pg2 != NULL)
+ return (-2);
+
+ r = strcmp(s1, s2);
+ if (r != 0)
+ return (0);
+
+ if (i1 == NULL && i2 == NULL)
+ return (1);
+
+ if (i1 == NULL || i2 == NULL)
+ return (0);
+
+ return (strcmp(i1, i2) == 0);
+}
+
+/*
+ * Import a dependent by creating a dependency property group in the dependent
+ * entity. If lcbdata->sc_trans is set, assume it's been started on the
+ * dependents pg, and add an entry to create a new property for this
+ * dependent.  Uses sc_handle, sc_trans, and sc_source_fmri in lcbdata.
+ *
+ * On success, returns UU_WALK_NEXT. On error, returns UU_WALK_ERROR and sets
+ * lcbdata->sc_err to
+ * ECONNABORTED - repository connection broken
+ * ENOMEM - out of memory
+ * ENOSPC - configd is out of resources
+ * EINVAL - target is invalid (error printed)
+ * - target is not an entity (error printed)
+ * - dependent has invalid name (error printed)
+ * - invalid property name (error printed)
+ * - invalid value (error printed)
+ * - scope of target does not exist (error printed)
+ * EPERM - couldn't create target (permission denied) (error printed)
+ * - couldn't create dependency pg (permission denied) (error printed)
+ * - couldn't modify dependency pg (permission denied) (error printed)
+ * EROFS - couldn't create target (repository read-only)
+ * - couldn't create dependency pg (repository read-only)
+ * EACCES - couldn't create target (backend access denied)
+ * - couldn't create dependency pg (backend access denied)
+ * ECANCELED - sc_trans's pg was deleted
+ * EALREADY - property for dependent already exists in sc_trans's pg
+ * EEXIST - dependency pg already exists in target (error printed)
+ * EBUSY - target deleted (error printed)
+ * - property group changed during import (error printed)
+ */
+static int
+lscf_dependent_import(void *a1, void *pvt)
+{
+ pgroup_t *pgrp = a1;
+ scf_callback_t *lcbdata = pvt;
+
+ int isservice;
+ int ret;
+ scf_transaction_entry_t *e;
+ scf_value_t *val;
+ scf_callback_t dependent_cbdata;
+ scf_error_t scfe;
+
+ /*
+ * Decode the FMRI into dependent_cbdata->sc_parent. Do it here so if
+ * it's invalid, we fail before modifying the repository.
+ */
+ scfe = fmri_to_entity(lcbdata->sc_handle, pgrp->sc_pgroup_fmri,
+ &dependent_cbdata.sc_parent, &isservice);
+ switch (scfe) {
+ case SCF_ERROR_NONE:
+ break;
+
+ case SCF_ERROR_NO_MEMORY:
+ return (stash_scferror_err(lcbdata, scfe));
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ semerr(gettext("The FMRI for the \"%s\" dependent is "
+ "invalid.\n"), pgrp->sc_pgroup_name);
+ return (stash_scferror_err(lcbdata, scfe));
+
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ semerr(gettext("The FMRI \"%s\" for the \"%s\" dependent "
+ "specifies neither a service nor an instance.\n"),
+ pgrp->sc_pgroup_fmri, pgrp->sc_pgroup_name);
+ return (stash_scferror_err(lcbdata, scfe));
+
+ case SCF_ERROR_NOT_FOUND:
+ scfe = create_entity(lcbdata->sc_handle, pgrp->sc_pgroup_fmri,
+ &dependent_cbdata.sc_parent, &isservice);
+ switch (scfe) {
+ case SCF_ERROR_NONE:
+ break;
+
+ case SCF_ERROR_NO_MEMORY:
+ case SCF_ERROR_BACKEND_READONLY:
+ case SCF_ERROR_BACKEND_ACCESS:
+ return (stash_scferror_err(lcbdata, scfe));
+
+ case SCF_ERROR_NOT_FOUND:
+ semerr(gettext("The scope in FMRI \"%s\" for the "
+ "\"%s\" dependent does not exist.\n"),
+ pgrp->sc_pgroup_fmri, pgrp->sc_pgroup_name);
+ lcbdata->sc_err = EINVAL;
+ return (UU_WALK_ERROR);
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ warn(gettext(
+ "Could not create %s (permission denied).\n"),
+ pgrp->sc_pgroup_fmri);
+ return (stash_scferror_err(lcbdata, scfe));
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ default:
+ bad_error("create_entity", scfe);
+ }
+ break;
+
+ default:
+ bad_error("fmri_to_entity", scfe);
+ }
+
+ if (lcbdata->sc_trans != NULL) {
+ e = scf_entry_create(lcbdata->sc_handle);
+ if (e == NULL) {
+ if (scf_error() != SCF_ERROR_NO_MEMORY)
+ bad_error("scf_entry_create", scf_error());
+
+ entity_destroy(dependent_cbdata.sc_parent, isservice);
+ return (stash_scferror(lcbdata));
+ }
+
+ if (scf_transaction_property_new(lcbdata->sc_trans, e,
+ pgrp->sc_pgroup_name, SCF_TYPE_FMRI) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_INVALID_ARGUMENT:
+ warn(gettext("Dependent of %s has invalid name "
+ "\"%s\".\n"), pgrp->sc_parent->sc_fmri,
+ pgrp->sc_pgroup_name);
+ /* FALLTHROUGH */
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ scf_entry_destroy(e);
+ entity_destroy(dependent_cbdata.sc_parent,
+ isservice);
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_EXISTS:
+ scf_entry_destroy(e);
+ entity_destroy(dependent_cbdata.sc_parent,
+ isservice);
+ lcbdata->sc_err = EALREADY;
+ return (UU_WALK_ERROR);
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_transaction_property_new",
+ scf_error());
+ }
+ }
+
+ val = scf_value_create(lcbdata->sc_handle);
+ if (val == NULL) {
+ if (scf_error() != SCF_ERROR_NO_MEMORY)
+ bad_error("scf_value_create", scf_error());
+
+ entity_destroy(dependent_cbdata.sc_parent, isservice);
+ return (stash_scferror(lcbdata));
+ }
+
+ if (scf_value_set_from_string(val, SCF_TYPE_FMRI,
+ pgrp->sc_pgroup_fmri) != 0)
+ /* invalid should have been caught above */
+ bad_error("scf_value_set_from_string", scf_error());
+
+ if (scf_entry_add_value(e, val) != 0)
+ bad_error("scf_entry_add_value", scf_error());
+ }
+
+ /* Add the property group to the target entity. */
+
+ dependent_cbdata.sc_handle = lcbdata->sc_handle;
+ dependent_cbdata.sc_flags = 0;
+ dependent_cbdata.sc_source_fmri = lcbdata->sc_source_fmri;
+ dependent_cbdata.sc_target_fmri = pgrp->sc_pgroup_fmri;
+
+ ret = entity_pgroup_import(pgrp, &dependent_cbdata);
+
+ entity_destroy(dependent_cbdata.sc_parent, isservice);
+
+ if (ret == UU_WALK_NEXT)
+ return (ret);
+
+ if (ret != UU_WALK_ERROR)
+ bad_error("entity_pgroup_import", ret);
+
+ switch (dependent_cbdata.sc_err) {
+ case ECANCELED:
+ warn(gettext("%s deleted unexpectedly.\n"),
+ pgrp->sc_pgroup_fmri);
+ lcbdata->sc_err = EBUSY;
+ break;
+
+ case EEXIST:
+ warn(gettext("Could not create \"%s\" dependency in %s "
+ "(already exists).\n"), pgrp->sc_pgroup_name,
+ pgrp->sc_pgroup_fmri);
+ /* FALLTHROUGH */
+
+ default:
+ lcbdata->sc_err = dependent_cbdata.sc_err;
+ }
+
+ return (UU_WALK_ERROR);
+}
+
+static int upgrade_dependent(const scf_property_t *, const entity_t *,
+ const scf_snaplevel_t *, scf_transaction_t *);
+
+/*
+ * Upgrade uncustomized dependents of ent to those specified in ient.  Read
+ * the current dependent targets from running (the snaplevel of a running
+ * snapshot which corresponds to ient) if it is not NULL; otherwise read them
+ * from ent (an scf_service_t * or scf_instance_t *, according to ient).
+ * Draw the ancestral
+ * dependent targets and dependency properties from li_dpts_pg (the
+ * "dependents" property group in snpl) and snpl (the snaplevel which
+ * corresponds to ent in a last-import snapshot). If li_dpts_pg is NULL, then
+ * snpl doesn't have a "dependents" property group, and any dependents in ient
+ * are new.
+ *
+ * Returns
+ * 0 - success
+ * ECONNABORTED - repository connection broken
+ * ENOMEM - out of memory
+ * ENOSPC - configd is out of resources
+ * ECANCELED - ent was deleted
+ * ENODEV - the entity containing li_dpts_pg was deleted
+ * EPERM - could not modify dependents pg (permission denied) (error printed)
+ * - couldn't upgrade dependent (permission denied) (error printed)
+ * - couldn't create dependent (permission denied) (error printed)
+ * EROFS - could not modify dependents pg (repository read-only)
+ * - couldn't upgrade dependent (repository read-only)
+ * - couldn't create dependent (repository read-only)
+ * EACCES - could not modify dependents pg (backend access denied)
+ * - could not upgrade dependent (backend access denied)
+ * - could not create dependent (backend access denied)
+ * EBUSY - "dependents" pg of ent added, changed, or deleted (error printed)
+ * - dependent target deleted (error printed)
+ * - dependent pg changed (error printed)
+ * EINVAL - new dependent is invalid (error printed)
+ * EBADF - snpl is corrupt (error printed)
+ * - snpl has corrupt pg (error printed)
+ * - dependency pg in target is corrupt (error printed)
+ * EEXIST - dependency pg already existed in target service (error printed)
+ */
+static int
+upgrade_dependents(const scf_propertygroup_t *li_dpts_pg,
+ const scf_snaplevel_t *snpl, const entity_t *ient,
+ const scf_snaplevel_t *running, void *ent)
+{
+ pgroup_t *new_dpt_pgroup;
+ scf_callback_t cbdata;
+ int r, unseen, tx_set;
+
+ const char * const dependents = "dependents";
+
+ const int issvc = (ient->sc_etype == SVCCFG_SERVICE_OBJECT);
+
+ /*
+ * Algorithm: Each single-valued fmri (or astring) property in
+ * li_dpts_pg represents a dependent tag in the old manifest. For
+ * each, decide whether it has been changed in the new manifest (via
+ * ient). Do nothing if it hasn't (thereby leaving any user
+ * customizations). If it has, decide whether the user has customized
+ * the dependent since last-import. If he hasn't, change it as
+ * prescribed by the new manifest. If he has, report a conflict and
+ * don't change anything.
+ */
+
+ /*
+ * Clear the seen fields of the dependents, so we can tell which ones
+ * are new.
+ */
+ if (uu_list_walk(ient->sc_dependents, clear_int,
+ (void *)offsetof(pgroup_t, sc_pgroup_seen), UU_DEFAULT) != 0)
+ bad_error("uu_list_walk", uu_error());
+
+ if (li_dpts_pg != NULL) {
+ ud_cur_depts_pg_set = 1;
+ if (running != NULL)
+ r = scf_snaplevel_get_pg(running, dependents,
+ ud_cur_depts_pg);
+ else
+ r = entity_get_pg(ent, issvc, dependents,
+ ud_cur_depts_pg);
+ if (r != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("entity_get_pg", scf_error());
+ }
+
+ ud_cur_depts_pg_set = 0;
+ }
+
+ if (scf_iter_pg_properties(ud_iter, li_dpts_pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ li_dpts_pg = NULL;
+ goto nodpg;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_iter_pg_properties",
+ scf_error());
+ }
+ }
+
+ tx_set = 0;
+ if (entity_get_pg(ent, issvc, dependents, ud_pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("entity_get_pg", scf_error());
+ }
+ } else {
+ if (scf_transaction_start(imp_tx2, ud_pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_BACKEND_ACCESS:
+ case SCF_ERROR_BACKEND_READONLY:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_DELETED:
+ break;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ warn(emsg_pg_mod_perm, dependents,
+ ient->sc_fmri);
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_IN_USE:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_transaction_start",
+ scf_error());
+ }
+ } else {
+ tx_set = 1;
+ }
+ }
+
+ for (;;) {
+ r = scf_iter_next_property(ud_iter, ud_dpt_prop);
+ if (r == 0)
+ break;
+ if (r == 1) {
+ r = upgrade_dependent(ud_dpt_prop, ient, snpl,
+ imp_tx2);
+ switch (r) {
+ case 0:
+ continue;
+
+ case ECONNABORTED:
+ case ENOMEM:
+ case ENOSPC:
+ case EBADF:
+ case EBUSY:
+ case EINVAL:
+ case EPERM:
+ case EROFS:
+ case EACCES:
+ case EEXIST:
+ goto txout;
+
+ case ECANCELED:
+ r = ENODEV;
+ goto txout;
+
+ default:
+ bad_error("upgrade_dependent", r);
+ }
+ }
+ if (r != -1)
+ bad_error("scf_iter_next_property", r);
+
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ r = ENODEV;
+ goto txout;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ r = ECONNABORTED;
+ goto txout;
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ default:
+ bad_error("scf_iter_next_property",
+ scf_error());
+ }
+ }
+
+ if (!tx_set)
+ return (0);
+
+ r = scf_transaction_commit(imp_tx2);
+ switch (r) {
+ case 1:
+ r = 0;
+ break;
+
+ case 0:
+ warn(emsg_pg_changed, ient->sc_fmri, dependents);
+ r = EBUSY;
+ break;
+
+ case -1:
+ switch (scf_error()) {
+ case SCF_ERROR_BACKEND_READONLY:
+ case SCF_ERROR_BACKEND_ACCESS:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_NO_RESOURCES:
+ r = scferror2errno(scf_error());
+ break;
+
+ case SCF_ERROR_DELETED:
+ warn(emsg_pg_deleted, ient->sc_fmri,
+ dependents);
+ r = EBUSY;
+ break;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ warn(emsg_pg_mod_perm, dependents,
+ ient->sc_fmri);
+ r = EPERM;
+ break;
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("scf_transaction_commit",
+ scf_error());
+ }
+ break;
+
+ default:
+ bad_error("scf_transaction_commit", r);
+ }
+
+txout:
+ scf_transaction_destroy_children(imp_tx2);
+ scf_transaction_reset(imp_tx2);
+ return (r);
+ }
+
+nodpg:
+ /* import unseen dependents */
+ /* If there are none, exit early. */
+ unseen = 0;
+ for (new_dpt_pgroup = uu_list_first(ient->sc_dependents);
+ new_dpt_pgroup != NULL;
+ new_dpt_pgroup = uu_list_next(ient->sc_dependents,
+ new_dpt_pgroup)) {
+ if (!new_dpt_pgroup->sc_pgroup_seen) {
+ unseen = 1;
+ break;
+ }
+ }
+
+ if (unseen == 0)
+ return (0);
+
+ cbdata.sc_handle = g_hndl;
+ cbdata.sc_parent = ent;
+ cbdata.sc_service = issvc;
+ cbdata.sc_flags = 0;
+
+ if (entity_get_pg(ent, issvc, dependents, ud_cur_depts_pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("entity_get_pg", scf_error());
+ }
+
+ if (issvc)
+ r = scf_service_add_pg(ent, dependents,
+ SCF_GROUP_FRAMEWORK, 0, ud_cur_depts_pg);
+ else
+ r = scf_instance_add_pg(ent, dependents,
+ SCF_GROUP_FRAMEWORK, 0, ud_cur_depts_pg);
+ if (r != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_BACKEND_READONLY:
+ case SCF_ERROR_BACKEND_ACCESS:
+ case SCF_ERROR_NO_RESOURCES:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_EXISTS:
+ warn(emsg_pg_added, ient->sc_fmri,
+ dependents);
+ return (EBUSY);
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ warn(emsg_pg_add_perm, dependents,
+ ient->sc_fmri);
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_service_add_pg", scf_error());
+ }
+ }
+ }
+
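+	/*
+	 * Import all of the unseen dependents in a single transaction on
+	 * the "dependents" property group.
+	 */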
+ cbdata.sc_trans = imp_tx2;
+
+ if (scf_transaction_start(imp_tx2, ud_cur_depts_pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_BACKEND_ACCESS:
+ case SCF_ERROR_BACKEND_READONLY:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_DELETED:
+ warn(emsg_pg_deleted, ient->sc_fmri, dependents);
+ return (EBUSY);
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ warn(emsg_pg_mod_perm, dependents, ient->sc_fmri);
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_IN_USE:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_transaction_start", scf_error());
+ }
+ }
+
+ for (new_dpt_pgroup = uu_list_first(ient->sc_dependents);
+ new_dpt_pgroup != NULL;
+ new_dpt_pgroup = uu_list_next(ient->sc_dependents,
+ new_dpt_pgroup)) {
+ if (new_dpt_pgroup->sc_pgroup_seen)
+ continue;
+
+ r = lscf_dependent_import(new_dpt_pgroup, &cbdata);
+ if (r != UU_WALK_NEXT) {
+ if (r != UU_WALK_ERROR)
+ bad_error("lscf_dependent_import", r);
+
+ if (cbdata.sc_err == EALREADY) {
+ /*
+ * Duplicate dependents should have been
+ * caught.
+ */
+ bad_error("lscf_dependent_import",
+ cbdata.sc_err);
+ }
+
+ scf_transaction_destroy_children(imp_tx2);
+ scf_transaction_reset(imp_tx2);
+ return (cbdata.sc_err);
+ }
+ }
+
+ r = scf_transaction_commit(imp_tx2);
+
+ scf_transaction_destroy_children(imp_tx2);
+ scf_transaction_reset(imp_tx2);
+
+ switch (r) {
+ case 1:
+ return (0);
+
+ case 0:
+ warn(emsg_pg_changed, ient->sc_fmri, dependents);
+ return (EBUSY);
+
+ case -1:
+ break;
+
+ default:
+ bad_error("scf_transaction_commit", r);
+ }
+
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_BACKEND_READONLY:
+ case SCF_ERROR_BACKEND_ACCESS:
+ case SCF_ERROR_NO_RESOURCES:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_DELETED:
+ warn(emsg_pg_deleted, ient->sc_fmri, dependents);
+ return (EBUSY);
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ warn(emsg_pg_mod_perm, dependents, ient->sc_fmri);
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ default:
+		bad_error("scf_transaction_commit", scf_error());
+ /* NOTREACHED */
+ }
+}
+
+/*
+ * prop is taken to be a property in the "dependents" property group of snpl,
+ * which is taken to be the snaplevel of a last-import snapshot corresponding
+ * to ient. If prop is a valid dependents property, upgrade the dependent it
+ * represents according to the repository & ient. If ud_cur_depts_pg_set is
+ * true, then ud_cur_depts_pg is taken to be the "dependents" property group
+ * of the entity ient represents (possibly in the running snapshot). If it
+ * needs to be changed, an entry will be added to tx, if not NULL.
+ *
+ * Returns
+ * 0 - success
+ * ECONNABORTED - repository connection broken
+ * ENOMEM - out of memory
+ * ENOSPC - configd was out of resources
+ * ECANCELED - snpl's entity was deleted
+ * EINVAL - dependent target is invalid (error printed)
+ * - dependent is invalid (error printed)
+ * EBADF - snpl is corrupt (error printed)
+ * - snpl has corrupt pg (error printed)
+ * - dependency pg in target is corrupt (error printed)
+ * - running snapshot in dependent is missing snaplevel (error printed)
+ * EPERM - couldn't delete dependency pg (permission denied) (error printed)
+ * - couldn't create dependent (permission denied) (error printed)
+ * - couldn't modify dependent pg (permission denied) (error printed)
+ * EROFS - couldn't delete dependency pg (repository read-only)
+ * - couldn't create dependent (repository read-only)
+ * EACCES - couldn't delete dependency pg (backend access denied)
+ * - couldn't create dependent (backend access denied)
+ * EBUSY - ud_cur_depts_pg was deleted (error printed)
+ * - tx's pg was deleted (error printed)
+ * - dependent pg was changed or deleted (error printed)
+ * EEXIST - dependency pg already exists in new target (error printed)
+ */
+static int
+upgrade_dependent(const scf_property_t *prop, const entity_t *ient,
+ const scf_snaplevel_t *snpl, scf_transaction_t *tx)
+{
+ pgroup_t pgrp;
+ scf_type_t ty;
+ pgroup_t *new_dpt_pgroup;
+ pgroup_t *old_dpt_pgroup = NULL;
+ pgroup_t *current_pg;
+ scf_callback_t cbdata;
+ int tissvc;
+ void *target_ent;
+ scf_error_t serr;
+ int r;
+ scf_transaction_entry_t *ent;
+
+ const char * const cf_inval = gettext("Conflict upgrading %s "
+ "(dependent \"%s\" has invalid dependents property).\n");
+ const char * const cf_missing = gettext("Conflict upgrading %s "
+ "(dependent \"%s\" is missing).\n");
+ const char * const cf_newdpg = gettext("Conflict upgrading %s "
+ "(dependent \"%s\" has new dependency property group).\n");
+ const char * const cf_newtarg = gettext("Conflict upgrading %s "
+ "(dependent \"%s\" has new target).\n");
+ const char * const li_corrupt =
+ gettext("%s: \"last-import\" snapshot is corrupt.\n");
+ const char * const upgrading =
+ gettext("%s: Upgrading dependent \"%s\".\n");
+ const char * const r_no_lvl = gettext("%s: \"running\" snapshot is "
+ "corrupt (missing snaplevel).\n");
+
+ if (scf_property_type(prop, &ty) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_property_type", scf_error());
+ }
+ }
+
+ if (!(ty == SCF_TYPE_FMRI || ty == SCF_TYPE_ASTRING)) {
+ warn(li_corrupt, ient->sc_fmri);
+ return (EBADF);
+ }
+
+ /*
+ * prop represents a dependent in the old manifest. It is named after
+ * the dependent.
+ */
+ if (scf_property_get_name(prop, ud_name, max_scf_name_len + 1) < 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_property_get_name", scf_error());
+ }
+ }
+
+ /* See if it's in the new manifest. */
+ pgrp.sc_pgroup_name = ud_name;
+ new_dpt_pgroup =
+ uu_list_find(ient->sc_dependents, &pgrp, NULL, UU_DEFAULT);
+
+ /* If it's not, delete it... if it hasn't been customized. */
+ if (new_dpt_pgroup == NULL) {
+ if (!ud_cur_depts_pg_set)
+ return (0);
+
+ if (scf_property_get_value(prop, ud_val) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ warn(li_corrupt, ient->sc_fmri);
+ return (EBADF);
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_property_get_value",
+ scf_error());
+ }
+ }
+
+ if (scf_value_get_as_string(ud_val, ud_oldtarg,
+ max_scf_value_len + 1) < 0)
+ bad_error("scf_value_get_as_string", scf_error());
+
+ if (scf_pg_get_property(ud_cur_depts_pg, ud_name, ud_prop) !=
+ 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ return (0);
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_DELETED:
+ warn(emsg_pg_deleted, ient->sc_fmri,
+ "dependents");
+ return (EBUSY);
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_pg_get_property", scf_error());
+ }
+ }
+ if (scf_property_get_value(ud_prop, ud_val) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ warn(cf_inval, ient->sc_fmri, ud_name);
+ return (0);
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_property_get_value",
+ scf_error());
+ }
+ }
+
+ ty = scf_value_type(ud_val);
+ assert(ty != SCF_TYPE_INVALID);
+ if (!(ty == SCF_TYPE_FMRI || ty == SCF_TYPE_ASTRING)) {
+ warn(cf_inval, ient->sc_fmri, ud_name);
+ return (0);
+ }
+
+ if (scf_value_get_as_string(ud_val, ud_ctarg,
+ max_scf_value_len + 1) < 0)
+ bad_error("scf_value_get_as_string", scf_error());
+
+ r = fmri_equal(ud_ctarg, ud_oldtarg);
+ switch (r) {
+ case 1:
+ break;
+
+ case 0:
+ case -1: /* warn? */
+ warn(cf_newtarg, ient->sc_fmri, ud_name);
+ return (0);
+
+ case -2:
+ warn(li_corrupt, ient->sc_fmri);
+ return (EBADF);
+
+ default:
+ bad_error("fmri_equal", r);
+ }
+
+ if (scf_snaplevel_get_pg(snpl, ud_name, ud_pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ warn(li_corrupt, ient->sc_fmri);
+ return (EBADF);
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_snaplevel_get_pg", scf_error());
+ }
+ }
+
+ r = load_pg(ud_pg, &old_dpt_pgroup, ient->sc_fmri,
+ snap_lastimport);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ case ECONNABORTED:
+ case ENOMEM:
+ case EBADF:
+ return (r);
+
+ default:
+ bad_error("load_pg", r);
+ }
+
+ serr = fmri_to_entity(g_hndl, ud_ctarg, &target_ent, &tissvc);
+ switch (serr) {
+ case SCF_ERROR_NONE:
+ break;
+
+ case SCF_ERROR_NO_MEMORY:
+ internal_pgroup_free(old_dpt_pgroup);
+ return (ENOMEM);
+
+ case SCF_ERROR_NOT_FOUND:
+ internal_pgroup_free(old_dpt_pgroup);
+ goto delprop;
+
+ case SCF_ERROR_CONSTRAINT_VIOLATED: /* caught above */
+ case SCF_ERROR_INVALID_ARGUMENT: /* caught above */
+ default:
+ bad_error("fmri_to_entity", serr);
+ }
+
+ r = entity_get_running_pg(target_ent, tissvc, ud_name,
+ ud_pg, ud_iter2, ud_inst, imp_snap, ud_snpl);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ internal_pgroup_free(old_dpt_pgroup);
+ return (r);
+
+ case ECANCELED:
+ case ENOENT:
+ internal_pgroup_free(old_dpt_pgroup);
+ goto delprop;
+
+ case EBADF:
+ warn(r_no_lvl, ud_ctarg);
+ internal_pgroup_free(old_dpt_pgroup);
+ return (r);
+
+ case EINVAL:
+ default:
+ bad_error("entity_get_running_pg", r);
+ }
+
+ /* load it */
+ r = load_pg(ud_pg, &current_pg, ud_ctarg, NULL);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ internal_pgroup_free(old_dpt_pgroup);
+ goto delprop;
+
+ case ECONNABORTED:
+ case ENOMEM:
+ case EBADF:
+ internal_pgroup_free(old_dpt_pgroup);
+ return (r);
+
+ default:
+ bad_error("load_pg", r);
+ }
+
+ /* compare property groups */
+ if (!pg_equal(old_dpt_pgroup, current_pg)) {
+ warn(cf_newdpg, ient->sc_fmri, ud_name);
+ internal_pgroup_free(old_dpt_pgroup);
+ internal_pgroup_free(current_pg);
+ return (0);
+ }
+
+ internal_pgroup_free(old_dpt_pgroup);
+ internal_pgroup_free(current_pg);
+
+ if (g_verbose)
+ warn(gettext("%s: Deleting dependent \"%s\".\n"),
+ ient->sc_fmri, ud_name);
+
+ if (entity_get_pg(target_ent, tissvc, ud_name, ud_pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ case SCF_ERROR_DELETED:
+ goto delprop;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("entity_get_pg", scf_error());
+ }
+ }
+
+ if (scf_pg_delete(ud_pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ break;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_BACKEND_READONLY:
+ case SCF_ERROR_BACKEND_ACCESS:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ warn(emsg_pg_del_perm, ud_name, ient->sc_fmri);
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_pg_delete", scf_error());
+ }
+ }
+
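+ /*
+ * If the caller passed a transaction on the old "dependents" pg, also
+ * remove this dependent's entry from it.
+ */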
+delprop:
+ if (tx == NULL)
+ return (0);
+
+ ent = scf_entry_create(g_hndl);
+ if (ent == NULL)
+ return (ENOMEM);
+
+ if (scf_transaction_property_delete(tx, ent, ud_name) != 0) {
+ scf_entry_destroy(ent);
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ warn(emsg_pg_deleted, ient->sc_fmri,
+ "dependents");
+ return (EBUSY);
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_transaction_property_delete",
+ scf_error());
+ }
+ }
+
+ return (0);
+ }
+
+ new_dpt_pgroup->sc_pgroup_seen = 1;
+
+ /*
+ * Decide whether the dependent has changed in the manifest.
+ */
+ /* Compare the target. */
+ if (scf_property_get_value(prop, ud_val) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ warn(li_corrupt, ient->sc_fmri);
+ return (EBADF);
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_property_get_value", scf_error());
+ }
+ }
+
+ if (scf_value_get_as_string(ud_val, ud_oldtarg, max_scf_value_len + 1) <
+ 0)
+ bad_error("scf_value_get_as_string", scf_error());
+
+ r = fmri_equal(ud_oldtarg, new_dpt_pgroup->sc_pgroup_fmri);
+ switch (r) {
+ case 0:
+ break;
+
+ case 1:
+ /* Compare the dependency pgs. */
+ if (scf_snaplevel_get_pg(snpl, ud_name, ud_pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ warn(li_corrupt, ient->sc_fmri);
+ return (EBADF);
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_snaplevel_get_pg", scf_error());
+ }
+ }
+
+ r = load_pg(ud_pg, &old_dpt_pgroup, ient->sc_fmri,
+ snap_lastimport);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ case ECONNABORTED:
+ case ENOMEM:
+ case EBADF:
+ return (r);
+
+ default:
+ bad_error("load_pg", r);
+ }
+
+ if (pg_equal(old_dpt_pgroup, new_dpt_pgroup)) {
+ /* no change, leave customizations */
+ internal_pgroup_free(old_dpt_pgroup);
+ return (0);
+ }
+ break;
+
+ case -1:
+ warn(li_corrupt, ient->sc_fmri);
+ return (EBADF);
+
+ case -2:
+ warn(gettext("Dependent \"%s\" has invalid target \"%s\".\n"),
+ ud_name, new_dpt_pgroup->sc_pgroup_fmri);
+ return (EINVAL);
+
+ default:
+ bad_error("fmri_equal", r);
+ }
+
+ /*
+ * The dependent has changed in the manifest. Upgrade the current
+ * properties if they haven't been customized.
+ */
+
+ /*
+ * If new_dpt_pgroup->sc_pgroup_override is set, act as though the property
+ * group hasn't been customized.
+ */
+ if (new_dpt_pgroup->sc_pgroup_override)
+ goto nocust;
+
+ if (!ud_cur_depts_pg_set) {
+ warn(cf_missing, ient->sc_fmri, ud_name);
+ r = 0;
+ goto out;
+ } else if (scf_pg_get_property(ud_cur_depts_pg, ud_name, ud_prop) !=
+ 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ warn(cf_missing, ient->sc_fmri, ud_name);
+ r = 0;
+ goto out;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ r = scferror2errno(scf_error());
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ warn(emsg_pg_deleted, ient->sc_fmri, "dependents");
+ r = EBUSY;
+ goto out;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_pg_get_property", scf_error());
+ }
+ }
+
+ if (scf_property_get_value(ud_prop, ud_val) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ warn(cf_inval, ient->sc_fmri, ud_name);
+ r = 0;
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ r = scferror2errno(scf_error());
+ goto out;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_property_get_value", scf_error());
+ }
+ }
+
+ ty = scf_value_type(ud_val);
+ assert(ty != SCF_TYPE_INVALID);
+ if (!(ty == SCF_TYPE_FMRI || ty == SCF_TYPE_ASTRING)) {
+ warn(cf_inval, ient->sc_fmri, ud_name);
+ r = 0;
+ goto out;
+ }
+ if (scf_value_get_as_string(ud_val, ud_ctarg, max_scf_value_len + 1) <
+ 0)
+ bad_error("scf_value_get_as_string", scf_error());
+
+ r = fmri_equal(ud_ctarg, ud_oldtarg);
+ if (r == -1) {
+ warn(cf_inval, ient->sc_fmri, ud_name);
+ r = 0;
+ goto out;
+ } else if (r == -2) {
+ warn(li_corrupt, ient->sc_fmri);
+ r = EBADF;
+ goto out;
+ } else if (r == 0) {
+ /*
+ * Target has been changed. Only abort now if it's been
+ * changed to something other than what's in the manifest.
+ */
+ r = fmri_equal(ud_ctarg, new_dpt_pgroup->sc_pgroup_fmri);
+ if (r == -1) {
+ warn(cf_inval, ient->sc_fmri, ud_name);
+ r = 0;
+ goto out;
+ } else if (r == 0) {
+ warn(cf_newtarg, ient->sc_fmri, ud_name);
+ r = 0;
+ goto out;
+ } else if (r != 1) {
+ /* invalid sc_pgroup_fmri caught above */
+ bad_error("fmri_equal", r);
+ }
+
+ /*
+ * Fetch the current dependency pg. If it's what the manifest
+ * says, then no problem.
+ */
+ serr = fmri_to_entity(g_hndl, ud_ctarg, &target_ent, &tissvc);
+ switch (serr) {
+ case SCF_ERROR_NONE:
+ break;
+
+ case SCF_ERROR_NOT_FOUND:
+ warn(cf_missing, ient->sc_fmri, ud_name);
+ r = 0;
+ goto out;
+
+ case SCF_ERROR_NO_MEMORY:
+ r = ENOMEM;
+ goto out;
+
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ default:
+ bad_error("fmri_to_entity", serr);
+ }
+
+ r = entity_get_running_pg(target_ent, tissvc, ud_name,
+ ud_pg, ud_iter2, ud_inst, imp_snap, ud_snpl);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ goto out;
+
+ case ECANCELED:
+ case ENOENT:
+ warn(cf_missing, ient->sc_fmri, ud_name);
+ r = 0;
+ goto out;
+
+ case EBADF:
+ warn(r_no_lvl, ud_ctarg);
+ goto out;
+
+ case EINVAL:
+ default:
+ bad_error("entity_get_running_pg", r);
+ }
+
+ r = load_pg(ud_pg, &current_pg, ud_ctarg, NULL);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ warn(cf_missing, ient->sc_fmri, ud_name);
+ r = 0;
+ goto out;
+
+ case ECONNABORTED:
+ case ENOMEM:
+ case EBADF:
+ goto out;
+
+ default:
+ bad_error("load_pg", r);
+ }
+
+ if (!pg_equal(current_pg, new_dpt_pgroup))
+ warn(cf_newdpg, ient->sc_fmri, ud_name);
+ internal_pgroup_free(current_pg);
+ r = 0;
+ goto out;
+ } else if (r != 1) {
+ bad_error("fmri_equal", r);
+ }
+
+nocust:
+ /*
+ * Target has not been customized. Check the dependency property
+ * group.
+ */
+
+ if (old_dpt_pgroup == NULL) {
+ if (scf_snaplevel_get_pg(snpl, new_dpt_pgroup->sc_pgroup_name,
+ ud_pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ warn(li_corrupt, ient->sc_fmri);
+ return (EBADF);
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_snaplevel_get_pg", scf_error());
+ }
+ }
+
+ r = load_pg(ud_pg, &old_dpt_pgroup, ient->sc_fmri,
+ snap_lastimport);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ case ECONNABORTED:
+ case ENOMEM:
+ case EBADF:
+ return (r);
+
+ default:
+ bad_error("load_pg", r);
+ }
+ }
+
+ serr = fmri_to_entity(g_hndl, ud_ctarg, &target_ent, &tissvc);
+ switch (serr) {
+ case SCF_ERROR_NONE:
+ break;
+
+ case SCF_ERROR_NOT_FOUND:
+ warn(cf_missing, ient->sc_fmri, ud_name);
+ r = 0;
+ goto out;
+
+ case SCF_ERROR_NO_MEMORY:
+ r = ENOMEM;
+ goto out;
+
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ default:
+ bad_error("fmri_to_entity", serr);
+ }
+
+ r = entity_get_running_pg(target_ent, tissvc, ud_name, ud_pg,
+ ud_iter2, ud_inst, imp_snap, ud_snpl);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ goto out;
+
+ case ECANCELED:
+ case ENOENT:
+ warn(cf_missing, ient->sc_fmri, ud_name);
+ r = 0;
+ goto out;
+
+ case EBADF:
+ warn(r_no_lvl, ud_ctarg);
+ goto out;
+
+ case EINVAL:
+ default:
+ bad_error("entity_get_running_pg", r);
+ }
+
+ r = load_pg(ud_pg, &current_pg, ud_ctarg, NULL);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ warn(cf_missing, ient->sc_fmri, ud_name);
+ goto out;
+
+ case ECONNABORTED:
+ case ENOMEM:
+ case EBADF:
+ goto out;
+
+ default:
+ bad_error("load_pg", r);
+ }
+
+ if (!pg_equal(current_pg, old_dpt_pgroup)) {
+ if (!pg_equal(current_pg, new_dpt_pgroup))
+ warn(cf_newdpg, ient->sc_fmri, ud_name);
+ internal_pgroup_free(current_pg);
+ r = 0;
+ goto out;
+ }
+
+ /* Uncustomized. Upgrade. */
+
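+ /*
+ * If the manifest's target matches the old target, upgrade the
+ * dependency pg in place. If the manifest moved the dependent to a
+ * new target, delete the old dependency pg and import the new one.
+ */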
+ r = fmri_equal(new_dpt_pgroup->sc_pgroup_fmri, ud_oldtarg);
+ switch (r) {
+ case 1:
+ if (pg_equal(current_pg, new_dpt_pgroup)) {
+ /* Already upgraded. */
+ internal_pgroup_free(current_pg);
+ r = 0;
+ goto out;
+ }
+
+ internal_pgroup_free(current_pg);
+
+ /* upgrade current_pg */
+ if (entity_get_pg(target_ent, tissvc, ud_name, ud_pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ r = scferror2errno(scf_error());
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ warn(cf_missing, ient->sc_fmri, ud_name);
+ r = 0;
+ goto out;
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ default:
+ bad_error("entity_get_pg", scf_error());
+ }
+
+ if (tissvc)
+ r = scf_service_add_pg(target_ent, ud_name,
+ SCF_GROUP_DEPENDENCY, 0, ud_pg);
+ else
+ r = scf_instance_add_pg(target_ent, ud_name,
+ SCF_GROUP_DEPENDENCY, 0, ud_pg);
+ if (r != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_NO_RESOURCES:
+ case SCF_ERROR_BACKEND_READONLY:
+ case SCF_ERROR_BACKEND_ACCESS:
+ r = scferror2errno(scf_error());
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ warn(cf_missing, ient->sc_fmri,
+ ud_name);
+ r = 0;
+ goto out;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ warn(emsg_pg_deleted, ud_ctarg,
+ ud_name);
+ r = EPERM;
+ goto out;
+
+ case SCF_ERROR_EXISTS:
+ warn(emsg_pg_added, ud_ctarg, ud_name);
+ r = EBUSY;
+ goto out;
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("entity_add_pg", scf_error());
+ }
+ }
+ }
+
+ r = load_pg(ud_pg, &current_pg, ud_ctarg, NULL);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ warn(cf_missing, ient->sc_fmri, ud_name);
+ goto out;
+
+ case ECONNABORTED:
+ case ENOMEM:
+ case EBADF:
+ goto out;
+
+ default:
+ bad_error("load_pg", r);
+ }
+
+ if (g_verbose)
+ warn(upgrading, ient->sc_fmri, ud_name);
+
+ r = upgrade_pg(ud_pg, current_pg, old_dpt_pgroup,
+ new_dpt_pgroup, 0, ient->sc_fmri);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ warn(emsg_pg_deleted, ud_ctarg, ud_name);
+ r = EBUSY;
+ goto out;
+
+ case EPERM:
+ warn(emsg_pg_mod_perm, ud_name, ud_ctarg);
+ goto out;
+
+ case EBUSY:
+ warn(emsg_pg_changed, ud_ctarg, ud_name);
+ goto out;
+
+ case ECONNABORTED:
+ case ENOMEM:
+ case ENOSPC:
+ case EROFS:
+ case EACCES:
+ case EINVAL:
+ goto out;
+
+ default:
+ bad_error("upgrade_pg", r);
+ }
+ break;
+
+ case 0: {
+ scf_transaction_entry_t *ent;
+ scf_value_t *val;
+
+ internal_pgroup_free(current_pg);
+
+ /* delete old pg */
+ if (g_verbose)
+ warn(upgrading, ient->sc_fmri, ud_name);
+
+ if (entity_get_pg(target_ent, tissvc, ud_name, ud_pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ r = scferror2errno(scf_error());
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ warn(cf_missing, ient->sc_fmri, ud_name);
+ r = 0;
+ goto out;
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ default:
+ bad_error("entity_get_pg", scf_error());
+ }
+ } else if (scf_pg_delete(ud_pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ break;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_BACKEND_READONLY:
+ case SCF_ERROR_BACKEND_ACCESS:
+ r = scferror2errno(scf_error());
+ goto out;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ warn(emsg_pg_del_perm, ud_name, ient->sc_fmri);
+ r = scferror2errno(scf_error());
+ goto out;
+
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_pg_delete", scf_error());
+ }
+ }
+
+ /* import new one */
+ cbdata.sc_handle = g_hndl;
+ cbdata.sc_trans = NULL; /* handled below */
+
+ r = lscf_dependent_import(new_dpt_pgroup, &cbdata);
+ if (r != UU_WALK_NEXT) {
+ if (r != UU_WALK_ERROR)
+ bad_error("lscf_dependent_import", r);
+
+ r = cbdata.sc_err;
+ goto out;
+ }
+
+ if (tx == NULL)
+ break;
+
+ if ((ent = scf_entry_create(g_hndl)) == NULL ||
+ (val = scf_value_create(g_hndl)) == NULL) {
+ if (scf_error() == SCF_ERROR_NO_MEMORY)
+ return (ENOMEM);
+
+ bad_error("scf_entry_create", scf_error());
+ }
+
+ if (scf_transaction_property_change_type(tx, ent, ud_name,
+ SCF_TYPE_FMRI) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ r = scferror2errno(scf_error());
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ warn(emsg_pg_deleted, ient->sc_fmri,
+ "dependents");
+ r = EBUSY;
+ goto out;
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_transaction_property_"
+ "change_type", scf_error());
+ }
+
+ if (scf_transaction_property_new(tx, ent, ud_name,
+ SCF_TYPE_FMRI) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ r = scferror2errno(scf_error());
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ warn(emsg_pg_deleted, ient->sc_fmri,
+ "dependents");
+ r = EBUSY;
+ goto out;
+
+ case SCF_ERROR_EXISTS:
+ warn(emsg_pg_changed, ient->sc_fmri,
+ "dependents");
+ r = EBUSY;
+ goto out;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_transaction_property_"
+ "new", scf_error());
+ }
+ }
+ }
+
+ if (scf_value_set_from_string(val, SCF_TYPE_FMRI,
+ new_dpt_pgroup->sc_pgroup_fmri) != 0)
+ /* invalid sc_pgroup_fmri caught above */
+ bad_error("scf_value_set_from_string",
+ scf_error());
+
+ if (scf_entry_add_value(ent, val) != 0)
+ bad_error("scf_entry_add_value", scf_error());
+ break;
+ }
+
+ case -2:
+ warn(li_corrupt, ient->sc_fmri);
+ internal_pgroup_free(current_pg);
+ r = EBADF;
+ goto out;
+
+ case -1:
+ default:
+ /* invalid sc_pgroup_fmri caught above */
+ bad_error("fmri_equal", r);
+ }
+
+ r = 0;
+
+out:
+ if (old_dpt_pgroup != NULL)
+ internal_pgroup_free(old_dpt_pgroup);
+
+ return (r);
+}
+
+/*
+ * lipg is a property group in the last-import snapshot of ent, which is an
+ * scf_service_t or an scf_instance_t (according to ient). If lipg is not in
+ * ient's pgroups, delete it from ent if it hasn't been customized. If it is
+ * in ient's property groups, compare and upgrade ent appropriately.
+ *
+ * Returns
+ * 0 - success
+ * ECONNABORTED - repository connection broken
+ * ENOMEM - out of memory
+ * ENOSPC - configd is out of resources
+ * EINVAL - ient has invalid dependent (error printed)
+ * - ient has invalid pgroup_t (error printed)
+ * ECANCELED - ent has been deleted
+ * ENODEV - entity containing lipg has been deleted
+ * - entity containing running has been deleted
+ * EPERM - could not delete pg (permission denied) (error printed)
+ * - couldn't upgrade dependents (permission denied) (error printed)
+ * - couldn't import pg (permission denied) (error printed)
+ * - couldn't upgrade pg (permission denied) (error printed)
+ * EROFS - could not delete pg (repository read-only)
+ * - couldn't upgrade dependents (repository read-only)
+ * - couldn't import pg (repository read-only)
+ * - couldn't upgrade pg (repository read-only)
+ * EACCES - could not delete pg (backend access denied)
+ * - couldn't upgrade dependents (backend access denied)
+ * - couldn't import pg (backend access denied)
+ * - couldn't upgrade pg (backend access denied)
+ * EBUSY - property group was added (error printed)
+ * - property group was deleted (error printed)
+ * - property group changed (error printed)
+ * - "dependents" pg was added, changed, or deleted (error printed)
+ * - dependent target deleted (error printed)
+ * - dependent pg changed (error printed)
+ * EBADF - imp_snpl is corrupt (error printed)
+ * - ent has bad pg (error printed)
+ */
+static int
+process_old_pg(const scf_propertygroup_t *lipg, entity_t *ient, void *ent,
+ const scf_snaplevel_t *running)
+{
+ int r;
+ pgroup_t *mpg, *lipg_i, *curpg_i, pgrp;
+ scf_callback_t cbdata;
+
+ const char * const cf_pg_missing =
+ gettext("Conflict upgrading %s (property group %s is missing)\n");
+ const char * const deleting =
+ gettext("%s: Deleting property group \"%s\".\n");
+
+ const int issvc = (ient->sc_etype == SVCCFG_SERVICE_OBJECT);
+
+ /* Skip dependent property groups. */
+ if (scf_pg_get_type(lipg, imp_str, imp_str_sz) < 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ return (ENODEV);
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("scf_pg_get_type", scf_error());
+ }
+ }
+
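+ /*
+ * Dependency pgs which have an "external" property are managed through
+ * another entity's dependents list, so leave them alone.
+ */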
+ if (strcmp(imp_str, SCF_GROUP_DEPENDENCY) == 0) {
+ if (scf_pg_get_property(lipg, "external", NULL) == 0)
+ return (0);
+
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_DELETED:
+ return (ENODEV);
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_pg_get_property", scf_error());
+ }
+ }
+
+ /* lookup pg in new properties */
+ if (scf_pg_get_name(lipg, imp_str, imp_str_sz) < 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ return (ENODEV);
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("scf_pg_get_name", scf_error());
+ }
+ }
+
+ pgrp.sc_pgroup_name = imp_str;
+ mpg = uu_list_find(ient->sc_pgroups, &pgrp, NULL, NULL);
+
+ if (mpg == NULL || mpg->sc_pgroup_delete) {
+ if (mpg != NULL)
+ mpg->sc_pgroup_seen = 1;
+
+ /* property group was deleted from manifest */
+ if (entity_get_pg(ent, issvc, imp_str, imp_pg2) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ return (0);
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("entity_get_pg", scf_error());
+ }
+ }
+
+ if (mpg != NULL && mpg->sc_pgroup_delete) {
+ if (g_verbose)
+ warn(deleting, ient->sc_fmri, imp_str);
+ if (scf_pg_delete(imp_pg2) == 0)
+ return (0);
+
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ return (0);
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_BACKEND_READONLY:
+ case SCF_ERROR_BACKEND_ACCESS:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ warn(emsg_pg_del_perm, imp_str, ient->sc_fmri);
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_pg_delete", scf_error());
+ }
+ }
+
+ r = load_pg(lipg, &lipg_i, ient->sc_fmri, snap_lastimport);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ return (ENODEV);
+
+ case ECONNABORTED:
+ case ENOMEM:
+ case EBADF:
+ return (r);
+
+ default:
+ bad_error("load_pg", r);
+ }
+
+ r = load_pg(imp_pg2, &curpg_i, ient->sc_fmri, NULL);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ case ECONNABORTED:
+ case ENOMEM:
+ case EBADF:
+ internal_pgroup_free(lipg_i);
+ return (r);
+
+ default:
+ bad_error("load_pg", r);
+ }
+
+ if (pg_equal(lipg_i, curpg_i)) {
+ if (g_verbose)
+ warn(deleting, ient->sc_fmri, imp_str);
+ if (scf_pg_delete(imp_pg2) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ break;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ internal_pgroup_free(lipg_i);
+ internal_pgroup_free(curpg_i);
+ return (ECONNABORTED);
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("scf_pg_delete", scf_error());
+ }
+ }
+ } else {
+ report_pg_diffs(lipg_i, curpg_i, ient->sc_fmri, 0);
+ }
+
+ internal_pgroup_free(lipg_i);
+ internal_pgroup_free(curpg_i);
+
+ return (0);
+ }
+
+ mpg->sc_pgroup_seen = 1;
+
+ if (strcmp(imp_str, "dependents") == 0)
+ return (upgrade_dependents(lipg, imp_snpl, ient, running, ent));
+
+ /*
+ * Only dependent pgs can have override set, and we skipped those
+ * above.
+ */
+ assert(!mpg->sc_pgroup_override);
+
+ /* compare */
+ r = load_pg(lipg, &lipg_i, ient->sc_fmri, snap_lastimport);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ return (ENODEV);
+
+ case ECONNABORTED:
+ case EBADF:
+ case ENOMEM:
+ return (r);
+
+ default:
+ bad_error("load_pg", r);
+ }
+
+ if (pg_equal(mpg, lipg_i)) {
+ /* The manifest pg has not changed. Move on. */
+ r = 0;
+ goto out;
+ }
+
+ /* upgrade current properties according to lipg & mpg */
+ if (running != NULL)
+ r = scf_snaplevel_get_pg(running, imp_str, imp_pg2);
+ else
+ r = entity_get_pg(ent, issvc, imp_str, imp_pg2);
+ if (r != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ r = scferror2errno(scf_error());
+ goto out;
+
+ case SCF_ERROR_DELETED:
+ if (running != NULL)
+ r = ENODEV;
+ else
+ r = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("entity_get_pg", scf_error());
+ }
+
+ warn(cf_pg_missing, ient->sc_fmri, imp_str);
+
+ r = 0;
+ goto out;
+ }
+
+ r = load_pg_attrs(imp_pg2, &curpg_i);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ warn(cf_pg_missing, ient->sc_fmri, imp_str);
+ r = 0;
+ goto out;
+
+ case ECONNABORTED:
+ case ENOMEM:
+ goto out;
+
+ default:
+ bad_error("load_pg_attrs", r);
+ }
+
+ if (!pg_attrs_equal(lipg_i, curpg_i, NULL, 0)) {
+ (void) pg_attrs_equal(curpg_i, mpg, ient->sc_fmri, 0);
+ internal_pgroup_free(curpg_i);
+ r = 0;
+ goto out;
+ }
+
+ internal_pgroup_free(curpg_i);
+
+ r = load_pg(imp_pg2, &curpg_i, ient->sc_fmri, NULL);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ warn(cf_pg_missing, ient->sc_fmri, imp_str);
+ r = 0;
+ goto out;
+
+ case ECONNABORTED:
+ case EBADF:
+ case ENOMEM:
+ goto out;
+
+ default:
+ bad_error("load_pg", r);
+ }
+
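+ /*
+ * The current values still match last-import, but the pg's attributes
+ * differ from the manifest's: delete the pg and re-import it with the
+ * new attributes.
+ */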
+ if (pg_equal(lipg_i, curpg_i) &&
+ !pg_attrs_equal(lipg_i, mpg, NULL, 0)) {
+ int do_delete = 1;
+
+ if (g_verbose)
+ warn(gettext("%s: Upgrading property group \"%s\".\n"),
+ ient->sc_fmri, mpg->sc_pgroup_name);
+
+ internal_pgroup_free(curpg_i);
+
+ if (running != NULL &&
+ entity_get_pg(ent, issvc, imp_str, imp_pg2) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ r = ECANCELED;
+ goto out;
+
+ case SCF_ERROR_NOT_FOUND:
+ do_delete = 0;
+ break;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ r = scferror2errno(scf_error());
+ goto out;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("entity_get_pg", scf_error());
+ }
+ }
+
+ if (do_delete && scf_pg_delete(imp_pg2) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ break;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_BACKEND_READONLY:
+ case SCF_ERROR_BACKEND_ACCESS:
+ r = scferror2errno(scf_error());
+ goto out;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ warn(emsg_pg_del_perm, mpg->sc_pgroup_name,
+ ient->sc_fmri);
+ r = scferror2errno(scf_error());
+ goto out;
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("scf_pg_delete", scf_error());
+ }
+ }
+
+ cbdata.sc_handle = g_hndl;
+ cbdata.sc_parent = ent;
+ cbdata.sc_service = issvc;
+ cbdata.sc_flags = 0;
+ cbdata.sc_source_fmri = ient->sc_fmri;
+ cbdata.sc_target_fmri = ient->sc_fmri;
+
+ r = entity_pgroup_import(mpg, &cbdata);
+ switch (r) {
+ case UU_WALK_NEXT:
+ r = 0;
+ goto out;
+
+ case UU_WALK_ERROR:
+ if (cbdata.sc_err == EEXIST) {
+ warn(emsg_pg_added, ient->sc_fmri,
+ mpg->sc_pgroup_name);
+ r = EBUSY;
+ } else {
+ r = cbdata.sc_err;
+ }
+ goto out;
+
+ default:
+ bad_error("entity_pgroup_import", r);
+ }
+ }
+
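+ /*
+ * If we compared against the running snapshot, fetch the live pg from
+ * the entity before upgrading. If the entity doesn't have it, just
+ * import the manifest's pg (forced) instead.
+ */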
+ if (running != NULL &&
+ entity_get_pg(ent, issvc, imp_str, imp_pg2) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_DELETED:
+ r = scferror2errno(scf_error());
+ goto out;
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("entity_get_pg", scf_error());
+ }
+
+ cbdata.sc_handle = g_hndl;
+ cbdata.sc_parent = ent;
+ cbdata.sc_service = issvc;
+ cbdata.sc_flags = SCI_FORCE;
+ cbdata.sc_source_fmri = ient->sc_fmri;
+ cbdata.sc_target_fmri = ient->sc_fmri;
+
+ r = entity_pgroup_import(mpg, &cbdata);
+ switch (r) {
+ case UU_WALK_NEXT:
+ r = 0;
+ goto out;
+
+ case UU_WALK_ERROR:
+ if (cbdata.sc_err == EEXIST) {
+ warn(emsg_pg_added, ient->sc_fmri,
+ mpg->sc_pgroup_name);
+ r = EBUSY;
+ } else {
+ r = cbdata.sc_err;
+ }
+ goto out;
+
+ default:
+ bad_error("entity_pgroup_import", r);
+ }
+ }
+
+ r = upgrade_pg(imp_pg2, curpg_i, lipg_i, mpg, g_verbose, ient->sc_fmri);
+ internal_pgroup_free(curpg_i);
+ switch (r) {
+ case 0:
+ ient->sc_import_state = IMPORT_PROP_BEGUN;
+ break;
+
+ case ECANCELED:
+ warn(emsg_pg_deleted, ient->sc_fmri, mpg->sc_pgroup_name);
+ r = EBUSY;
+ break;
+
+ case EPERM:
+ warn(emsg_pg_mod_perm, mpg->sc_pgroup_name, ient->sc_fmri);
+ break;
+
+ case EBUSY:
+ warn(emsg_pg_changed, ient->sc_fmri, mpg->sc_pgroup_name);
+ break;
+
+ case ECONNABORTED:
+ case ENOMEM:
+ case ENOSPC:
+ case EROFS:
+ case EACCES:
+ case EINVAL:
+ break;
+
+ default:
+ bad_error("upgrade_pg", r);
+ }
+
+out:
+ internal_pgroup_free(lipg_i);
+ return (r);
+}
+
+/*
+ * Upgrade the properties of ent according to snpl & ient.
+ *
+ * Returns
+ * 0 - success
+ * ECONNABORTED - repository connection broken
+ * ENOMEM - out of memory
+ * ENOSPC - configd is out of resources
+ * ECANCELED - ent was deleted
+ * ENODEV - entity containing snpl was deleted
+ * - entity containing running was deleted
+ * EBADF - imp_snpl is corrupt (error printed)
+ * - ent has corrupt pg (error printed)
+ * - dependent has corrupt pg (error printed)
+ * EBUSY - pg was added, changed, or deleted (error printed)
+ * - dependent target was deleted (error printed)
+ * - dependent pg changed (error printed)
+ * EINVAL - invalid property group name (error printed)
+ * - invalid property name (error printed)
+ * - invalid value (error printed)
+ * - ient has invalid pgroup or dependent (error printed)
+ * EPERM - could not create property group (permission denied) (error printed)
+ * - could not modify property group (permission denied) (error printed)
+ * - couldn't delete, upgrade, or import pg or dependent (error printed)
+ * EROFS - could not create property group (repository read-only)
+ * - couldn't delete, upgrade, or import pg or dependent
+ * EACCES - could not create property group (backend access denied)
+ * - couldn't delete, upgrade, or import pg or dependent
+ * EEXIST - dependent collision in target service (error printed)
+ */
+static int
+upgrade_props(void *ent, scf_snaplevel_t *running, scf_snaplevel_t *snpl,
+ entity_t *ient)
+{
+ pgroup_t *pg, *rpg;
+ int r;
+ uu_list_t *pgs = ient->sc_pgroups;
+
+ const int issvc = (ient->sc_etype == SVCCFG_SERVICE_OBJECT);
+
+ /* clear sc_pgroup_seen for pgs */
+ if (uu_list_walk(pgs, clear_int,
+ (void *)offsetof(pgroup_t, sc_pgroup_seen), UU_DEFAULT) != 0)
+ bad_error("uu_list_walk", uu_error());
+
+ if (scf_iter_snaplevel_pgs(imp_up_iter, snpl) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ return (ENODEV);
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ default:
+ bad_error("scf_iter_snaplevel_pgs", scf_error());
+ }
+ }
+
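+ /*
+ * Walk the pgs of the last-import snaplevel, deleting, upgrading, or
+ * reporting on each relative to the new manifest.
+ */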
+ for (;;) {
+ r = scf_iter_next_pg(imp_up_iter, imp_pg);
+ if (r == 0)
+ break;
+ if (r == 1) {
+ r = process_old_pg(imp_pg, ient, ent, running);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ case ENOMEM:
+ case ENOSPC:
+ case ECANCELED:
+ case ENODEV:
+ case EPERM:
+ case EROFS:
+ case EACCES:
+ case EBADF:
+ case EBUSY:
+ case EINVAL:
+ return (r);
+
+ default:
+ bad_error("process_old_pg", r);
+ }
+ continue;
+ }
+ if (r != -1)
+ bad_error("scf_iter_next_pg", r);
+
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ return (ENODEV);
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (ECONNABORTED);
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ default:
+ bad_error("scf_iter_next_pg", scf_error());
+ }
+ }
+
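+ /* Import or report on pgs which are new in the manifest. */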
+ for (pg = uu_list_first(pgs); pg != NULL; pg = uu_list_next(pgs, pg)) {
+ if (pg->sc_pgroup_seen)
+ continue;
+
+ /* pg is new */
+
+ if (strcmp(pg->sc_pgroup_name, "dependents") == 0) {
+ r = upgrade_dependents(NULL, imp_snpl, ient, running,
+ ent);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ case ENOMEM:
+ case ENOSPC:
+ case ECANCELED:
+ case ENODEV:
+ case EBADF:
+ case EBUSY:
+ case EINVAL:
+ case EPERM:
+ case EROFS:
+ case EACCES:
+ case EEXIST:
+ return (r);
+
+ default:
+ bad_error("upgrade_dependents", r);
+ }
+ continue;
+ }
+
+ if (running != NULL)
+ r = scf_snaplevel_get_pg(running, pg->sc_pgroup_name,
+ imp_pg);
+ else
+ r = entity_get_pg(ent, issvc, pg->sc_pgroup_name,
+ imp_pg);
+ if (r != 0) {
+ scf_callback_t cbdata;
+
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_DELETED:
+ if (running != NULL)
+ return (ENODEV);
+ else
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ warn(emsg_fmri_invalid_pg_name, ient->sc_fmri,
+ pg->sc_pgroup_name);
+ return (EINVAL);
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("entity_get_pg", scf_error());
+ }
+
+ /* User doesn't have pg, so import it. */
+
+ cbdata.sc_handle = g_hndl;
+ cbdata.sc_parent = ent;
+ cbdata.sc_service = issvc;
+ cbdata.sc_flags = SCI_FORCE;
+ cbdata.sc_source_fmri = ient->sc_fmri;
+ cbdata.sc_target_fmri = ient->sc_fmri;
+
+ r = entity_pgroup_import(pg, &cbdata);
+ switch (r) {
+ case UU_WALK_NEXT:
+ ient->sc_import_state = IMPORT_PROP_BEGUN;
+ continue;
+
+ case UU_WALK_ERROR:
+ if (cbdata.sc_err == EEXIST) {
+ warn(emsg_pg_added, ient->sc_fmri,
+ pg->sc_pgroup_name);
+ return (EBUSY);
+ }
+ return (cbdata.sc_err);
+
+ default:
+ bad_error("entity_pgroup_import", r);
+ }
+ }
+
+ /* report differences between pg & current */
+ r = load_pg(imp_pg, &rpg, ient->sc_fmri, NULL);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ warn(emsg_pg_deleted, ient->sc_fmri,
+ pg->sc_pgroup_name);
+ return (EBUSY);
+
+ case ECONNABORTED:
+ case EBADF:
+ case ENOMEM:
+ return (r);
+
+ default:
+ bad_error("load_pg", r);
+ }
+ report_pg_diffs(pg, rpg, ient->sc_fmri, 1);
+ internal_pgroup_free(rpg);
+ rpg = NULL;
+ }
+
+ return (0);
+}
+
+/*
+ * Create or update a snapshot of inst. Uses imp_snap.
+ *
+ * Returns
+ * 0 - success
+ * ECONNABORTED - repository connection broken
+ * EPERM - permission denied
+ * ENOSPC - configd is out of resources
+ * ECANCELED - inst was deleted
+ * -1 - unknown libscf error (message printed)
+ */
+static int
+take_snap(scf_instance_t *inst, const char *name)
+{
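+ /*
+ * If a snapshot with this name already exists, retake it in place;
+ * otherwise create a new one. If another process creates it between
+ * the lookup and the create, retry.
+ */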
+again:
+ if (scf_instance_get_snapshot(inst, name, imp_snap) == 0) {
+ if (_scf_snapshot_take_attach(inst, imp_snap) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_PERMISSION_DENIED:
+ case SCF_ERROR_NO_RESOURCES:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ default:
+ bad_error("_scf_snapshot_take_attach",
+ scf_error());
+ }
+ }
+ } else {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (scferror2errno(scf_error()));
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_instance_get_snapshot", scf_error());
+ }
+
+ if (_scf_snapshot_take_new(inst, name, imp_snap) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_EXISTS:
+ goto again;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_NO_RESOURCES:
+ case SCF_ERROR_PERMISSION_DENIED:
+ return (scferror2errno(scf_error()));
+
+ default:
+ scfwarn();
+ return (-1);
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ bad_error("_scf_snapshot_take_new",
+ scf_error());
+ }
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * Import an instance. If it doesn't exist, create it. If it has
+ * a last-import snapshot, upgrade its properties. Finish by updating its
+ * last-import snapshot. If it doesn't have a last-import snapshot then it
+ * could have been created for a dependent tag in another manifest. Import the
+ * new properties. If there's a conflict, don't override them (as is done now).
+ *
+ * On success, returns UU_WALK_NEXT. On error returns UU_WALK_ERROR and sets
+ * lcbdata->sc_err to
+ * ECONNABORTED - repository connection broken
+ * ENOMEM - out of memory
+ * ENOSPC - svc.configd is out of resources
+ * EEXIST - dependency collision in dependent service (error printed)
+ * EPERM - couldn't create temporary instance (permission denied)
+ * - couldn't import into temporary instance (permission denied)
+ * - couldn't take snapshot (permission denied)
+ * - couldn't upgrade properties (permission denied)
+ * - couldn't import properties (permission denied)
+ * - couldn't import dependents (permission denied)
+ * EROFS - couldn't create temporary instance (repository read-only)
+ * - couldn't import into temporary instance (repository read-only)
+ * - couldn't upgrade properties (repository read-only)
+ * - couldn't import properties (repository read-only)
+ * - couldn't import dependents (repository read-only)
+ * EACCES - couldn't create temporary instance (backend access denied)
+ * - couldn't import into temporary instance (backend access denied)
+ * - couldn't upgrade properties (backend access denied)
+ * - couldn't import properties (backend access denied)
+ * - couldn't import dependents (backend access denied)
+ * EINVAL - invalid instance name (error printed)
+ * - invalid pgroup_t's (error printed)
+ * - invalid dependents (error printed)
+ * EBUSY - temporary service deleted (error printed)
+ * - temporary instance deleted (error printed)
+ * - temporary instance changed (error printed)
+ * - temporary instance already exists (error printed)
+ * - instance deleted (error printed)
+ * EBADF - instance has corrupt last-import snapshot (error printed)
+ * - instance is corrupt (error printed)
+ * - dependent has corrupt pg (error printed)
+ * -1 - unknown libscf error (error printed)
+ */
+static int
+lscf_instance_import(void *v, void *pvt)
+{
+ entity_t *inst = v;
+ scf_callback_t ctx;
+ scf_callback_t *lcbdata = pvt;
+ scf_service_t *rsvc = lcbdata->sc_parent;
+ int r;
+ scf_snaplevel_t *running;
+ int flags = lcbdata->sc_flags;
+
+ const char * const emsg_tdel =
+ gettext("Temporary instance svc:/%s:%s was deleted.\n");
+ const char * const emsg_tchg = gettext("Temporary instance svc:/%s:%s "
+ "changed unexpectedly.\n");
+ const char * const emsg_del = gettext("%s changed unexpectedly "
+ "(instance \"%s\" was deleted.\n");
+ const char * const emsg_badsnap = gettext(
+ "\"%s\" snapshot of %s is corrupt (missing a snaplevel).\n");
+
+ /*
+ * prepare last-import snapshot:
+ * create temporary instance (service was precreated)
+ * populate with properties from bundle
+ * take snapshot
+ */
+ if (scf_service_add_instance(imp_tsvc, inst->sc_name, imp_tinst) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_NO_RESOURCES:
+ case SCF_ERROR_BACKEND_READONLY:
+ case SCF_ERROR_BACKEND_ACCESS:
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_EXISTS:
+ warn(gettext("Temporary service svc:/%s "
+ "changed unexpectedly (instance \"%s\" added).\n"),
+ imp_tsname, inst->sc_name);
+ lcbdata->sc_err = EBUSY;
+ return (UU_WALK_ERROR);
+
+ case SCF_ERROR_DELETED:
+ warn(gettext("Temporary service svc:/%s "
+ "was deleted unexpectedly.\n"), imp_tsname);
+ lcbdata->sc_err = EBUSY;
+ return (UU_WALK_ERROR);
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ warn(gettext("Invalid instance name \"%s\".\n"),
+ inst->sc_name);
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ warn(gettext("Could not create temporary instance "
+ "\"%s\" in svc:/%s (permission denied).\n"),
+ inst->sc_name, imp_tsname);
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_service_add_instance", scf_error());
+ }
+ }
+
+ r = snprintf(imp_str, imp_str_sz, "svc:/%s:%s", imp_tsname,
+ inst->sc_name);
+ if (r < 0)
+ bad_error("snprintf", errno);
+
+ r = lscf_import_instance_pgs(imp_tinst, imp_str, inst,
+ lcbdata->sc_flags | SCI_NOENABLED);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ warn(emsg_tdel, imp_tsname, inst->sc_name);
+ lcbdata->sc_err = EBUSY;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ case EEXIST:
+ warn(emsg_tchg, imp_tsname, inst->sc_name);
+ lcbdata->sc_err = EBUSY;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ case ECONNABORTED:
+ goto connaborted;
+
+ case ENOMEM:
+ case ENOSPC:
+ case EPERM:
+ case EROFS:
+ case EACCES:
+ case EINVAL:
+ case EBUSY:
+ lcbdata->sc_err = r;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ default:
+ bad_error("lscf_import_instance_pgs", r);
+ }
+
+ r = snprintf(imp_str, imp_str_sz, "svc:/%s:%s", imp_tsname,
+ inst->sc_name);
+ if (r < 0)
+ bad_error("snprintf", errno);
+
+ ctx.sc_handle = lcbdata->sc_handle;
+ ctx.sc_parent = imp_tinst;
+ ctx.sc_service = 0;
+ ctx.sc_source_fmri = inst->sc_fmri;
+ ctx.sc_target_fmri = imp_str;
+ if (uu_list_walk(inst->sc_dependents, entity_pgroup_import, &ctx,
+ UU_DEFAULT) != 0) {
+ if (uu_error() != UU_ERROR_CALLBACK_FAILED)
+ bad_error("uu_list_walk", uu_error());
+
+ switch (ctx.sc_err) {
+ case ECONNABORTED:
+ goto connaborted;
+
+ case ECANCELED:
+ warn(emsg_tdel, imp_tsname, inst->sc_name);
+ lcbdata->sc_err = EBUSY;
+ break;
+
+ case EEXIST:
+ warn(emsg_tchg, imp_tsname, inst->sc_name);
+ lcbdata->sc_err = EBUSY;
+ break;
+
+ default:
+ lcbdata->sc_err = ctx.sc_err;
+ }
+ r = UU_WALK_ERROR;
+ goto deltemp;
+ }
+
+ if (_scf_snapshot_take_new_named(imp_tinst, inst->sc_parent->sc_name,
+ inst->sc_name, snap_lastimport, imp_tlisnap) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ goto connaborted;
+
+ case SCF_ERROR_NO_RESOURCES:
+ r = stash_scferror(lcbdata);
+ goto deltemp;
+
+ case SCF_ERROR_EXISTS:
+ warn(emsg_tchg, imp_tsname, inst->sc_name);
+ lcbdata->sc_err = EBUSY;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ warn(gettext("Could not take \"%s\" snapshot of %s "
+ "(permission denied).\n"), snap_lastimport,
+ imp_str);
+ r = stash_scferror(lcbdata);
+ goto deltemp;
+
+ default:
+ scfwarn();
+ lcbdata->sc_err = -1;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ bad_error("_scf_snapshot_take_new_named", scf_error());
+ }
+ }
+
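+ /*
+ * SCI_FRESH indicates a from-scratch import: skip the upgrade path and
+ * create the instance directly.
+ */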
+ if (lcbdata->sc_flags & SCI_FRESH)
+ goto fresh;
+
+ if (scf_service_get_instance(rsvc, inst->sc_name, imp_inst) == 0) {
+ if (scf_instance_get_snapshot(imp_inst, snap_lastimport,
+ imp_lisnap) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ warn(emsg_del, inst->sc_parent->sc_fmri,
+ inst->sc_name);
+ lcbdata->sc_err = EBUSY;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ case SCF_ERROR_NOT_FOUND:
+ flags |= SCI_FORCE;
+ goto nosnap;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ goto connaborted;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_instance_get_snapshot",
+ scf_error());
+ }
+ }
+
+ /* upgrade */
+
+ /*
+ * compare new properties with last-import properties
+ * upgrade current properties
+ */
+ /* clear sc_pgroup_seen for pgs */
+ if (uu_list_walk(inst->sc_pgroups, clear_int,
+ (void *)offsetof(pgroup_t, sc_pgroup_seen), UU_DEFAULT) !=
+ 0)
+ bad_error("uu_list_walk", uu_error());
+
+ r = get_snaplevel(imp_lisnap, 0, imp_snpl);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ goto connaborted;
+
+ case ECANCELED:
+ warn(emsg_del, inst->sc_parent->sc_fmri, inst->sc_name);
+ lcbdata->sc_err = EBUSY;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ case ENOENT:
+ warn(emsg_badsnap, snap_lastimport, inst->sc_fmri);
+ lcbdata->sc_err = EBADF;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ default:
+ bad_error("get_snaplevel", r);
+ }
+
+ if (scf_instance_get_snapshot(imp_inst, snap_running,
+ imp_rsnap) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ warn(emsg_del, inst->sc_parent->sc_fmri,
+ inst->sc_name);
+ lcbdata->sc_err = EBUSY;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ goto connaborted;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_instance_get_snapshot",
+ scf_error());
+ }
+
+ running = NULL;
+ } else {
+ r = get_snaplevel(imp_rsnap, 0, imp_rsnpl);
+ switch (r) {
+ case 0:
+ running = imp_rsnpl;
+ break;
+
+ case ECONNABORTED:
+ goto connaborted;
+
+ case ECANCELED:
+ warn(emsg_del, inst->sc_parent->sc_fmri,
+ inst->sc_name);
+ lcbdata->sc_err = EBUSY;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ case ENOENT:
+ warn(emsg_badsnap, snap_running, inst->sc_fmri);
+ lcbdata->sc_err = EBADF;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ default:
+ bad_error("get_snaplevel", r);
+ }
+ }
+
+ r = upgrade_props(imp_inst, running, imp_snpl, inst);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ case ENODEV:
+ warn(emsg_del, inst->sc_parent->sc_fmri, inst->sc_name);
+ lcbdata->sc_err = EBUSY;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ case ECONNABORTED:
+ goto connaborted;
+
+ case ENOMEM:
+ case ENOSPC:
+ case EBADF:
+ case EBUSY:
+ case EINVAL:
+ case EPERM:
+ case EROFS:
+ case EACCES:
+ case EEXIST:
+ lcbdata->sc_err = r;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ default:
+ bad_error("upgrade_props", r);
+ }
+
+ inst->sc_import_state = IMPORT_PROP_DONE;
+ } else {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ goto connaborted;
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_INVALID_ARGUMENT: /* caught above */
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_service_get_instance", scf_error());
+ }
+
+fresh:
+ /* create instance */
+ if (scf_service_add_instance(rsvc, inst->sc_name,
+ imp_inst) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ goto connaborted;
+
+ case SCF_ERROR_NO_RESOURCES:
+ case SCF_ERROR_BACKEND_READONLY:
+ case SCF_ERROR_BACKEND_ACCESS:
+ r = stash_scferror(lcbdata);
+ goto deltemp;
+
+ case SCF_ERROR_EXISTS:
+ warn(gettext("%s changed unexpectedly "
+ "(instance \"%s\" added).\n"),
+ inst->sc_parent->sc_fmri, inst->sc_name);
+ lcbdata->sc_err = EBUSY;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ warn(gettext("Could not create \"%s\" instance "
+ "in %s (permission denied).\n"),
+ inst->sc_name, inst->sc_parent->sc_fmri);
+ r = stash_scferror(lcbdata);
+ goto deltemp;
+
+ case SCF_ERROR_INVALID_ARGUMENT: /* caught above */
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_service_add_instance",
+ scf_error());
+ }
+ }
+
+nosnap:
+ /*
+ * Create a last-import snapshot to serve as an attachment
+ * point for the real one from the temporary instance. Since
+ * the contents are irrelevant, take it now, while the instance
+ * is empty, to minimize svc.configd's work.
+ */
+ if (_scf_snapshot_take_new(imp_inst, snap_lastimport,
+ imp_lisnap) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ goto connaborted;
+
+ case SCF_ERROR_NO_RESOURCES:
+ r = stash_scferror(lcbdata);
+ goto deltemp;
+
+ case SCF_ERROR_EXISTS:
+ warn(gettext("%s changed unexpectedly "
+ "(snapshot \"%s\" added).\n"),
+ inst->sc_fmri, snap_lastimport);
+ lcbdata->sc_err = EBUSY;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ warn(gettext("Could not take \"%s\" snapshot "
+ "of %s (permission denied).\n"),
+ snap_lastimport, inst->sc_fmri);
+ r = stash_scferror(lcbdata);
+ goto deltemp;
+
+ default:
+ scfwarn();
+ lcbdata->sc_err = -1;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ bad_error("_scf_snapshot_take_new",
+ scf_error());
+ }
+ }
+
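+ /*
+ * li_only: skip importing properties and dependents; only attach the
+ * last-import snapshot below.
+ */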
+ if (li_only)
+ goto lionly;
+
+ inst->sc_import_state = IMPORT_PROP_BEGUN;
+
+ r = lscf_import_instance_pgs(imp_inst, inst->sc_fmri, inst,
+ flags);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ goto connaborted;
+
+ case ECANCELED:
+ warn(gettext("%s changed unexpectedly "
+ "(instance \"%s\" deleted).\n"),
+ inst->sc_parent->sc_fmri, inst->sc_name);
+ lcbdata->sc_err = EBUSY;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ case EEXIST:
+ warn(gettext("%s changed unexpectedly "
+ "(property group added).\n"), inst->sc_fmri);
+ lcbdata->sc_err = EBUSY;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ default:
+ lcbdata->sc_err = r;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ case EINVAL: /* caught above */
+ bad_error("lscf_import_instance_pgs", r);
+ }
+
+ ctx.sc_parent = imp_inst;
+ ctx.sc_service = 0;
+ ctx.sc_trans = NULL;
+ if (uu_list_walk(inst->sc_dependents, lscf_dependent_import,
+ &ctx, UU_DEFAULT) != 0) {
+ if (uu_error() != UU_ERROR_CALLBACK_FAILED)
+ bad_error("uu_list_walk", uu_error());
+
+ if (ctx.sc_err == ECONNABORTED)
+ goto connaborted;
+ lcbdata->sc_err = ctx.sc_err;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+ }
+
+ inst->sc_import_state = IMPORT_PROP_DONE;
+
+ if (g_verbose)
+ warn(gettext("Taking \"%s\" snapshot for %s.\n"),
+ snap_initial, inst->sc_fmri);
+ r = take_snap(imp_inst, snap_initial);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ goto connaborted;
+
+ case ENOSPC:
+ case -1:
+ lcbdata->sc_err = r;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ case ECANCELED:
+ warn(gettext("%s changed unexpectedly "
+ "(instance %s deleted).\n"),
+ inst->sc_parent->sc_fmri, inst->sc_name);
+ lcbdata->sc_err = r;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ case EPERM:
+ warn(emsg_snap_perm, snap_initial, inst->sc_fmri);
+ lcbdata->sc_err = r;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ default:
+ bad_error("take_snap", r);
+ }
+ }
+
+lionly:
+ /* transfer snapshot from temporary instance */
+ if (g_verbose)
+ warn(gettext("Taking \"%s\" snapshot for %s.\n"),
+ snap_lastimport, inst->sc_fmri);
+ if (_scf_snapshot_attach(imp_tlisnap, imp_lisnap) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ goto connaborted;
+
+ case SCF_ERROR_NO_RESOURCES:
+ r = stash_scferror(lcbdata);
+ goto deltemp;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ warn(gettext("Could not take \"%s\" snapshot for %s "
+ "(permission denied).\n"), snap_lastimport,
+ inst->sc_fmri);
+ r = stash_scferror(lcbdata);
+ goto deltemp;
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ default:
+ bad_error("_scf_snapshot_attach", scf_error());
+ }
+ }
+
+ inst->sc_import_state = IMPORT_COMPLETE;
+
+ r = UU_WALK_NEXT;
+
+deltemp:
+ /* delete temporary instance */
+ if (scf_instance_delete(imp_tinst) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ break;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ goto connaborted;
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("scf_instance_delete", scf_error());
+ }
+ }
+
+ return (r);
+
+connaborted:
+ warn(gettext("Could not delete svc:/%s:%s "
+ "(repository connection broken).\n"), imp_tsname, inst->sc_name);
+ lcbdata->sc_err = ECONNABORTED;
+ return (UU_WALK_ERROR);
+}
+
+/*
+ * If the service is missing, create it, import its properties, and import the
+ * instances. Since the service is brand new, it should be empty, and if we
+ * run into any existing entities (SCF_ERROR_EXISTS), abort.
+ *
+ * If the service exists, we want to upgrade its properties and import the
+ * instances. Upgrade requires a last-import snapshot, though, and those are
+ * children of instances, so first we'll have to go through the instances
+ * looking for a last-import snapshot. If we don't find one then we'll just
+ * override-import the service properties (but don't delete existing
+ * properties: another service might have declared us as a dependent). Before
+ * we change anything, though, we want to take the previous snapshots. We
+ * also give lscf_instance_import() a leg up on taking last-import snapshots
+ * by importing the manifest's service properties into a temporary service.
+ *
+ * On success, returns UU_WALK_NEXT. On failure, returns UU_WALK_ERROR and
+ * sets lcbdata->sc_err to
+ * ECONNABORTED - repository connection broken
+ * ENOMEM - out of memory
+ * ENOSPC - svc.configd is out of resources
+ * EPERM - couldn't create temporary service (error printed)
+ * - couldn't import into temp service (error printed)
+ * - couldn't create service (error printed)
+ * - couldn't import dependent (error printed)
+ * - couldn't take snapshot (error printed)
+ * - couldn't create instance (error printed)
+ * - couldn't create, modify, or delete pg (error printed)
+ * - couldn't create, modify, or delete dependent (error printed)
+ * - couldn't import instance (error printed)
+ * EROFS - couldn't create temporary service (repository read-only)
+ * - couldn't import into temporary service (repository read-only)
+ * - couldn't create service (repository read-only)
+ * - couldn't import dependent (repository read-only)
+ * - couldn't create instance (repository read-only)
+ * - couldn't create, modify, or delete pg or dependent
+ * - couldn't import instance (repository read-only)
+ * EACCES - couldn't create temporary service (backend access denied)
+ * - couldn't import into temporary service (backend access denied)
+ * - couldn't create service (backend access denied)
+ * - couldn't import dependent (backend access denied)
+ * - couldn't create instance (backend access denied)
+ * - couldn't create, modify, or delete pg or dependent
+ * - couldn't import instance (backend access denied)
+ * EINVAL - service name is invalid (error printed)
+ * - service name is too long (error printed)
+ * - s has invalid pgroup (error printed)
+ * - s has invalid dependent (error printed)
+ * - instance name is invalid (error printed)
+ * - instance entity_t is invalid (error printed)
+ * EEXIST - couldn't create temporary service (already exists) (error printed)
+ * - couldn't import dependent (dependency pg already exists) (printed)
+ * - dependency collision in dependent service (error printed)
+ * EBUSY - temporary service deleted (error printed)
+ * - property group added to temporary service (error printed)
+ * - new property group changed or was deleted (error printed)
+ * - service was added unexpectedly (error printed)
+ * - service was deleted unexpectedly (error printed)
+ * - property group added to new service (error printed)
+ * - instance added unexpectedly (error printed)
+ * - instance deleted unexpectedly (error printed)
+ * - dependent service deleted unexpectedly (error printed)
+ * - pg was added, changed, or deleted (error printed)
+ * - dependent pg changed (error printed)
+ * - temporary instance added, changed, or deleted (error printed)
+ * EBADF - a last-import snapshot is corrupt (error printed)
+ * - the service is corrupt (error printed)
+ * - a dependent is corrupt (error printed)
+ * - an instance is corrupt (error printed)
+ * - an instance has a corrupt last-import snapshot (error printed)
+ * -1 - unknown libscf error (error printed)
+ */
+static int
+lscf_service_import(void *v, void *pvt)
+{
+ entity_t *s = v;
+ scf_callback_t cbdata;
+ scf_callback_t *lcbdata = pvt;
+ scf_scope_t *scope = lcbdata->sc_parent;
+ entity_t *inst, linst;
+ int r;
+ int fresh = 0;
+ scf_snaplevel_t *running;
+ int have_ge;
+
+ const char * const ts_deleted = gettext("Temporary service svc:/%s "
+ "was deleted unexpectedly.\n");
+ const char * const ts_pg_added = gettext("Temporary service svc:/%s "
+ "changed unexpectedly (property group added).\n");
+ const char * const s_deleted =
+ gettext("%s was deleted unexpectedly.\n");
+ const char * const i_deleted =
+ gettext("%s changed unexpectedly (instance \"%s\" deleted).\n");
+ const char * const badsnap = gettext("\"%s\" snapshot of svc:/%s:%s "
+ "is corrupt (missing service snaplevel).\n");
+
+ /* Validate the service name */
+ if (scf_scope_get_service(scope, s->sc_name, imp_svc) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ warn(gettext("\"%s\" is an invalid service name. "
+ "Cannot import.\n"), s->sc_name);
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_scope_get_service", scf_error());
+ }
+ }
+
+ /* create temporary service */
+ r = snprintf(imp_tsname, max_scf_name_len + 1, "TEMP/%s", s->sc_name);
+ if (r < 0)
+ bad_error("snprintf", errno);
+ if (r > max_scf_name_len) {
+ warn(gettext(
+ "Service name \"%s\" is too long. Cannot import.\n"),
+ s->sc_name);
+ lcbdata->sc_err = EINVAL;
+ return (UU_WALK_ERROR);
+ }
+
+ if (scf_scope_add_service(imp_scope, imp_tsname, imp_tsvc) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_NO_RESOURCES:
+ case SCF_ERROR_BACKEND_READONLY:
+ case SCF_ERROR_BACKEND_ACCESS:
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_EXISTS:
+ warn(gettext(
+ "Temporary service \"%s\" must be deleted before "
+ "this manifest can be imported.\n"), imp_tsname);
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ warn(gettext("Could not create temporary service "
+ "\"%s\" (permission denied).\n"), imp_tsname);
+ return (stash_scferror(lcbdata));
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_scope_add_service", scf_error());
+ }
+ }
+
+ r = snprintf(imp_str, imp_str_sz, "svc:/%s", imp_tsname);
+ if (r < 0)
+ bad_error("snprintf", errno);
+
+ cbdata.sc_handle = lcbdata->sc_handle;
+ cbdata.sc_parent = imp_tsvc;
+ cbdata.sc_service = 1;
+ cbdata.sc_source_fmri = s->sc_fmri;
+ cbdata.sc_target_fmri = imp_str;
+
+ if (uu_list_walk(s->sc_pgroups, entity_pgroup_import, &cbdata,
+ UU_DEFAULT) != 0) {
+ if (uu_error() != UU_ERROR_CALLBACK_FAILED)
+ bad_error("uu_list_walk", uu_error());
+
+ lcbdata->sc_err = cbdata.sc_err;
+ switch (cbdata.sc_err) {
+ case ECONNABORTED:
+ goto connaborted;
+
+ case ECANCELED:
+ warn(ts_deleted, imp_tsname);
+ lcbdata->sc_err = EBUSY;
+ return (UU_WALK_ERROR);
+
+ case EEXIST:
+ warn(ts_pg_added, imp_tsname);
+ lcbdata->sc_err = EBUSY;
+ return (UU_WALK_ERROR);
+ }
+
+ r = UU_WALK_ERROR;
+ goto deltemp;
+ }
+
+ if (uu_list_walk(s->sc_dependents, entity_pgroup_import, &cbdata,
+ UU_DEFAULT) != 0) {
+ if (uu_error() != UU_ERROR_CALLBACK_FAILED)
+ bad_error("uu_list_walk", uu_error());
+
+ lcbdata->sc_err = cbdata.sc_err;
+ switch (cbdata.sc_err) {
+ case ECONNABORTED:
+ goto connaborted;
+
+ case ECANCELED:
+ warn(ts_deleted, imp_tsname);
+ lcbdata->sc_err = EBUSY;
+ return (UU_WALK_ERROR);
+
+ case EEXIST:
+ warn(ts_pg_added, imp_tsname);
+ lcbdata->sc_err = EBUSY;
+ return (UU_WALK_ERROR);
+ }
+
+ r = UU_WALK_ERROR;
+ goto deltemp;
+ }
+
+ if (scf_scope_get_service(scope, s->sc_name, imp_svc) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ goto connaborted;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_scope_get_service", scf_error());
+ }
+
+ if (scf_scope_add_service(scope, s->sc_name, imp_svc) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ goto connaborted;
+
+ case SCF_ERROR_NO_RESOURCES:
+ case SCF_ERROR_BACKEND_READONLY:
+ case SCF_ERROR_BACKEND_ACCESS:
+ r = stash_scferror(lcbdata);
+ goto deltemp;
+
+ case SCF_ERROR_EXISTS:
+ warn(gettext("Scope \"%s\" changed unexpectedly"
+ " (service \"%s\" added).\n"),
+ SCF_SCOPE_LOCAL, s->sc_name);
+ lcbdata->sc_err = EBUSY;
+ goto deltemp;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ warn(gettext("Could not create service \"%s\" "
+ "(permission denied).\n"), s->sc_name);
+ r = stash_scferror(lcbdata);
+ goto deltemp;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_scope_add_service", scf_error());
+ }
+ }
+
+ s->sc_import_state = IMPORT_PROP_BEGUN;
+
+ /* import service properties */
+ cbdata.sc_handle = lcbdata->sc_handle;
+ cbdata.sc_parent = imp_svc;
+ cbdata.sc_service = 1;
+ cbdata.sc_flags = lcbdata->sc_flags;
+ cbdata.sc_source_fmri = s->sc_fmri;
+ cbdata.sc_target_fmri = s->sc_fmri;
+
+ if (uu_list_walk(s->sc_pgroups, entity_pgroup_import,
+ &cbdata, UU_DEFAULT) != 0) {
+ if (uu_error() != UU_ERROR_CALLBACK_FAILED)
+ bad_error("uu_list_walk", uu_error());
+
+ lcbdata->sc_err = cbdata.sc_err;
+ switch (cbdata.sc_err) {
+ case ECONNABORTED:
+ goto connaborted;
+
+ case ECANCELED:
+ warn(s_deleted, s->sc_fmri);
+ lcbdata->sc_err = EBUSY;
+ return (UU_WALK_ERROR);
+
+ case EEXIST:
+ warn(gettext("%s changed unexpectedly "
+ "(property group added).\n"), s->sc_fmri);
+ lcbdata->sc_err = EBUSY;
+ return (UU_WALK_ERROR);
+
+ case EINVAL:
+ /* caught above */
+ bad_error("entity_pgroup_import",
+ cbdata.sc_err);
+ }
+
+ r = UU_WALK_ERROR;
+ goto deltemp;
+ }
+
+ cbdata.sc_trans = NULL;
+ if (uu_list_walk(s->sc_dependents, lscf_dependent_import,
+ &cbdata, UU_DEFAULT) != 0) {
+ if (uu_error() != UU_ERROR_CALLBACK_FAILED)
+ bad_error("uu_list_walk", uu_error());
+
+ lcbdata->sc_err = cbdata.sc_err;
+ if (cbdata.sc_err == ECONNABORTED)
+ goto connaborted;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+ }
+
+ s->sc_import_state = IMPORT_PROP_DONE;
+
+ /*
+ * This is a new service, so we can't take previous snapshots
+ * or upgrade service properties.
+ */
+ fresh = 1;
+ goto instances;
+ }
+
+ /* Clear sc_seen for the instances. */
+ if (uu_list_walk(s->sc_u.sc_service.sc_service_instances, clear_int,
+ (void *)offsetof(entity_t, sc_seen), UU_DEFAULT) != 0)
+ bad_error("uu_list_walk", uu_error());
+
+ /*
+ * Take previous snapshots for all instances. Even for ones not
+ * mentioned in the bundle, since we might change their service
+ * properties.
+ */
+ if (scf_iter_service_instances(imp_iter, imp_svc) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ goto connaborted;
+
+ case SCF_ERROR_DELETED:
+ warn(s_deleted, s->sc_fmri);
+ lcbdata->sc_err = EBUSY;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_iter_service_instances", scf_error());
+ }
+ }
+
+ for (;;) {
+ r = scf_iter_next_instance(imp_iter, imp_inst);
+ if (r == 0)
+ break;
+ if (r != 1) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ warn(s_deleted, s->sc_fmri);
+ lcbdata->sc_err = EBUSY;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ goto connaborted;
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_iter_next_instance",
+ scf_error());
+ }
+ }
+
+ if (scf_instance_get_name(imp_inst, imp_str, imp_str_sz) < 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ continue;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ goto connaborted;
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("scf_instance_get_name", scf_error());
+ }
+ }
+
+ if (g_verbose)
+ warn(gettext(
+ "Taking \"%s\" snapshot for svc:/%s:%s.\n"),
+ snap_previous, s->sc_name, imp_str);
+
+ r = take_snap(imp_inst, snap_previous);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ continue;
+
+ case ECONNABORTED:
+ goto connaborted;
+
+ case EPERM:
+ warn(gettext("Could not take \"%s\" snapshot of "
+ "svc:/%s:%s (permission denied).\n"),
+ snap_previous, s->sc_name, imp_str);
+ lcbdata->sc_err = r;
+ return (UU_WALK_ERROR);
+
+ case ENOSPC:
+ case -1:
+ lcbdata->sc_err = r;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ default:
+ bad_error("take_snap", r);
+ }
+
+ linst.sc_name = imp_str;
+ inst = uu_list_find(s->sc_u.sc_service.sc_service_instances,
+ &linst, NULL, NULL);
+ if (inst != NULL) {
+ inst->sc_import_state = IMPORT_PREVIOUS;
+ inst->sc_seen = 1;
+ }
+ }
+
+ /*
+ * Create the new instances and take previous snapshots of
+ * them. This is not necessary, but it maximizes data preservation.
+ */
+ for (inst = uu_list_first(s->sc_u.sc_service.sc_service_instances);
+ inst != NULL;
+ inst = uu_list_next(s->sc_u.sc_service.sc_service_instances,
+ inst)) {
+ if (inst->sc_seen)
+ continue;
+
+ if (scf_service_add_instance(imp_svc, inst->sc_name,
+ imp_inst) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ goto connaborted;
+
+ case SCF_ERROR_BACKEND_READONLY:
+ case SCF_ERROR_BACKEND_ACCESS:
+ case SCF_ERROR_NO_RESOURCES:
+ r = stash_scferror(lcbdata);
+ goto deltemp;
+
+ case SCF_ERROR_EXISTS:
+ warn(gettext("%s changed unexpectedly "
+ "(instance \"%s\" added).\n"), s->sc_fmri,
+ inst->sc_name);
+ lcbdata->sc_err = EBUSY;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ warn(gettext("Service \"%s\" has instance with "
+ "invalid name \"%s\".\n"), s->sc_name,
+ inst->sc_name);
+ r = stash_scferror(lcbdata);
+ goto deltemp;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ warn(gettext("Could not create instance \"%s\" "
+ "in %s (permission denied).\n"),
+ inst->sc_name, s->sc_fmri);
+ r = stash_scferror(lcbdata);
+ goto deltemp;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_service_add_instance",
+ scf_error());
+ }
+ }
+
+ if (g_verbose)
+ warn(gettext("Taking \"%s\" snapshot for "
+ "new service %s.\n"), snap_previous, inst->sc_fmri);
+ r = take_snap(imp_inst, snap_previous);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECANCELED:
+ warn(i_deleted, s->sc_fmri, inst->sc_name);
+ lcbdata->sc_err = EBUSY;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ case ECONNABORTED:
+ goto connaborted;
+
+ case EPERM:
+ warn(emsg_snap_perm, snap_previous, inst->sc_fmri);
+ lcbdata->sc_err = r;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ case ENOSPC:
+ case -1:
+ lcbdata->sc_err = r;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ default:
+ bad_error("take_snap", r);
+ }
+ }
+
+ s->sc_import_state = IMPORT_PREVIOUS;
+
+ /*
+ * Upgrade service properties, if we can find a last-import snapshot.
+ * Any will do because we don't support different service properties
+ * in different manifests, so all snaplevels of the service in all of
+ * the last-import snapshots of the instances should be the same.
+ */
+ if (scf_iter_service_instances(imp_iter, imp_svc) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ goto connaborted;
+
+ case SCF_ERROR_DELETED:
+ warn(s_deleted, s->sc_fmri);
+ lcbdata->sc_err = EBUSY;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_iter_service_instances", scf_error());
+ }
+ }
+
+ have_ge = 0;
+ li_only = 0;
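+
+ /*
+ * have_ge is set if an instance with no last-import snapshot has a
+ * general/enabled property. If no instance has a last-import
+ * snapshot but have_ge is set, we only take last-import snapshots
+ * (li_only) and skip the refresh.
+ */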
+
+ for (;;) {
+ r = scf_iter_next_instance(imp_iter, imp_inst);
+ if (r == -1) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ warn(s_deleted, s->sc_fmri);
+ lcbdata->sc_err = EBUSY;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ goto connaborted;
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_iter_next_instance",
+ scf_error());
+ }
+ }
+
+ if (r == 0) {
+ /*
+ * Didn't find any last-import snapshots. Override-
+ * import the properties. Unless one of the instances
+ * has a general/enabled property, in which case we're
+ * probably running a last-import-capable svccfg for
+ * the first time, and we should only take the
+ * last-import snapshot.
+ */
+ if (have_ge) {
+ li_only = 1;
+ no_refresh = 1;
+ break;
+ }
+
+ s->sc_import_state = IMPORT_PROP_BEGUN;
+
+ cbdata.sc_handle = g_hndl;
+ cbdata.sc_parent = imp_svc;
+ cbdata.sc_service = 1;
+ cbdata.sc_flags = SCI_FORCE;
+ cbdata.sc_source_fmri = s->sc_fmri;
+ cbdata.sc_target_fmri = s->sc_fmri;
+ if (uu_list_walk(s->sc_pgroups, entity_pgroup_import,
+ &cbdata, UU_DEFAULT) != 0) {
+ if (uu_error() != UU_ERROR_CALLBACK_FAILED)
+ bad_error("uu_list_walk", uu_error());
+ lcbdata->sc_err = cbdata.sc_err;
+ switch (cbdata.sc_err) {
+ case ECONNABORTED:
+ goto connaborted;
+
+ case ECANCELED:
+ warn(s_deleted, s->sc_fmri);
+ lcbdata->sc_err = EBUSY;
+ break;
+
+ case EINVAL: /* caught above */
+ case EEXIST:
+ bad_error("entity_pgroup_import",
+ cbdata.sc_err);
+ }
+
+ r = UU_WALK_ERROR;
+ goto deltemp;
+ }
+
+ cbdata.sc_trans = NULL;
+ if (uu_list_walk(s->sc_dependents,
+ lscf_dependent_import, &cbdata, UU_DEFAULT) != 0) {
+ if (uu_error() != UU_ERROR_CALLBACK_FAILED)
+ bad_error("uu_list_walk", uu_error());
+ lcbdata->sc_err = cbdata.sc_err;
+ if (cbdata.sc_err == ECONNABORTED)
+ goto connaborted;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+ }
+ break;
+ }
+
+ if (scf_instance_get_snapshot(imp_inst, snap_lastimport,
+ imp_snap) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ continue;
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ goto connaborted;
+
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_instance_get_snapshot",
+ scf_error());
+ }
+
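+ /* We already found an instance with general/enabled; skip the check. */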
+ if (have_ge)
+ continue;
+
+ /*
+ * Check for a general/enabled property. This is how
+ * we tell whether to import if there turn out to be
+ * no last-import snapshots.
+ */
+ if (scf_instance_get_pg(imp_inst, SCF_PG_GENERAL,
+ imp_pg) == 0) {
+ if (scf_pg_get_property(imp_pg,
+ SCF_PROPERTY_ENABLED, imp_prop) == 0) {
+ have_ge = 1;
+ } else {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_NOT_FOUND:
+ continue;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_CONNECTION_BROKEN:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_pg_get_property",
+ scf_error());
+ }
+ }
+ } else {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_NOT_FOUND:
+ continue;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ goto connaborted;
+
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ default:
+ bad_error("scf_instance_get_pg",
+ scf_error());
+ }
+ }
+ continue;
+ }
+
+ /* find service snaplevel */
+ r = get_snaplevel(imp_snap, 1, imp_snpl);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ goto connaborted;
+
+ case ECANCELED:
+ continue;
+
+ case ENOENT:
+ if (scf_instance_get_name(imp_inst, imp_str,
+ imp_str_sz) < 0)
+ (void) strcpy(imp_str, "?");
+ warn(badsnap, snap_lastimport, s->sc_name, imp_str);
+ lcbdata->sc_err = EBADF;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ default:
+ bad_error("get_snaplevel", r);
+ }
+
+ if (scf_instance_get_snapshot(imp_inst, snap_running,
+ imp_rsnap) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ continue;
+
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ goto connaborted;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_NOT_SET:
+ default:
+ bad_error("scf_instance_get_snapshot",
+ scf_error());
+ }
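+ /* This instance has no "running" snapshot. */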
+ running = NULL;
+ } else {
+ r = get_snaplevel(imp_rsnap, 1, imp_rsnpl);
+ switch (r) {
+ case 0:
+ running = imp_rsnpl;
+ break;
+
+ case ECONNABORTED:
+ goto connaborted;
+
+ case ECANCELED:
+ continue;
+
+ case ENOENT:
+ if (scf_instance_get_name(imp_inst, imp_str,
+ imp_str_sz) < 0)
+ (void) strcpy(imp_str, "?");
+ warn(badsnap, snap_running, s->sc_name,
+ imp_str);
+ lcbdata->sc_err = EBADF;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+
+ default:
+ bad_error("get_snaplevel", r);
+ }
+ }
+
+ if (g_verbose) {
+ if (scf_instance_get_name(imp_inst, imp_str,
+ imp_str_sz) < 0)
+ (void) strcpy(imp_str, "?");
+ warn(gettext("Upgrading properties of %s according to "
+ "instance \"%s\".\n"), s->sc_fmri, imp_str);
+ }
+
+ /* upgrade service properties */
+ r = upgrade_props(imp_svc, running, imp_snpl, s);
+ if (r == 0)
+ break;
+
+ switch (r) {
+ case ECONNABORTED:
+ goto connaborted;
+
+ case ECANCELED:
+ warn(s_deleted, s->sc_fmri);
+ lcbdata->sc_err = EBUSY;
+ break;
+
+ case ENODEV:
+ if (scf_instance_get_name(imp_inst, imp_str,
+ imp_str_sz) < 0)
+ (void) strcpy(imp_str, "?");
+ warn(i_deleted, s->sc_fmri, imp_str);
+ lcbdata->sc_err = EBUSY;
+ break;
+
+ default:
+ lcbdata->sc_err = r;
+ }
+
+ r = UU_WALK_ERROR;
+ goto deltemp;
+ }
+
+ s->sc_import_state = IMPORT_PROP_DONE;
+
+instances:
+ /* import instances */
+ cbdata.sc_handle = lcbdata->sc_handle;
+ cbdata.sc_parent = imp_svc;
+ cbdata.sc_service = 1;
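+ /* SCI_FRESH: the service was created by this import. */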
+ cbdata.sc_flags = lcbdata->sc_flags | (fresh ? SCI_FRESH : 0);
+ cbdata.sc_general = NULL;
+
+ if (uu_list_walk(s->sc_u.sc_service.sc_service_instances,
+ lscf_instance_import, &cbdata, UU_DEFAULT) != 0) {
+ if (uu_error() != UU_ERROR_CALLBACK_FAILED)
+ bad_error("uu_list_walk", uu_error());
+
+ lcbdata->sc_err = cbdata.sc_err;
+ if (cbdata.sc_err == ECONNABORTED)
+ goto connaborted;
+ r = UU_WALK_ERROR;
+ goto deltemp;
+ }
+
+ s->sc_import_state = IMPORT_COMPLETE;
+ r = UU_WALK_NEXT;
+
+deltemp:
+ /* delete temporary service */
+ if (scf_service_delete(imp_tsvc) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ break;
+
+ case SCF_ERROR_CONNECTION_BROKEN:
+ goto connaborted;
+
+ case SCF_ERROR_EXISTS:
+ warn(gettext(
+ "Could not delete svc:/%s (instances exist).\n"),
+ imp_tsname);
+ break;
+
+ case SCF_ERROR_NOT_SET:
+ case SCF_ERROR_NOT_BOUND:
+ default:
+ bad_error("scf_service_delete", scf_error());
+ }
+ }
+
+ return (r);
+
+connaborted:
+ warn(gettext("Could not delete svc:/%s "
+ "(repository connection broken).\n"), imp_tsname);
+ lcbdata->sc_err = ECONNABORTED;
+ return (UU_WALK_ERROR);
+}
+
+static const char *
+import_progress(int st)
+{
+ switch (st) {
+ case 0:
+ return (gettext("not reached."));
+
+ case IMPORT_PREVIOUS:
+ return (gettext("previous snapshot taken."));
+
+ case IMPORT_PROP_BEGUN:
+ return (gettext("some properties imported."));
+
+ case IMPORT_PROP_DONE:
+ return (gettext("properties imported."));
+
+ case IMPORT_COMPLETE:
+ return (gettext("imported."));
+
+ case IMPORT_REFRESHED:
+ return (gettext("refresh requested."));
+
+ default:
+#ifndef NDEBUG
+ (void) fprintf(stderr, "%s:%d: Unknown entity state %d.\n",
+ __FILE__, __LINE__, st);
+#endif
+ abort();
+ /* NOTREACHED */
+ }
+}
+
+/*
+ * Returns
+ * 0 - success
+ * - fmri wasn't found (error printed)
+ * - entity was deleted (error printed)
+ * - backend denied access (error printed)
+ * ENOMEM - out of memory (error printed)
+ * ECONNABORTED - repository connection broken (error printed)
+ * EPERM - permission denied (error printed)
+ * -1 - unknown libscf error (error printed)
+ */
+static int
+imp_refresh_fmri(const char *fmri, const char *name, const char *d_fmri)
+{
+ scf_error_t serr;
+ void *ent;
+ int issvc;
+ int r;
+
+ const char *deleted = gettext("Could not refresh %s (deleted).\n");
+ const char *dpt_deleted = gettext("Could not refresh %s "
+ "(dependent \"%s\" of %s) (deleted).\n");
+
+ serr = fmri_to_entity(g_hndl, fmri, &ent, &issvc);
+ switch (serr) {
+ case SCF_ERROR_NONE:
+ break;
+
+ case SCF_ERROR_NO_MEMORY:
+ if (name == NULL)
+ warn(gettext("Could not refresh %s (out of memory).\n"),
+ fmri);
+ else
+ warn(gettext("Could not refresh %s "
+ "(dependent \"%s\" of %s) (out of memory).\n"),
+ fmri, name, d_fmri);
+ return (ENOMEM);
+
+ case SCF_ERROR_NOT_FOUND:
+ if (name == NULL)
+ warn(deleted, fmri);
+ else
+ warn(dpt_deleted, fmri, name, d_fmri);
+ return (0);
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ default:
+ bad_error("fmri_to_entity", serr);
+ }
+
+ r = refresh_entity(issvc, ent, fmri, imp_inst, imp_iter, imp_str);
+ switch (r) {
+ case 0:
+ break;
+
+ case ECONNABORTED:
+ if (name != NULL)
+ warn(gettext("Could not refresh %s "
+ "(dependent \"%s\" of %s) "
+ "(repository connection broken).\n"), fmri, name,
+ d_fmri);
+ return (r);
+
+ case ECANCELED:
+ if (name == NULL)
+ warn(deleted, fmri);
+ else
+ warn(dpt_deleted, fmri, name, d_fmri);
+ return (0);
+
+ case EACCES:
+ if (!g_verbose)
+ return (0);
+ if (name == NULL)
+ warn(gettext("Could not refresh %s "
+ "(backend access denied).\n"), fmri);
+ else
+ warn(gettext("Could not refresh %s "
+ "(dependent \"%s\" of %s) "
+ "(backend access denied).\n"), fmri, name, d_fmri);
+ return (0);
+
+ case EPERM:
+ if (name == NULL)
+ warn(gettext("Could not refresh %s "
+ "(permission denied).\n"), fmri);
+ else
+ warn(gettext("Could not refresh %s "
+ "(dependent \"%s\" of %s) "
+ "(permission denied).\n"), fmri, name, d_fmri);
+ return (r);
+
+ case -1:
+ scfwarn();
+ return (r);
+
+ default:
+ bad_error("refresh_entity", r);
+ }
+
+ if (issvc)
+ scf_service_destroy(ent);
+ else
+ scf_instance_destroy(ent);
+
+ return (0);
+}
+
+int
+lscf_bundle_import(bundle_t *bndl, const char *filename, uint_t flags)
+{
+ scf_callback_t cbdata;
+ int result = 0;
+ entity_t *svc, *inst;
+ uu_list_t *insts;
+ int r;
+
+ const char * const emsg_nomem = gettext("Out of memory.\n");
+ const char * const emsg_nores =
+ gettext("svc.configd is out of resources.\n");
+
+ lscf_prep_hndl();
+
+ imp_str_sz = ((max_scf_name_len > max_scf_fmri_len) ?
+ max_scf_name_len : max_scf_fmri_len) + 1;
+
+ if ((imp_scope = scf_scope_create(g_hndl)) == NULL ||
+ (imp_svc = scf_service_create(g_hndl)) == NULL ||
+ (imp_tsvc = scf_service_create(g_hndl)) == NULL ||
+ (imp_inst = scf_instance_create(g_hndl)) == NULL ||
+ (imp_tinst = scf_instance_create(g_hndl)) == NULL ||
+ (imp_snap = scf_snapshot_create(g_hndl)) == NULL ||
+ (imp_lisnap = scf_snapshot_create(g_hndl)) == NULL ||
+ (imp_tlisnap = scf_snapshot_create(g_hndl)) == NULL ||
+ (imp_rsnap = scf_snapshot_create(g_hndl)) == NULL ||
+ (imp_snpl = scf_snaplevel_create(g_hndl)) == NULL ||
+ (imp_rsnpl = scf_snaplevel_create(g_hndl)) == NULL ||
+ (imp_pg = scf_pg_create(g_hndl)) == NULL ||
+ (imp_pg2 = scf_pg_create(g_hndl)) == NULL ||
+ (imp_prop = scf_property_create(g_hndl)) == NULL ||
+ (imp_iter = scf_iter_create(g_hndl)) == NULL ||
+ (imp_rpg_iter = scf_iter_create(g_hndl)) == NULL ||
+ (imp_up_iter = scf_iter_create(g_hndl)) == NULL ||
+ (imp_tx = scf_transaction_create(g_hndl)) == NULL ||
+ (imp_tx2 = scf_transaction_create(g_hndl)) == NULL ||
+ (imp_str = malloc(imp_str_sz)) == NULL ||
+ (imp_tsname = malloc(max_scf_name_len + 1)) == NULL ||
+ (imp_fe1 = malloc(max_scf_fmri_len + 1)) == NULL ||
+ (imp_fe2 = malloc(max_scf_fmri_len + 1)) == NULL ||
+ (ud_inst = scf_instance_create(g_hndl)) == NULL ||
+ (ud_snpl = scf_snaplevel_create(g_hndl)) == NULL ||
+ (ud_pg = scf_pg_create(g_hndl)) == NULL ||
+ (ud_cur_depts_pg = scf_pg_create(g_hndl)) == NULL ||
+ (ud_prop = scf_property_create(g_hndl)) == NULL ||
+ (ud_dpt_prop = scf_property_create(g_hndl)) == NULL ||
+ (ud_val = scf_value_create(g_hndl)) == NULL ||
+ (ud_iter = scf_iter_create(g_hndl)) == NULL ||
+ (ud_iter2 = scf_iter_create(g_hndl)) == NULL ||
+ (ud_ctarg = malloc(max_scf_value_len + 1)) == NULL ||
+ (ud_oldtarg = malloc(max_scf_value_len + 1)) == NULL ||
+ (ud_name = malloc(max_scf_name_len + 1)) == NULL) {
+ if (scf_error() == SCF_ERROR_NO_RESOURCES)
+ warn(emsg_nores);
+ else
+ warn(emsg_nomem);
+ result = -1;
+ goto out;
+ }
+
+ r = load_init();
+ switch (r) {
+ case 0:
+ break;
+
+ case ENOMEM:
+ warn(emsg_nomem);
+ result = -1;
+ goto out;
+
+ default:
+ bad_error("load_init", r);
+ }
+
+ if (scf_handle_get_scope(g_hndl, SCF_SCOPE_LOCAL, imp_scope) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_CONNECTION_BROKEN:
+ warn(gettext("Repository connection broken.\n"));
+ repository_teardown();
+ result = -1;
+ goto out;
+
+ case SCF_ERROR_NOT_FOUND:
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_NOT_BOUND:
+ case SCF_ERROR_HANDLE_MISMATCH:
+ default:
+ bad_error("scf_handle_get_scope", scf_error());
+ }
+ }
+
+ /*
+ * Clear the sc_import_state of all services & instances so we can
+ * report how far we got if we fail.
+ */
+ for (svc = uu_list_first(bndl->sc_bundle_services);
+ svc != NULL;
+ svc = uu_list_next(bndl->sc_bundle_services, svc)) {
+ svc->sc_import_state = 0;
+
+ if (uu_list_walk(svc->sc_u.sc_service.sc_service_instances,
+ clear_int, (void *)offsetof(entity_t, sc_import_state),
+ UU_DEFAULT) != 0)
+ bad_error("uu_list_walk", uu_error());
+ }
+
+ cbdata.sc_handle = g_hndl;
+ cbdata.sc_parent = imp_scope;
+ cbdata.sc_flags = flags;
+ cbdata.sc_general = NULL;
+
+ if (uu_list_walk(bndl->sc_bundle_services, lscf_service_import,
+ &cbdata, UU_DEFAULT) == 0) {
+ /* Success. Refresh everything. */
+
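+ /* no_refresh is set when only last-import snapshots were taken. */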
+ if (flags & SCI_NOREFRESH || no_refresh) {
+ result = 0;
+ goto out;
+ }
+
+ for (svc = uu_list_first(bndl->sc_bundle_services);
+ svc != NULL;
+ svc = uu_list_next(bndl->sc_bundle_services, svc)) {
+ pgroup_t *dpt;
+
+ insts = svc->sc_u.sc_service.sc_service_instances;
+
+ for (inst = uu_list_first(insts);
+ inst != NULL;
+ inst = uu_list_next(insts, inst)) {
+ r = imp_refresh_fmri(inst->sc_fmri, NULL, NULL);
+ switch (r) {
+ case 0:
+ break;
+
+ case ENOMEM:
+ case ECONNABORTED:
+ case EPERM:
+ case -1:
+ goto progress;
+
+ default:
+ bad_error("imp_refresh_fmri", r);
+ }
+
+ inst->sc_import_state = IMPORT_REFRESHED;
+
+ for (dpt = uu_list_first(inst->sc_dependents);
+ dpt != NULL;
+ dpt = uu_list_next(inst->sc_dependents,
+ dpt))
+ if (imp_refresh_fmri(
+ dpt->sc_pgroup_fmri,
+ dpt->sc_pgroup_name,
+ inst->sc_fmri) != 0)
+ goto progress;
+ }
+
+ for (dpt = uu_list_first(svc->sc_dependents);
+ dpt != NULL;
+ dpt = uu_list_next(svc->sc_dependents, dpt))
+ if (imp_refresh_fmri(dpt->sc_pgroup_fmri,
+ dpt->sc_pgroup_name, svc->sc_fmri) != 0)
+ goto progress;
+ }
+
+ result = 0;
+ goto out;
+ }
+
+ if (uu_error() != UU_ERROR_CALLBACK_FAILED)
+ bad_error("uu_list_walk", uu_error());
+
+printerr:
+ /* If the error hasn't been printed yet, do so here. */
+ switch (cbdata.sc_err) {
+ case ECONNABORTED:
+ warn(gettext("Repository connection broken.\n"));
+ break;
+
+ case ENOMEM:
+ warn(emsg_nomem);
+ break;
+
+ case ENOSPC:
+ warn(emsg_nores);
+ break;
+
+ case EROFS:
+ warn(gettext("Repository is read-only.\n"));
+ break;
+
+ case EACCES:
+ warn(gettext("Repository backend denied access.\n"));
+ break;
+
+ case EPERM:
+ case EINVAL:
+ case EEXIST:
+ case EBUSY:
+ case EBADF:
+ case -1:
+ break;
+
+ default:
+ bad_error("lscf_service_import", cbdata.sc_err);
+ }
+
+progress:
+ warn(gettext("Import of %s failed. Progress:\n"), filename);
+
+ for (svc = uu_list_first(bndl->sc_bundle_services);
+ svc != NULL;
+ svc = uu_list_next(bndl->sc_bundle_services, svc)) {
+ insts = svc->sc_u.sc_service.sc_service_instances;
+
+ warn(gettext(" Service \"%s\": %s\n"), svc->sc_name,
+ import_progress(svc->sc_import_state));
+
+ for (inst = uu_list_first(insts);
+ inst != NULL;
+ inst = uu_list_next(insts, inst))
+ warn(gettext(" Instance \"%s\": %s\n"),
+ inst->sc_name,
+ import_progress(inst->sc_import_state));
+ }
+
+ if (cbdata.sc_err == ECONNABORTED)
+ repository_teardown();
+
+ result = -1;
+
+out:
+ load_fini();
+
+ free(ud_ctarg);
+ free(ud_oldtarg);
+ free(ud_name);
+ ud_ctarg = ud_oldtarg = ud_name = NULL;
+
+ scf_iter_destroy(ud_iter);
+ scf_iter_destroy(ud_iter2);
+ ud_iter = ud_iter2 = NULL;
+ scf_value_destroy(ud_val);
+ ud_val = NULL;
+ scf_property_destroy(ud_prop);
+ scf_property_destroy(ud_dpt_prop);
+ ud_prop = ud_dpt_prop = NULL;
+ scf_pg_destroy(ud_pg);
+ scf_pg_destroy(ud_cur_depts_pg);
+ ud_pg = ud_cur_depts_pg = NULL;
+ scf_snaplevel_destroy(ud_snpl);
+ ud_snpl = NULL;
+ scf_instance_destroy(ud_inst);
+ ud_inst = NULL;
+
+ free(imp_str);
+ free(imp_tsname);
+ free(imp_fe1);
+ free(imp_fe2);
+ imp_str = imp_tsname = imp_fe1 = imp_fe2 = NULL;
+
+ scf_transaction_destroy(imp_tx);
+ scf_transaction_destroy(imp_tx2);
+ imp_tx = imp_tx2 = NULL;
+ scf_iter_destroy(imp_iter);
+ scf_iter_destroy(imp_rpg_iter);
+ scf_iter_destroy(imp_up_iter);
+ imp_iter = imp_rpg_iter = imp_up_iter = NULL;
+ scf_property_destroy(imp_prop);
+ imp_prop = NULL;
+ scf_pg_destroy(imp_pg);
+ scf_pg_destroy(imp_pg2);
+ imp_pg = imp_pg2 = NULL;
+ scf_snaplevel_destroy(imp_snpl);
+ scf_snaplevel_destroy(imp_rsnpl);
+ imp_snpl = imp_rsnpl = NULL;
+ scf_snapshot_destroy(imp_snap);
+ scf_snapshot_destroy(imp_lisnap);
+ scf_snapshot_destroy(imp_tlisnap);
+ scf_snapshot_destroy(imp_rsnap);
+ imp_snap = imp_lisnap = imp_tlisnap = imp_rsnap = NULL;
+ scf_instance_destroy(imp_inst);
+ scf_instance_destroy(imp_tinst);
+ imp_inst = imp_tinst = NULL;
+ scf_service_destroy(imp_svc);
+ scf_service_destroy(imp_tsvc);
+ imp_svc = imp_tsvc = NULL;
+ scf_scope_destroy(imp_scope);
+ imp_scope = NULL;
+
+ return (result);
+}
+
+
+/*
+ * Returns
+ * 0 - success
+ * -1 - lscf_import_instance_pgs() failed.
+ */
+int
+lscf_bundle_apply(bundle_t *bndl)
+{
+ entity_t *svc, *inst;
+ scf_scope_t *rscope;
+ scf_service_t *rsvc;
+ scf_instance_t *rinst;
+ int r;
+
+ lscf_prep_hndl();
+
+ if ((rscope = scf_scope_create(g_hndl)) == NULL ||
+ (rsvc = scf_service_create(g_hndl)) == NULL ||
+ (rinst = scf_instance_create(g_hndl)) == NULL ||
+ (imp_pg = scf_pg_create(g_hndl)) == NULL ||
+ (imp_tx = scf_transaction_create(g_hndl)) == NULL)
+ scfdie();
+
+ if (scf_handle_get_scope(g_hndl, SCF_SCOPE_LOCAL, rscope) != 0)
+ scfdie();
+
+ for (svc = uu_list_first(bndl->sc_bundle_services);
+ svc != NULL;
+ svc = uu_list_next(bndl->sc_bundle_services, svc)) {
+ if (scf_scope_get_service(rscope, svc->sc_name, rsvc) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ if (g_verbose)
+ warn(gettext("Ignoring nonexistent "
+ "service %s.\n"), svc->sc_name);
+ continue;
+
+ default:
+ scfdie();
+ }
+ }
+
+ for (inst = uu_list_first(
+ svc->sc_u.sc_service.sc_service_instances);
+ inst != NULL;
+ inst = uu_list_next(
+ svc->sc_u.sc_service.sc_service_instances, inst)) {
+ if (scf_service_get_instance(rsvc, inst->sc_name,
+ rinst) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ if (g_verbose)
+ warn(gettext("Ignoring "
+ "nonexistant instance "
+ "%s:%s.\n"),
+ inst->sc_parent->sc_name,
+ inst->sc_name);
+ continue;
+
+ default:
+ scfdie();
+ }
+ }
+
+ r = lscf_import_instance_pgs(rinst, inst->sc_fmri, inst,
+ SCI_FORCE | SCI_KEEP);
+ switch (r) {
+ case 0:
+ if (g_verbose)
+ warn(gettext("%s updated.\n"),
+ inst->sc_fmri);
+ break;
+
+ case ECONNABORTED:
+ warn(gettext("Could not update %s "
+ "(repository connection broken).\n"),
+ inst->sc_fmri);
+ goto out;
+
+ case ENOMEM:
+ warn(gettext("Could not update %s "
+ "(out of memory).\n"), inst->sc_fmri);
+ goto out;
+
+ case ENOSPC:
+ warn(gettext("Could not update %s "
+ "(repository server out of resources).\n"),
+ inst->sc_fmri);
+ goto out;
+
+ case ECANCELED:
+ warn(gettext(
+ "Could not update %s (deleted).\n"),
+ inst->sc_fmri);
+ break;
+
+ case EPERM:
+ case EINVAL:
+ case EBUSY:
+ break;
+
+ case EROFS:
+ warn(gettext("Could not update %s "
+ "(repository read-only).\n"),
+ inst->sc_fmri);
+ goto out;
+
+ case EACCES:
+ warn(gettext("Could not update %s "
+ "(backend access denied).\n"),
+ inst->sc_fmri);
+ break;
+
+ case EEXIST:
+ default:
+ bad_error("lscf_import_instance_pgs", r);
+ }
+ }
+ }
+
+out:
+ scf_transaction_destroy(imp_tx);
+ imp_tx = NULL;
+ scf_pg_destroy(imp_pg);
+ imp_pg = NULL;
+
+ scf_instance_destroy(rinst);
+ scf_service_destroy(rsvc);
+ scf_scope_destroy(rscope);
+ return (0);
+}
+
+
+/*
+ * Export. These functions create and output an XML tree of a service
+ * description from the repository. This is largely the inverse of
+ * lxml_get_bundle() in svccfg_xml.c, but with some kickers:
+ *
+ * - We must include any properties which are not represented specifically by
+ * a service manifest, e.g., properties created by an admin post-import. To
+ * do so we'll iterate through all properties and deal with each
+ * appropriately.
+ *
+ * - Children of services and instances must be in the order set by the
+ * DTD, but we iterate over the properties in undefined order. The elements
+ * are not easily (or efficiently) sortable by name. Since there's a fixed
+ * number of classes of them, however, we'll keep the classes separate and
+ * assemble them in order.
+ */
+
+/*
+ * Convenience function to handle xmlSetProp errors (and type casting).
+ */
+static void
+safe_setprop(xmlNodePtr n, const char *name, const char *val)
+{
+ if (xmlSetProp(n, (const xmlChar *)name, (const xmlChar *)val) == NULL)
+ uu_die(gettext("Could not set XML property.\n"));
+}
+
+/*
+ * Convenience function to set an XML attribute to the single value of an
+ * astring property. If the value happens to be the default, don't set the
+ * attribute. "dval" should be the default value supplied by the DTD, or
+ * NULL for no default.
+ */
+static int
+set_attr_from_prop_default(scf_property_t *prop, xmlNodePtr n,
+ const char *name, const char *dval)
+{
+ scf_value_t *val;
+ ssize_t len;
+ char *str;
+
+ val = scf_value_create(g_hndl);
+ if (val == NULL)
+ scfdie();
+
+ if (prop_get_val(prop, val) != 0) {
+ scf_value_destroy(val);
+ return (-1);
+ }
+
+ len = scf_value_get_as_string(val, NULL, 0);
+ if (len < 0)
+ scfdie();
+
+ str = safe_malloc(len + 1);
+
+ if (scf_value_get_as_string(val, str, len + 1) < 0)
+ scfdie();
+
+ scf_value_destroy(val);
+
+ if (dval == NULL || strcmp(str, dval) != 0)
+ safe_setprop(n, name, str);
+
+ free(str);
+
+ return (0);
+}
+
+/*
+ * As above, but the attribute is always set.
+ */
+static int
+set_attr_from_prop(scf_property_t *prop, xmlNodePtr n, const char *name)
+{
+ return (set_attr_from_prop_default(prop, n, name, NULL));
+}
+
+/*
+ * Dump the given document onto f, replacing double quotes with single quotes.
+ */
+static int
+write_service_bundle(xmlDocPtr doc, FILE *f)
+{
+ xmlChar *mem;
+ int sz, i;
+
+ mem = NULL;
+ xmlDocDumpFormatMemory(doc, &mem, &sz, 1);
+
+ if (mem == NULL) {
+ semerr(gettext("Could not dump XML tree.\n"));
+ return (-1);
+ }
+
+ /*
+ * Fortunately libxml produces &quot; instead of ", so we can blindly
+ * replace all " with '. Cursed libxml2! Why must you #ifdef out the
+ * &apos; code?!
+ */
+ for (i = 0; i < sz; ++i) {
+ char c = (char)mem[i];
+
+ if (c == '"')
+ (void) fputc('\'', f);
+ else if (c == '\'')
+ (void) fwrite("&apos;", sizeof ("&apos;") - 1, 1, f);
+ else
+ (void) fputc(c, f);
+ }
+
+ return (0);
+}
+
+/*
+ * Create the DOM elements in elts necessary to (generically) represent prop
+ * (i.e., a property or propval element). If the name of the property is
+ * known, it should be passed as name_arg. Otherwise, pass NULL.
+ */
+static void
+export_property(scf_property_t *prop, const char *name_arg,
+ struct pg_elts *elts)
+{
+ const char *type;
+ scf_error_t err;
+ xmlNodePtr pnode, lnode;
+ char *lnname;
+ int ret;
+
+ /* name */
+ if (name_arg != NULL) {
+ (void) strcpy(exp_str, name_arg);
+ } else {
+ if (scf_property_get_name(prop, exp_str, exp_str_sz) < 0)
+ scfdie();
+ }
+
+ /* type */
+ type = prop_to_typestr(prop);
+ if (type == NULL)
+ uu_die(gettext("Can't export property %s: unknown type.\n"),
+ exp_str);
+
+ /* Is there a single value? */
+ if (scf_property_get_value(prop, exp_val) == SCF_SUCCESS) {
+ xmlNodePtr n;
+
+ /* Single value, so use propval */
+ n = xmlNewNode(NULL, (xmlChar *)"propval");
+ if (n == NULL)
+ uu_die(emsg_create_xml);
+
+ safe_setprop(n, name_attr, exp_str);
+ safe_setprop(n, type_attr, type);
+
+ if (scf_value_get_as_string(exp_val, exp_str, exp_str_sz) < 0)
+ scfdie();
+ safe_setprop(n, value_attr, exp_str);
+
+ if (elts->propvals == NULL)
+ elts->propvals = n;
+ else
+ (void) xmlAddSibling(elts->propvals, n);
+
+ return;
+ }
+
+ err = scf_error();
+ if (err != SCF_ERROR_CONSTRAINT_VIOLATED && err != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ /* Multiple (or no) values, so use property */
+ pnode = xmlNewNode(NULL, (xmlChar *)"property");
+ if (pnode == NULL)
+ uu_die(emsg_create_xml);
+
+ safe_setprop(pnode, name_attr, exp_str);
+ safe_setprop(pnode, type_attr, type);
+
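+ /* SCF_ERROR_CONSTRAINT_VIOLATED means the property has multiple values. */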
+ if (err == SCF_ERROR_CONSTRAINT_VIOLATED) {
+ lnname = uu_msprintf("%s_list", type);
+ if (lnname == NULL)
+ uu_die(gettext("Could not create string"));
+
+ lnode = xmlNewChild(pnode, NULL, (xmlChar *)lnname, NULL);
+ if (lnode == NULL)
+ uu_die(emsg_create_xml);
+
+ uu_free(lnname);
+
+ if (scf_iter_property_values(exp_val_iter, prop) != SCF_SUCCESS)
+ scfdie();
+
+ while ((ret = scf_iter_next_value(exp_val_iter, exp_val)) ==
+ 1) {
+ xmlNodePtr vn;
+
+ vn = xmlNewChild(lnode, NULL, (xmlChar *)"value_node",
+ NULL);
+ if (vn == NULL)
+ uu_die(emsg_create_xml);
+
+ if (scf_value_get_as_string(exp_val, exp_str,
+ exp_str_sz) < 0)
+ scfdie();
+ safe_setprop(vn, value_attr, exp_str);
+ }
+ if (ret != 0)
+ scfdie();
+ }
+
+ if (elts->properties == NULL)
+ elts->properties = pnode;
+ else
+ (void) xmlAddSibling(elts->properties, pnode);
+}
+
+/*
+ * Add a property_group element for this property group to elts.
+ */
+static void
+export_pg(scf_propertygroup_t *pg, struct entity_elts *eelts)
+{
+ xmlNodePtr n;
+ struct pg_elts elts;
+ int ret;
+
+ n = xmlNewNode(NULL, (xmlChar *)"property_group");
+
+ /* name */
+ if (scf_pg_get_name(pg, exp_str, max_scf_name_len + 1) < 0)
+ scfdie();
+ safe_setprop(n, name_attr, exp_str);
+
+ /* type */
+ if (scf_pg_get_type(pg, exp_str, exp_str_sz) < 0)
+ scfdie();
+ safe_setprop(n, type_attr, exp_str);
+
+ /* properties */
+ if (scf_iter_pg_properties(exp_prop_iter, pg) != SCF_SUCCESS)
+ scfdie();
+
+ (void) memset(&elts, 0, sizeof (elts));
+
+ while ((ret = scf_iter_next_property(exp_prop_iter, exp_prop)) == 1) {
+ if (scf_property_get_name(exp_prop, exp_str, exp_str_sz) < 0)
+ scfdie();
+
+ if (strcmp(exp_str, SCF_PROPERTY_STABILITY) == 0) {
+ xmlNodePtr m;
+
+ m = xmlNewNode(NULL, (xmlChar *)"stability");
+ if (m == NULL)
+ uu_die(emsg_create_xml);
+
+ if (set_attr_from_prop(exp_prop, m, value_attr) == 0) {
+ elts.stability = m;
+ continue;
+ }
+
+ xmlFreeNode(m);
+ }
+
+ export_property(exp_prop, NULL, &elts);
+ }
+ if (ret == -1)
+ scfdie();
+
+ (void) xmlAddChild(n, elts.stability);
+ (void) xmlAddChildList(n, elts.propvals);
+ (void) xmlAddChildList(n, elts.properties);
+
+ if (eelts->property_groups == NULL)
+ eelts->property_groups = n;
+ else
+ (void) xmlAddSibling(eelts->property_groups, n);
+}
+
+/*
+ * Create an XML node representing the dependency described by the given
+ * property group and put it in eelts. Unless the dependency is not valid, in
+ * which case create a generic property_group element which represents it and
+ * put it in eelts.
+ */
+static void
+export_dependency(scf_propertygroup_t *pg, struct entity_elts *eelts)
+{
+ xmlNodePtr n;
+ int err = 0, ret;
+ struct pg_elts elts;
+
+ n = xmlNewNode(NULL, (xmlChar *)"dependency");
+ if (n == NULL)
+ uu_die(emsg_create_xml);
+
+ /*
+ * If the external flag is present, skip this dependency because it
+ * should have been created by another manifest.
+ */
+ if (scf_pg_get_property(pg, scf_property_external, exp_prop) == 0) {
+ if (prop_check_type(exp_prop, SCF_TYPE_BOOLEAN) == 0 &&
+ prop_get_val(exp_prop, exp_val) == 0) {
+ uint8_t b;
+
+ if (scf_value_get_boolean(exp_val, &b) != SCF_SUCCESS)
+ scfdie();
+
+ if (b)
+ return;
+ }
+ } else if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ /* Get the required attributes. */
+
+ /* name */
+ if (scf_pg_get_name(pg, exp_str, max_scf_name_len + 1) < 0)
+ scfdie();
+ safe_setprop(n, name_attr, exp_str);
+
+ /* grouping */
+ if (pg_get_prop(pg, SCF_PROPERTY_GROUPING, exp_prop) != 0 ||
+ set_attr_from_prop(exp_prop, n, "grouping") != 0)
+ err = 1;
+
+ /* restart_on */
+ if (pg_get_prop(pg, SCF_PROPERTY_RESTART_ON, exp_prop) != 0 ||
+ set_attr_from_prop(exp_prop, n, "restart_on") != 0)
+ err = 1;
+
+ /* type */
+ if (pg_get_prop(pg, SCF_PROPERTY_TYPE, exp_prop) != 0 ||
+ set_attr_from_prop(exp_prop, n, type_attr) != 0)
+ err = 1;
+
+ /*
+ * entities: Not required, but if we create no children, it will be
+ * created as empty on import, so fail if it's missing.
+ */
+ if (pg_get_prop(pg, SCF_PROPERTY_ENTITIES, exp_prop) == 0 &&
+ prop_check_type(exp_prop, SCF_TYPE_FMRI) == 0) {
+ scf_iter_t *eiter;
+ int ret2;
+
+ eiter = scf_iter_create(g_hndl);
+ if (eiter == NULL)
+ scfdie();
+
+ if (scf_iter_property_values(eiter, exp_prop) != SCF_SUCCESS)
+ scfdie();
+
+ while ((ret2 = scf_iter_next_value(eiter, exp_val)) == 1) {
+ xmlNodePtr ch;
+
+ if (scf_value_get_astring(exp_val, exp_str,
+ exp_str_sz) < 0)
+ scfdie();
+
+ /*
+ * service_fmri elements must be first, so we can add them
+ * here.
+ */
+ ch = xmlNewChild(n, NULL, (xmlChar *)"service_fmri",
+ NULL);
+ if (ch == NULL)
+ uu_die(emsg_create_xml);
+
+ safe_setprop(ch, value_attr, exp_str);
+ }
+ if (ret2 == -1)
+ scfdie();
+
+ scf_iter_destroy(eiter);
+ } else
+ err = 1;
+
+ if (err) {
+ xmlFreeNode(n);
+
+ export_pg(pg, eelts);
+
+ return;
+ }
+
+ /* Iterate through the properties & handle each. */
+ if (scf_iter_pg_properties(exp_prop_iter, pg) != SCF_SUCCESS)
+ scfdie();
+
+ (void) memset(&elts, 0, sizeof (elts));
+
+ while ((ret = scf_iter_next_property(exp_prop_iter, exp_prop)) == 1) {
+ if (scf_property_get_name(exp_prop, exp_str, exp_str_sz) < 0)
+ scfdie();
+
+ if (strcmp(exp_str, SCF_PROPERTY_GROUPING) == 0 ||
+ strcmp(exp_str, SCF_PROPERTY_RESTART_ON) == 0 ||
+ strcmp(exp_str, SCF_PROPERTY_TYPE) == 0 ||
+ strcmp(exp_str, SCF_PROPERTY_ENTITIES) == 0) {
+ continue;
+ } else if (strcmp(exp_str, SCF_PROPERTY_STABILITY) == 0) {
+ xmlNodePtr m;
+
+ m = xmlNewNode(NULL, (xmlChar *)"stability");
+ if (m == NULL)
+ uu_die(emsg_create_xml);
+
+ if (set_attr_from_prop(exp_prop, m, value_attr) == 0) {
+ elts.stability = m;
+ continue;
+ }
+
+ xmlFreeNode(m);
+ }
+
+ export_property(exp_prop, exp_str, &elts);
+ }
+ if (ret == -1)
+ scfdie();
+
+ (void) xmlAddChild(n, elts.stability);
+ (void) xmlAddChildList(n, elts.propvals);
+ (void) xmlAddChildList(n, elts.properties);
+
+ if (eelts->dependencies == NULL)
+ eelts->dependencies = n;
+ else
+ (void) xmlAddSibling(eelts->dependencies, n);
+}
+
+static xmlNodePtr
+export_method_environment(scf_propertygroup_t *pg)
+{
+ xmlNodePtr env;
+ int ret;
+ int children = 0;
+
+ if (scf_pg_get_property(pg, SCF_PROPERTY_ENVIRONMENT, NULL) != 0)
+ return (NULL);
+
+ env = xmlNewNode(NULL, (xmlChar *)"method_environment");
+ if (env == NULL)
+ uu_die(emsg_create_xml);
+
+ if (pg_get_prop(pg, SCF_PROPERTY_ENVIRONMENT, exp_prop) != 0)
+ scfdie();
+
+ if (scf_iter_property_values(exp_val_iter, exp_prop) != SCF_SUCCESS)
+ scfdie();
+
+ while ((ret = scf_iter_next_value(exp_val_iter, exp_val)) == 1) {
+ xmlNodePtr ev;
+ char *cp;
+
+ if (scf_value_get_as_string(exp_val, exp_str, exp_str_sz) < 0)
+ scfdie();
+
+ if ((cp = strchr(exp_str, '=')) == NULL || cp == exp_str) {
+ warn(gettext("Invalid environment variable \"%s\".\n"),
+ exp_str);
+ continue;
+ } else if (strncmp(exp_str, "SMF_", 4) == 0) {
+ warn(gettext("Invalid environment variable \"%s\"; "
+ "\"SMF_\" prefix is reserved.\n"), exp_str);
+ continue;
+ }
+
+ *cp = '\0';
+ cp++;
+
+ ev = xmlNewChild(env, NULL, (xmlChar *)"envvar", NULL);
+ if (ev == NULL)
+ uu_die(emsg_create_xml);
+
+ safe_setprop(ev, name_attr, exp_str);
+ safe_setprop(ev, value_attr, cp);
+ children++;
+ }
+
+ if (ret != 0)
+ scfdie();
+
+ if (children == 0) {
+ xmlFreeNode(env);
+ return (NULL);
+ }
+
+ return (env);
+}
+
+/*
+ * As above, but for a method property group.
+ */
+static void
+export_method(scf_propertygroup_t *pg, struct entity_elts *eelts)
+{
+ xmlNodePtr n, env;
+ char *str;
+ int err = 0, nonenv, ret;
+ uint8_t use_profile;
+ struct pg_elts elts;
+ xmlNodePtr ctxt;
+
+ n = xmlNewNode(NULL, (xmlChar *)"exec_method");
+
+ /* Get the required attributes. */
+
+ /* name */
+ if (scf_pg_get_name(pg, exp_str, max_scf_name_len + 1) < 0)
+ scfdie();
+ safe_setprop(n, name_attr, exp_str);
+
+ /* type */
+ if (pg_get_prop(pg, SCF_PROPERTY_TYPE, exp_prop) != 0 ||
+ set_attr_from_prop(exp_prop, n, type_attr) != 0)
+ err = 1;
+
+ /* exec */
+ if (pg_get_prop(pg, SCF_PROPERTY_EXEC, exp_prop) != 0 ||
+ set_attr_from_prop(exp_prop, n, "exec") != 0)
+ err = 1;
+
+ /* timeout */
+ if (pg_get_prop(pg, SCF_PROPERTY_TIMEOUT, exp_prop) == 0 &&
+ prop_check_type(exp_prop, SCF_TYPE_COUNT) == 0 &&
+ prop_get_val(exp_prop, exp_val) == 0) {
+ uint64_t c;
+
+ if (scf_value_get_count(exp_val, &c) != SCF_SUCCESS)
+ scfdie();
+
+ str = uu_msprintf("%llu", c);
+ if (str == NULL)
+ uu_die(gettext("Could not create string"));
+
+ safe_setprop(n, "timeout_seconds", str);
+ free(str);
+ } else
+ err = 1;
+
+ if (err) {
+ xmlFreeNode(n);
+
+ export_pg(pg, eelts);
+
+ return;
+ }
+
+ ctxt = xmlNewNode(NULL, (xmlChar *)"method_context");
+ if (ctxt == NULL)
+ uu_die(emsg_create_xml);
+
+ /*
+ * If we're going to have a method_context child, we need to know
+ * before we iterate through the properties. Since method_contexts
+ * are optional, we don't want to complain about any properties
+ * missing if none of them are there. Thus we can't use the
+ * convenience functions.
+ */
+ nonenv =
+ scf_pg_get_property(pg, SCF_PROPERTY_WORKING_DIRECTORY, NULL) ==
+ SCF_SUCCESS ||
+ scf_pg_get_property(pg, SCF_PROPERTY_PROJECT, NULL) ==
+ SCF_SUCCESS ||
+ scf_pg_get_property(pg, SCF_PROPERTY_RESOURCE_POOL, NULL) ==
+ SCF_SUCCESS ||
+ scf_pg_get_property(pg, SCF_PROPERTY_USE_PROFILE, NULL) ==
+ SCF_SUCCESS;
+
+ if (nonenv) {
+ /*
+ * We only want to complain about profile or credential
+ * properties if we will use them. To determine that we must
+ * examine USE_PROFILE.
+ */
+ if (pg_get_prop(pg, SCF_PROPERTY_USE_PROFILE, exp_prop) == 0 &&
+ prop_check_type(exp_prop, SCF_TYPE_BOOLEAN) == 0 &&
+ prop_get_val(exp_prop, exp_val) == 0) {
+ if (scf_value_get_boolean(exp_val, &use_profile) !=
+ SCF_SUCCESS)
+ scfdie();
+ } else
+ /*
+ * USE_PROFILE is misconfigured. Since we should have
+ * complained just now, we don't want to complain
+ * about any of the other properties, so don't look
+ * for them.
+ */
+ nonenv = 0;
+ }
+
+ if (nonenv) {
+
+ if (pg_get_prop(pg, SCF_PROPERTY_WORKING_DIRECTORY, exp_prop) !=
+ 0 ||
+ set_attr_from_prop_default(exp_prop, ctxt,
+ "working_directory", ":default") != 0)
+ err = 1;
+
+ if (pg_get_prop(pg, SCF_PROPERTY_PROJECT, exp_prop) != 0 ||
+ set_attr_from_prop_default(exp_prop, ctxt, "project",
+ ":default") != 0)
+ err = 1;
+
+ if (pg_get_prop(pg, SCF_PROPERTY_RESOURCE_POOL, exp_prop) !=
+ 0 ||
+ set_attr_from_prop_default(exp_prop, ctxt,
+ "resource_pool", ":default") != 0)
+ err = 1;
+
+ if (use_profile) {
+ xmlNodePtr prof;
+
+ prof = xmlNewChild(ctxt, NULL,
+ (xmlChar *)"method_profile", NULL);
+ if (prof == NULL)
+ uu_die(emsg_create_xml);
+
+ if (pg_get_prop(pg, SCF_PROPERTY_PROFILE, exp_prop) !=
+ 0 || set_attr_from_prop(exp_prop, prof,
+ name_attr) != 0)
+ err = 1;
+ } else {
+ xmlNodePtr cred;
+
+ cred = xmlNewChild(ctxt, NULL,
+ (xmlChar *)"method_credential", NULL);
+ if (cred == NULL)
+ uu_die(emsg_create_xml);
+
+ if (pg_get_prop(pg, SCF_PROPERTY_USER, exp_prop) != 0 ||
+ set_attr_from_prop(exp_prop, cred, "user") != 0)
+ err = 1;
+
+ if (pg_get_prop(pg, SCF_PROPERTY_GROUP, exp_prop) !=
+ 0 ||
+ set_attr_from_prop_default(exp_prop, cred,
+ "group", ":default") != 0)
+ err = 1;
+
+ if (pg_get_prop(pg, SCF_PROPERTY_SUPP_GROUPS,
+ exp_prop) != 0 ||
+ set_attr_from_prop_default(exp_prop, cred,
+ "supp_groups", ":default") != 0)
+ err = 1;
+
+ if (pg_get_prop(pg, SCF_PROPERTY_PRIVILEGES,
+ exp_prop) != 0 ||
+ set_attr_from_prop_default(exp_prop, cred,
+ "privileges", ":default") != 0)
+ err = 1;
+
+ if (pg_get_prop(pg, SCF_PROPERTY_LIMIT_PRIVILEGES,
+ exp_prop) != 0 ||
+ set_attr_from_prop_default(exp_prop, cred,
+ "limit_privileges", ":default") != 0)
+ err = 1;
+ }
+ }
+
+ if ((env = export_method_environment(pg)) != NULL)
+ (void) xmlAddChild(ctxt, env);
+
+ if (env != NULL || err == 0)
+ (void) xmlAddChild(n, ctxt);
+ else
+ xmlFreeNode(ctxt);
+
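+ /*
+ * Reuse nonenv to note whether the method_context attributes were
+ * exported above, so the property walk below can skip them.
+ */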
+ nonenv = (err == 0);
+
+ if (scf_iter_pg_properties(exp_prop_iter, pg) != SCF_SUCCESS)
+ scfdie();
+
+ (void) memset(&elts, 0, sizeof (elts));
+
+ while ((ret = scf_iter_next_property(exp_prop_iter, exp_prop)) == 1) {
+ if (scf_property_get_name(exp_prop, exp_str, exp_str_sz) < 0)
+ scfdie();
+
+ if (strcmp(exp_str, SCF_PROPERTY_TYPE) == 0 ||
+ strcmp(exp_str, SCF_PROPERTY_EXEC) == 0 ||
+ strcmp(exp_str, SCF_PROPERTY_TIMEOUT) == 0) {
+ continue;
+ } else if (strcmp(exp_str, SCF_PROPERTY_STABILITY) == 0) {
+ xmlNodePtr m;
+
+ m = xmlNewNode(NULL, (xmlChar *)"stability");
+ if (m == NULL)
+ uu_die(emsg_create_xml);
+
+ if (set_attr_from_prop(exp_prop, m, value_attr) == 0) {
+ elts.stability = m;
+ continue;
+ }
+
+ xmlFreeNode(m);
+ } else if (strcmp(exp_str, SCF_PROPERTY_WORKING_DIRECTORY) ==
+ 0 ||
+ strcmp(exp_str, SCF_PROPERTY_PROJECT) == 0 ||
+ strcmp(exp_str, SCF_PROPERTY_RESOURCE_POOL) == 0 ||
+ strcmp(exp_str, SCF_PROPERTY_USE_PROFILE) == 0) {
+ if (nonenv)
+ continue;
+ } else if (strcmp(exp_str, SCF_PROPERTY_USER) == 0 ||
+ strcmp(exp_str, SCF_PROPERTY_GROUP) == 0 ||
+ strcmp(exp_str, SCF_PROPERTY_SUPP_GROUPS) == 0 ||
+ strcmp(exp_str, SCF_PROPERTY_PRIVILEGES) == 0 ||
+ strcmp(exp_str, SCF_PROPERTY_LIMIT_PRIVILEGES) == 0) {
+ if (nonenv && !use_profile)
+ continue;
+ } else if (strcmp(exp_str, SCF_PROPERTY_PROFILE) == 0) {
+ if (nonenv && use_profile)
+ continue;
+ } else if (strcmp(exp_str, SCF_PROPERTY_ENVIRONMENT) == 0) {
+ if (env != NULL)
+ continue;
+ }
+
+ export_property(exp_prop, exp_str, &elts);
+ }
+ if (ret == -1)
+ scfdie();
+
+ (void) xmlAddChild(n, elts.stability);
+ (void) xmlAddChildList(n, elts.propvals);
+ (void) xmlAddChildList(n, elts.properties);
+
+ if (eelts->exec_methods == NULL)
+ eelts->exec_methods = n;
+ else
+ (void) xmlAddSibling(eelts->exec_methods, n);
+}
+
+static void
+export_pg_elts(struct pg_elts *elts, const char *name, const char *type,
+ struct entity_elts *eelts)
+{
+ xmlNodePtr pgnode;
+
+ pgnode = xmlNewNode(NULL, (xmlChar *)"property_group");
+ if (pgnode == NULL)
+ uu_die(emsg_create_xml);
+
+ safe_setprop(pgnode, name_attr, name);
+ safe_setprop(pgnode, type_attr, type);
+
+ (void) xmlAddChildList(pgnode, elts->propvals);
+ (void) xmlAddChildList(pgnode, elts->properties);
+
+ if (eelts->property_groups == NULL)
+ eelts->property_groups = pgnode;
+ else
+ (void) xmlAddSibling(eelts->property_groups, pgnode);
+}
+
+/*
+ * Process the general property group for a service. This is the one with the
+ * goodies.
+ */
+static void
+export_svc_general(scf_propertygroup_t *pg, struct entity_elts *selts)
+{
+ struct pg_elts elts;
+ int ret;
+
+ /*
+ * In case there are properties which don't correspond to child
+ * entities of the service entity, we'll set up a pg_elts structure to
+ * put them in.
+ */
+ (void) memset(&elts, 0, sizeof (elts));
+
+ /* Walk the properties, looking for special ones. */
+ if (scf_iter_pg_properties(exp_prop_iter, pg) != SCF_SUCCESS)
+ scfdie();
+
+ while ((ret = scf_iter_next_property(exp_prop_iter, exp_prop)) == 1) {
+ if (scf_property_get_name(exp_prop, exp_str, exp_str_sz) < 0)
+ scfdie();
+
+ if (strcmp(exp_str, SCF_PROPERTY_SINGLE_INSTANCE) == 0) {
+ if (prop_check_type(exp_prop, SCF_TYPE_BOOLEAN) == 0 &&
+ prop_get_val(exp_prop, exp_val) == 0) {
+ uint8_t b;
+
+ if (scf_value_get_boolean(exp_val, &b) !=
+ SCF_SUCCESS)
+ scfdie();
+
+ if (b) {
+ selts->single_instance =
+ xmlNewNode(NULL,
+ (xmlChar *)"single_instance");
+ if (selts->single_instance == NULL)
+ uu_die(emsg_create_xml);
+ }
+
+ continue;
+ }
+ } else if (strcmp(exp_str, SCF_PROPERTY_RESTARTER) == 0) {
+ xmlNodePtr rnode, sfnode;
+
+ rnode = xmlNewNode(NULL, (xmlChar *)"restarter");
+ if (rnode == NULL)
+ uu_die(emsg_create_xml);
+
+ sfnode = xmlNewChild(rnode, NULL,
+ (xmlChar *)"service_fmri", NULL);
+ if (sfnode == NULL)
+ uu_die(emsg_create_xml);
+
+ if (set_attr_from_prop(exp_prop, sfnode,
+ value_attr) == 0) {
+ selts->restarter = rnode;
+ continue;
+ }
+
+ xmlFreeNode(rnode);
+ } else if (strcmp(exp_str, SCF_PROPERTY_ENTITY_STABILITY) ==
+ 0) {
+ xmlNodePtr s;
+
+ s = xmlNewNode(NULL, (xmlChar *)"stability");
+ if (s == NULL)
+ uu_die(emsg_create_xml);
+
+ if (set_attr_from_prop(exp_prop, s, value_attr) == 0) {
+ selts->stability = s;
+ continue;
+ }
+
+ xmlFreeNode(s);
+ }
+
+ export_property(exp_prop, exp_str, &elts);
+ }
+ if (ret == -1)
+ scfdie();
+
+ if (elts.propvals != NULL || elts.properties != NULL)
+ export_pg_elts(&elts, scf_pg_general, scf_group_framework,
+ selts);
+}
+
+static void
+export_method_context(scf_propertygroup_t *pg, struct entity_elts *elts)
+{
+ xmlNodePtr n, prof, cred, env;
+ uint8_t use_profile;
+ int ret, err = 0;
+
+ n = xmlNewNode(NULL, (xmlChar *)"method_context");
+
+ env = export_method_environment(pg);
+
+ /* Need to know whether we'll use a profile or not. */
+ if (pg_get_prop(pg, SCF_PROPERTY_USE_PROFILE, exp_prop) != 0 ||
+ prop_check_type(exp_prop, SCF_TYPE_BOOLEAN) != 0 ||
+ prop_get_val(exp_prop, exp_val) != 0) {
+ if (env != NULL) {
+ (void) xmlAddChild(n, env);
+ elts->method_context = n;
+ } else {
+ xmlFreeNode(n);
+ export_pg(pg, elts);
+ }
+ return;
+ }
+
+ if (scf_value_get_boolean(exp_val, &use_profile) != SCF_SUCCESS)
+ scfdie();
+
+ if (use_profile)
+ prof = xmlNewChild(n, NULL, (xmlChar *)"method_profile", NULL);
+ else
+ cred =
+ xmlNewChild(n, NULL, (xmlChar *)"method_credential", NULL);
+
+ if (env != NULL)
+ (void) xmlAddChild(n, env);
+
+ if (scf_iter_pg_properties(exp_prop_iter, pg) != SCF_SUCCESS)
+ scfdie();
+
+ while ((ret = scf_iter_next_property(exp_prop_iter, exp_prop)) == 1) {
+ if (scf_property_get_name(exp_prop, exp_str, exp_str_sz) < 0)
+ scfdie();
+
+ if (strcmp(exp_str, SCF_PROPERTY_WORKING_DIRECTORY) == 0) {
+ if (set_attr_from_prop(exp_prop, n,
+ "working_directory") != 0)
+ err = 1;
+ } else if (strcmp(exp_str, SCF_PROPERTY_PROJECT) == 0) {
+ if (set_attr_from_prop(exp_prop, n, "project") != 0)
+ err = 1;
+ } else if (strcmp(exp_str, SCF_PROPERTY_RESOURCE_POOL) == 0) {
+ if (set_attr_from_prop(exp_prop, n,
+ "resource_pool") != 0)
+ err = 1;
+ } else if (strcmp(exp_str, SCF_PROPERTY_USE_PROFILE) == 0) {
+ /* EMPTY */
+ } else if (strcmp(exp_str, SCF_PROPERTY_USER) == 0) {
+ if (use_profile ||
+ set_attr_from_prop(exp_prop, cred, "user") != 0)
+ err = 1;
+ } else if (strcmp(exp_str, SCF_PROPERTY_GROUP) == 0) {
+ if (use_profile ||
+ set_attr_from_prop(exp_prop, cred, "group") != 0)
+ err = 1;
+ } else if (strcmp(exp_str, SCF_PROPERTY_SUPP_GROUPS) == 0) {
+ if (use_profile || set_attr_from_prop(exp_prop, cred,
+ "supp_groups") != 0)
+ err = 1;
+ } else if (strcmp(exp_str, SCF_PROPERTY_PRIVILEGES) == 0) {
+ if (use_profile || set_attr_from_prop(exp_prop, cred,
+ "privileges") != 0)
+ err = 1;
+ } else if (strcmp(exp_str, SCF_PROPERTY_LIMIT_PRIVILEGES) ==
+ 0) {
+ if (use_profile || set_attr_from_prop(exp_prop, cred,
+ "limit_privileges") != 0)
+ err = 1;
+ } else if (strcmp(exp_str, SCF_PROPERTY_PROFILE) == 0) {
+ if (!use_profile || set_attr_from_prop(exp_prop,
+ prof, name_attr) != 0)
+ err = 1;
+ } else {
+ /* Can't have generic properties in method_contexts. */
+ err = 1;
+ }
+ }
+ if (ret == -1)
+ scfdie();
+
+ if (err && env == NULL) {
+ xmlFreeNode(n);
+ export_pg(pg, elts);
+ return;
+ }
+
+ elts->method_context = n;
+}
+
+/*
+ * Given a dependency property group in the tfmri entity (target fmri), return
+ * a dependent element which represents it.
+ */
+static xmlNodePtr
+export_dependent(scf_propertygroup_t *pg, const char *name, const char *tfmri)
+{
+ uint8_t b;
+ xmlNodePtr n, sf;
+ int err = 0, ret;
+ struct pg_elts pgelts;
+
+ /*
+ * If external isn't set to true then exporting the service will
+ * export this as a normal dependency, so we should stop to avoid
+ * duplication.
+ */
+ if (scf_pg_get_property(pg, scf_property_external, exp_prop) != 0 ||
+ scf_property_get_value(exp_prop, exp_val) != 0 ||
+ scf_value_get_boolean(exp_val, &b) != 0 || !b) {
+ if (g_verbose) {
+ warn(gettext("Dependent \"%s\" cannot be exported "
+ "properly because the \"%s\" property of the "
+ "\"%s\" dependency of %s is not set to true.\n"),
+ name, scf_property_external, name, tfmri);
+ }
+
+ return (NULL);
+ }
+
+ n = xmlNewNode(NULL, (xmlChar *)"dependent");
+ if (n == NULL)
+ uu_die(emsg_create_xml);
+
+ safe_setprop(n, name_attr, name);
+
+ /* Get the required attributes */
+ if (pg_get_prop(pg, SCF_PROPERTY_RESTART_ON, exp_prop) != 0 ||
+ set_attr_from_prop(exp_prop, n, "restart_on") != 0)
+ err = 1;
+
+ if (pg_get_prop(pg, SCF_PROPERTY_GROUPING, exp_prop) != 0 ||
+ set_attr_from_prop(exp_prop, n, "grouping") != 0)
+ err = 1;
+
+ if (pg_get_prop(pg, SCF_PROPERTY_ENTITIES, exp_prop) == 0 &&
+ prop_check_type(exp_prop, SCF_TYPE_FMRI) == 0 &&
+ prop_get_val(exp_prop, exp_val) == 0) {
+ /* EMPTY */
+ } else
+ err = 1;
+
+ if (err) {
+ xmlFreeNode(n);
+ return (NULL);
+ }
+
+ sf = xmlNewChild(n, NULL, (xmlChar *)"service_fmri", NULL);
+ if (sf == NULL)
+ uu_die(emsg_create_xml);
+
+ safe_setprop(sf, value_attr, tfmri);
+
+ /*
+ * Now add elements for the other properties.
+ */
+ if (scf_iter_pg_properties(exp_prop_iter, pg) != SCF_SUCCESS)
+ scfdie();
+
+ (void) memset(&pgelts, 0, sizeof (pgelts));
+
+ while ((ret = scf_iter_next_property(exp_prop_iter, exp_prop)) == 1) {
+ if (scf_property_get_name(exp_prop, exp_str, exp_str_sz) < 0)
+ scfdie();
+
+ if (strcmp(exp_str, scf_property_external) == 0 ||
+ strcmp(exp_str, SCF_PROPERTY_RESTART_ON) == 0 ||
+ strcmp(exp_str, SCF_PROPERTY_GROUPING) == 0 ||
+ strcmp(exp_str, SCF_PROPERTY_ENTITIES) == 0) {
+ continue;
+ } else if (strcmp(exp_str, SCF_PROPERTY_TYPE) == 0) {
+ if (prop_check_type(exp_prop, SCF_TYPE_ASTRING) == 0 &&
+ prop_get_val(exp_prop, exp_val) == 0) {
+ char type[sizeof ("service") + 1];
+
+ if (scf_value_get_astring(exp_val, type,
+ sizeof (type)) < 0)
+ scfdie();
+
+ if (strcmp(type, "service") == 0)
+ continue;
+ }
+ } else if (strcmp(exp_str, SCF_PROPERTY_STABILITY) == 0) {
+ xmlNodePtr s;
+
+ s = xmlNewNode(NULL, (xmlChar *)"stability");
+ if (s == NULL)
+ uu_die(emsg_create_xml);
+
+ if (set_attr_from_prop(exp_prop, s, value_attr) == 0) {
+ pgelts.stability = s;
+ continue;
+ }
+
+ xmlFreeNode(s);
+ }
+
+ export_property(exp_prop, exp_str, &pgelts);
+ }
+ if (ret == -1)
+ scfdie();
+
+ (void) xmlAddChild(n, pgelts.stability);
+ (void) xmlAddChildList(n, pgelts.propvals);
+ (void) xmlAddChildList(n, pgelts.properties);
+
+ return (n);
+}
+
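+/*
+ * Process a dependents property group.  Each property names a dependency
+ * property group in another entity; turn each into a dependent element via
+ * export_dependent(), falling back to exporting the property itself when
+ * that isn't possible.
+ */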
+static void
+export_dependents(scf_propertygroup_t *pg, struct entity_elts *eelts)
+{
+ scf_propertygroup_t *opg;
+ scf_iter_t *iter;
+ char *type, *fmri;
+ int ret;
+ struct pg_elts pgelts;
+ xmlNodePtr n;
+ scf_error_t serr;
+
+ if ((opg = scf_pg_create(g_hndl)) == NULL ||
+ (iter = scf_iter_create(g_hndl)) == NULL)
+ scfdie();
+
+ /* Can't use exp_prop_iter here because export_dependent() uses it. */
+ if (scf_iter_pg_properties(iter, pg) != SCF_SUCCESS)
+ scfdie();
+
+ type = safe_malloc(max_scf_pg_type_len + 1);
+
+ /* Get an extra byte so we can tell if values are too long. */
+ fmri = safe_malloc(max_scf_fmri_len + 2);
+
+ (void) memset(&pgelts, 0, sizeof (pgelts));
+
+ while ((ret = scf_iter_next_property(iter, exp_prop)) == 1) {
+ void *entity;
+ int isservice;
+ scf_type_t ty;
+
+ if (scf_property_type(exp_prop, &ty) != SCF_SUCCESS)
+ scfdie();
+
+ if ((ty != SCF_TYPE_ASTRING &&
+ prop_check_type(exp_prop, SCF_TYPE_FMRI) != 0) ||
+ prop_get_val(exp_prop, exp_val) != 0) {
+ export_property(exp_prop, NULL, &pgelts);
+ continue;
+ }
+
+ if (scf_property_get_name(exp_prop, exp_str, exp_str_sz) < 0)
+ scfdie();
+
+ if (scf_value_get_astring(exp_val, fmri,
+ max_scf_fmri_len + 2) < 0)
+ scfdie();
+
+ /* Look for a dependency group in the target fmri. */
+ serr = fmri_to_entity(g_hndl, fmri, &entity, &isservice);
+ switch (serr) {
+ case SCF_ERROR_NONE:
+ break;
+
+ case SCF_ERROR_NO_MEMORY:
+ uu_die(gettext("Out of memory.\n"));
+ /* NOTREACHED */
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ if (g_verbose) {
+ if (scf_property_to_fmri(exp_prop, fmri,
+ max_scf_fmri_len + 2) < 0)
+ scfdie();
+
+ warn(gettext("The value of %s is not a valid "
+ "FMRI.\n"), fmri);
+ }
+
+ export_property(exp_prop, exp_str, &pgelts);
+ continue;
+
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ if (g_verbose) {
+ if (scf_property_to_fmri(exp_prop, fmri,
+ max_scf_fmri_len + 2) < 0)
+ scfdie();
+
+ warn(gettext("The value of %s does not specify "
+ "a service or an instance.\n"), fmri);
+ }
+
+ export_property(exp_prop, exp_str, &pgelts);
+ continue;
+
+ case SCF_ERROR_NOT_FOUND:
+ if (g_verbose) {
+ if (scf_property_to_fmri(exp_prop, fmri,
+ max_scf_fmri_len + 2) < 0)
+ scfdie();
+
+ warn(gettext("The entity specified by %s does "
+ "not exist.\n"), fmri);
+ }
+
+ export_property(exp_prop, exp_str, &pgelts);
+ continue;
+
+ default:
+#ifndef NDEBUG
+ (void) fprintf(stderr, "%s:%d: %s() failed with "
+ "unexpected error %d.\n", __FILE__, __LINE__,
+ "fmri_to_entity", serr);
+#endif
+ abort();
+ }
+
+ if (entity_get_pg(entity, isservice, exp_str, opg) != 0) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ warn(gettext("Entity %s is missing dependency property "
+ "group %s.\n"), fmri, exp_str);
+
+ export_property(exp_prop, NULL, &pgelts);
+ continue;
+ }
+
+ if (scf_pg_get_type(opg, type, max_scf_pg_type_len + 1) < 0)
+ scfdie();
+
+ if (strcmp(type, SCF_GROUP_DEPENDENCY) != 0) {
+ if (scf_pg_to_fmri(opg, fmri, max_scf_fmri_len + 2) < 0)
+ scfdie();
+
+ warn(gettext("Property group %s is not of "
+ "expected type %s.\n"), fmri, SCF_GROUP_DEPENDENCY);
+
+ export_property(exp_prop, NULL, &pgelts);
+ continue;
+ }
+
+ n = export_dependent(opg, exp_str, fmri);
+ if (n == NULL)
+ export_property(exp_prop, exp_str, &pgelts);
+ else {
+ if (eelts->dependents == NULL)
+ eelts->dependents = n;
+ else
+ (void) xmlAddSibling(eelts->dependents,
+ n);
+ }
+ }
+ if (ret == -1)
+ scfdie();
+
+ free(fmri);
+ free(type);
+
+ scf_iter_destroy(iter);
+ scf_pg_destroy(opg);
+
+ if (pgelts.propvals != NULL || pgelts.properties != NULL)
+ export_pg_elts(&pgelts, SCF_PG_DEPENDENTS, scf_group_framework,
+ eelts);
+}
+
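+/*
+ * Create an XML node with the given name in *nodep, unless one has already
+ * been created.
+ */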
+static void
+make_node(xmlNodePtr *nodep, const char *name)
+{
+ if (*nodep == NULL) {
+ *nodep = xmlNewNode(NULL, (xmlChar *)name);
+ if (*nodep == NULL)
+ uu_die(emsg_create_xml);
+ }
+}
+
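+/*
+ * Build a parname element with a loctext child for each ustring property in
+ * pg, using the property name as the xml:lang attribute.  Returns NULL if
+ * there were no such properties.
+ */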
+static xmlNodePtr
+export_tm_loctext(scf_propertygroup_t *pg, const char *parname)
+{
+ int ret;
+ xmlNodePtr parent = NULL;
+ xmlNodePtr loctext = NULL;
+
+ if (scf_iter_pg_properties(exp_prop_iter, pg) != SCF_SUCCESS)
+ scfdie();
+
+ while ((ret = scf_iter_next_property(exp_prop_iter, exp_prop)) == 1) {
+ if (prop_check_type(exp_prop, SCF_TYPE_USTRING) != 0 ||
+ prop_get_val(exp_prop, exp_val) != 0)
+ continue;
+
+ if (scf_value_get_ustring(exp_val, exp_str, exp_str_sz) < 0)
+ scfdie();
+
+ make_node(&parent, parname);
+ loctext = xmlNewTextChild(parent, NULL, (xmlChar *)"loctext",
+ (xmlChar *)exp_str);
+ if (loctext == NULL)
+ uu_die(emsg_create_xml);
+
+ if (scf_property_get_name(exp_prop, exp_str, exp_str_sz) < 0)
+ scfdie();
+
+ safe_setprop(loctext, "xml:lang", exp_str);
+ }
+
+ if (ret == -1)
+ scfdie();
+
+ return (parent);
+}
+
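+/*
+ * Turn a template manpage property group into a manpage element, or return
+ * NULL if the title or section property is missing.
+ */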
+static xmlNodePtr
+export_tm_manpage(scf_propertygroup_t *pg)
+{
+ xmlNodePtr manpage = xmlNewNode(NULL, (xmlChar *)"manpage");
+ if (manpage == NULL)
+ uu_die(emsg_create_xml);
+
+ if (pg_get_prop(pg, SCF_PROPERTY_TM_TITLE, exp_prop) != 0 ||
+ set_attr_from_prop(exp_prop, manpage, "title") != 0 ||
+ pg_get_prop(pg, SCF_PROPERTY_TM_SECTION, exp_prop) != 0 ||
+ set_attr_from_prop(exp_prop, manpage, "section") != 0) {
+ xmlFreeNode(manpage);
+ return (NULL);
+ }
+
+ if (pg_get_prop(pg, SCF_PROPERTY_TM_MANPATH, exp_prop) == 0)
+ (void) set_attr_from_prop_default(exp_prop,
+ manpage, "manpath", ":default");
+
+ return (manpage);
+}
+
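+/*
+ * Turn a template documentation property group into a doc_link element, or
+ * return NULL if the name or uri property is missing.
+ */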
+static xmlNodePtr
+export_tm_doc_link(scf_propertygroup_t *pg)
+{
+ xmlNodePtr doc_link = xmlNewNode(NULL, (xmlChar *)"doc_link");
+ if (doc_link == NULL)
+ uu_die(emsg_create_xml);
+
+ if (pg_get_prop(pg, SCF_PROPERTY_TM_NAME, exp_prop) != 0 ||
+ set_attr_from_prop(exp_prop, doc_link, "name") != 0 ||
+ pg_get_prop(pg, SCF_PROPERTY_TM_URI, exp_prop) != 0 ||
+ set_attr_from_prop(exp_prop, doc_link, "uri") != 0) {
+ xmlFreeNode(doc_link);
+ return (NULL);
+ }
+ return (doc_link);
+}
+
+/*
+ * Process template information for a service or an instance.
+ */
+static void
+export_template(scf_propertygroup_t *pg, struct entity_elts *elts,
+ struct template_elts *telts)
+{
+ size_t mansz = strlen(SCF_PG_TM_MAN_PREFIX);
+ size_t docsz = strlen(SCF_PG_TM_DOC_PREFIX);
+ xmlNodePtr child = NULL;
+
+ if (scf_pg_get_name(pg, exp_str, exp_str_sz) < 0)
+ scfdie();
+
+ if (strcmp(exp_str, SCF_PG_TM_COMMON_NAME) == 0) {
+ telts->common_name = export_tm_loctext(pg, "common_name");
+ if (telts->common_name == NULL)
+ export_pg(pg, elts);
+ return;
+ } else if (strcmp(exp_str, SCF_PG_TM_DESCRIPTION) == 0) {
+ telts->description = export_tm_loctext(pg, "description");
+ if (telts->description == NULL)
+ export_pg(pg, elts);
+ return;
+ }
+
+ if (strncmp(exp_str, SCF_PG_TM_MAN_PREFIX, mansz) == 0) {
+ child = export_tm_manpage(pg);
+ } else if (strncmp(exp_str, SCF_PG_TM_DOC_PREFIX, docsz) == 0) {
+ child = export_tm_doc_link(pg);
+ }
+
+ if (child != NULL) {
+ make_node(&telts->documentation, "documentation");
+ (void) xmlAddChild(telts->documentation, child);
+ } else {
+ export_pg(pg, elts);
+ }
+}
+
+/*
+ * Process the general property group for an instance.
+ */
+static void
+export_inst_general(scf_propertygroup_t *pg, xmlNodePtr inode,
+ struct entity_elts *elts)
+{
+ uint8_t enabled;
+ struct pg_elts pgelts;
+ int ret;
+
+ /* enabled */
+ if (pg_get_prop(pg, scf_property_enabled, exp_prop) == 0 &&
+ prop_check_type(exp_prop, SCF_TYPE_BOOLEAN) == 0 &&
+ prop_get_val(exp_prop, exp_val) == 0) {
+ if (scf_value_get_boolean(exp_val, &enabled) != SCF_SUCCESS)
+ scfdie();
+ } else {
+ enabled = 0;
+ }
+
+ safe_setprop(inode, enabled_attr, enabled ? true : false);
+
+ if (scf_iter_pg_properties(exp_prop_iter, pg) != SCF_SUCCESS)
+ scfdie();
+
+ (void) memset(&pgelts, 0, sizeof (pgelts));
+
+ while ((ret = scf_iter_next_property(exp_prop_iter, exp_prop)) == 1) {
+ if (scf_property_get_name(exp_prop, exp_str, exp_str_sz) < 0)
+ scfdie();
+
+ if (strcmp(exp_str, scf_property_enabled) == 0) {
+ continue;
+ } else if (strcmp(exp_str, SCF_PROPERTY_RESTARTER) == 0) {
+ xmlNodePtr rnode, sfnode;
+
+ rnode = xmlNewNode(NULL, (xmlChar *)"restarter");
+ if (rnode == NULL)
+ uu_die(emsg_create_xml);
+
+ sfnode = xmlNewChild(rnode, NULL,
+ (xmlChar *)"service_fmri", NULL);
+ if (sfnode == NULL)
+ uu_die(emsg_create_xml);
+
+ if (set_attr_from_prop(exp_prop, sfnode,
+ value_attr) == 0) {
+ elts->restarter = rnode;
+ continue;
+ }
+
+ xmlFreeNode(rnode);
+ }
+
+ export_property(exp_prop, exp_str, &pgelts);
+ }
+ if (ret == -1)
+ scfdie();
+
+ if (pgelts.propvals != NULL || pgelts.properties != NULL)
+ export_pg_elts(&pgelts, scf_pg_general, scf_group_framework,
+ elts);
+}
+
+/*
+ * Put an instance element for the given instance into selts.
+ */
+static void
+export_instance(scf_instance_t *inst, struct entity_elts *selts)
+{
+ xmlNodePtr n;
+ boolean_t isdefault;
+ struct entity_elts elts;
+ struct template_elts template_elts;
+ int ret;
+
+ n = xmlNewNode(NULL, (xmlChar *)"instance");
+ if (n == NULL)
+ uu_die(emsg_create_xml);
+
+ /* name */
+ if (scf_instance_get_name(inst, exp_str, exp_str_sz) < 0)
+ scfdie();
+ safe_setprop(n, name_attr, exp_str);
+ isdefault = strcmp(exp_str, "default") == 0;
+
+ /* check existence of general pg (since general/enabled is required) */
+ if (scf_instance_get_pg(inst, scf_pg_general, exp_pg) != SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ if (g_verbose) {
+ if (scf_instance_to_fmri(inst, exp_str, exp_str_sz) < 0)
+ scfdie();
+
+ warn(gettext("Instance %s has no general property "
+ "group; it will be marked disabled.\n"), exp_str);
+ }
+
+ safe_setprop(n, enabled_attr, false);
+ } else if (scf_pg_get_type(exp_pg, exp_str, exp_str_sz) < 0 ||
+ strcmp(exp_str, scf_group_framework) != 0) {
+ if (g_verbose) {
+ if (scf_pg_to_fmri(exp_pg, exp_str, exp_str_sz) < 0)
+ scfdie();
+
+ warn(gettext("Property group %s is not of type "
+ "framework; the instance will be marked "
+ "disabled.\n"), exp_str);
+ }
+
+ safe_setprop(n, enabled_attr, false);
+ }
+
+ /* property groups */
+ if (scf_iter_instance_pgs(exp_pg_iter, inst) < 0)
+ scfdie();
+
+ (void) memset(&elts, 0, sizeof (elts));
+ (void) memset(&template_elts, 0, sizeof (template_elts));
+
+ while ((ret = scf_iter_next_pg(exp_pg_iter, exp_pg)) == 1) {
+ uint32_t flags;
+
+ if (scf_pg_get_flags(exp_pg, &flags) != 0)
+ scfdie();
+
+ if (flags & SCF_PG_FLAG_NONPERSISTENT)
+ continue;
+
+ if (scf_pg_get_type(exp_pg, exp_str, exp_str_sz) < 0)
+ scfdie();
+
+ if (strcmp(exp_str, SCF_GROUP_DEPENDENCY) == 0) {
+ export_dependency(exp_pg, &elts);
+ continue;
+ } else if (strcmp(exp_str, SCF_GROUP_METHOD) == 0) {
+ export_method(exp_pg, &elts);
+ continue;
+ } else if (strcmp(exp_str, scf_group_framework) == 0) {
+ if (scf_pg_get_name(exp_pg, exp_str,
+ max_scf_name_len + 1) < 0)
+ scfdie();
+
+ if (strcmp(exp_str, scf_pg_general) == 0) {
+ export_inst_general(exp_pg, n, &elts);
+ continue;
+ } else if (strcmp(exp_str, SCF_PG_METHOD_CONTEXT) ==
+ 0) {
+ export_method_context(exp_pg, &elts);
+ continue;
+ } else if (strcmp(exp_str, SCF_PG_DEPENDENTS) == 0) {
+ export_dependents(exp_pg, &elts);
+ continue;
+ }
+ } else if (strcmp(exp_str, SCF_GROUP_TEMPLATE) == 0) {
+ export_template(exp_pg, &elts, &template_elts);
+ continue;
+ }
+
+ /* Ordinary pg. */
+ export_pg(exp_pg, &elts);
+ }
+ if (ret == -1)
+ scfdie();
+
+ if (template_elts.common_name != NULL) {
+ elts.template = xmlNewNode(NULL, (xmlChar *)"template");
+ (void) xmlAddChild(elts.template, template_elts.common_name);
+ (void) xmlAddChild(elts.template, template_elts.description);
+ (void) xmlAddChild(elts.template, template_elts.documentation);
+ } else {
+ xmlFreeNode(template_elts.description);
+ xmlFreeNode(template_elts.documentation);
+ }
+
+ if (isdefault && elts.restarter == NULL &&
+ elts.dependencies == NULL && elts.method_context == NULL &&
+ elts.exec_methods == NULL && elts.property_groups == NULL &&
+ elts.template == NULL) {
+ xmlChar *eval;
+
+ /* This is a default instance */
+ eval = xmlGetProp(n, (xmlChar *)enabled_attr);
+
+ xmlFreeNode(n);
+
+ n = xmlNewNode(NULL, (xmlChar *)"create_default_instance");
+ if (n == NULL)
+ uu_die(emsg_create_xml);
+
+ safe_setprop(n, enabled_attr, (char *)eval);
+ xmlFree(eval);
+
+ selts->create_default_instance = n;
+ } else {
+ /* Assemble the children in order. */
+ (void) xmlAddChild(n, elts.restarter);
+ (void) xmlAddChildList(n, elts.dependencies);
+ (void) xmlAddChildList(n, elts.dependents);
+ (void) xmlAddChild(n, elts.method_context);
+ (void) xmlAddChildList(n, elts.exec_methods);
+ (void) xmlAddChildList(n, elts.property_groups);
+ (void) xmlAddChild(n, elts.template);
+
+ if (selts->instances == NULL)
+ selts->instances = n;
+ else
+ (void) xmlAddSibling(selts->instances, n);
+ }
+}
+
+/*
+ * Return a service element for the given service.
+ */
+static xmlNodePtr
+export_service(scf_service_t *svc)
+{
+ xmlNodePtr snode;
+ struct entity_elts elts;
+ struct template_elts template_elts;
+ int ret;
+
+ snode = xmlNewNode(NULL, (xmlChar *)"service");
+ if (snode == NULL)
+ uu_die(emsg_create_xml);
+
+ /* Get & set name attribute */
+ if (scf_service_get_name(svc, exp_str, max_scf_name_len + 1) < 0)
+ scfdie();
+ safe_setprop(snode, name_attr, exp_str);
+
+ safe_setprop(snode, type_attr, "service");
+ safe_setprop(snode, "version", "0");
+
+ /* Acquire child elements. */
+ if (scf_iter_service_pgs(exp_pg_iter, svc) != SCF_SUCCESS)
+ scfdie();
+
+ (void) memset(&elts, 0, sizeof (elts));
+ (void) memset(&template_elts, 0, sizeof (template_elts));
+
+ while ((ret = scf_iter_next_pg(exp_pg_iter, exp_pg)) == 1) {
+ uint32_t flags;
+
+ if (scf_pg_get_flags(exp_pg, &flags) != 0)
+ scfdie();
+
+ if (flags & SCF_PG_FLAG_NONPERSISTENT)
+ continue;
+
+ if (scf_pg_get_type(exp_pg, exp_str, exp_str_sz) < 0)
+ scfdie();
+
+ if (strcmp(exp_str, SCF_GROUP_DEPENDENCY) == 0) {
+ export_dependency(exp_pg, &elts);
+ continue;
+ } else if (strcmp(exp_str, SCF_GROUP_METHOD) == 0) {
+ export_method(exp_pg, &elts);
+ continue;
+ } else if (strcmp(exp_str, scf_group_framework) == 0) {
+ if (scf_pg_get_name(exp_pg, exp_str,
+ max_scf_name_len + 1) < 0)
+ scfdie();
+
+ if (strcmp(exp_str, scf_pg_general) == 0) {
+ export_svc_general(exp_pg, &elts);
+ continue;
+ } else if (strcmp(exp_str, SCF_PG_METHOD_CONTEXT) ==
+ 0) {
+ export_method_context(exp_pg, &elts);
+ continue;
+ } else if (strcmp(exp_str, SCF_PG_DEPENDENTS) == 0) {
+ export_dependents(exp_pg, &elts);
+ continue;
+ }
+ } else if (strcmp(exp_str, SCF_GROUP_TEMPLATE) == 0) {
+ export_template(exp_pg, &elts, &template_elts);
+ continue;
+ }
+
+ export_pg(exp_pg, &elts);
+ }
+ if (ret == -1)
+ scfdie();
+
+ if (template_elts.common_name != NULL) {
+ elts.template = xmlNewNode(NULL, (xmlChar *)"template");
+ (void) xmlAddChild(elts.template, template_elts.common_name);
+ (void) xmlAddChild(elts.template, template_elts.description);
+ (void) xmlAddChild(elts.template, template_elts.documentation);
+ } else {
+ xmlFreeNode(template_elts.description);
+ xmlFreeNode(template_elts.documentation);
+ }
+
+ /* Iterate instances */
+ if (scf_iter_service_instances(exp_inst_iter, svc) != SCF_SUCCESS)
+ scfdie();
+
+ while ((ret = scf_iter_next_instance(exp_inst_iter, exp_inst)) == 1)
+ export_instance(exp_inst, &elts);
+ if (ret == -1)
+ scfdie();
+
+ /* Now add all of the accumulated elements in order. */
+ (void) xmlAddChild(snode, elts.create_default_instance);
+ (void) xmlAddChild(snode, elts.single_instance);
+ (void) xmlAddChild(snode, elts.restarter);
+ (void) xmlAddChildList(snode, elts.dependencies);
+ (void) xmlAddChildList(snode, elts.dependents);
+ (void) xmlAddChild(snode, elts.method_context);
+ (void) xmlAddChildList(snode, elts.exec_methods);
+ (void) xmlAddChildList(snode, elts.property_groups);
+ (void) xmlAddChildList(snode, elts.instances);
+ (void) xmlAddChild(snode, elts.stability);
+ (void) xmlAddChild(snode, elts.template);
+
+ return (snode);
+}
+
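+/*
+ * scf_walk_fmri() callback for lscf_service_export().  Allocates the export
+ * globals, builds a manifest service_bundle document for the walked service,
+ * and writes it to the file named by data (or to stdout if data is NULL).
+ */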
+static int
+export_callback(void *data, scf_walkinfo_t *wip)
+{
+ FILE *f;
+ xmlDocPtr doc;
+ xmlNodePtr sb;
+ int result;
+ char *filename = data;
+
+ if ((exp_inst = scf_instance_create(g_hndl)) == NULL ||
+ (exp_pg = scf_pg_create(g_hndl)) == NULL ||
+ (exp_prop = scf_property_create(g_hndl)) == NULL ||
+ (exp_val = scf_value_create(g_hndl)) == NULL ||
+ (exp_inst_iter = scf_iter_create(g_hndl)) == NULL ||
+ (exp_pg_iter = scf_iter_create(g_hndl)) == NULL ||
+ (exp_prop_iter = scf_iter_create(g_hndl)) == NULL ||
+ (exp_val_iter = scf_iter_create(g_hndl)) == NULL)
+ scfdie();
+
+ exp_str_sz = max_scf_len + 1;
+ exp_str = safe_malloc(exp_str_sz);
+
+ if (filename != NULL) {
+ errno = 0;
+ f = fopen(filename, "wb");
+ if (f == NULL) {
+ if (errno == 0)
+ uu_die(gettext("Could not open \"%s\": no free "
+ "stdio streams.\n"), filename);
+ else
+ uu_die(gettext("Could not open \"%s\""),
+ filename);
+ }
+ } else
+ f = stdout;
+
+ doc = xmlNewDoc((xmlChar *)"1.0");
+ if (doc == NULL)
+ uu_die(gettext("Could not create XML document.\n"));
+
+ if (xmlCreateIntSubset(doc, (xmlChar *)"service_bundle", NULL,
+ (xmlChar *)MANIFEST_DTD_PATH) == NULL)
+ uu_die(emsg_create_xml);
+
+ sb = xmlNewNode(NULL, (xmlChar *)"service_bundle");
+ if (sb == NULL)
+ uu_die(emsg_create_xml);
+ safe_setprop(sb, type_attr, "manifest");
+ safe_setprop(sb, name_attr, "export");
+ (void) xmlAddSibling(doc->children, sb);
+
+ (void) xmlAddChild(sb, export_service(wip->svc));
+
+ result = write_service_bundle(doc, f);
+
+ free(exp_str);
+ scf_iter_destroy(exp_val_iter);
+ scf_iter_destroy(exp_prop_iter);
+ scf_iter_destroy(exp_pg_iter);
+ scf_iter_destroy(exp_inst_iter);
+ scf_value_destroy(exp_val);
+ scf_property_destroy(exp_prop);
+ scf_pg_destroy(exp_pg);
+ scf_instance_destroy(exp_inst);
+
+ xmlFreeDoc(doc);
+
+ if (f != stdout)
+ (void) fclose(f);
+
+ return (result);
+}
+
+/*
+ * Get the service named by fmri, build an XML tree which represents it, and
+ * dump it into filename (or stdout if filename is NULL).
+ */
+int
+lscf_service_export(char *fmri, const char *filename)
+{
+ int ret, err;
+
+ lscf_prep_hndl();
+
+ err = 0;
+ if ((ret = scf_walk_fmri(g_hndl, 1, (char **)&fmri,
+ SCF_WALK_SERVICE | SCF_WALK_NOINSTANCE, export_callback,
+ (void *)filename, &err, semerr)) != 0) {
+ if (ret != -1)
+ semerr(gettext("Failed to walk instances: %s\n"),
+ scf_strerror(ret));
+ return (-1);
+ }
+
+ /*
+ * Error message has already been printed.
+ */
+ if (err != 0)
+ return (-1);
+
+ return (0);
+}
+
+
+/*
+ * Archive
+ */
+
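+/*
+ * Build a service_bundle element of type "archive" containing a service
+ * element for every service in the local scope except the legacy service.
+ */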
+static xmlNodePtr
+make_archive()
+{
+ xmlNodePtr sb;
+ scf_scope_t *scope;
+ scf_service_t *svc;
+ scf_iter_t *iter;
+ int r;
+
+ if ((scope = scf_scope_create(g_hndl)) == NULL ||
+ (svc = scf_service_create(g_hndl)) == NULL ||
+ (iter = scf_iter_create(g_hndl)) == NULL ||
+ (exp_inst = scf_instance_create(g_hndl)) == NULL ||
+ (exp_pg = scf_pg_create(g_hndl)) == NULL ||
+ (exp_prop = scf_property_create(g_hndl)) == NULL ||
+ (exp_val = scf_value_create(g_hndl)) == NULL ||
+ (exp_inst_iter = scf_iter_create(g_hndl)) == NULL ||
+ (exp_pg_iter = scf_iter_create(g_hndl)) == NULL ||
+ (exp_prop_iter = scf_iter_create(g_hndl)) == NULL ||
+ (exp_val_iter = scf_iter_create(g_hndl)) == NULL)
+ scfdie();
+
+ exp_str_sz = max_scf_len + 1;
+ exp_str = safe_malloc(exp_str_sz);
+
+ sb = xmlNewNode(NULL, (xmlChar *)"service_bundle");
+ if (sb == NULL)
+ uu_die(emsg_create_xml);
+ safe_setprop(sb, type_attr, "archive");
+ safe_setprop(sb, name_attr, "none");
+
+ if (scf_handle_get_scope(g_hndl, SCF_SCOPE_LOCAL, scope) != 0)
+ scfdie();
+ if (scf_iter_scope_services(iter, scope) != 0)
+ scfdie();
+
+ for (;;) {
+ r = scf_iter_next_service(iter, svc);
+ if (r == 0)
+ break;
+ if (r != 1)
+ scfdie();
+
+ if (scf_service_get_name(svc, exp_str,
+ max_scf_name_len + 1) < 0)
+ scfdie();
+
+ if (strcmp(exp_str, SCF_LEGACY_SERVICE) == 0)
+ continue;
+
+ xmlAddChild(sb, export_service(svc));
+ }
+
+ free(exp_str);
+
+ scf_iter_destroy(exp_val_iter);
+ scf_iter_destroy(exp_prop_iter);
+ scf_iter_destroy(exp_pg_iter);
+ scf_iter_destroy(exp_inst_iter);
+ scf_value_destroy(exp_val);
+ scf_property_destroy(exp_prop);
+ scf_pg_destroy(exp_pg);
+ scf_instance_destroy(exp_inst);
+ scf_iter_destroy(iter);
+ scf_service_destroy(svc);
+ scf_scope_destroy(scope);
+
+ return (sb);
+}
+
+int
+lscf_archive(const char *filename)
+{
+ FILE *f;
+ xmlDocPtr doc;
+ int result;
+
+ lscf_prep_hndl();
+
+ if (filename != NULL) {
+ errno = 0;
+ f = fopen(filename, "wb");
+ if (f == NULL) {
+ if (errno == 0)
+ uu_die(gettext("Could not open \"%s\": no free "
+ "stdio streams.\n"), filename);
+ else
+ uu_die(gettext("Could not open \"%s\""),
+ filename);
+ }
+ } else
+ f = stdout;
+
+ doc = xmlNewDoc((xmlChar *)"1.0");
+ if (doc == NULL)
+ uu_die(gettext("Could not create XML document.\n"));
+
+ if (xmlCreateIntSubset(doc, (xmlChar *)"service_bundle", NULL,
+ (xmlChar *)MANIFEST_DTD_PATH) == NULL)
+ uu_die(emsg_create_xml);
+
+ (void) xmlAddSibling(doc->children, make_archive());
+
+ result = write_service_bundle(doc, f);
+
+ xmlFreeDoc(doc);
+
+ if (f != stdout)
+ (void) fclose(f);
+
+ return (result);
+}
+
+
+/*
+ * "Extract" a profile.
+ */
+int
+lscf_profile_extract(const char *filename)
+{
+ FILE *f;
+ xmlDocPtr doc;
+ xmlNodePtr sb, snode, inode;
+ scf_scope_t *scope;
+ scf_service_t *svc;
+ scf_instance_t *inst;
+ scf_propertygroup_t *pg;
+ scf_property_t *prop;
+ scf_value_t *val;
+ scf_iter_t *siter, *iiter;
+ int r, s;
+ char *namebuf;
+ uint8_t b;
+ int result;
+
+ lscf_prep_hndl();
+
+ if (filename != NULL) {
+ errno = 0;
+ f = fopen(filename, "wb");
+ if (f == NULL) {
+ if (errno == 0)
+ uu_die(gettext("Could not open \"%s\": no "
+ "free stdio streams.\n"), filename);
+ else
+ uu_die(gettext("Could not open \"%s\""),
+ filename);
+ }
+ } else
+ f = stdout;
+
+ doc = xmlNewDoc((xmlChar *)"1.0");
+ if (doc == NULL)
+ uu_die(gettext("Could not create XML document.\n"));
+
+ if (xmlCreateIntSubset(doc, (xmlChar *)"service_bundle", NULL,
+ (xmlChar *)MANIFEST_DTD_PATH) == NULL)
+ uu_die(emsg_create_xml);
+
+ sb = xmlNewNode(NULL, (xmlChar *)"service_bundle");
+ if (sb == NULL)
+ uu_die(emsg_create_xml);
+ safe_setprop(sb, type_attr, "profile");
+ safe_setprop(sb, name_attr, "extract");
+ (void) xmlAddSibling(doc->children, sb);
+
+ if ((scope = scf_scope_create(g_hndl)) == NULL ||
+ (svc = scf_service_create(g_hndl)) == NULL ||
+ (inst = scf_instance_create(g_hndl)) == NULL ||
+ (pg = scf_pg_create(g_hndl)) == NULL ||
+ (prop = scf_property_create(g_hndl)) == NULL ||
+ (val = scf_value_create(g_hndl)) == NULL ||
+ (siter = scf_iter_create(g_hndl)) == NULL ||
+ (iiter = scf_iter_create(g_hndl)) == NULL)
+ scfdie();
+
+ if (scf_handle_get_local_scope(g_hndl, scope) != SCF_SUCCESS)
+ scfdie();
+
+ if (scf_iter_scope_services(siter, scope) != SCF_SUCCESS)
+ scfdie();
+
+ namebuf = safe_malloc(max_scf_name_len + 1);
+
+ while ((r = scf_iter_next_service(siter, svc)) == 1) {
+ if (scf_iter_service_instances(iiter, svc) != SCF_SUCCESS)
+ scfdie();
+
+ snode = xmlNewNode(NULL, (xmlChar *)"service");
+ if (snode == NULL)
+ uu_die(emsg_create_xml);
+
+ if (scf_service_get_name(svc, namebuf, max_scf_name_len + 1) <
+ 0)
+ scfdie();
+
+ safe_setprop(snode, name_attr, namebuf);
+
+ safe_setprop(snode, type_attr, "service");
+ safe_setprop(snode, "version", "0");
+
+ while ((s = scf_iter_next_instance(iiter, inst)) == 1) {
+ if (scf_instance_get_pg(inst, scf_pg_general, pg) !=
+ SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ if (g_verbose) {
+ ssize_t len;
+ char *fmri;
+
+ len =
+ scf_instance_to_fmri(inst, NULL, 0);
+ if (len < 0)
+ scfdie();
+
+ fmri = safe_malloc(len + 1);
+
+ if (scf_instance_to_fmri(inst, fmri,
+ len + 1) < 0)
+ scfdie();
+
+ warn("Instance %s has no \"%s\" "
+ "property group.\n", fmri,
+ scf_pg_general);
+
+ free(fmri);
+ }
+
+ continue;
+ }
+
+ if (pg_get_prop(pg, scf_property_enabled, prop) != 0 ||
+ prop_check_type(prop, SCF_TYPE_BOOLEAN) != 0 ||
+ prop_get_val(prop, val) != 0)
+ continue;
+
+ inode = xmlNewChild(snode, NULL, (xmlChar *)"instance",
+ NULL);
+ if (inode == NULL)
+ uu_die(emsg_create_xml);
+
+ if (scf_instance_get_name(inst, namebuf,
+ max_scf_name_len + 1) < 0)
+ scfdie();
+
+ safe_setprop(inode, name_attr, namebuf);
+
+ if (scf_value_get_boolean(val, &b) != SCF_SUCCESS)
+ scfdie();
+
+ safe_setprop(inode, enabled_attr, b ? true : false);
+ }
+ if (s < 0)
+ scfdie();
+
+ if (snode->children != NULL)
+ xmlAddChild(sb, snode);
+ else
+ xmlFreeNode(snode);
+ }
+ if (r < 0)
+ scfdie();
+
+ free(namebuf);
+
+ result = write_service_bundle(doc, f);
+
+ xmlFreeDoc(doc);
+
+ if (f != stdout)
+ (void) fclose(f);
+
+ return (result);
+}
+
+
+/*
+ * Entity manipulation commands
+ */
+
+/*
+ * Entity selection. If no entity is selected, then the current scope is in
+ * cur_scope, and cur_svc and cur_inst are NULL. When a service is selected,
+ * only cur_inst is NULL, and when an instance is selected, none are NULL.
+ * When the snaplevel of a snapshot is selected, cur_level, cur_snap, and
+ * cur_inst will be non-NULL.
+ */
+
+/*
+ * Returns 0 on success, or 1 if the name doesn't match an instance and so
+ * may be an absolute FMRI.  Dies on other failures.
+ */
+static int
+select_inst(const char *name)
+{
+ scf_instance_t *inst;
+ scf_error_t err;
+
+ assert(cur_svc != NULL);
+
+ inst = scf_instance_create(g_hndl);
+ if (inst == NULL)
+ scfdie();
+
+ if (scf_service_get_instance(cur_svc, name, inst) == SCF_SUCCESS) {
+ cur_inst = inst;
+ return (0);
+ }
+
+ err = scf_error();
+ if (err != SCF_ERROR_NOT_FOUND && err != SCF_ERROR_INVALID_ARGUMENT)
+ scfdie();
+
+ scf_instance_destroy(inst);
+ return (1);
+}
+
+/* Returns as above. */
+static int
+select_svc(const char *name)
+{
+ scf_service_t *svc;
+ scf_error_t err;
+
+ assert(cur_scope != NULL);
+
+ svc = scf_service_create(g_hndl);
+ if (svc == NULL)
+ scfdie();
+
+ if (scf_scope_get_service(cur_scope, name, svc) == SCF_SUCCESS) {
+ cur_svc = svc;
+ return (0);
+ }
+
+ err = scf_error();
+ if (err != SCF_ERROR_NOT_FOUND && err != SCF_ERROR_INVALID_ARGUMENT)
+ scfdie();
+
+ scf_service_destroy(svc);
+ return (1);
+}
+
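+/*
+ * scf_walk_fmri() callback for lscf_select(): decode the matched FMRI into
+ * fresh scope, service, and (if present) instance handles and make them the
+ * current selection.
+ */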
+/* ARGSUSED */
+static int
+select_callback(void *unused, scf_walkinfo_t *wip)
+{
+ scf_instance_t *inst;
+ scf_service_t *svc;
+ scf_scope_t *scope;
+
+ if (wip->inst != NULL) {
+ if ((scope = scf_scope_create(g_hndl)) == NULL ||
+ (svc = scf_service_create(g_hndl)) == NULL ||
+ (inst = scf_instance_create(g_hndl)) == NULL)
+ scfdie();
+
+ if (scf_handle_decode_fmri(g_hndl, wip->fmri, scope, svc,
+ inst, NULL, NULL, SCF_DECODE_FMRI_EXACT) != SCF_SUCCESS)
+ scfdie();
+ } else {
+ assert(wip->svc != NULL);
+
+ if ((scope = scf_scope_create(g_hndl)) == NULL ||
+ (svc = scf_service_create(g_hndl)) == NULL)
+ scfdie();
+
+ if (scf_handle_decode_fmri(g_hndl, wip->fmri, scope, svc,
+ NULL, NULL, NULL, SCF_DECODE_FMRI_EXACT) != SCF_SUCCESS)
+ scfdie();
+
+ inst = NULL;
+ }
+
+ /* Clear out the current selection */
+ assert(cur_scope != NULL);
+ scf_scope_destroy(cur_scope);
+ scf_service_destroy(cur_svc);
+ scf_instance_destroy(cur_inst);
+
+ cur_scope = scope;
+ cur_svc = svc;
+ cur_inst = inst;
+
+ return (0);
+}
+
+void
+lscf_select(const char *fmri)
+{
+ int ret, err;
+
+ lscf_prep_hndl();
+
+ if (cur_snap != NULL) {
+ struct snaplevel *elt;
+ char *buf;
+
+ /* Error unless name is that of the next level. */
+ elt = uu_list_next(cur_levels, cur_elt);
+ if (elt == NULL) {
+ semerr(gettext("No children.\n"));
+ return;
+ }
+
+ buf = safe_malloc(max_scf_name_len + 1);
+
+ if (scf_snaplevel_get_instance_name(elt->sl, buf,
+ max_scf_name_len + 1) < 0)
+ scfdie();
+
+ if (strcmp(buf, fmri) != 0) {
+ semerr(gettext("No such child.\n"));
+ free(buf);
+ return;
+ }
+
+ free(buf);
+
+ cur_elt = elt;
+ cur_level = elt->sl;
+ return;
+ }
+
+ /*
+ * Special case for 'svc:', which takes the user to the scope level.
+ */
+ if (strcmp(fmri, "svc:") == 0) {
+ scf_instance_destroy(cur_inst);
+ scf_service_destroy(cur_svc);
+ cur_inst = NULL;
+ cur_svc = NULL;
+ return;
+ }
+
+ /*
+ * Special case for ':properties'. This appears as part of 'list' but
+ * can't be selected. Give a more helpful error message in this case.
+ */
+ if (strcmp(fmri, ":properties") == 0) {
+ semerr(gettext(":properties is not an entity. Try 'listprop' "
+ "to list properties.\n"));
+ return;
+ }
+
+ /*
+ * First try the argument as relative to the current selection.
+ */
+ if (cur_inst != NULL) {
+ /* EMPTY */;
+ } else if (cur_svc != NULL) {
+ if (select_inst(fmri) != 1)
+ return;
+ } else {
+ if (select_svc(fmri) != 1)
+ return;
+ }
+
+ err = 0;
+ if ((ret = scf_walk_fmri(g_hndl, 1, (char **)&fmri, SCF_WALK_SERVICE,
+ select_callback, NULL, &err, semerr)) != 0) {
+ semerr(gettext("Failed to walk instances: %s\n"),
+ scf_strerror(ret));
+ }
+}
+
+void
+lscf_unselect(void)
+{
+ lscf_prep_hndl();
+
+ if (cur_snap != NULL) {
+ struct snaplevel *elt;
+
+ elt = uu_list_prev(cur_levels, cur_elt);
+ if (elt == NULL) {
+ semerr(gettext("No parent levels.\n"));
+ } else {
+ cur_elt = elt;
+ cur_level = elt->sl;
+ }
+ } else if (cur_inst != NULL) {
+ scf_instance_destroy(cur_inst);
+ cur_inst = NULL;
+ } else if (cur_svc != NULL) {
+ scf_service_destroy(cur_svc);
+ cur_svc = NULL;
+ } else {
+ semerr(gettext("Cannot unselect at scope level.\n"));
+ }
+}
+
+/*
+ * Return the FMRI of the current selection, for the prompt.
+ */
+void
+lscf_get_selection_str(char *buf, size_t bufsz)
+{
+ char *cp;
+ ssize_t fmrilen, szret;
+ boolean_t deleted = B_FALSE;
+
+ if (g_hndl == NULL) {
+ (void) strlcpy(buf, "svc:", bufsz);
+ return;
+ }
+
+ if (cur_level != NULL) {
+ assert(cur_snap != NULL);
+
+ /* [ snapshot ] FMRI [: instance ] */
+ assert(bufsz >= 1 + max_scf_name_len + 1 + max_scf_fmri_len
+ + 2 + max_scf_name_len + 1 + 1);
+
+ buf[0] = '[';
+
+ szret = scf_snapshot_get_name(cur_snap, buf + 1,
+ max_scf_name_len + 1);
+ if (szret < 0) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+
+ goto snap_deleted;
+ }
+
+ (void) strcat(buf, "]svc:/");
+
+ cp = strchr(buf, '\0');
+
+ szret = scf_snaplevel_get_service_name(cur_level, cp,
+ max_scf_name_len + 1);
+ if (szret < 0) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+
+ goto snap_deleted;
+ }
+
+ cp = strchr(cp, '\0');
+
+ if (snaplevel_is_instance(cur_level)) {
+ *cp++ = ':';
+
+ if (scf_snaplevel_get_instance_name(cur_level, cp,
+ max_scf_name_len + 1) < 0) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+
+ goto snap_deleted;
+ }
+ } else {
+ *cp++ = '[';
+ *cp++ = ':';
+
+ if (scf_instance_get_name(cur_inst, cp,
+ max_scf_name_len + 1) < 0) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+
+ goto snap_deleted;
+ }
+
+ (void) strcat(buf, "]");
+ }
+
+ return;
+
+snap_deleted:
+ deleted = B_TRUE;
+ free(buf);
+ unselect_cursnap();
+ }
+
+ assert(cur_snap == NULL);
+
+ if (cur_inst != NULL) {
+ assert(cur_svc != NULL);
+ assert(cur_scope != NULL);
+
+ fmrilen = scf_instance_to_fmri(cur_inst, buf, bufsz);
+ if (fmrilen >= 0) {
+ assert(fmrilen < bufsz);
+ if (deleted)
+ warn(emsg_deleted);
+ return;
+ }
+
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+
+ deleted = B_TRUE;
+
+ scf_instance_destroy(cur_inst);
+ cur_inst = NULL;
+ }
+
+ if (cur_svc != NULL) {
+ assert(cur_scope != NULL);
+
+ szret = scf_service_to_fmri(cur_svc, buf, bufsz);
+ if (szret >= 0) {
+ assert(szret < bufsz);
+ if (deleted)
+ warn(emsg_deleted);
+ return;
+ }
+
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+
+ deleted = B_TRUE;
+ scf_service_destroy(cur_svc);
+ cur_svc = NULL;
+ }
+
+ assert(cur_scope != NULL);
+ fmrilen = scf_scope_to_fmri(cur_scope, buf, bufsz);
+
+ if (fmrilen < 0)
+ scfdie();
+
+ assert(fmrilen < bufsz);
+ if (deleted)
+ warn(emsg_deleted);
+}
+
+/*
+ * Entity listing. Entities and colon namespaces (e.g., :properties and
+ * :statistics) are listed for the current selection.
+ */
+void
+lscf_list(const char *pattern)
+{
+ scf_iter_t *iter;
+ char *buf;
+ int ret;
+
+ lscf_prep_hndl();
+
+ if (cur_level != NULL) {
+ struct snaplevel *elt;
+
+ (void) fputs(COLON_NAMESPACES, stdout);
+
+ elt = uu_list_next(cur_levels, cur_elt);
+ if (elt == NULL)
+ return;
+
+ /*
+ * For now, we know that the next level is an instance. But
+ * if we ever have multiple scopes, this could be complicated.
+ */
+ buf = safe_malloc(max_scf_name_len + 1);
+ if (scf_snaplevel_get_instance_name(elt->sl, buf,
+ max_scf_name_len + 1) >= 0) {
+ (void) puts(buf);
+ } else {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+ }
+
+ free(buf);
+
+ return;
+ }
+
+ if (cur_inst != NULL) {
+ (void) fputs(COLON_NAMESPACES, stdout);
+ return;
+ }
+
+ iter = scf_iter_create(g_hndl);
+ if (iter == NULL)
+ scfdie();
+
+ buf = safe_malloc(max_scf_name_len + 1);
+
+ if (cur_svc != NULL) {
+ /* List the instances in this service. */
+ scf_instance_t *inst;
+
+ inst = scf_instance_create(g_hndl);
+ if (inst == NULL)
+ scfdie();
+
+ if (scf_iter_service_instances(iter, cur_svc) == 0) {
+ safe_printf(COLON_NAMESPACES);
+
+ for (;;) {
+ ret = scf_iter_next_instance(iter, inst);
+ if (ret == 0)
+ break;
+ if (ret != 1) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+
+ break;
+ }
+
+ if (scf_instance_get_name(inst, buf,
+ max_scf_name_len + 1) >= 0) {
+ if (pattern == NULL ||
+ fnmatch(pattern, buf, 0) == 0)
+ (void) puts(buf);
+ } else {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+ }
+ }
+ } else {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+ }
+
+ scf_instance_destroy(inst);
+ } else {
+ /* List the services in this scope. */
+ scf_service_t *svc;
+
+ assert(cur_scope != NULL);
+
+ svc = scf_service_create(g_hndl);
+ if (svc == NULL)
+ scfdie();
+
+ if (scf_iter_scope_services(iter, cur_scope) != SCF_SUCCESS)
+ scfdie();
+
+ for (;;) {
+ ret = scf_iter_next_service(iter, svc);
+ if (ret == 0)
+ break;
+ if (ret != 1)
+ scfdie();
+
+ if (scf_service_get_name(svc, buf,
+ max_scf_name_len + 1) >= 0) {
+ if (pattern == NULL ||
+ fnmatch(pattern, buf, 0) == 0)
+ safe_printf("%s\n", buf);
+ } else {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+ }
+ }
+
+ scf_service_destroy(svc);
+ }
+
+ free(buf);
+ scf_iter_destroy(iter);
+}
+
+/*
+ * Entity addition. Creates an empty entity in the current selection.
+ */
+void
+lscf_add(const char *name)
+{
+ lscf_prep_hndl();
+
+ if (cur_snap != NULL) {
+ semerr(emsg_cant_modify_snapshots);
+ } else if (cur_inst != NULL) {
+ semerr(gettext("Cannot add entities to an instance.\n"));
+ } else if (cur_svc != NULL) {
+
+ if (scf_service_add_instance(cur_svc, name, NULL) !=
+ SCF_SUCCESS) {
+ switch (scf_error()) {
+ case SCF_ERROR_INVALID_ARGUMENT:
+ semerr(gettext("Invalid name.\n"));
+ break;
+
+ case SCF_ERROR_EXISTS:
+ semerr(gettext("Instance already exists.\n"));
+ break;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ semerr(emsg_permission_denied);
+ break;
+
+ default:
+ scfdie();
+ }
+ }
+ } else {
+ assert(cur_scope != NULL);
+
+ if (scf_scope_add_service(cur_scope, name, NULL) !=
+ SCF_SUCCESS) {
+ switch (scf_error()) {
+ case SCF_ERROR_INVALID_ARGUMENT:
+ semerr(gettext("Invalid name.\n"));
+ break;
+
+ case SCF_ERROR_EXISTS:
+ semerr(gettext("Service already exists.\n"));
+ break;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ semerr(emsg_permission_denied);
+ break;
+
+ case SCF_ERROR_BACKEND_READONLY:
+ semerr(emsg_read_only);
+ break;
+
+ default:
+ scfdie();
+ }
+ }
+ }
+}
+
+/*
+ * Entity deletion.
+ */
+
+/*
+ * Delete the property group <fmri>/:properties/<name>. Returns
+ * SCF_ERROR_NONE on success (or if the entity is not found),
+ * SCF_ERROR_INVALID_ARGUMENT if the fmri is bad, SCF_ERROR_TYPE_MISMATCH if
+ * the pg is the wrong type, or SCF_ERROR_PERMISSION_DENIED if permission was
+ * denied.
+ */
+static scf_error_t
+delete_dependency_pg(const char *fmri, const char *name)
+{
+ void *entity = NULL;
+ int isservice;
+ scf_propertygroup_t *pg = NULL;
+ scf_error_t result;
+ char *pgty;
+
+ result = fmri_to_entity(g_hndl, fmri, &entity, &isservice);
+ switch (result) {
+ case SCF_ERROR_NONE:
+ break;
+
+ case SCF_ERROR_NO_MEMORY:
+ uu_die(gettext("Out of memory.\n"));
+ /* NOTREACHED */
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ return (SCF_ERROR_INVALID_ARGUMENT);
+
+ case SCF_ERROR_NOT_FOUND:
+ result = SCF_ERROR_NONE;
+ goto out;
+
+ default:
+ bad_error("fmri_to_entity", result);
+ }
+
+ pg = scf_pg_create(g_hndl);
+ if (pg == NULL)
+ scfdie();
+
+ if (entity_get_pg(entity, isservice, name, pg) != 0) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ result = SCF_ERROR_NONE;
+ goto out;
+ }
+
+ pgty = safe_malloc(max_scf_pg_type_len + 1);
+
+ if (scf_pg_get_type(pg, pgty, max_scf_pg_type_len + 1) < 0)
+ scfdie();
+
+ if (strcmp(pgty, SCF_GROUP_DEPENDENCY) != 0) {
+ result = SCF_ERROR_TYPE_MISMATCH;
+ free(pgty);
+ goto out;
+ }
+
+ free(pgty);
+
+ if (scf_pg_delete(pg) == 0) {
+ scf_instance_t *inst = NULL;
+ scf_iter_t *iter = NULL;
+ char *name_buf = NULL;
+
+ result = SCF_ERROR_NONE;
+
+ if (isservice) {
+ if ((inst = scf_instance_create(g_hndl)) == NULL ||
+ (iter = scf_iter_create(g_hndl)) == NULL)
+ scfdie();
+
+ name_buf = safe_malloc(max_scf_name_len + 1);
+ }
+ (void) refresh_entity(isservice, entity, fmri, inst, iter,
+ name_buf);
+
+ free(name_buf);
+ scf_iter_destroy(iter);
+ scf_instance_destroy(inst);
+ } else if (scf_error() != SCF_ERROR_PERMISSION_DENIED)
+ scfdie();
+ else
+ result = SCF_ERROR_PERMISSION_DENIED;
+
+out:
+ scf_pg_destroy(pg);
+ if (entity != NULL)
+ entity_destroy(entity, isservice);
+
+ return (result);
+}
+
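+/*
+ * Given a dependents property group, delete the dependency property group
+ * named by each property from the entity named by that property's value.
+ * Returns 0 on success, or -1 if the pg is not of framework type.
+ */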
+static int
+delete_dependents(scf_propertygroup_t *pg)
+{
+ char *pgty, *name, *fmri;
+ scf_property_t *prop;
+ scf_value_t *val;
+ scf_iter_t *iter;
+ int r;
+ scf_error_t err;
+
+ /* Verify that the pg has the correct type. */
+ pgty = safe_malloc(max_scf_pg_type_len + 1);
+ if (scf_pg_get_type(pg, pgty, max_scf_pg_type_len + 1) < 0)
+ scfdie();
+
+ if (strcmp(pgty, scf_group_framework) != 0) {
+ if (g_verbose) {
+ fmri = safe_malloc(max_scf_fmri_len + 1);
+ if (scf_pg_to_fmri(pg, fmri, max_scf_fmri_len + 1) < 0)
+ scfdie();
+
+ warn(gettext("Property group %s is not of expected "
+ "type %s.\n"), fmri, scf_group_framework);
+
+ free(fmri);
+ }
+
+ free(pgty);
+ return (-1);
+ }
+
+ free(pgty);
+
+ /* Map delete_dependency_pg onto the properties. */
+ if ((prop = scf_property_create(g_hndl)) == NULL ||
+ (val = scf_value_create(g_hndl)) == NULL ||
+ (iter = scf_iter_create(g_hndl)) == NULL)
+ scfdie();
+
+ if (scf_iter_pg_properties(iter, pg) != SCF_SUCCESS)
+ scfdie();
+
+ name = safe_malloc(max_scf_name_len + 1);
+ fmri = safe_malloc(max_scf_fmri_len + 2);
+
+ while ((r = scf_iter_next_property(iter, prop)) == 1) {
+ scf_type_t ty;
+
+ if (scf_property_get_name(prop, name, max_scf_name_len + 1) < 0)
+ scfdie();
+
+ if (scf_property_type(prop, &ty) != SCF_SUCCESS)
+ scfdie();
+
+ if ((ty != SCF_TYPE_ASTRING &&
+ prop_check_type(prop, SCF_TYPE_FMRI) != 0) ||
+ prop_get_val(prop, val) != 0)
+ continue;
+
+ if (scf_value_get_astring(val, fmri, max_scf_fmri_len + 2) < 0)
+ scfdie();
+
+ err = delete_dependency_pg(fmri, name);
+ if (err == SCF_ERROR_INVALID_ARGUMENT && g_verbose) {
+ if (scf_property_to_fmri(prop, fmri,
+ max_scf_fmri_len + 2) < 0)
+ scfdie();
+
+ warn(gettext("Value of %s is not a valid FMRI.\n"),
+ fmri);
+ } else if (err == SCF_ERROR_TYPE_MISMATCH && g_verbose) {
+ warn(gettext("Property group \"%s\" of entity \"%s\" "
+ "does not have dependency type.\n"), name, fmri);
+ } else if (err == SCF_ERROR_PERMISSION_DENIED && g_verbose) {
+ warn(gettext("Could not delete property group \"%s\" "
+ "of entity \"%s\" (permission denied).\n"), name,
+ fmri);
+ }
+ }
+ if (r == -1)
+ scfdie();
+
+ scf_value_destroy(val);
+ scf_property_destroy(prop);
+
+ return (0);
+}
+
+/*
+ * Returns 1 if the instance may be running, and 0 otherwise.
+ */
+static int
+inst_is_running(scf_instance_t *inst)
+{
+ scf_propertygroup_t *pg;
+ scf_property_t *prop;
+ scf_value_t *val;
+ char buf[MAX_SCF_STATE_STRING_SZ];
+ int ret = 0;
+ ssize_t szret;
+
+ if ((pg = scf_pg_create(g_hndl)) == NULL ||
+ (prop = scf_property_create(g_hndl)) == NULL ||
+ (val = scf_value_create(g_hndl)) == NULL)
+ scfdie();
+
+ if (scf_instance_get_pg(inst, SCF_PG_RESTARTER, pg) != SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+ goto out;
+ }
+
+ if (pg_get_prop(pg, SCF_PROPERTY_STATE, prop) != 0 ||
+ prop_check_type(prop, SCF_TYPE_ASTRING) != 0 ||
+ prop_get_val(prop, val) != 0)
+ goto out;
+
+ szret = scf_value_get_astring(val, buf, sizeof (buf));
+ assert(szret >= 0);
+
+ ret = (strcmp(buf, SCF_STATE_STRING_ONLINE) == 0 ||
+ strcmp(buf, SCF_STATE_STRING_DEGRADED) == 0) ? 1 : 0;
+
+out:
+ scf_value_destroy(val);
+ scf_property_destroy(prop);
+ scf_pg_destroy(pg);
+ return (ret);
+}
+
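+/*
+ * Delete an instance.  Unless force is set, refuse if the instance appears
+ * to be running.  Dependency pgs in dependent entities are cleaned up first.
+ * Returns 0 on success, -1 on failure.
+ */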
+static int
+lscf_instance_delete(scf_instance_t *inst, int force)
+{
+ scf_propertygroup_t *pg;
+
+ /* If we're not forcing and the instance is running, refuse. */
+ if (!force && inst_is_running(inst)) {
+ char *fmri;
+
+ fmri = safe_malloc(max_scf_fmri_len + 1);
+
+ if (scf_instance_to_fmri(inst, fmri, max_scf_fmri_len + 1) < 0)
+ scfdie();
+
+ semerr(gettext("Instance %s may be running. "
+ "Use delete -f if it is not.\n"), fmri);
+
+ free(fmri);
+ return (-1);
+ }
+
+ pg = scf_pg_create(g_hndl);
+ if (pg == NULL)
+ scfdie();
+
+ if (scf_instance_get_pg(inst, SCF_PG_DEPENDENTS, pg) == SCF_SUCCESS)
+ (void) delete_dependents(pg);
+ else if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ scf_pg_destroy(pg);
+
+ if (scf_instance_delete(inst) != SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_PERMISSION_DENIED)
+ scfdie();
+
+ semerr(emsg_permission_denied);
+
+ return (-1);
+ }
+
+ return (0);
+}
+
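+/*
+ * Delete a service: delete each of its instances, clean up dependency pgs in
+ * dependent services, and then delete the service itself.  Returns 0 on
+ * success, -1 on failure.
+ */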
+static int
+lscf_service_delete(scf_service_t *svc, int force)
+{
+ int r;
+ scf_instance_t *inst;
+ scf_propertygroup_t *pg;
+ scf_iter_t *iter;
+
+ if ((inst = scf_instance_create(g_hndl)) == NULL ||
+ (pg = scf_pg_create(g_hndl)) == NULL ||
+ (iter = scf_iter_create(g_hndl)) == NULL)
+ scfdie();
+
+ if (scf_iter_service_instances(iter, svc) != SCF_SUCCESS)
+ scfdie();
+
+ for (r = scf_iter_next_instance(iter, inst);
+ r == 1;
+ r = scf_iter_next_instance(iter, inst)) {
+ if (lscf_instance_delete(inst, force) == -1) {
+ scf_iter_destroy(iter);
+ scf_pg_destroy(pg);
+ scf_instance_destroy(inst);
+ return (-1);
+ }
+ }
+
+ if (r != 0)
+ scfdie();
+
+ /* Delete dependency property groups in dependent services. */
+ if (scf_service_get_pg(svc, SCF_PG_DEPENDENTS, pg) == SCF_SUCCESS)
+ (void) delete_dependents(pg);
+ else if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ scf_iter_destroy(iter);
+ scf_pg_destroy(pg);
+ scf_instance_destroy(inst);
+
+ if (r != 0)
+ return (-1);
+
+ if (scf_service_delete(svc) == SCF_SUCCESS)
+ return (0);
+
+ if (scf_error() != SCF_ERROR_PERMISSION_DENIED)
+ scfdie();
+
+ semerr(emsg_permission_denied);
+ return (-1);
+}
+
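+/*
+ * scf_walk_fmri() callback for lscf_delete(): delete the matched instance or
+ * service.  data carries the force flag.
+ */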
+static int
+delete_callback(void *data, scf_walkinfo_t *wip)
+{
+ int force = (int)data;
+
+ if (wip->inst != NULL)
+ (void) lscf_instance_delete(wip->inst, force);
+ else
+ (void) lscf_service_delete(wip->svc, force);
+
+ return (0);
+}
+
+void
+lscf_delete(const char *fmri, int force)
+{
+ scf_service_t *svc;
+ scf_instance_t *inst;
+ int ret;
+
+ lscf_prep_hndl();
+
+ if (cur_snap != NULL) {
+ if (!snaplevel_is_instance(cur_level)) {
+ char *buf;
+
+ buf = safe_malloc(max_scf_name_len + 1);
+ if (scf_instance_get_name(cur_inst, buf,
+ max_scf_name_len + 1) >= 0) {
+ if (strcmp(buf, fmri) == 0) {
+ semerr(emsg_cant_modify_snapshots);
+ free(buf);
+ return;
+ }
+ } else if (scf_error() != SCF_ERROR_DELETED) {
+ scfdie();
+ }
+ free(buf);
+ }
+ } else if (cur_inst != NULL) {
+ /* EMPTY */;
+ } else if (cur_svc != NULL) {
+ inst = scf_instance_create(g_hndl);
+ if (inst == NULL)
+ scfdie();
+
+ if (scf_service_get_instance(cur_svc, fmri, inst) ==
+ SCF_SUCCESS) {
+ (void) lscf_instance_delete(inst, force);
+ scf_instance_destroy(inst);
+ return;
+ }
+
+ if (scf_error() != SCF_ERROR_NOT_FOUND &&
+ scf_error() != SCF_ERROR_INVALID_ARGUMENT)
+ scfdie();
+
+ scf_instance_destroy(inst);
+ } else {
+ assert(cur_scope != NULL);
+
+ svc = scf_service_create(g_hndl);
+ if (svc == NULL)
+ scfdie();
+
+ if (scf_scope_get_service(cur_scope, fmri, svc) ==
+ SCF_SUCCESS) {
+ (void) lscf_service_delete(svc, force);
+ scf_service_destroy(svc);
+ return;
+ }
+
+ if (scf_error() != SCF_ERROR_NOT_FOUND &&
+ scf_error() != SCF_ERROR_INVALID_ARGUMENT)
+ scfdie();
+
+ scf_service_destroy(svc);
+ }
+
+ /*
+ * Match FMRI to entity.
+ */
+ if ((ret = scf_walk_fmri(g_hndl, 1, (char **)&fmri, SCF_WALK_SERVICE,
+ delete_callback, (void *)force, NULL, semerr)) != 0) {
+ semerr(gettext("Failed to walk instances: %s\n"),
+ scf_strerror(ret));
+ }
+}
+
+
+
+/*
+ * :properties commands. These all end with "pg" or "prop" and generally
+ * operate on the currently selected entity.
+ */
+
+/*
+ * Property listing. List the property groups, properties, their types and
+ * their values for the currently selected entity.
+ */
+static void
+list_pg_info(const scf_propertygroup_t *pg, const char *name, size_t namewidth)
+{
+ char *buf;
+ uint32_t flags;
+
+ buf = safe_malloc(max_scf_pg_type_len + 1);
+
+ if (scf_pg_get_type(pg, buf, max_scf_pg_type_len + 1) < 0)
+ scfdie();
+
+ if (scf_pg_get_flags(pg, &flags) != SCF_SUCCESS)
+ scfdie();
+
+ safe_printf("%-*s %s", namewidth, name, buf);
+
+ if (flags & SCF_PG_FLAG_NONPERSISTENT)
+ safe_printf("\tNONPERSISTENT");
+
+ safe_printf("\n");
+
+ free(buf);
+}
+
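+/*
+ * Returns B_TRUE if prop has more than one value, and B_FALSE otherwise
+ * (including when it has no values at all).
+ */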
+static boolean_t
+prop_has_multiple_values(const scf_property_t *prop, scf_value_t *val)
+{
+ if (scf_property_get_value(prop, val) == 0) {
+ return (B_FALSE);
+ } else {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ return (B_FALSE);
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ return (B_TRUE);
+ default:
+ scfdie();
+ /*NOTREACHED*/
+ }
+ }
+}
+
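+/*
+ * Print a property's name, type, and values on one line, quoting string
+ * values which contain whitespace or other special characters.
+ */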
+static void
+list_prop_info(const scf_property_t *prop, const char *name, size_t len)
+{
+ scf_iter_t *iter;
+ scf_value_t *val;
+ const char *type;
+ int multiple_strings = 0;
+ int ret;
+
+ if ((iter = scf_iter_create(g_hndl)) == NULL ||
+ (val = scf_value_create(g_hndl)) == NULL)
+ scfdie();
+
+ type = prop_to_typestr(prop);
+ assert(type != NULL);
+
+ safe_printf("%-*s %-7s ", len, name, type);
+
+ if (prop_has_multiple_values(prop, val) &&
+ (scf_value_type(val) == SCF_TYPE_ASTRING ||
+ scf_value_type(val) == SCF_TYPE_USTRING))
+ multiple_strings = 1;
+
+ if (scf_iter_property_values(iter, prop) != SCF_SUCCESS)
+ scfdie();
+
+ while ((ret = scf_iter_next_value(iter, val)) == 1) {
+ char *buf;
+ ssize_t vlen, szret;
+
+ vlen = scf_value_get_as_string(val, NULL, 0);
+ if (vlen < 0)
+ scfdie();
+
+ buf = safe_malloc(vlen + 1);
+
+ szret = scf_value_get_as_string(val, buf, vlen + 1);
+ if (szret < 0)
+ scfdie();
+ assert(szret <= vlen);
+
+ /* This is to be human-readable, so don't use CHARS_TO_QUOTE */
+ if (multiple_strings || strpbrk(buf, " \t\n\"()") != NULL) {
+ safe_printf(" \"");
+ (void) quote_and_print(buf, stdout);
+ (void) putchar('"');
+ if (ferror(stdout)) {
+ (void) putchar('\n');
+ uu_die(gettext("Error writing to stdout.\n"));
+ }
+ } else {
+ safe_printf(" %s", buf);
+ }
+
+ free(buf);
+ }
+ if (ret != 0)
+ scfdie();
+
+ if (putchar('\n') != '\n')
+ uu_die(gettext("Could not output newline"));
+}
+
+static void
+listprop(const char *pattern, int only_pgs)
+{
+ scf_propertygroup_t *pg;
+ scf_property_t *prop;
+ scf_iter_t *iter, *piter;
+ char *pgnbuf, *prnbuf, *ppnbuf;
+
+ void **objects;
+ char **names;
+ int allocd, i;
+
+ int ret;
+ ssize_t pgnlen, prnlen, szret;
+ size_t max_len = 0;
+
+ if (cur_svc == NULL && cur_inst == NULL) {
+ semerr(emsg_entity_not_selected);
+ return;
+ }
+
+ if ((pg = scf_pg_create(g_hndl)) == NULL ||
+ (prop = scf_property_create(g_hndl)) == NULL ||
+ (iter = scf_iter_create(g_hndl)) == NULL ||
+ (piter = scf_iter_create(g_hndl)) == NULL)
+ scfdie();
+
+ prnbuf = safe_malloc(max_scf_name_len + 1);
+
+ if (cur_level != NULL)
+ ret = scf_iter_snaplevel_pgs(iter, cur_level);
+ else if (cur_inst != NULL)
+ ret = scf_iter_instance_pgs(iter, cur_inst);
+ else
+ ret = scf_iter_service_pgs(iter, cur_svc);
+ if (ret != 0) {
+ if (scf_error() == SCF_ERROR_DELETED)
+ scfdie();
+ return;
+ }
+
+ /*
+ * We want to only list items which match pattern, and we want the
+ * second column to line up, so during the first pass we'll save
+ * matching items & their names in objects and names, computing the
+ * maximum name length as we go, and then we'll print them out.
+ *
+ * Note: We always keep an extra slot available so the array can be
+ * NULL-terminated.
+ */
+ i = 0;
+ allocd = 1;
+ objects = safe_malloc(sizeof (*objects));
+ names = safe_malloc(sizeof (*names));
+
+ while ((ret = scf_iter_next_pg(iter, pg)) == 1) {
+ int new_pg = 0;
+
+ pgnlen = scf_pg_get_name(pg, NULL, 0);
+ if (pgnlen < 0)
+ scfdie();
+
+ pgnbuf = safe_malloc(pgnlen + 1);
+
+ szret = scf_pg_get_name(pg, pgnbuf, pgnlen + 1);
+ if (szret < 0)
+ scfdie();
+ assert(szret <= pgnlen);
+
+ if (pattern == NULL ||
+ fnmatch(pattern, pgnbuf, 0) == 0) {
+ if (i+1 >= allocd) {
+ allocd *= 2;
+ objects = realloc(objects,
+ sizeof (*objects) * allocd);
+ names =
+ realloc(names, sizeof (*names) * allocd);
+ if (objects == NULL || names == NULL)
+ uu_die(gettext("Out of memory"));
+ }
+ objects[i] = pg;
+ names[i] = pgnbuf;
+ ++i;
+
+ if (pgnlen > max_len)
+ max_len = pgnlen;
+
+ new_pg = 1;
+ }
+
+ if (only_pgs) {
+ if (new_pg) {
+ pg = scf_pg_create(g_hndl);
+ if (pg == NULL)
+ scfdie();
+ } else
+ free(pgnbuf);
+
+ continue;
+ }
+
+ if (scf_iter_pg_properties(piter, pg) != SCF_SUCCESS)
+ scfdie();
+
+ while ((ret = scf_iter_next_property(piter, prop)) == 1) {
+ prnlen = scf_property_get_name(prop, prnbuf,
+ max_scf_name_len + 1);
+ if (prnlen < 0)
+ scfdie();
+
+ /* Will prepend the property group name and a slash. */
+ prnlen += pgnlen + 1;
+
+ ppnbuf = safe_malloc(prnlen + 1);
+
+ if (snprintf(ppnbuf, prnlen + 1, "%s/%s", pgnbuf,
+ prnbuf) < 0)
+ uu_die("snprintf");
+
+ if (pattern == NULL ||
+ fnmatch(pattern, ppnbuf, 0) == 0) {
+ if (i+1 >= allocd) {
+ allocd *= 2;
+ objects = realloc(objects,
+ sizeof (*objects) * allocd);
+ names = realloc(names,
+ sizeof (*names) * allocd);
+ if (objects == NULL || names == NULL)
+ uu_die(gettext("Out of "
+ "memory"));
+ }
+
+ objects[i] = prop;
+ names[i] = ppnbuf;
+ ++i;
+
+ if (prnlen > max_len)
+ max_len = prnlen;
+
+ prop = scf_property_create(g_hndl);
+ } else {
+ free(ppnbuf);
+ }
+ }
+
+ if (new_pg) {
+ pg = scf_pg_create(g_hndl);
+ if (pg == NULL)
+ scfdie();
+ } else
+ free(pgnbuf);
+ }
+ if (ret != 0)
+ scfdie();
+
+ objects[i] = NULL;
+
+ scf_pg_destroy(pg);
+ scf_property_destroy(prop);
+
+ for (i = 0; objects[i] != NULL; ++i) {
+ if (strchr(names[i], '/') == NULL) {
+ /* property group */
+ pg = (scf_propertygroup_t *)objects[i];
+ list_pg_info(pg, names[i], max_len);
+ free(names[i]);
+ scf_pg_destroy(pg);
+ } else {
+ /* property */
+ prop = (scf_property_t *)objects[i];
+ list_prop_info(prop, names[i], max_len);
+ free(names[i]);
+ scf_property_destroy(prop);
+ }
+ }
+
+ free(names);
+ free(objects);
+}
+
+void
+lscf_listpg(const char *pattern)
+{
+ lscf_prep_hndl();
+
+ listprop(pattern, 1);
+}
+
+/*
+ * Property group and property creation, setting, and deletion. setprop (and
+ * its alias, addprop) can either create a property group of a given type, or
+ * it can create or set a property to a given type and list of values.
+ */
+void
+lscf_addpg(const char *name, const char *type, const char *flags)
+{
+ scf_propertygroup_t *pg;
+ int ret;
+ uint32_t flgs = 0;
+ const char *cp;
+
+
+ lscf_prep_hndl();
+
+ if (cur_snap != NULL) {
+ semerr(emsg_cant_modify_snapshots);
+ return;
+ }
+
+ if (cur_inst == NULL && cur_svc == NULL) {
+ semerr(emsg_entity_not_selected);
+ return;
+ }
+
+ if (flags != NULL) {
+ for (cp = flags; *cp != '\0'; ++cp) {
+ switch (*cp) {
+ case 'P':
+ flgs |= SCF_PG_FLAG_NONPERSISTENT;
+ break;
+
+ case 'p':
+ flgs &= ~SCF_PG_FLAG_NONPERSISTENT;
+ break;
+
+ default:
+ semerr(gettext("Invalid property group flag "
+ "%c."), *cp);
+ return;
+ }
+ }
+ }
+
+ pg = scf_pg_create(g_hndl);
+ if (pg == NULL)
+ scfdie();
+
+ if (cur_inst != NULL)
+ ret = scf_instance_add_pg(cur_inst, name, type, flgs, pg);
+ else
+ ret = scf_service_add_pg(cur_svc, name, type, flgs, pg);
+
+ if (ret != SCF_SUCCESS) {
+ switch (scf_error()) {
+ case SCF_ERROR_INVALID_ARGUMENT:
+ semerr(gettext("Name, type, or flags are invalid.\n"));
+ break;
+
+ case SCF_ERROR_EXISTS:
+ semerr(gettext("Property group already exists.\n"));
+ break;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ semerr(emsg_permission_denied);
+ break;
+
+ case SCF_ERROR_BACKEND_ACCESS:
+ semerr(gettext("Backend refused access.\n"));
+ break;
+
+ default:
+ scfdie();
+ }
+ }
+
+ scf_pg_destroy(pg);
+}
+
+void
+lscf_delpg(char *name)
+{
+ lscf_prep_hndl();
+
+ if (cur_snap != NULL) {
+ semerr(emsg_cant_modify_snapshots);
+ return;
+ }
+
+ if (cur_inst == NULL && cur_svc == NULL) {
+ semerr(emsg_entity_not_selected);
+ return;
+ }
+
+ if (strchr(name, '/') != NULL) {
+ semerr(emsg_invalid_pg_name, name);
+ return;
+ }
+
+ lscf_delprop(name);
+}
+
+void
+lscf_listprop(const char *pattern)
+{
+ lscf_prep_hndl();
+
+ listprop(pattern, 0);
+}
+
+int
+lscf_setprop(const char *pgname, const char *type, const char *value,
+ const uu_list_t *values)
+{
+ scf_type_t ty;
+ scf_propertygroup_t *pg;
+ scf_property_t *prop;
+ int ret, result = 0;
+ scf_transaction_t *tx;
+ scf_transaction_entry_t *e;
+ scf_value_t *v;
+ uu_list_walk_t *walk;
+ string_list_t *sp;
+ char *propname;
+ int req_quotes = 0;
+
+ lscf_prep_hndl();
+
+ if ((e = scf_entry_create(g_hndl)) == NULL ||
+ (pg = scf_pg_create(g_hndl)) == NULL ||
+ (prop = scf_property_create(g_hndl)) == NULL ||
+ (tx = scf_transaction_create(g_hndl)) == NULL)
+ scfdie();
+
+ if (cur_snap != NULL) {
+ semerr(emsg_cant_modify_snapshots);
+ goto fail;
+ }
+
+ if (cur_inst == NULL && cur_svc == NULL) {
+ semerr(emsg_entity_not_selected);
+ goto fail;
+ }
+
+ propname = strchr(pgname, '/');
+ if (propname == NULL) {
+ semerr(gettext("Property names must contain a `/'.\n"));
+ goto fail;
+ }
+
+ *propname = '\0';
+ ++propname;
+
+ if (type != NULL) {
+ ty = string_to_type(type);
+ if (ty == SCF_TYPE_INVALID) {
+ semerr(gettext("Unknown type \"%s\".\n"), type);
+ goto fail;
+ }
+ }
+
+ if (cur_inst != NULL)
+ ret = scf_instance_get_pg(cur_inst, pgname, pg);
+ else
+ ret = scf_service_get_pg(cur_svc, pgname, pg);
+ if (ret != SCF_SUCCESS) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ semerr(emsg_no_such_pg, pgname);
+ goto fail;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ semerr(emsg_invalid_pg_name, pgname);
+ goto fail;
+
+ default:
+ scfdie();
+ break;
+ }
+ }
+
+ do {
+ if (scf_pg_update(pg) == -1)
+ scfdie();
+ if (scf_transaction_start(tx, pg) != SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_PERMISSION_DENIED)
+ scfdie();
+
+ semerr(emsg_permission_denied);
+ goto fail;
+ }
+
+ ret = scf_pg_get_property(pg, propname, prop);
+ if (ret == SCF_SUCCESS) {
+ scf_type_t current_ty;
+
+ if (scf_property_type(prop, &current_ty) != SCF_SUCCESS)
+ scfdie();
+
+ if (type == NULL)
+ ty = current_ty;
+ if (scf_transaction_property_change_type(tx, e,
+ propname, ty) == -1)
+ scfdie();
+
+ } else if (scf_error() == SCF_ERROR_NOT_FOUND) {
+ if (type == NULL) {
+ semerr(
+ gettext("Type required for new properties.\n"));
+ goto fail;
+ }
+ if (scf_transaction_property_new(tx, e, propname,
+ ty) == -1)
+ scfdie();
+ } else if (scf_error() == SCF_ERROR_INVALID_ARGUMENT) {
+ semerr(emsg_invalid_prop_name, propname);
+ goto fail;
+ } else {
+ scfdie();
+ }
+
+ if (ty == SCF_TYPE_ASTRING || ty == SCF_TYPE_USTRING)
+ req_quotes = 1;
+
+ if (value != NULL) {
+ v = string_to_value(value, ty, 0);
+
+ if (v == NULL)
+ goto fail;
+
+ ret = scf_entry_add_value(e, v);
+ assert(ret == SCF_SUCCESS);
+ } else {
+ assert(values != NULL);
+
+ walk = uu_list_walk_start((uu_list_t *)values,
+ UU_WALK_REVERSE);
+ if (walk == NULL)
+ uu_die(gettext("Could not walk list"));
+
+ for (sp = uu_list_walk_next(walk); sp != NULL;
+ sp = uu_list_walk_next(walk)) {
+ v = string_to_value(sp->str, ty, req_quotes);
+
+ if (v == NULL) {
+ scf_entry_destroy_children(e);
+ goto fail;
+ }
+
+ ret = scf_entry_add_value(e, v);
+ assert(ret == SCF_SUCCESS);
+ }
+ uu_list_walk_end(walk);
+ }
+ result = scf_transaction_commit(tx);
+
+ scf_transaction_reset(tx);
+ scf_entry_destroy_children(e);
+ } while (result == 0);
+
+ if (result < 0) {
+ if (scf_error() != SCF_ERROR_PERMISSION_DENIED)
+ scfdie();
+
+ semerr(emsg_permission_denied);
+ goto fail;
+ }
+
+ scf_transaction_destroy(tx);
+ scf_entry_destroy(e);
+ scf_pg_destroy(pg);
+ scf_property_destroy(prop);
+
+ return (0);
+
+fail:
+ scf_transaction_destroy(tx);
+ scf_entry_destroy(e);
+ scf_pg_destroy(pg);
+ scf_property_destroy(prop);
+
+ return (-1);
+}
+
+void
+lscf_delprop(char *pgn)
+{
+ char *slash, *pn;
+ scf_propertygroup_t *pg;
+ scf_transaction_t *tx;
+ scf_transaction_entry_t *e;
+ int ret;
+
+
+ lscf_prep_hndl();
+
+ if (cur_snap != NULL) {
+ semerr(emsg_cant_modify_snapshots);
+ return;
+ }
+
+ if (cur_inst == NULL && cur_svc == NULL) {
+ semerr(emsg_entity_not_selected);
+ return;
+ }
+
+ pg = scf_pg_create(g_hndl);
+ if (pg == NULL)
+ scfdie();
+
+ slash = strchr(pgn, '/');
+ if (slash == NULL) {
+ pn = NULL;
+ } else {
+ *slash = '\0';
+ pn = slash + 1;
+ }
+
+ if (cur_inst != NULL)
+ ret = scf_instance_get_pg(cur_inst, pgn, pg);
+ else
+ ret = scf_service_get_pg(cur_svc, pgn, pg);
+ if (ret != SCF_SUCCESS) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ semerr(emsg_no_such_pg, pgn);
+ break;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ semerr(emsg_invalid_pg_name, pgn);
+ break;
+
+ default:
+ scfdie();
+ }
+
+ scf_pg_destroy(pg);
+
+ return;
+ }
+
+ if (pn == NULL) {
+ /* Try to delete the property group. */
+ if (scf_pg_delete(pg) != SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_PERMISSION_DENIED)
+ scfdie();
+
+ semerr(emsg_permission_denied);
+ }
+
+ scf_pg_destroy(pg);
+ return;
+ }
+
+ e = scf_entry_create(g_hndl);
+ tx = scf_transaction_create(g_hndl);
+
+ do {
+ if (scf_pg_update(pg) == -1)
+ scfdie();
+ if (scf_transaction_start(tx, pg) != SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_PERMISSION_DENIED)
+ scfdie();
+
+ semerr(emsg_permission_denied);
+ break;
+ }
+
+ if (scf_transaction_property_delete(tx, e, pn) != SCF_SUCCESS) {
+ if (scf_error() == SCF_ERROR_NOT_FOUND) {
+ semerr(gettext("No such property %s/%s.\n"),
+ pgn, pn);
+ break;
+ } else if (scf_error() == SCF_ERROR_INVALID_ARGUMENT) {
+ semerr(emsg_invalid_prop_name, pn);
+ break;
+ } else {
+ scfdie();
+ }
+ }
+
+ ret = scf_transaction_commit(tx);
+
+ if (ret == 0)
+ scf_transaction_reset(tx);
+ } while (ret == 0);
+
+ if (ret < 0) {
+ if (scf_error() != SCF_ERROR_PERMISSION_DENIED)
+ scfdie();
+
+ semerr(emsg_permission_denied);
+ }
+
+ scf_transaction_destroy(tx);
+ scf_entry_destroy(e);
+ scf_pg_destroy(pg);
+}
+
+/*
+ * Property editing.
+ */
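+/*
+ * A sketch of the temporary script emitted by write_edit_script() below
+ * (the FMRI and values are illustrative only):
+ *
+ *     select svc:/site/example:default
+ *
+ *     # Property group "config"
+ *     # delprop config
+ *     # addpg config application
+ *     # setprop config/greeting = astring: ("hello")
+ *
+ * The user uncomments and edits the commands of interest, and lscf_editprop()
+ * then feeds the file back through engine_source().
+ */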
+
+static int
+write_edit_script(FILE *strm)
+{
+ char *fmribuf;
+ ssize_t fmrilen;
+
+ scf_propertygroup_t *pg;
+ scf_property_t *prop;
+ scf_value_t *val;
+ scf_type_t ty;
+ int ret, result = 0;
+ scf_iter_t *iter, *piter, *viter;
+ char *buf, *tybuf, *pname;
+ const char *emsg_write_error;
+
+
+ emsg_write_error = gettext("Error writing temoprary file: %s.\n");
+
+
+ /* select fmri */
+ if (cur_inst != NULL) {
+ fmrilen = scf_instance_to_fmri(cur_inst, NULL, 0);
+ if (fmrilen < 0)
+ scfdie();
+ fmribuf = safe_malloc(fmrilen + 1);
+ if (scf_instance_to_fmri(cur_inst, fmribuf, fmrilen + 1) < 0)
+ scfdie();
+ } else {
+ assert(cur_svc != NULL);
+ fmrilen = scf_service_to_fmri(cur_svc, NULL, 0);
+ if (fmrilen < 0)
+ scfdie();
+ fmribuf = safe_malloc(fmrilen + 1);
+ if (scf_service_to_fmri(cur_svc, fmribuf, fmrilen + 1) < 0)
+ scfdie();
+ }
+
+ if (fprintf(strm, "select %s\n\n", fmribuf) < 0) {
+ warn(emsg_write_error, strerror(errno));
+ free(fmribuf);
+ return (-1);
+ }
+
+ free(fmribuf);
+
+
+ if ((pg = scf_pg_create(g_hndl)) == NULL ||
+ (prop = scf_property_create(g_hndl)) == NULL ||
+ (val = scf_value_create(g_hndl)) == NULL ||
+ (iter = scf_iter_create(g_hndl)) == NULL ||
+ (piter = scf_iter_create(g_hndl)) == NULL ||
+ (viter = scf_iter_create(g_hndl)) == NULL)
+ scfdie();
+
+ buf = safe_malloc(max_scf_name_len + 1);
+ tybuf = safe_malloc(max_scf_pg_type_len + 1);
+ pname = safe_malloc(max_scf_name_len + 1);
+
+ if (cur_inst != NULL)
+ ret = scf_iter_instance_pgs(iter, cur_inst);
+ else
+ ret = scf_iter_service_pgs(iter, cur_svc);
+ if (ret != SCF_SUCCESS)
+ scfdie();
+
+ while ((ret = scf_iter_next_pg(iter, pg)) == 1) {
+ int ret2;
+
+ /*
+ * # delprop pg
+ * # addpg pg type
+ */
+ if (scf_pg_get_name(pg, buf, max_scf_name_len + 1) < 0)
+ scfdie();
+
+ if (scf_pg_get_type(pg, tybuf, max_scf_pg_type_len + 1) < 0)
+ scfdie();
+
+ if (fprintf(strm, "# Property group \"%s\"\n"
+ "# delprop %s\n"
+ "# addpg %s %s\n", buf, buf, buf, tybuf) < 0) {
+ warn(emsg_write_error, strerror(errno));
+ result = -1;
+ goto out;
+ }
+
+ /* # setprop pg/prop = (values) */
+
+ if (scf_iter_pg_properties(piter, pg) != SCF_SUCCESS)
+ scfdie();
+
+ while ((ret2 = scf_iter_next_property(piter, prop)) == 1) {
+ int first = 1;
+ int ret3;
+ int multiple_strings = 0;
+
+ if (scf_property_get_name(prop, pname,
+ max_scf_name_len + 1) < 0)
+ scfdie();
+
+ if (scf_property_type(prop, &ty) != 0)
+ scfdie();
+
+ if (fprintf(strm, "# setprop %s/%s = %s: (", buf,
+ pname, scf_type_to_string(ty)) < 0) {
+ warn(emsg_write_error, strerror(errno));
+ result = -1;
+ goto out;
+ }
+
+ if (prop_has_multiple_values(prop, val) &&
+ (ty == SCF_TYPE_ASTRING || ty == SCF_TYPE_USTRING))
+ multiple_strings = 1;
+
+ if (scf_iter_property_values(viter, prop) !=
+ SCF_SUCCESS)
+ scfdie();
+
+ while ((ret3 = scf_iter_next_value(viter, val)) == 1) {
+ char *buf;
+ ssize_t buflen;
+
+ buflen = scf_value_get_as_string(val, NULL, 0);
+ if (buflen < 0)
+ scfdie();
+
+ buf = safe_malloc(buflen + 1);
+
+ if (scf_value_get_as_string(val, buf,
+ buflen + 1) < 0)
+ scfdie();
+
+ if (first)
+ first = 0;
+ else {
+ if (putc(' ', strm) != ' ') {
+ warn(emsg_write_error,
+ strerror(errno));
+ result = -1;
+ goto out;
+ }
+ }
+
+ if (multiple_strings ||
+ strpbrk(buf, CHARS_TO_QUOTE) != NULL) {
+ (void) putc('"', strm);
+ (void) quote_and_print(buf, strm);
+ (void) putc('"', strm);
+
+ if (ferror(strm)) {
+ warn(emsg_write_error,
+ strerror(errno));
+ result = -1;
+ goto out;
+ }
+ } else {
+ if (fprintf(strm, "%s", buf) < 0) {
+ warn(emsg_write_error,
+ strerror(errno));
+ result = -1;
+ goto out;
+ }
+ }
+
+ free(buf);
+ }
+ if (ret3 < 0)
+ scfdie();
+
+ if (fputs(")\n", strm) < 0) {
+ warn(emsg_write_error, strerror(errno));
+ result = -1;
+ goto out;
+ }
+ }
+ if (ret2 < 0)
+ scfdie();
+
+ if (fputc('\n', strm) == EOF) {
+ warn(emsg_write_error, strerror(errno));
+ result = -1;
+ goto out;
+ }
+ }
+ if (ret < 0)
+ scfdie();
+
+out:
+ free(pname);
+ free(tybuf);
+ free(buf);
+ scf_iter_destroy(viter);
+ scf_iter_destroy(piter);
+ scf_iter_destroy(iter);
+ scf_value_destroy(val);
+ scf_property_destroy(prop);
+ scf_pg_destroy(pg);
+
+ if (result == 0) {
+ if (fflush(strm) != 0) {
+ warn(emsg_write_error, strerror(errno));
+ return (-1);
+ }
+ }
+
+ return (result);
+}
+
+int
+lscf_editprop()
+{
+ char *buf, *editor;
+ size_t bufsz;
+ int tmpfd;
+ char tempname[] = TEMP_FILE_PATTERN;
+
+ lscf_prep_hndl();
+
+ if (cur_snap != NULL) {
+ semerr(emsg_cant_modify_snapshots);
+ return (-1);
+ }
+
+ if (cur_svc == NULL && cur_inst == NULL) {
+ semerr(emsg_entity_not_selected);
+ return (-1);
+ }
+
+ tmpfd = mkstemp(tempname);
+ if (tmpfd == -1) {
+ semerr(gettext("Could not create temporary file.\n"));
+ return (-1);
+ }
+
+ (void) strcpy(tempfilename, tempname);
+
+ tempfile = fdopen(tmpfd, "r+");
+ if (tempfile == NULL) {
+ warn(gettext("Could not create temporary file.\n"));
+ if (close(tmpfd) == -1)
+ warn(gettext("Could not close temporary file: %s.\n"),
+ strerror(errno));
+
+ remove_tempfile();
+
+ return (-1);
+ }
+
+ if (write_edit_script(tempfile) == -1) {
+ remove_tempfile();
+ return (-1);
+ }
+
+ editor = getenv("EDITOR");
+ if (editor == NULL)
+ editor = "vi";
+
+ bufsz = strlen(editor) + 1 + strlen(tempname) + 1;
+ buf = safe_malloc(bufsz);
+
+ if (snprintf(buf, bufsz, "%s %s", editor, tempname) < 0)
+ uu_die(gettext("Error creating editor command"));
+
+ if (system(buf) == -1) {
+ semerr(gettext("Could not launch editor %s: %s\n"), editor,
+ strerror(errno));
+ free(buf);
+ remove_tempfile();
+ return (-1);
+ }
+
+ free(buf);
+
+ (void) engine_source(tempname, est->sc_cmd_flags & SC_CMD_IACTIVE);
+
+ remove_tempfile();
+
+ return (0);
+}
+
+static void
+add_string(uu_list_t *strlist, const char *str)
+{
+ string_list_t *elem;
+ elem = safe_malloc(sizeof (*elem));
+ uu_list_node_init(elem, &elem->node, string_pool);
+ elem->str = safe_strdup(str);
+ if (uu_list_prepend(strlist, elem) != 0)
+ uu_die(gettext("libuutil error: %s\n"),
+ uu_strerror(uu_error()));
+}
+
+/*
+ * Get all property values that don't match the given glob pattern,
+ * if a pattern is specified.
+ */
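+/*
+ * For example (illustrative values), with the pattern "LANG=*" a value
+ * "LANG=C" is excluded from the list while "PATH=/usr/bin" is kept.
+ */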
+static void
+get_prop_values(scf_property_t *prop, uu_list_t *values,
+ const char *pattern)
+{
+ scf_iter_t *iter;
+ scf_value_t *val;
+ int ret;
+
+ if ((iter = scf_iter_create(g_hndl)) == NULL ||
+ (val = scf_value_create(g_hndl)) == NULL)
+ scfdie();
+
+ if (scf_iter_property_values(iter, prop) != 0)
+ scfdie();
+
+ while ((ret = scf_iter_next_value(iter, val)) == 1) {
+ char *buf;
+ ssize_t vlen, szret;
+
+ vlen = scf_value_get_as_string(val, NULL, 0);
+ if (vlen < 0)
+ scfdie();
+
+ buf = safe_malloc(vlen + 1);
+
+ szret = scf_value_get_as_string(val, buf, vlen + 1);
+ if (szret < 0)
+ scfdie();
+ assert(szret <= vlen);
+
+ if (pattern == NULL || fnmatch(pattern, buf, 0) != 0)
+ add_string(values, buf);
+
+ free(buf);
+ }
+
+ if (ret == -1)
+ scfdie();
+
+ scf_value_destroy(val);
+ scf_iter_destroy(iter);
+}
+
+static int
+lscf_setpropvalue(const char *pgname, const char *type,
+ const char *arg, int isadd, int isnotfoundok)
+{
+ scf_type_t ty;
+ scf_propertygroup_t *pg;
+ scf_property_t *prop;
+ int ret, result = 0;
+ scf_transaction_t *tx;
+ scf_transaction_entry_t *e;
+ scf_value_t *v;
+ string_list_t *sp;
+ char *propname;
+ uu_list_t *values;
+ uu_list_walk_t *walk;
+ void *cookie = NULL;
+ char *pattern = NULL;
+
+ lscf_prep_hndl();
+
+ if ((values = uu_list_create(string_pool, NULL, 0)) == NULL)
+ uu_die(gettext("Could not create property list: %s\n"),
+ uu_strerror(uu_error()));
+
+ if (!isadd)
+ pattern = safe_strdup(arg);
+
+ if ((e = scf_entry_create(g_hndl)) == NULL ||
+ (pg = scf_pg_create(g_hndl)) == NULL ||
+ (prop = scf_property_create(g_hndl)) == NULL ||
+ (tx = scf_transaction_create(g_hndl)) == NULL)
+ scfdie();
+
+ if (cur_snap != NULL) {
+ semerr(emsg_cant_modify_snapshots);
+ goto fail;
+ }
+
+ if (cur_inst == NULL && cur_svc == NULL) {
+ semerr(emsg_entity_not_selected);
+ goto fail;
+ }
+
+ propname = strchr(pgname, '/');
+ if (propname == NULL) {
+ semerr(gettext("Property names must contain a `/'.\n"));
+ goto fail;
+ }
+
+ *propname = '\0';
+ ++propname;
+
+ if (type != NULL) {
+ ty = string_to_type(type);
+ if (ty == SCF_TYPE_INVALID) {
+ semerr(gettext("Unknown type \"%s\".\n"), type);
+ goto fail;
+ }
+ }
+
+ if (cur_inst != NULL)
+ ret = scf_instance_get_pg(cur_inst, pgname, pg);
+ else
+ ret = scf_service_get_pg(cur_svc, pgname, pg);
+ if (ret != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ if (isnotfoundok) {
+ result = 0;
+ } else {
+ semerr(emsg_no_such_pg, pgname);
+ result = -1;
+ }
+ goto out;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ semerr(emsg_invalid_pg_name, pgname);
+ goto fail;
+
+ default:
+ scfdie();
+ }
+ }
+
+ do {
+ if (scf_pg_update(pg) == -1)
+ scfdie();
+ if (scf_transaction_start(tx, pg) != 0) {
+ if (scf_error() != SCF_ERROR_PERMISSION_DENIED)
+ scfdie();
+
+ semerr(emsg_permission_denied);
+ goto fail;
+ }
+
+ ret = scf_pg_get_property(pg, propname, prop);
+ if (ret == 0) {
+ scf_type_t ptype;
+ char *pat = pattern;
+
+ if (scf_property_type(prop, &ptype) != 0)
+ scfdie();
+
+ if (isadd) {
+ if (type != NULL && ptype != ty) {
+ semerr(gettext("Property \"%s\" is not "
+ "of type \"%s\".\n"), propname,
+ type);
+ goto fail;
+ }
+
+ pat = NULL;
+ } else {
+ size_t len = strlen(pat);
+ if (len > 0 && pat[len - 1] == '\"')
+ pat[len - 1] = '\0';
+ if (len > 0 && pat[0] == '\"')
+ pat++;
+ }
+
+ ty = ptype;
+
+ get_prop_values(prop, values, pat);
+
+ if (isadd)
+ add_string(values, arg);
+
+ if (scf_transaction_property_change(tx, e,
+ propname, ty) == -1)
+ scfdie();
+ } else if (scf_error() == SCF_ERROR_NOT_FOUND) {
+ if (isadd) {
+ if (type == NULL) {
+ semerr(gettext("Type required "
+ "for new properties.\n"));
+ goto fail;
+ }
+
+ add_string(values, arg);
+
+ if (scf_transaction_property_new(tx, e,
+ propname, ty) == -1)
+ scfdie();
+ } else if (isnotfoundok) {
+ result = 0;
+ goto out;
+ } else {
+ semerr(gettext("No such property %s/%s.\n"),
+ pgname, propname);
+ result = -1;
+ goto out;
+ }
+ } else if (scf_error() == SCF_ERROR_INVALID_ARGUMENT) {
+ semerr(emsg_invalid_prop_name, propname);
+ goto fail;
+ } else {
+ scfdie();
+ }
+
+ walk = uu_list_walk_start(values, UU_DEFAULT);
+ if (walk == NULL)
+ uu_die(gettext("Could not walk property list.\n"));
+
+ for (sp = uu_list_walk_next(walk); sp != NULL;
+ sp = uu_list_walk_next(walk)) {
+ v = string_to_value(sp->str, ty, 0);
+
+ if (v == NULL) {
+ scf_entry_destroy_children(e);
+ goto fail;
+ }
+ ret = scf_entry_add_value(e, v);
+ assert(ret == 0);
+ }
+ uu_list_walk_end(walk);
+
+ result = scf_transaction_commit(tx);
+
+ scf_transaction_reset(tx);
+ scf_entry_destroy_children(e);
+ } while (result == 0);
+
+ if (result < 0) {
+ if (scf_error() != SCF_ERROR_PERMISSION_DENIED)
+ scfdie();
+
+ semerr(emsg_permission_denied);
+ goto fail;
+ }
+
+ result = 0;
+
+out:
+ scf_transaction_destroy(tx);
+ scf_entry_destroy(e);
+ scf_pg_destroy(pg);
+ scf_property_destroy(prop);
+ free(pattern);
+
+ while ((sp = uu_list_teardown(values, &cookie)) != NULL) {
+ free(sp->str);
+ free(sp);
+ }
+
+ uu_list_destroy(values);
+
+ return (result);
+
+fail:
+ result = -1;
+ goto out;
+}
+
+int
+lscf_addpropvalue(const char *pgname, const char *type, const char *value)
+{
+ return (lscf_setpropvalue(pgname, type, value, 1, 0));
+}
+
+int
+lscf_delpropvalue(const char *pgname, const char *pattern, int isnotfoundok)
+{
+ return (lscf_setpropvalue(pgname, NULL, pattern, 0, isnotfoundok));
+}
+
+/*
+ * Look for a standard start method, first in the instance (if any),
+ * then the service.
+ */
+static const char *
+start_method_name(int *in_instance)
+{
+ scf_propertygroup_t *pg;
+ char **p;
+ int ret;
+ scf_instance_t *inst = cur_inst;
+
+ if ((pg = scf_pg_create(g_hndl)) == NULL)
+ scfdie();
+
+again:
+ for (p = start_method_names; *p != NULL; p++) {
+ if (inst != NULL)
+ ret = scf_instance_get_pg(inst, *p, pg);
+ else
+ ret = scf_service_get_pg(cur_svc, *p, pg);
+
+ if (ret == 0) {
+ size_t bufsz = strlen(SCF_GROUP_METHOD) + 1;
+ char *buf = safe_malloc(bufsz);
+
+ if ((ret = scf_pg_get_type(pg, buf, bufsz)) < 0) {
+ free(buf);
+ continue;
+ }
+ if (strcmp(buf, SCF_GROUP_METHOD) != 0) {
+ free(buf);
+ continue;
+ }
+
+ free(buf);
+ *in_instance = (inst != NULL);
+ scf_pg_destroy(pg);
+ return (*p);
+ }
+
+ if (scf_error() == SCF_ERROR_NOT_FOUND)
+ continue;
+
+ scfdie();
+ }
+
+ if (inst != NULL) {
+ inst = NULL;
+ goto again;
+ }
+
+ scf_pg_destroy(pg);
+ return (NULL);
+}
+
+static int
+addpg(const char *name, const char *type)
+{
+ scf_propertygroup_t *pg;
+ int ret;
+
+ pg = scf_pg_create(g_hndl);
+ if (pg == NULL)
+ scfdie();
+
+ if (cur_inst != NULL)
+ ret = scf_instance_add_pg(cur_inst, name, type, 0, pg);
+ else
+ ret = scf_service_add_pg(cur_svc, name, type, 0, pg);
+
+ if (ret != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_EXISTS:
+ ret = 0;
+ break;
+
+ case SCF_ERROR_PERMISSION_DENIED:
+ semerr(emsg_permission_denied);
+ break;
+
+ default:
+ scfdie();
+ }
+ }
+
+ scf_pg_destroy(pg);
+ return (ret);
+}
+
+int
+lscf_setenv(uu_list_t *args, int isunset)
+{
+ int ret = 0;
+ size_t i;
+ int argc;
+ char **argv = NULL;
+ string_list_t *slp;
+ char *pattern;
+ char *prop;
+ int do_service = 0;
+ int do_instance = 0;
+ const char *method = NULL;
+ const char *name = NULL;
+ const char *value = NULL;
+ scf_instance_t *saved_cur_inst = cur_inst;
+
+ lscf_prep_hndl();
+
+ argc = uu_list_numnodes(args);
+ if (argc < 1)
+ goto usage;
+
+ argv = calloc(argc + 1, sizeof (char *));
+ if (argv == NULL)
+ uu_die(gettext("Out of memory.\n"));
+
+ for (slp = uu_list_first(args), i = 0;
+ slp != NULL;
+ slp = uu_list_next(args, slp), ++i)
+ argv[i] = slp->str;
+
+ argv[i] = NULL;
+
+ opterr = 0;
+ optind = 0;
+ for (;;) {
+ ret = getopt(argc, argv, "sim:");
+ if (ret == -1)
+ break;
+
+ switch (ret) {
+ case 's':
+ do_service = 1;
+ cur_inst = NULL;
+ break;
+
+ case 'i':
+ do_instance = 1;
+ break;
+
+ case 'm':
+ method = optarg;
+ break;
+
+ case '?':
+ goto usage;
+
+ default:
+ bad_error("getopt", ret);
+ }
+ }
+
+ argc -= optind;
+ if ((do_service && do_instance) ||
+ (isunset && argc != 1) ||
+ (!isunset && argc != 2))
+ goto usage;
+
+ name = argv[optind];
+ if (!isunset)
+ value = argv[optind + 1];
+
+ if (cur_snap != NULL) {
+ semerr(emsg_cant_modify_snapshots);
+ ret = -1;
+ goto out;
+ }
+
+ if (cur_inst == NULL && cur_svc == NULL) {
+ semerr(emsg_entity_not_selected);
+ ret = -1;
+ goto out;
+ }
+
+ if (do_instance && cur_inst == NULL) {
+ semerr(gettext("No instance is selected.\n"));
+ ret = -1;
+ goto out;
+ }
+
+ if (do_service && cur_svc == NULL) {
+ semerr(gettext("No service is selected.\n"));
+ ret = -1;
+ goto out;
+ }
+
+ if (method == NULL) {
+ if (do_instance || do_service) {
+ method = "method_context";
+ if (!isunset) {
+ ret = addpg("method_context",
+ SCF_GROUP_FRAMEWORK);
+ if (ret != 0)
+ goto out;
+ }
+ } else {
+ int in_instance;
+ method = start_method_name(&in_instance);
+ if (method == NULL) {
+ semerr(gettext(
+ "Couldn't find start method; please "
+ "specify a method with '-m'.\n"));
+ ret = -1;
+ goto out;
+ }
+ if (!in_instance)
+ cur_inst = NULL;
+ }
+ } else {
+ scf_propertygroup_t *pg;
+ size_t bufsz;
+ char *buf;
+ int ret;
+
+ if ((pg = scf_pg_create(g_hndl)) == NULL)
+ scfdie();
+
+ if (cur_inst != NULL)
+ ret = scf_instance_get_pg(cur_inst, method, pg);
+ else
+ ret = scf_service_get_pg(cur_svc, method, pg);
+
+ if (ret != 0) {
+ scf_pg_destroy(pg);
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ semerr(gettext("Couldn't find the method "
+ "\"%s\".\n"), method);
+ goto out;
+
+ case SCF_ERROR_INVALID_ARGUMENT:
+ semerr(gettext("Invalid method name \"%s\".\n"),
+ method);
+ goto out;
+
+ default:
+ scfdie();
+ }
+ }
+
+ bufsz = strlen(SCF_GROUP_METHOD) + 1;
+ buf = safe_malloc(bufsz);
+
+ if (scf_pg_get_type(pg, buf, bufsz) < 0 ||
+ strcmp(buf, SCF_GROUP_METHOD) != 0) {
+ semerr(gettext("Property group \"%s\" is not of type "
+ "\"method\".\n"), method);
+ ret = -1;
+ free(buf);
+ scf_pg_destroy(pg);
+ goto out;
+ }
+
+ free(buf);
+ scf_pg_destroy(pg);
+ }
+
+ prop = uu_msprintf("%s/environment", method);
+ pattern = uu_msprintf("%s=*", name);
+
+ if (prop == NULL || pattern == NULL)
+ uu_die(gettext("Out of memory.\n"));
+
+ ret = lscf_delpropvalue(prop, pattern, !isunset);
+
+ if (ret == 0 && !isunset) {
+ uu_free(pattern);
+ uu_free(prop);
+ prop = uu_msprintf("%s/environment", method);
+ pattern = uu_msprintf("%s=%s", name, value);
+ if (prop == NULL || pattern == NULL)
+ uu_die(gettext("Out of memory.\n"));
+ ret = lscf_addpropvalue(prop, "astring:", pattern);
+ }
+ uu_free(pattern);
+ uu_free(prop);
+
+out:
+ cur_inst = saved_cur_inst;
+
+ free(argv);
+ return (ret);
+usage:
+ ret = -2;
+ goto out;
+}
+
+/*
+ * Snapshot commands
+ */
+
+void
+lscf_listsnap()
+{
+ scf_snapshot_t *snap;
+ scf_iter_t *iter;
+ char *nb;
+ int r;
+
+ lscf_prep_hndl();
+
+ if (cur_inst == NULL) {
+ semerr(gettext("Instance not selected.\n"));
+ return;
+ }
+
+ if ((snap = scf_snapshot_create(g_hndl)) == NULL ||
+ (iter = scf_iter_create(g_hndl)) == NULL)
+ scfdie();
+
+ if (scf_iter_instance_snapshots(iter, cur_inst) != SCF_SUCCESS)
+ scfdie();
+
+ nb = safe_malloc(max_scf_name_len + 1);
+
+ while ((r = scf_iter_next_snapshot(iter, snap)) == 1) {
+ if (scf_snapshot_get_name(snap, nb, max_scf_name_len + 1) < 0)
+ scfdie();
+
+ (void) puts(nb);
+ }
+ if (r < 0)
+ scfdie();
+
+ free(nb);
+ scf_iter_destroy(iter);
+ scf_snapshot_destroy(snap);
+}
+
+void
+lscf_selectsnap(const char *name)
+{
+ scf_snapshot_t *snap;
+ scf_snaplevel_t *level;
+
+ lscf_prep_hndl();
+
+ if (cur_inst == NULL) {
+ semerr(gettext("Instance not selected.\n"));
+ return;
+ }
+
+ if (cur_snap != NULL) {
+ if (name != NULL) {
+ char *cur_snap_name;
+ boolean_t nochange;
+
+ cur_snap_name = safe_malloc(max_scf_name_len + 1);
+
+ if (scf_snapshot_get_name(cur_snap, cur_snap_name,
+ max_scf_name_len + 1) < 0)
+ scfdie();
+
+ nochange = strcmp(name, cur_snap_name) == 0;
+
+ free(cur_snap_name);
+
+ if (nochange)
+ return;
+ }
+
+ unselect_cursnap();
+ }
+
+ if (name == NULL)
+ return;
+
+ if ((snap = scf_snapshot_create(g_hndl)) == NULL ||
+ (level = scf_snaplevel_create(g_hndl)) == NULL)
+ scfdie();
+
+ if (scf_instance_get_snapshot(cur_inst, name, snap) !=
+ SCF_SUCCESS) {
+ switch (scf_error()) {
+ case SCF_ERROR_INVALID_ARGUMENT:
+ semerr(gettext("Invalid name \"%s\".\n"), name);
+ break;
+
+ case SCF_ERROR_NOT_FOUND:
+ semerr(gettext("No such snapshot \"%s\".\n"), name);
+ break;
+
+ default:
+ scfdie();
+ }
+
+ scf_snaplevel_destroy(level);
+ scf_snapshot_destroy(snap);
+ return;
+ }
+
+ /* Load the snaplevels into our list. */
+ cur_levels = uu_list_create(snaplevel_pool, NULL, 0);
+ if (cur_levels == NULL)
+ uu_die(gettext("Could not create list: %s\n"),
+ uu_strerror(uu_error()));
+
+ if (scf_snapshot_get_base_snaplevel(snap, level) != SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ semerr(gettext("Snapshot has no snaplevels.\n"));
+
+ scf_snaplevel_destroy(level);
+ scf_snapshot_destroy(snap);
+ return;
+ }
+
+ cur_snap = snap;
+
+ for (;;) {
+ cur_elt = safe_malloc(sizeof (*cur_elt));
+ uu_list_node_init(cur_elt, &cur_elt->list_node,
+ snaplevel_pool);
+ cur_elt->sl = level;
+ if (uu_list_insert_after(cur_levels, NULL, cur_elt) != 0)
+ uu_die(gettext("libuutil error: %s\n"),
+ uu_strerror(uu_error()));
+
+ level = scf_snaplevel_create(g_hndl);
+ if (level == NULL)
+ scfdie();
+
+ if (scf_snaplevel_get_next_snaplevel(cur_elt->sl,
+ level) != SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ scf_snaplevel_destroy(level);
+ break;
+ }
+ }
+
+ cur_elt = uu_list_last(cur_levels);
+ cur_level = cur_elt->sl;
+}
+
+/*
+ * Copies the properties & values in src to dst. Assumes src won't change.
+ * Returns -1 if permission is denied, -2 if another transaction interrupts,
+ * and 0 on success.
+ *
+ * If enabled is 0 or 1, its value is used for the SCF_PROPERTY_ENABLED
+ * property, if it is copied and has type boolean. (See comment in
+ * lscf_revert()).
+ */
+static int
+pg_copy(const scf_propertygroup_t *src, scf_propertygroup_t *dst,
+ uint8_t enabled)
+{
+ scf_transaction_t *tx;
+ scf_iter_t *iter, *viter;
+ scf_property_t *prop;
+ scf_value_t *v;
+ char *nbuf;
+ int r;
+
+ tx = scf_transaction_create(g_hndl);
+ if (tx == NULL)
+ scfdie();
+
+ if (scf_transaction_start(tx, dst) != SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_PERMISSION_DENIED)
+ scfdie();
+
+ scf_transaction_destroy(tx);
+
+ return (-1);
+ }
+
+ if ((iter = scf_iter_create(g_hndl)) == NULL ||
+ (prop = scf_property_create(g_hndl)) == NULL ||
+ (viter = scf_iter_create(g_hndl)) == NULL)
+ scfdie();
+
+ nbuf = safe_malloc(max_scf_name_len + 1);
+
+ if (scf_iter_pg_properties(iter, src) != SCF_SUCCESS)
+ scfdie();
+
+ for (;;) {
+ scf_transaction_entry_t *e;
+ scf_type_t ty;
+
+ r = scf_iter_next_property(iter, prop);
+ if (r == -1)
+ scfdie();
+ if (r == 0)
+ break;
+
+ e = scf_entry_create(g_hndl);
+ if (e == NULL)
+ scfdie();
+
+ if (scf_property_type(prop, &ty) != SCF_SUCCESS)
+ scfdie();
+
+ if (scf_property_get_name(prop, nbuf, max_scf_name_len + 1) < 0)
+ scfdie();
+
+ if (scf_transaction_property_new(tx, e, nbuf,
+ ty) != SCF_SUCCESS)
+ scfdie();
+
+ if ((enabled == 0 || enabled == 1) &&
+ strcmp(nbuf, scf_property_enabled) == 0 &&
+ ty == SCF_TYPE_BOOLEAN) {
+ v = scf_value_create(g_hndl);
+ if (v == NULL)
+ scfdie();
+
+ scf_value_set_boolean(v, enabled);
+
+ if (scf_entry_add_value(e, v) != 0)
+ scfdie();
+ } else {
+ if (scf_iter_property_values(viter, prop) != 0)
+ scfdie();
+
+ for (;;) {
+ v = scf_value_create(g_hndl);
+ if (v == NULL)
+ scfdie();
+
+ r = scf_iter_next_value(viter, v);
+ if (r == -1)
+ scfdie();
+ if (r == 0) {
+ scf_value_destroy(v);
+ break;
+ }
+
+ if (scf_entry_add_value(e, v) != SCF_SUCCESS)
+ scfdie();
+ }
+ }
+ }
+
+ free(nbuf);
+ scf_iter_destroy(viter);
+ scf_property_destroy(prop);
+ scf_iter_destroy(iter);
+
+ r = scf_transaction_commit(tx);
+ if (r == -1 && scf_error() != SCF_ERROR_PERMISSION_DENIED)
+ scfdie();
+
+ scf_transaction_destroy_children(tx);
+ scf_transaction_destroy(tx);
+
+ switch (r) {
+ case 1: return (0);
+ case 0: return (-2);
+ case -1: return (-1);
+
+ default:
+ abort();
+ }
+
+ /* NOTREACHED */
+}
+
+void
+lscf_revert(const char *snapname)
+{
+ scf_snapshot_t *snap, *prev;
+ scf_snaplevel_t *level, *nlevel;
+ scf_iter_t *iter;
+ scf_propertygroup_t *pg, *npg;
+ scf_property_t *prop;
+ scf_value_t *val;
+ char *nbuf, *tbuf;
+ uint8_t enabled;
+
+ lscf_prep_hndl();
+
+ if (cur_inst == NULL) {
+ semerr(gettext("Instance not selected.\n"));
+ return;
+ }
+
+ if (snapname != NULL) {
+ snap = scf_snapshot_create(g_hndl);
+ if (snap == NULL)
+ scfdie();
+
+ if (scf_instance_get_snapshot(cur_inst, snapname, snap) !=
+ SCF_SUCCESS) {
+ switch (scf_error()) {
+ case SCF_ERROR_INVALID_ARGUMENT:
+ semerr(gettext("Invalid snapshot name "
+ "\"%s\".\n"), snapname);
+ break;
+
+ case SCF_ERROR_NOT_FOUND:
+ semerr(gettext("No such snapshot.\n"));
+ break;
+
+ default:
+ scfdie();
+ }
+
+ scf_snapshot_destroy(snap);
+ return;
+ }
+ } else {
+ if (cur_snap != NULL) {
+ snap = cur_snap;
+ } else {
+ semerr(gettext("No snapshot selected.\n"));
+ return;
+ }
+ }
+
+ if ((prev = scf_snapshot_create(g_hndl)) == NULL ||
+ (level = scf_snaplevel_create(g_hndl)) == NULL ||
+ (iter = scf_iter_create(g_hndl)) == NULL ||
+ (pg = scf_pg_create(g_hndl)) == NULL ||
+ (npg = scf_pg_create(g_hndl)) == NULL ||
+ (prop = scf_property_create(g_hndl)) == NULL ||
+ (val = scf_value_create(g_hndl)) == NULL)
+ scfdie();
+
+ nbuf = safe_malloc(max_scf_name_len + 1);
+ tbuf = safe_malloc(max_scf_pg_type_len + 1);
+
+ /* Take the "previous" snapshot before we blow away the properties. */
+ if (scf_instance_get_snapshot(cur_inst, snap_previous, prev) == 0) {
+ if (_scf_snapshot_take_attach(cur_inst, prev) != 0)
+ scfdie();
+ } else {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ if (_scf_snapshot_take_new(cur_inst, snap_previous, prev) != 0)
+ scfdie();
+ }
+
+ /* Save general/enabled, since we're probably going to replace it. */
+ enabled = 2;
+ if (scf_instance_get_pg(cur_inst, scf_pg_general, pg) == 0 &&
+ scf_pg_get_property(pg, scf_property_enabled, prop) == 0 &&
+ scf_property_get_value(prop, val) == 0)
+ (void) scf_value_get_boolean(val, &enabled);
+
+ if (scf_snapshot_get_base_snaplevel(snap, level) != SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ goto out;
+ }
+
+ for (;;) {
+ boolean_t isinst;
+ uint32_t flags;
+ int r;
+
+ /* Clear the properties from the corresponding entity. */
+ isinst = snaplevel_is_instance(level);
+
+ if (!isinst)
+ r = scf_iter_service_pgs(iter, cur_svc);
+ else
+ r = scf_iter_instance_pgs(iter, cur_inst);
+ if (r != SCF_SUCCESS)
+ scfdie();
+
+ while ((r = scf_iter_next_pg(iter, pg)) == 1) {
+ if (scf_pg_get_flags(pg, &flags) != SCF_SUCCESS)
+ scfdie();
+
+ /* Skip nonpersistent pgs. */
+ if (flags & SCF_PG_FLAG_NONPERSISTENT)
+ continue;
+
+ if (scf_pg_delete(pg) != SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_PERMISSION_DENIED)
+ scfdie();
+
+ semerr(emsg_permission_denied);
+ goto out;
+ }
+ }
+ if (r == -1)
+ scfdie();
+
+ /* Copy the properties to the corresponding entity. */
+ if (scf_iter_snaplevel_pgs(iter, level) != SCF_SUCCESS)
+ scfdie();
+
+ while ((r = scf_iter_next_pg(iter, pg)) == 1) {
+ if (scf_pg_get_name(pg, nbuf, max_scf_name_len + 1) < 0)
+ scfdie();
+
+ if (scf_pg_get_type(pg, tbuf, max_scf_pg_type_len + 1) <
+ 0)
+ scfdie();
+
+ if (scf_pg_get_flags(pg, &flags) != SCF_SUCCESS)
+ scfdie();
+
+ if (!isinst)
+ r = scf_service_add_pg(cur_svc, nbuf, tbuf,
+ flags, npg);
+ else
+ r = scf_instance_add_pg(cur_inst, nbuf, tbuf,
+ flags, npg);
+ if (r != SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_PERMISSION_DENIED)
+ scfdie();
+
+ semerr(emsg_permission_denied);
+ goto out;
+ }
+
+ if ((enabled == 0 || enabled == 1) &&
+ strcmp(nbuf, scf_pg_general) == 0)
+ r = pg_copy(pg, npg, enabled);
+ else
+ r = pg_copy(pg, npg, 2);
+
+ switch (r) {
+ case 0:
+ break;
+
+ case -1:
+ semerr(emsg_permission_denied);
+ goto out;
+
+ case -2:
+ semerr(gettext(
+ "Interrupted by another change.\n"));
+ goto out;
+
+ default:
+ abort();
+ }
+ }
+ if (r == -1)
+ scfdie();
+
+ /* Get next level. */
+ nlevel = scf_snaplevel_create(g_hndl);
+ if (nlevel == NULL)
+ scfdie();
+
+ if (scf_snaplevel_get_next_snaplevel(level, nlevel) !=
+ SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ scf_snaplevel_destroy(nlevel);
+ break;
+ }
+
+ scf_snaplevel_destroy(level);
+ level = nlevel;
+ }
+
+ if (snapname == NULL) {
+ lscf_selectsnap(NULL);
+ snap = NULL; /* cur_snap has been destroyed */
+ }
+
+out:
+ free(tbuf);
+ free(nbuf);
+ scf_value_destroy(val);
+ scf_property_destroy(prop);
+ scf_pg_destroy(npg);
+ scf_pg_destroy(pg);
+ scf_iter_destroy(iter);
+ scf_snaplevel_destroy(level);
+ scf_snapshot_destroy(prev);
+ if (snap != cur_snap)
+ scf_snapshot_destroy(snap);
+}
+
+#ifndef NATIVE_BUILD
+/* ARGSUSED */
+CPL_MATCH_FN(complete_select)
+{
+ const char *arg0, *arg1, *arg1end;
+ int word_start, err = 0, r;
+ size_t len;
+ char *buf;
+
+ lscf_prep_hndl();
+
+ arg0 = line + strspn(line, " \t");
+ assert(strncmp(arg0, "select", sizeof ("select") - 1) == 0);
+
+ arg1 = arg0 + sizeof ("select") - 1;
+ arg1 += strspn(arg1, " \t");
+ word_start = arg1 - line;
+
+ arg1end = arg1 + strcspn(arg1, " \t");
+ if (arg1end < line + word_end)
+ return (0);
+
+ len = line + word_end - arg1;
+
+ buf = safe_malloc(max_scf_name_len + 1);
+
+ if (cur_snap != NULL) {
+ return (0);
+ } else if (cur_inst != NULL) {
+ return (0);
+ } else if (cur_svc != NULL) {
+ scf_instance_t *inst;
+ scf_iter_t *iter;
+
+ if ((inst = scf_instance_create(g_hndl)) == NULL ||
+ (iter = scf_iter_create(g_hndl)) == NULL)
+ scfdie();
+
+ if (scf_iter_service_instances(iter, cur_svc) != 0)
+ scfdie();
+
+ for (;;) {
+ r = scf_iter_next_instance(iter, inst);
+ if (r == 0)
+ break;
+ if (r != 1)
+ scfdie();
+
+ if (scf_instance_get_name(inst, buf,
+ max_scf_name_len + 1) < 0)
+ scfdie();
+
+ if (strncmp(buf, arg1, len) == 0) {
+ err = cpl_add_completion(cpl, line, word_start,
+ word_end, buf + len, "", " ");
+ if (err != 0)
+ break;
+ }
+ }
+
+ scf_iter_destroy(iter);
+ scf_instance_destroy(inst);
+
+ return (err);
+ } else {
+ scf_service_t *svc;
+ scf_iter_t *iter;
+
+ assert(cur_scope != NULL);
+
+ if ((svc = scf_service_create(g_hndl)) == NULL ||
+ (iter = scf_iter_create(g_hndl)) == NULL)
+ scfdie();
+
+ if (scf_iter_scope_services(iter, cur_scope) != 0)
+ scfdie();
+
+ for (;;) {
+ r = scf_iter_next_service(iter, svc);
+ if (r == 0)
+ break;
+ if (r != 1)
+ scfdie();
+
+ if (scf_service_get_name(svc, buf,
+ max_scf_name_len + 1) < 0)
+ scfdie();
+
+ if (strncmp(buf, arg1, len) == 0) {
+ err = cpl_add_completion(cpl, line, word_start,
+ word_end, buf + len, "", " ");
+ if (err != 0)
+ break;
+ }
+ }
+
+ scf_iter_destroy(iter);
+ scf_service_destroy(svc);
+
+ return (err);
+ }
+}
+
+/* ARGSUSED */
+CPL_MATCH_FN(complete_command)
+{
+ uint32_t scope = 0;
+
+ if (cur_snap != NULL)
+ scope = CS_SNAP;
+ else if (cur_inst != NULL)
+ scope = CS_INST;
+ else if (cur_svc != NULL)
+ scope = CS_SVC;
+ else
+ scope = CS_SCOPE;
+
+ return (scope ? add_cmd_matches(cpl, line, word_end, scope) : 0);
+}
+#endif /* NATIVE_BUILD */
diff --git a/usr/src/cmd/svc/svccfg/svccfg_main.c b/usr/src/cmd/svc/svccfg/svccfg_main.c
new file mode 100644
index 0000000000..56031d7560
--- /dev/null
+++ b/usr/src/cmd/svc/svccfg/svccfg_main.c
@@ -0,0 +1,235 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * svccfg - modify service configuration repository
+ */
+
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#include <errno.h>
+#include <libintl.h>
+#include <libscf.h>
+#include <libscf_priv.h>
+#include <libuutil.h>
+#include <locale.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "svccfg.h"
+
+#ifndef TEXT_DOMAIN
+#define TEXT_DOMAIN "SUNW_OST_OSCMD"
+#endif /* TEXT_DOMAIN */
+
+#define MAX_CMD_LINE_SZ 2048
+
+static const char *myname;
+int g_verbose = 0;
+const char *fmri;
+
+static void
+usage()
+{
+ (void) fprintf(stderr, gettext(
+ "Usage:\tsvccfg [-v] [-s FMRI] [-f file]\n"
+ "\tsvccfg [-v] [-s FMRI] <command> [args]\n"));
+ exit(UU_EXIT_USAGE);
+}
+
+void *
+safe_malloc(size_t sz)
+{
+ void *p;
+
+ if ((p = calloc(1, sz)) == NULL)
+ uu_die(gettext("Out of memory.\n"));
+
+ return (p);
+}
+
+char *
+safe_strdup(const char *cp)
+{
+ char *result;
+
+ result = strdup(cp);
+ if (result == NULL)
+ uu_die(gettext("Out of memory.\n"));
+
+ return (result);
+}
+
+/*
+ * Send a message to the user. If we're interactive, send it to stdout.
+ * Otherwise send it to stderr.
+ */
+static void
+vmessage(const char *fmt, va_list va)
+{
+ int interactive = est->sc_cmd_flags & SC_CMD_IACTIVE;
+ FILE *strm = interactive ? stdout : stderr;
+ const char *ptr;
+
+ if (!interactive) {
+ if (est->sc_cmd_file == NULL)
+ (void) fprintf(stderr, "%s: ", myname);
+ else
+ (void) fprintf(stderr, "%s (%s, line %d): ", myname,
+ est->sc_cmd_filename, est->sc_cmd_lineno - 1);
+ }
+
+ if (vfprintf(strm, fmt, va) < 0 && interactive)
+ uu_die(gettext("printf() error"));
+
+ ptr = strchr(fmt, '\0');
+ if (*(ptr - 1) != '\n')
+ (void) fprintf(strm, ": %s.\n", strerror(errno));
+}
+
+/*
+ * Display a warning. Should usually be predicated by g_verbose.
+ */
+/* PRINTFLIKE1 */
+void
+warn(const char *fmt, ...)
+{
+ va_list va;
+
+ va_start(va, fmt);
+ vmessage(fmt, va);
+ va_end(va);
+}
+
+/*
+ * Syntax error.
+ */
+void
+synerr(int com)
+{
+ if (est->sc_cmd_flags & SC_CMD_IACTIVE) {
+ help(com);
+ return;
+ }
+
+ warn(gettext("Syntax error.\n"));
+ exit(1);
+}
+
+/*
+ * Semantic error. Display the warning and exit if we're not interactive.
+ */
+/* PRINTFLIKE1 */
+void
+semerr(const char *fmt, ...)
+{
+ va_list va;
+
+ va_start(va, fmt);
+ vmessage(fmt, va);
+ va_end(va);
+
+ if ((est->sc_cmd_flags & (SC_CMD_IACTIVE | SC_CMD_DONT_EXIT)) == 0)
+ exit(1);
+}
+
+/*ARGSUSED*/
+static void
+initialize(int argc, char *argv[])
+{
+ myname = uu_setpname(argv[0]);
+ (void) atexit(lscf_cleanup);
+
+ (void) setlocale(LC_ALL, "");
+ (void) textdomain(TEXT_DOMAIN);
+
+ (void) lxml_init();
+ internal_init();
+ engine_init();
+ lscf_init(); /* must follow engine_init() */
+}
+
+int
+main(int argc, char *argv[])
+{
+ char *cmd, *command_file = NULL;
+ char *fmri = NULL;
+ int c;
+
+ while ((c = getopt(argc, argv, "vf:s:")) != EOF)
+ switch (c) {
+ case 'v':
+ g_verbose = 1;
+ break;
+
+ case 's':
+ fmri = optarg;
+ break;
+
+ case 'f':
+ command_file = optarg;
+ break;
+
+ default:
+ usage();
+ break;
+ }
+
+ initialize(argc, argv);
+
+ if (fmri != NULL)
+ lscf_select(fmri);
+
+ if (command_file != NULL)
+ return (engine_source(command_file, 0));
+
+ if (optind == argc) {
+ if (isatty(fileno(stdin)))
+ return (engine_interp());
+ else
+ return (engine_source("-", 0));
+ }
+
+ /*
+ * Knit together remaining arguments into a single statement.
+ */
+ cmd = safe_malloc(MAX_CMD_LINE_SZ);
+ for (c = optind; c < argc; c++) {
+ (void) strlcat(cmd, argv[c], MAX_CMD_LINE_SZ);
+ (void) strlcat(cmd, " ", MAX_CMD_LINE_SZ);
+ }
+
+ return (engine_exec(cmd));
+}
diff --git a/usr/src/cmd/svc/svccfg/svccfg_xml.c b/usr/src/cmd/svc/svccfg/svccfg_xml.c
new file mode 100644
index 0000000000..a3c04cff8a
--- /dev/null
+++ b/usr/src/cmd/svc/svccfg/svccfg_xml.c
@@ -0,0 +1,1937 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <libxml/parser.h>
+#include <libxml/xinclude.h>
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <libintl.h>
+#include <libuutil.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "svccfg.h"
+
+/*
+ * XML document manipulation routines
+ *
+ * These routines provide translation between the internal representation and
+ * XML.  Directionally-oriented verbs are with respect to the external source,
+ * so lxml_get_service() fetches a service from the XML file into the
+ * internal representation.
+ */
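+/*
+ * For orientation only, a minimal manifest fragment of the sort these
+ * routines consume (names and values are illustrative, not canonical):
+ *
+ *     <service_bundle type='manifest' name='example'>
+ *       <service name='site/example' type='service' version='1'>
+ *         <exec_method type='method' name='start'
+ *             exec='/lib/svc/method/example start' timeout_seconds='60' />
+ *         <property_group name='config' type='application'>
+ *           <propval name='greeting' type='astring' value='hello' />
+ *         </property_group>
+ *       </service>
+ *     </service_bundle>
+ */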
+
+const char * const delete_attr = "delete";
+const char * const enabled_attr = "enabled";
+const char * const name_attr = "name";
+const char * const override_attr = "override";
+const char * const type_attr = "type";
+const char * const value_attr = "value";
+const char * const true = "true";
+const char * const false = "false";
+
+/*
+ * The following list must be kept in the same order as the
+ * element_t array.
+ */
+static const char *lxml_elements[] = {
+ "astring_list", /* SC_ASTRING */
+ "boolean_list", /* SC_BOOLEAN */
+ "common_name", /* SC_COMMON_NAME */
+ "count_list", /* SC_COUNT */
+ "create_default_instance", /* SC_INSTANCE_CREATE_DEFAULT */
+ "dependency", /* SC_DEPENDENCY */
+ "dependent", /* SC_DEPENDENT */
+ "description", /* SC_DESCRIPTION */
+ "doc_link", /* SC_DOC_LINK */
+ "documentation", /* SC_DOCUMENTATION */
+ "enabled", /* SC_ENABLED */
+ "exec_method", /* SC_EXEC_METHOD */
+ "fmri_list", /* SC_FMRI */
+ "host_list", /* SC_HOST */
+ "hostname_list", /* SC_HOSTNAME */
+ "instance", /* SC_INSTANCE */
+ "integer_list", /* SC_INTEGER */
+ "loctext", /* SC_LOCTEXT */
+ "manpage", /* SC_MANPAGE */
+ "method_context", /* SC_METHOD_CONTEXT */
+ "method_credential", /* SC_METHOD_CREDENTIAL */
+ "method_profile", /* SC_METHOD_PROFILE */
+ "method_environment", /* SC_METHOD_ENVIRONMENT */
+ "envvar", /* SC_METHOD_ENVVAR */
+ "net_address_v4_list", /* SC_NET_ADDR_V4 */
+ "net_address_v6_list", /* SC_NET_ADDR_V6 */
+ "opaque_list", /* SC_OPAQUE */
+ "property", /* SC_PROPERTY */
+ "property_group", /* SC_PROPERTY_GROUP */
+ "propval", /* SC_PROPVAL */
+ "restarter", /* SC_RESTARTER */
+ "service", /* SC_SERVICE */
+ "service_bundle", /* SC_SERVICE_BUNDLE */
+ "service_fmri", /* SC_SERVICE_FMRI */
+ "single_instance", /* SC_INSTANCE_SINGLE */
+ "stability", /* SC_STABILITY */
+ "template", /* SC_TEMPLATE */
+ "time_list", /* SC_TIME */
+ "uri_list", /* SC_URI */
+ "ustring_list", /* SC_USTRING */
+ "value_node", /* SC_VALUE_NODE */
+ "xi:fallback", /* SC_XI_FALLBACK */
+ "xi:include" /* SC_XI_INCLUDE */
+};
+
+/*
+ * The following list must be kept in the same order as the
+ * element_t array.
+ */
+static const char *lxml_prop_types[] = {
+ "astring", /* SC_ASTRING */
+ "boolean", /* SC_BOOLEAN */
+ "", /* SC_COMMON_NAME */
+ "count", /* SC_COUNT */
+ "", /* SC_INSTANCE_CREATE_DEFAULT */
+ "", /* SC_DEPENDENCY */
+ "", /* SC_DEPENDENT */
+ "", /* SC_DESCRIPTION */
+ "", /* SC_DOC_LINK */
+ "", /* SC_DOCUMENTATION */
+ "", /* SC_ENABLED */
+ "", /* SC_EXEC_METHOD */
+ "fmri", /* SC_FMRI */
+ "host", /* SC_HOST */
+ "hostname", /* SC_HOSTNAME */
+ "", /* SC_INSTANCE */
+ "integer", /* SC_INTEGER */
+ "", /* SC_LOCTEXT */
+ "", /* SC_MANPAGE */
+ "", /* SC_METHOD_CONTEXT */
+ "", /* SC_METHOD_CREDENTIAL */
+ "", /* SC_METHOD_PROFILE */
+ "", /* SC_METHOD_ENVIRONMENT */
+ "", /* SC_METHOD_ENVVAR */
+ "net_address_v4", /* SC_NET_ADDR_V4 */
+ "net_address_v6", /* SC_NET_ADDR_V6 */
+ "opaque", /* SC_OPAQUE */
+ "", /* SC_PROPERTY */
+ "", /* SC_PROPERTY_GROUP */
+ "", /* SC_PROPVAL */
+ "", /* SC_RESTARTER */
+ "", /* SC_SERVICE */
+ "", /* SC_SERVICE_BUNDLE */
+ "", /* SC_SERVICE_FMRI */
+ "", /* SC_INSTANCE_SINGLE */
+ "", /* SC_STABILITY */
+ "", /* SC_TEMPLATE */
+ "time", /* SC_TIME */
+ "uri", /* SC_URI */
+ "ustring", /* SC_USTRING */
+ "" /* SC_VALUE_NODE */
+ "" /* SC_XI_FALLBACK */
+ "" /* SC_XI_INCLUDE */
+};
+
+int
+lxml_init()
+{
+ if (getenv("SVCCFG_NOVALIDATE") == NULL) {
+ /*
+ * DTD validation, with line numbers.
+ */
+ xmlLineNumbersDefault(1);
+ xmlLoadExtDtdDefaultValue |= XML_DETECT_IDS;
+ xmlLoadExtDtdDefaultValue |= XML_COMPLETE_ATTRS;
+ }
+
+ return (0);
+}
+
+static bundle_type_t
+lxml_xlate_bundle_type(xmlChar *type)
+{
+ if (xmlStrcmp(type, (const xmlChar *)"manifest") == 0)
+ return (SVCCFG_MANIFEST);
+
+ if (xmlStrcmp(type, (const xmlChar *)"profile") == 0)
+ return (SVCCFG_PROFILE);
+
+ if (xmlStrcmp(type, (const xmlChar *)"archive") == 0)
+ return (SVCCFG_ARCHIVE);
+
+ return (SVCCFG_UNKNOWN_BUNDLE);
+}
+
+static service_type_t
+lxml_xlate_service_type(xmlChar *type)
+{
+ if (xmlStrcmp(type, (const xmlChar *)"service") == 0)
+ return (SVCCFG_SERVICE);
+
+ if (xmlStrcmp(type, (const xmlChar *)"restarter") == 0)
+ return (SVCCFG_RESTARTER);
+
+ if (xmlStrcmp(type, (const xmlChar *)"milestone") == 0)
+ return (SVCCFG_MILESTONE);
+
+ return (SVCCFG_UNKNOWN_SERVICE);
+}
+
+static element_t
+lxml_xlate_element(const xmlChar *tag)
+{
+ int i;
+
+ for (i = 0; i < sizeof (lxml_elements) / sizeof (char *); i++)
+ if (xmlStrcmp(tag, (const xmlChar *)lxml_elements[i]) == 0)
+ return ((element_t)i);
+
+ return ((element_t)-1);
+}
+
+static uint_t
+lxml_xlate_boolean(const xmlChar *value)
+{
+ if (xmlStrcmp(value, (const xmlChar *)true) == 0)
+ return (1);
+
+ if (xmlStrcmp(value, (const xmlChar *)false) == 0)
+ return (0);
+
+ uu_die(gettext("illegal boolean value \"%s\"\n"), value);
+
+ /*NOTREACHED*/
+}
+
+static scf_type_t
+lxml_element_to_type(element_t type)
+{
+ switch (type) {
+ case SC_ASTRING: return (SCF_TYPE_ASTRING);
+ case SC_BOOLEAN: return (SCF_TYPE_BOOLEAN);
+ case SC_COUNT: return (SCF_TYPE_COUNT);
+ case SC_FMRI: return (SCF_TYPE_FMRI);
+ case SC_HOST: return (SCF_TYPE_HOST);
+ case SC_HOSTNAME: return (SCF_TYPE_HOSTNAME);
+ case SC_INTEGER: return (SCF_TYPE_INTEGER);
+ case SC_NET_ADDR_V4: return (SCF_TYPE_NET_ADDR_V4);
+ case SC_NET_ADDR_V6: return (SCF_TYPE_NET_ADDR_V6);
+ case SC_OPAQUE: return (SCF_TYPE_OPAQUE);
+ case SC_TIME: return (SCF_TYPE_TIME);
+ case SC_URI: return (SCF_TYPE_URI);
+ case SC_USTRING: return (SCF_TYPE_USTRING);
+
+ default:
+ uu_die(gettext("unknown value type (%d)\n"), type);
+ }
+
+ /* NOTREACHED */
+}
+
+static scf_type_t
+lxml_element_to_scf_type(element_t type)
+{
+ switch (type) {
+ case SC_ASTRING: return (SCF_TYPE_ASTRING);
+ case SC_BOOLEAN: return (SCF_TYPE_BOOLEAN);
+ case SC_COUNT: return (SCF_TYPE_COUNT);
+ case SC_FMRI: return (SCF_TYPE_FMRI);
+ case SC_HOST: return (SCF_TYPE_HOST);
+ case SC_HOSTNAME: return (SCF_TYPE_HOSTNAME);
+ case SC_INTEGER: return (SCF_TYPE_INTEGER);
+ case SC_NET_ADDR_V4: return (SCF_TYPE_NET_ADDR_V4);
+ case SC_NET_ADDR_V6: return (SCF_TYPE_NET_ADDR_V6);
+ case SC_OPAQUE: return (SCF_TYPE_OPAQUE);
+ case SC_TIME: return (SCF_TYPE_TIME);
+ case SC_URI: return (SCF_TYPE_URI);
+ case SC_USTRING: return (SCF_TYPE_USTRING);
+ default:
+ uu_die(gettext("unknown value type (%d)\n"), type);
+ }
+
+ /* NOTREACHED */
+}
+
+static int
+new_str_prop_from_attr(pgroup_t *pgrp, const char *pname, scf_type_t ty,
+ xmlNodePtr n, const char *attr)
+{
+ xmlChar *val;
+ property_t *p;
+ int r;
+
+ val = xmlGetProp(n, (xmlChar *)attr);
+
+ p = internal_property_create(pname, ty, 1, val);
+ r = internal_attach_property(pgrp, p);
+
+ if (r != 0)
+ internal_property_free(p);
+
+ return (r);
+}
+
+static int
+lxml_ignorable_block(xmlNodePtr n)
+{
+ return ((xmlStrcmp(n->name, (xmlChar *)"text") == 0 ||
+ xmlStrcmp(n->name, (xmlChar *)"comment") == 0) ? 1 : 0);
+}
+
+static int
+lxml_validate_string_value(scf_type_t type, const char *v)
+{
+ static scf_value_t *scf_value = NULL;
+ static scf_handle_t *scf_hndl = NULL;
+
+ if (scf_hndl == NULL && (scf_hndl = scf_handle_create(SCF_VERSION)) ==
+ NULL)
+ return (-1);
+
+ if (scf_value == NULL && (scf_value = scf_value_create(scf_hndl)) ==
+ NULL)
+ return (-1);
+
+ return (scf_value_set_from_string(scf_value, type, v));
+}
+
+static void
+lxml_free_str(value_t *val)
+{
+ free(val->sc_u.sc_string);
+}
+
+static value_t *
+lxml_make_value(element_t type, const xmlChar *value)
+{
+ value_t *v;
+ char *endptr;
+ scf_type_t scf_type = SCF_TYPE_INVALID;
+
+ v = internal_value_new();
+
+ v->sc_type = lxml_element_to_type(type);
+
+ switch (type) {
+ case SC_COUNT:
+ /*
+ * Although an SC_COUNT represents a uint64_t, the use
+ * of a negative value is acceptable due to the usage
+ * established by inetd(1M).
+ */
+ errno = 0;
+ v->sc_u.sc_count = strtoull((char *)value, &endptr, 10);
+ if (errno != 0 || endptr == (char *)value || *endptr)
+ uu_die(gettext("illegal value \"%s\" for "
+ "%s (%s)\n"), (char *)value,
+ lxml_prop_types[type],
+ (errno) ? strerror(errno) :
+ gettext("Illegal character"));
+ break;
+ case SC_INTEGER:
+ errno = 0;
+ v->sc_u.sc_integer = strtoll((char *)value, &endptr, 10);
+ if (errno != 0 || *endptr)
+ uu_die(gettext("illegal value \"%s\" for "
+ "%s (%s)\n"), (char *)value,
+ lxml_prop_types[type],
+ (errno) ? strerror(errno) : "Illegal character");
+ break;
+ case SC_OPAQUE:
+ case SC_HOST:
+ case SC_HOSTNAME:
+ case SC_NET_ADDR_V4:
+ case SC_NET_ADDR_V6:
+ case SC_FMRI:
+ case SC_URI:
+ case SC_TIME:
+ case SC_ASTRING:
+ case SC_USTRING:
+ scf_type = lxml_element_to_scf_type(type);
+
+ if ((v->sc_u.sc_string = strdup((char *)value)) == NULL)
+ uu_die(gettext("string duplication failed (%s)\n"),
+ strerror(errno));
+ if (lxml_validate_string_value(scf_type,
+ v->sc_u.sc_string) != 0)
+ uu_die(gettext("illegal value \"%s\" for "
+ "%s (%s)\n"), (char *)value,
+ lxml_prop_types[type],
+ (scf_error()) ? scf_strerror(scf_error()) :
+ gettext("Illegal format"));
+ v->sc_free = lxml_free_str;
+ break;
+ case SC_BOOLEAN:
+ v->sc_u.sc_count = lxml_xlate_boolean(value);
+ break;
+ default:
+ uu_die(gettext("unknown value type (%d)\n"), type);
+ break;
+ }
+
+ return (v);
+}
+
+static int
+lxml_get_value(property_t *prop, element_t vtype, xmlNodePtr value)
+{
+ xmlNodePtr cursor;
+
+ for (cursor = value->xmlChildrenNode; cursor != NULL;
+ cursor = cursor->next) {
+ xmlChar *assigned_value;
+ value_t *v;
+
+ if (lxml_ignorable_block(cursor))
+ continue;
+
+ switch (lxml_xlate_element(cursor->name)) {
+ case SC_VALUE_NODE:
+ if ((assigned_value = xmlGetProp(cursor,
+ (xmlChar *)value_attr)) == NULL)
+ uu_die(gettext("no value on value node?\n"));
+ break;
+ default:
+ uu_die(gettext("value list contains illegal element "
+ "\'%s\'\n"), cursor->name);
+ break;
+ }
+
+ v = lxml_make_value(vtype, assigned_value);
+
+ xmlFree(assigned_value);
+
+ internal_attach_value(prop, v);
+ }
+
+ return (0);
+}
+
+static int
+lxml_get_propval(pgroup_t *pgrp, xmlNodePtr propval)
+{
+ property_t *p;
+ element_t r;
+ value_t *v;
+ xmlChar *type, *val, *override;
+
+ p = internal_property_new();
+
+ p->sc_property_name = (char *)xmlGetProp(propval, (xmlChar *)name_attr);
+ if (p->sc_property_name == NULL)
+ uu_die(gettext("property name missing in group '%s'\n"),
+ pgrp->sc_pgroup_name);
+
+ type = xmlGetProp(propval, (xmlChar *)type_attr);
+ if (type == NULL)
+ uu_die(gettext("property type missing for property '%s/%s'\n"),
+ pgrp->sc_pgroup_name, p->sc_property_name);
+
+ for (r = 0; r < sizeof (lxml_prop_types) / sizeof (char *); ++r) {
+ if (xmlStrcmp(type, (const xmlChar *)lxml_prop_types[r]) == 0)
+ break;
+ }
+ if (r >= sizeof (lxml_prop_types) / sizeof (char *))
+ uu_die(gettext("property type invalid for property '%s/%s'\n"),
+ pgrp->sc_pgroup_name, p->sc_property_name);
+
+ p->sc_value_type = lxml_element_to_type(r);
+
+ val = xmlGetProp(propval, (xmlChar *)value_attr);
+ if (val == NULL)
+ uu_die(gettext("property value missing for property '%s/%s'\n"),
+ pgrp->sc_pgroup_name, p->sc_property_name);
+
+ v = lxml_make_value(r, val);
+ internal_attach_value(p, v);
+
+ override = xmlGetProp(propval, (xmlChar *)override_attr);
+ p->sc_property_override = (xmlStrcmp(override, (xmlChar *)true) == 0);
+ xmlFree(override);
+
+ return (internal_attach_property(pgrp, p));
+}
+
+static int
+lxml_get_property(pgroup_t *pgrp, xmlNodePtr property)
+{
+ property_t *p;
+ xmlNodePtr cursor;
+ element_t r;
+ xmlChar *type, *override;
+
+ p = internal_property_new();
+
+ if ((p->sc_property_name = (char *)xmlGetProp(property,
+ (xmlChar *)name_attr)) == NULL)
+ uu_die(gettext("property name missing in group \'%s\'\n"),
+ pgrp->sc_pgroup_name);
+
+ if ((type = xmlGetProp(property, (xmlChar *)type_attr)) == NULL)
+ uu_die(gettext("property type missing for "
+ "property \'%s/%s\'\n"), pgrp->sc_pgroup_name,
+ p->sc_property_name);
+
+ for (cursor = property->xmlChildrenNode; cursor != NULL;
+ cursor = cursor->next) {
+ if (lxml_ignorable_block(cursor))
+ continue;
+
+ switch (r = lxml_xlate_element(cursor->name)) {
+ case SC_ASTRING:
+ case SC_BOOLEAN:
+ case SC_COUNT:
+ case SC_FMRI:
+ case SC_HOST:
+ case SC_HOSTNAME:
+ case SC_INTEGER:
+ case SC_NET_ADDR_V4:
+ case SC_NET_ADDR_V6:
+ case SC_OPAQUE:
+ case SC_TIME:
+ case SC_URI:
+ case SC_USTRING:
+ if (strcmp(lxml_prop_types[r], (const char *)type) != 0)
+ uu_die(gettext("property \'%s\' "
+ "type-to-list mismatch\n"),
+ p->sc_property_name);
+
+ p->sc_value_type = lxml_element_to_type(r);
+ (void) lxml_get_value(p, r, cursor);
+ break;
+ default:
+ uu_die(gettext("unknown value list type: %s\n"),
+ cursor->name);
+ break;
+ }
+ }
+
+ xmlFree(type);
+
+ override = xmlGetProp(property, (xmlChar *)override_attr);
+ p->sc_property_override = (xmlStrcmp(override, (xmlChar *)true) == 0);
+ xmlFree(override);
+
+ return (internal_attach_property(pgrp, p));
+}
+
+static int
+lxml_get_pgroup_stability(pgroup_t *pgrp, xmlNodePtr stab)
+{
+ return (new_str_prop_from_attr(pgrp, SCF_PROPERTY_STABILITY,
+ SCF_TYPE_ASTRING, stab, value_attr));
+}
+
+/*
+ * Property groups can go on any of a service, an instance, or a template.
+ */
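+/*
+ * An illustrative property_group element as parsed below (attribute values
+ * are hypothetical):
+ *
+ *     <property_group name='config' type='application'>
+ *       <stability value='Evolving' />
+ *       <propval name='greeting' type='astring' value='hello' />
+ *       <property name='hosts' type='astring'>
+ *         <astring_list>
+ *           <value_node value='a' />
+ *           <value_node value='b' />
+ *         </astring_list>
+ *       </property>
+ *     </property_group>
+ */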
+static int
+lxml_get_pgroup(entity_t *entity, xmlNodePtr pgroup)
+{
+ pgroup_t *pg;
+ xmlNodePtr cursor;
+ xmlChar *name, *type, *delete;
+
+ /*
+ * property group attributes:
+ * name: string
+ * type: string | framework | application
+ */
+ name = xmlGetProp(pgroup, (xmlChar *)name_attr);
+ type = xmlGetProp(pgroup, (xmlChar *)type_attr);
+ pg = internal_pgroup_find_or_create(entity, (char *)name, (char *)type);
+ xmlFree(name);
+ xmlFree(type);
+
+ /*
+ * Walk the children of this property_group element, which are a stability
+ * element, property elements, or propval elements.
+ */
+ for (cursor = pgroup->xmlChildrenNode; cursor != NULL;
+ cursor = cursor->next) {
+ if (lxml_ignorable_block(cursor))
+ continue;
+
+ switch (lxml_xlate_element(cursor->name)) {
+ case SC_STABILITY:
+ (void) lxml_get_pgroup_stability(pg, cursor);
+ break;
+ case SC_PROPERTY:
+ (void) lxml_get_property(pg, cursor);
+ break;
+ case SC_PROPVAL:
+ (void) lxml_get_propval(pg, cursor);
+ break;
+ default:
+ abort();
+ break;
+ }
+ }
+
+ delete = xmlGetProp(pgroup, (xmlChar *)delete_attr);
+ pg->sc_pgroup_delete = (xmlStrcmp(delete, (xmlChar *)true) == 0);
+ xmlFree(delete);
+
+ return (0);
+}
+
+
+/*
+ * Dependency groups and execution methods can go on either a service or an
+ * instance.
+ */
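+/*
+ * An illustrative method_context element (values hypothetical) as handled
+ * by the routines below:
+ *
+ *     <method_context working_directory='/' project=':default'>
+ *       <method_credential user='nobody' group='nobody' privileges='basic' />
+ *       <method_environment>
+ *         <envvar name='PATH' value='/usr/bin:/usr/sbin' />
+ *       </method_environment>
+ *     </method_context>
+ */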
+
+static int
+lxml_get_method_profile(pgroup_t *pg, xmlNodePtr profile)
+{
+ property_t *p;
+
+ p = internal_property_create(SCF_PROPERTY_USE_PROFILE, SCF_TYPE_BOOLEAN,
+ 1, (uint64_t)1);
+ if (internal_attach_property(pg, p) != 0)
+ return (-1);
+
+ return (new_str_prop_from_attr(pg, SCF_PROPERTY_PROFILE,
+ SCF_TYPE_ASTRING, profile, name_attr));
+}
+
+static int
+lxml_get_method_credential(pgroup_t *pg, xmlNodePtr cred)
+{
+ property_t *p;
+
+ p = internal_property_create(SCF_PROPERTY_USE_PROFILE, SCF_TYPE_BOOLEAN,
+ 1, (uint64_t)0);
+ if (internal_attach_property(pg, p) != 0)
+ return (-1);
+
+ if (new_str_prop_from_attr(pg, SCF_PROPERTY_USER, SCF_TYPE_ASTRING,
+ cred, "user") != 0)
+ return (-1);
+
+ if (new_str_prop_from_attr(pg, SCF_PROPERTY_GROUP, SCF_TYPE_ASTRING,
+ cred, "group") != 0)
+ return (-1);
+
+ if (new_str_prop_from_attr(pg, SCF_PROPERTY_SUPP_GROUPS,
+ SCF_TYPE_ASTRING, cred, "supp_groups") != 0)
+ return (-1);
+
+ if (new_str_prop_from_attr(pg, SCF_PROPERTY_PRIVILEGES,
+ SCF_TYPE_ASTRING, cred, "privileges") != 0)
+ return (-1);
+
+ if (new_str_prop_from_attr(pg, SCF_PROPERTY_LIMIT_PRIVILEGES,
+ SCF_TYPE_ASTRING, cred, "limit_privileges") != 0)
+ return (-1);
+
+ return (0);
+}
+
+static char *
+lxml_get_envvar(xmlNodePtr envvar)
+{
+ char *name;
+ char *value;
+ char *ret;
+
+ name = (char *)xmlGetProp(envvar, (xmlChar *)"name");
+ value = (char *)xmlGetProp(envvar, (xmlChar *)"value");
+
+ if (strlen(name) == 0 || strchr(name, '=') != NULL)
+ uu_die(gettext("Invalid environment variable "
+ "\"%s\".\n"), name);
+ if (strstr(name, "SMF_") == name)
+ uu_die(gettext("Invalid environment variable "
+ "\"%s\"; \"SMF_\" prefix is reserved.\n"), name);
+
+ ret = uu_msprintf("%s=%s", name, value);
+ xmlFree(name);
+ xmlFree(value);
+ return (ret);
+}
+
+static int
+lxml_get_method_environment(pgroup_t *pg, xmlNodePtr environment)
+{
+ property_t *p;
+ xmlNodePtr cursor;
+ value_t *val;
+
+ p = internal_property_create(SCF_PROPERTY_ENVIRONMENT,
+ SCF_TYPE_ASTRING, 0);
+
+ for (cursor = environment->xmlChildrenNode; cursor != NULL;
+ cursor = cursor->next) {
+ char *tmp;
+
+ if (lxml_ignorable_block(cursor))
+ continue;
+
+ if (lxml_xlate_element(cursor->name) != SC_METHOD_ENVVAR)
+ uu_die(gettext("illegal element \"%s\" on "
+ "method environment for \"%s\"\n"),
+ cursor->name, pg->sc_pgroup_name);
+
+ if ((tmp = lxml_get_envvar(cursor)) == NULL)
+ uu_die(gettext("Out of memory\n"));
+
+ val = internal_value_new();
+ val->sc_u.sc_string = tmp;
+ val->sc_type = SCF_TYPE_ASTRING;
+ val->sc_free = lxml_free_str;
+ internal_attach_value(p, val);
+ }
+
+ if (internal_attach_property(pg, p) != 0) {
+ internal_property_free(p);
+ return (-1);
+ }
+
+ return (0);
+}
+
+static int
+lxml_get_method_context(pgroup_t *pg, xmlNodePtr ctx)
+{
+ xmlNodePtr cursor;
+
+ if (new_str_prop_from_attr(pg, SCF_PROPERTY_WORKING_DIRECTORY,
+ SCF_TYPE_ASTRING, ctx, "working_directory") != 0)
+ return (-1);
+
+ if (new_str_prop_from_attr(pg, SCF_PROPERTY_PROJECT, SCF_TYPE_ASTRING,
+ ctx, "project") != 0)
+ return (-1);
+
+ if (new_str_prop_from_attr(pg, SCF_PROPERTY_RESOURCE_POOL,
+ SCF_TYPE_ASTRING, ctx, "resource_pool") != 0)
+ return (-1);
+
+ for (cursor = ctx->xmlChildrenNode; cursor != NULL;
+ cursor = cursor->next) {
+ if (lxml_ignorable_block(cursor))
+ continue;
+
+ switch (lxml_xlate_element(cursor->name)) {
+ case SC_METHOD_CREDENTIAL:
+ (void) lxml_get_method_credential(pg, cursor);
+ break;
+ case SC_METHOD_PROFILE:
+ (void) lxml_get_method_profile(pg, cursor);
+ break;
+ case SC_METHOD_ENVIRONMENT:
+ (void) lxml_get_method_environment(pg, cursor);
+ break;
+ default:
+ semerr(gettext("illegal element \'%s\' in method "
+ "context\n"), (char *)cursor);
+ break;
+ }
+ }
+
+ return (0);
+}
+
+static int
+lxml_get_entity_method_context(entity_t *entity, xmlNodePtr ctx)
+{
+ pgroup_t *pg;
+
+ pg = internal_pgroup_find_or_create(entity, SCF_PG_METHOD_CONTEXT,
+ (char *)scf_group_framework);
+
+ return (lxml_get_method_context(pg, ctx));
+}
+
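+/*
+ * For illustration only: a hypothetical exec_method element of the shape
+ * handled below (element and attribute spellings are assumed here, not
+ * quoted from the DTD) might look like
+ *
+ *	<exec_method type='method' name='start'
+ *	    exec='/lib/svc/method/example start' timeout_seconds='60'>
+ *		<method_context>
+ *			<method_credential user='root' group='root' />
+ *		</method_context>
+ *	</exec_method>
+ */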
+static int
+lxml_get_exec_method(entity_t *entity, xmlNodePtr emeth)
+{
+ pgroup_t *pg;
+ property_t *p;
+ xmlChar *name, *timeout, *delete;
+ xmlNodePtr cursor;
+ int r = 0;
+
+ name = xmlGetProp(emeth, (xmlChar *)name_attr);
+ pg = internal_pgroup_find_or_create(entity, (char *)name,
+ (char *)SCF_GROUP_METHOD);
+ xmlFree(name);
+
+ if (new_str_prop_from_attr(pg, SCF_PROPERTY_TYPE, SCF_TYPE_ASTRING,
+ emeth, type_attr) != 0 ||
+ new_str_prop_from_attr(pg, SCF_PROPERTY_EXEC, SCF_TYPE_ASTRING,
+ emeth, "exec") != 0)
+ return (-1);
+
+ timeout = xmlGetProp(emeth, (xmlChar *)"timeout_seconds");
+ if (timeout != NULL) {
+ uint64_t u_timeout;
+ char *endptr;
+ /*
+ * Although an SC_COUNT represents a uint64_t, the use
+ * of a negative value is acceptable due to the usage
+ * established by inetd(1M).
+ */
+ errno = 0;
+ u_timeout = strtoull((char *)timeout, &endptr, 10);
+ if (errno != 0 || endptr == (char *)timeout || *endptr)
+ uu_die(gettext("illegal value \"%s\" for "
+ "timeout_seconds (%s)\n"),
+ (char *)timeout, (errno) ? strerror(errno):
+ gettext("Illegal character"));
+ p = internal_property_create(SCF_PROPERTY_TIMEOUT,
+ SCF_TYPE_COUNT, 1, u_timeout);
+ r = internal_attach_property(pg, p);
+ xmlFree(timeout);
+ }
+ if (r != 0)
+ return (-1);
+
+ /*
+ * There is a possibility that a method context also exists, in which
+ * case the following attributes are defined: project, resource_pool,
+ * working_directory, profile, user, group, privileges, limit_privileges
+ */
+ for (cursor = emeth->xmlChildrenNode; cursor != NULL;
+ cursor = cursor->next) {
+ if (lxml_ignorable_block(cursor))
+ continue;
+
+ switch (lxml_xlate_element(cursor->name)) {
+ case SC_STABILITY:
+ if (lxml_get_pgroup_stability(pg, cursor) != 0)
+ return (-1);
+ break;
+
+ case SC_METHOD_CONTEXT:
+ (void) lxml_get_method_context(pg, cursor);
+ break;
+
+ case SC_PROPVAL:
+ (void) lxml_get_propval(pg, cursor);
+ break;
+
+ case SC_PROPERTY:
+ (void) lxml_get_property(pg, cursor);
+ break;
+
+ default:
+ uu_die(gettext("illegal element \"%s\" on "
+ "execution method \"%s\"\n"), cursor->name,
+ pg->sc_pgroup_name);
+ break;
+ }
+ }
+
+ delete = xmlGetProp(emeth, (xmlChar *)delete_attr);
+ pg->sc_pgroup_delete = (xmlStrcmp(delete, (xmlChar *)true) == 0);
+ xmlFree(delete);
+
+ return (0);
+}
+
+static int
+lxml_get_dependency(entity_t *entity, xmlNodePtr dependency)
+{
+ pgroup_t *pg;
+ property_t *p;
+ xmlNodePtr cursor;
+ xmlChar *name;
+ xmlChar *delete;
+
+ /*
+ * dependency attributes:
+ * name: string
+ * grouping: require_all | require_any | exclude_all | optional_all
+ * restart_on: string (error | restart | refresh | none)
+ * type: service | path | host
+ */
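+
+ /*
+ * For illustration only: a hypothetical dependency element of this shape
+ * (element and attribute spellings are assumed here, not quoted from the
+ * DTD) might look like
+ *
+ *	<dependency name='loopback' grouping='require_all'
+ *	    restart_on='error' type='service'>
+ *		<service_fmri value='svc:/network/loopback' />
+ *	</dependency>
+ */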
+
+ name = xmlGetProp(dependency, (xmlChar *)name_attr);
+ pg = internal_pgroup_find_or_create(entity, (char *)name,
+ (char *)SCF_GROUP_DEPENDENCY);
+ xmlFree(name);
+
+ if (new_str_prop_from_attr(pg, SCF_PROPERTY_TYPE, SCF_TYPE_ASTRING,
+ dependency, type_attr) != 0)
+ return (-1);
+
+ if (new_str_prop_from_attr(pg, SCF_PROPERTY_RESTART_ON,
+ SCF_TYPE_ASTRING, dependency, "restart_on") != 0)
+ return (-1);
+
+ if (new_str_prop_from_attr(pg, SCF_PROPERTY_GROUPING, SCF_TYPE_ASTRING,
+ dependency, "grouping") != 0)
+ return (-1);
+
+ p = internal_property_create(SCF_PROPERTY_ENTITIES, SCF_TYPE_FMRI, 0);
+ if (internal_attach_property(pg, p) != 0)
+ return (-1);
+
+ for (cursor = dependency->xmlChildrenNode; cursor != NULL;
+ cursor = cursor->next) {
+ xmlChar *value;
+ value_t *v;
+
+ if (lxml_ignorable_block(cursor))
+ continue;
+
+ switch (lxml_xlate_element(cursor->name)) {
+ case SC_STABILITY:
+ if (lxml_get_pgroup_stability(pg, cursor) != 0)
+ return (-1);
+ break;
+
+ case SC_SERVICE_FMRI:
+ value = xmlGetProp(cursor, (xmlChar *)value_attr);
+ if (value != NULL) {
+ if (lxml_validate_string_value(SCF_TYPE_FMRI,
+ (char *)value) != 0)
+ uu_die(gettext("illegal value \"%s\" "
+ "for %s (%s)\n"), (char *)value,
+ lxml_prop_types[SC_FMRI],
+ (scf_error()) ?
+ scf_strerror(scf_error()) :
+ gettext("Illegal format"));
+ v = internal_value_new();
+ v->sc_type = SCF_TYPE_FMRI;
+ v->sc_u.sc_string = (char *)value;
+ internal_attach_value(p, v);
+ }
+
+ break;
+
+ case SC_PROPVAL:
+ (void) lxml_get_propval(pg, cursor);
+ break;
+
+ case SC_PROPERTY:
+ (void) lxml_get_property(pg, cursor);
+ break;
+
+ default:
+ uu_die(gettext("illegal element \"%s\" on "
+ "dependency group \"%s\"\n"), cursor->name, name);
+ break;
+ }
+ }
+
+ delete = xmlGetProp(dependency, (xmlChar *)delete_attr);
+ pg->sc_pgroup_delete = (xmlStrcmp(delete, (xmlChar *)true) == 0);
+ xmlFree(delete);
+
+ return (0);
+}
+
+/*
+ * Dependents are hairy. They should cause a dependency pg to be created in
+ * another service, but we can't do that here; we'll have to wait until the
+ * import routines. So for now we'll add the dependency group that should go
+ * in the other service to the entity's dependent list.
+ */
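+/*
+ * For illustration only: a hypothetical dependent element of this shape
+ * (element and attribute spellings are assumed here, not quoted from the
+ * DTD) might look like
+ *
+ *	<dependent name='example_dep' grouping='require_all' restart_on='none'>
+ *		<service_fmri value='svc:/milestone/example' />
+ *	</dependent>
+ */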
+static int
+lxml_get_dependent(entity_t *entity, xmlNodePtr dependent)
+{
+ xmlChar *name, *or;
+ xmlNodePtr sf;
+ xmlChar *fmri, *delete;
+ pgroup_t *pg;
+ property_t *p;
+ xmlNodePtr n;
+ char *myfmri;
+
+ name = xmlGetProp(dependent, (xmlChar *)name_attr);
+
+ if (internal_pgroup_find(entity, (char *)name, NULL) != NULL) {
+ semerr(gettext("Property group and dependent of entity %s "
+ "have same name \"%s\".\n"), entity->sc_name, name);
+ xmlFree(name);
+ return (-1);
+ }
+
+ or = xmlGetProp(dependent, (xmlChar *)override_attr);
+
+ pg = internal_pgroup_new();
+ pg->sc_pgroup_name = (char *)name;
+ pg->sc_pgroup_type = (char *)SCF_GROUP_DEPENDENCY;
+ pg->sc_pgroup_override = (xmlStrcmp(or, (xmlChar *)true) == 0);
+ xmlFree(or);
+ if (internal_attach_dependent(entity, pg) != 0) {
+ xmlFree(name);
+ internal_pgroup_free(pg);
+ return (-1);
+ }
+
+ for (sf = dependent->children; sf != NULL; sf = sf->next)
+ if (xmlStrcmp(sf->name, (xmlChar *)"service_fmri") == 0)
+ break;
+ assert(sf != NULL);
+ fmri = xmlGetProp(sf, (xmlChar *)value_attr);
+ pg->sc_pgroup_fmri = (char *)fmri;
+
+ if (new_str_prop_from_attr(pg, SCF_PROPERTY_RESTART_ON,
+ SCF_TYPE_ASTRING, dependent, "restart_on") != 0)
+ return (-1);
+
+ if (new_str_prop_from_attr(pg, SCF_PROPERTY_GROUPING, SCF_TYPE_ASTRING,
+ dependent, "grouping") != 0)
+ return (-1);
+
+ myfmri = safe_malloc(max_scf_fmri_len + 1);
+ if (entity->sc_etype == SVCCFG_SERVICE_OBJECT) {
+ if (snprintf(myfmri, max_scf_fmri_len + 1, "svc:/%s",
+ entity->sc_name) < 0)
+ bad_error("snprintf", errno);
+ } else {
+ assert(entity->sc_etype == SVCCFG_INSTANCE_OBJECT);
+ if (snprintf(myfmri, max_scf_fmri_len + 1, "svc:/%s:%s",
+ entity->sc_parent->sc_name, entity->sc_name) < 0)
+ bad_error("snprintf", errno);
+ }
+
+ p = internal_property_create(SCF_PROPERTY_ENTITIES, SCF_TYPE_FMRI, 1,
+ myfmri);
+ if (internal_attach_property(pg, p) != 0)
+ return (-1);
+
+ /* Create a property to serve as a do-not-export flag. */
+ p = internal_property_create("external", SCF_TYPE_BOOLEAN, 1,
+ (uint64_t)1);
+ if (internal_attach_property(pg, p) != 0)
+ return (-1);
+
+ for (n = sf->next; n != NULL; n = n->next) {
+ if (lxml_ignorable_block(n))
+ continue;
+
+ switch (lxml_xlate_element(n->name)) {
+ case SC_STABILITY:
+ if (new_str_prop_from_attr(pg,
+ SCF_PROPERTY_ENTITY_STABILITY, SCF_TYPE_ASTRING, n,
+ value_attr) != 0)
+ return (-1);
+ break;
+
+ case SC_PROPVAL:
+ (void) lxml_get_propval(pg, n);
+ break;
+
+ case SC_PROPERTY:
+ (void) lxml_get_property(pg, n);
+ break;
+
+ default:
+ uu_die(gettext("unexpected element %s.\n"), n->name);
+ }
+ }
+
+ /* Go back and fill in defaults. */
+ if (internal_property_find(pg, SCF_PROPERTY_TYPE) == NULL) {
+ p = internal_property_create(SCF_PROPERTY_TYPE,
+ SCF_TYPE_ASTRING, 1, "service");
+ if (internal_attach_property(pg, p) != 0)
+ return (-1);
+ }
+
+ delete = xmlGetProp(dependent, (xmlChar *)delete_attr);
+ pg->sc_pgroup_delete = (xmlStrcmp(delete, (xmlChar *)true) == 0);
+ xmlFree(delete);
+
+ pg = internal_pgroup_find_or_create(entity, "dependents",
+ (char *)scf_group_framework);
+ p = internal_property_create((char *)name, SCF_TYPE_ASTRING, 1, fmri);
+ if (internal_attach_property(pg, p) != 0)
+ return (-1);
+
+ return (0);
+}
+
+static int
+lxml_get_entity_stability(entity_t *entity, xmlNodePtr rstr)
+{
+ pgroup_t *pg;
+ property_t *p;
+ xmlChar *stabval;
+
+ if ((stabval = xmlGetProp(rstr, (xmlChar *)value_attr)) == NULL) {
+ uu_warn(gettext("no stability value found\n"));
+ stabval = (xmlChar *)strdup("External");
+ }
+
+ pg = internal_pgroup_find_or_create(entity, (char *)scf_pg_general,
+ (char *)scf_group_framework);
+
+ p = internal_property_create(SCF_PROPERTY_ENTITY_STABILITY,
+ SCF_TYPE_ASTRING, 1, stabval);
+
+ return (internal_attach_property(pg, p));
+}
+
+static int
+lxml_get_restarter(entity_t *entity, xmlNodePtr rstr)
+{
+ pgroup_t *pg;
+ property_t *p;
+ xmlChar *restarter;
+ xmlNode *cursor;
+ int r;
+
+ /*
+ * Go find child. Child is a service_fmri element. value attribute
+ * contains restarter FMRI.
+ */
+
+ pg = internal_pgroup_find_or_create(entity, (char *)scf_pg_general,
+ (char *)scf_group_framework);
+
+ /*
+ * Walk its child elements, as appropriate.
+ */
+ for (cursor = rstr->xmlChildrenNode; cursor != NULL;
+ cursor = cursor->next) {
+ if (lxml_ignorable_block(cursor))
+ continue;
+
+ switch (lxml_xlate_element(cursor->name)) {
+ case SC_SERVICE_FMRI:
+ restarter = xmlGetProp(cursor, (xmlChar *)value_attr);
+ break;
+ default:
+ uu_die(gettext("illegal element \"%s\" on restarter "
+ "element for \"%s\"\n"), cursor->name,
+ entity->sc_name);
+ break;
+ }
+ }
+
+ p = internal_property_create(SCF_PROPERTY_RESTARTER, SCF_TYPE_FMRI, 1,
+ restarter);
+
+ r = internal_attach_property(pg, p);
+ if (r != 0) {
+ internal_property_free(p);
+ return (-1);
+ }
+
+ return (0);
+}
+
+static void
+sanitize_locale(uchar_t *locale)
+{
+ for (; *locale != '\0'; locale++)
+ if (!isalnum(*locale) && *locale != '_')
+ *locale = '_';
+}
+
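+/*
+ * Add the text of a single loctext element to pg as a ustring property named
+ * after the (sanitized) locale.  For illustration only (spellings assumed,
+ * not quoted from the DTD), a fragment such as
+ *
+ *	<loctext xml:lang='C'> console login </loctext>
+ *
+ * would yield a ustring property named "C" whose value is "console login"
+ * once leading and trailing whitespace have been stripped.
+ */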
+static int
+lxml_get_loctext(entity_t *service, pgroup_t *pg, xmlNodePtr loctext)
+{
+ xmlNodePtr cursor;
+ xmlChar *val;
+ char *stripped, *cp;
+ property_t *p;
+ int r;
+
+ if ((val = xmlGetProp(loctext, (xmlChar *)"xml:lang")) == NULL)
+ if ((val = xmlGetProp(loctext, (xmlChar *)"lang")) == NULL)
+ val = (xmlChar *)"unknown";
+
+ sanitize_locale(val);
+
+ for (cursor = loctext->xmlChildrenNode; cursor != NULL;
+ cursor = cursor->next) {
+ if (strcmp("text", (const char *)cursor->name) == 0) {
+ break;
+ } else if (strcmp("comment", (const char *)cursor->name) != 0) {
+ uu_die(gettext("illegal element \"%s\" on loctext "
+ "element for \"%s\"\n"), cursor->name,
+ service->sc_name);
+ }
+ }
+
+ if (cursor == NULL) {
+ uu_die(gettext("loctext element has no content for \"%s\"\n"),
+ service->sc_name);
+ }
+
+ /*
+ * Remove leading and trailing whitespace.
+ */
+ if ((stripped = strdup((const char *)cursor->content)) == NULL)
+ uu_die(gettext("Out of memory\n"));
+
+ for (; isspace(*stripped); stripped++)
+ ;
+ for (cp = stripped + strlen(stripped) - 1;
+ cp >= stripped && isspace(*cp); cp--)
+ ;
+ *(cp + 1) = '\0';
+
+ p = internal_property_create((const char *)val, SCF_TYPE_USTRING, 1,
+ stripped);
+
+ r = internal_attach_property(pg, p);
+ if (r != 0)
+ internal_property_free(p);
+
+ return (r);
+}
+
+static int
+lxml_get_tm_common_name(entity_t *service, xmlNodePtr common_name)
+{
+ xmlNodePtr cursor;
+ pgroup_t *pg;
+
+ /*
+ * Create the property group, if absent.
+ */
+ pg = internal_pgroup_find_or_create(service,
+ (char *)SCF_PG_TM_COMMON_NAME, (char *)SCF_GROUP_TEMPLATE);
+
+ /*
+ * Iterate through one or more loctext elements. The locale is the
+ * property name; the contents are the ustring value for the property.
+ */
+ for (cursor = common_name->xmlChildrenNode; cursor != NULL;
+ cursor = cursor->next) {
+ if (lxml_ignorable_block(cursor))
+ continue;
+
+ switch (lxml_xlate_element(cursor->name)) {
+ case SC_LOCTEXT:
+ if (lxml_get_loctext(service, pg, cursor))
+ return (-1);
+ break;
+ default:
+ uu_die(gettext("illegal element \"%s\" on common_name "
+ "element for \"%s\"\n"), cursor->name,
+ service->sc_name);
+ break;
+ }
+ }
+
+ return (0);
+}
+
+static int
+lxml_get_tm_description(entity_t *service, xmlNodePtr description)
+{
+ xmlNodePtr cursor;
+ pgroup_t *pg;
+
+ /*
+ * Create the property group, if absent.
+ */
+ pg = internal_pgroup_find_or_create(service,
+ (char *)SCF_PG_TM_DESCRIPTION, (char *)SCF_GROUP_TEMPLATE);
+
+ /*
+ * Iterate through one or more loctext elements. The locale is the
+ * property name; the contents are the ustring value for the property.
+ */
+ for (cursor = description->xmlChildrenNode; cursor != NULL;
+ cursor = cursor->next) {
+ if (lxml_ignorable_block(cursor))
+ continue;
+
+ switch (lxml_xlate_element(cursor->name)) {
+ case SC_LOCTEXT:
+ if (lxml_get_loctext(service, pg, cursor))
+ return (-1);
+ break;
+ default:
+ uu_die(gettext("illegal element \"%s\" on description "
+ "element for \"%s\"\n"), cursor->name,
+ service->sc_name);
+ break;
+ }
+ }
+
+ return (0);
+}
+
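+/*
+ * Convert a human-readable label (for example a manual page title or a
+ * document name) into a legal property group name: prepend the given prefix,
+ * shorten the result to the repository name limit by joining its first and
+ * last pieces with "..", and map remaining non-property characters to '_'.
+ * For illustration only (prefix values are defined elsewhere), a title of
+ * "system/identity" with a "manpage_" prefix would become something like
+ * "manpage_system_identity".
+ */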
+static char *
+lxml_label_to_groupname(const char *prefix, const char *in)
+{
+ char *out, *cp;
+ size_t len, piece_len;
+
+ out = uu_zalloc(2 * scf_limit(SCF_LIMIT_MAX_NAME_LENGTH) + 1);
+ if (out == NULL)
+ return (NULL);
+
+ (void) strcpy(out, prefix);
+ (void) strcat(out, in);
+
+ len = strlen(out);
+ if (len > max_scf_name_len) {
+ /* Use the first half and the second half. */
+ piece_len = (max_scf_name_len - 2) / 2;
+
+ (void) strncpy(out + piece_len, "..", 2);
+
+ (void) strcpy(out + piece_len + 2, out + (len - piece_len));
+
+ len = strlen(out);
+ }
+
+ /*
+ * Translate non-property characters to '_'.
+ */
+ for (cp = out; *cp != '\0'; ++cp) {
+ if (!(isalnum(*cp) || *cp == '_' || *cp == '-'))
+ *cp = '_';
+ }
+
+ *cp = '\0';
+
+ return (out);
+}
+
+static int
+lxml_get_tm_manpage(entity_t *service, xmlNodePtr manpage)
+{
+ pgroup_t *pg;
+ char *pgname;
+ xmlChar *title;
+
+ /*
+ * Fetch title attribute, convert to something sanitized, and create
+ * property group.
+ */
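+ /*
+ * For illustration only (spellings assumed, not quoted from the DTD), a
+ * manpage element might look like
+ *
+ *	<manpage title='example' section='1M' manpath='/usr/share/man' />
+ */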
+ title = xmlGetProp(manpage, (xmlChar *)"title");
+ pgname = (char *)lxml_label_to_groupname(SCF_PG_TM_MAN_PREFIX,
+ (const char *)title);
+
+ pg = internal_pgroup_find_or_create(service, pgname,
+ (char *)SCF_GROUP_TEMPLATE);
+
+ /*
+ * Each attribute is an astring property within the group.
+ */
+ if (new_str_prop_from_attr(pg, "title", SCF_TYPE_ASTRING, manpage,
+ "title") != 0 ||
+ new_str_prop_from_attr(pg, "section", SCF_TYPE_ASTRING, manpage,
+ "section") != 0 ||
+ new_str_prop_from_attr(pg, "manpath", SCF_TYPE_ASTRING, manpage,
+ "manpath") != 0)
+ return (-1);
+
+ return (0);
+}
+
+static int
+lxml_get_tm_doclink(entity_t *service, xmlNodePtr doc_link)
+{
+ pgroup_t *pg;
+ char *pgname;
+ xmlChar *name;
+
+ /*
+ * Fetch name attribute, convert name to something sanitized, and create
+ * property group.
+ */
+ name = xmlGetProp(doc_link, (xmlChar *)"name");
+
+ pgname = (char *)lxml_label_to_groupname(SCF_PG_TM_DOC_PREFIX,
+ (const char *)name);
+
+ pg = internal_pgroup_find_or_create(service, pgname,
+ (char *)SCF_GROUP_TEMPLATE);
+
+ /*
+ * Each attribute is an astring property within the group.
+ */
+ if (new_str_prop_from_attr(pg, "name", SCF_TYPE_ASTRING, doc_link,
+ "name") != 0 ||
+ new_str_prop_from_attr(pg, "uri", SCF_TYPE_ASTRING, doc_link,
+ "uri") != 0)
+ return (-1);
+
+ return (0);
+}
+
+static int
+lxml_get_tm_documentation(entity_t *service, xmlNodePtr documentation)
+{
+ xmlNodePtr cursor;
+
+ for (cursor = documentation->xmlChildrenNode; cursor != NULL;
+ cursor = cursor->next) {
+ if (lxml_ignorable_block(cursor))
+ continue;
+
+ switch (lxml_xlate_element(cursor->name)) {
+ case SC_MANPAGE:
+ (void) lxml_get_tm_manpage(service, cursor);
+ break;
+ case SC_DOC_LINK:
+ (void) lxml_get_tm_doclink(service, cursor);
+ break;
+ default:
+ uu_die(gettext("illegal element \"%s\" on template "
+ "for service \"%s\"\n"),
+ cursor->name, service->sc_name);
+ }
+ }
+
+ return (0);
+}
+
+static int
+lxml_get_template(entity_t *service, xmlNodePtr templ)
+{
+ xmlNodePtr cursor;
+
+ for (cursor = templ->xmlChildrenNode; cursor != NULL;
+ cursor = cursor->next) {
+ if (lxml_ignorable_block(cursor))
+ continue;
+
+ switch (lxml_xlate_element(cursor->name)) {
+ case SC_COMMON_NAME:
+ (void) lxml_get_tm_common_name(service, cursor);
+ break;
+ case SC_DESCRIPTION:
+ (void) lxml_get_tm_description(service, cursor);
+ break;
+ case SC_DOCUMENTATION:
+ (void) lxml_get_tm_documentation(service, cursor);
+ break;
+ default:
+ uu_die(gettext("illegal element \"%s\" on template "
+ "for service \"%s\"\n"),
+ cursor->name, service->sc_name);
+ }
+ }
+
+ return (0);
+}
+
+static int
+lxml_get_default_instance(entity_t *service, xmlNodePtr definst)
+{
+ entity_t *i;
+ xmlChar *enabled;
+ pgroup_t *pg;
+ property_t *p;
+ char *package;
+ uint64_t enabled_val = 0;
+
+ i = internal_instance_new("default");
+
+ if ((enabled = xmlGetProp(definst, (xmlChar *)enabled_attr)) != NULL) {
+ enabled_val = (strcmp(true, (const char *)enabled) == 0) ?
+ 1 : 0;
+ xmlFree(enabled);
+ }
+
+ /*
+ * New general property group with enabled boolean property set.
+ */
+
+ pg = internal_pgroup_new();
+ (void) internal_attach_pgroup(i, pg);
+
+ pg->sc_pgroup_name = (char *)scf_pg_general;
+ pg->sc_pgroup_type = (char *)scf_group_framework;
+ pg->sc_pgroup_flags = 0;
+
+ p = internal_property_create(SCF_PROPERTY_ENABLED, SCF_TYPE_BOOLEAN, 1,
+ enabled_val);
+
+ (void) internal_attach_property(pg, p);
+
+ /*
+ * Add general/package property if PKGINST is set.
+ */
+ if ((package = getenv("PKGINST")) != NULL) {
+ p = internal_property_create(SCF_PROPERTY_PACKAGE,
+ SCF_TYPE_ASTRING, 1, package);
+
+ (void) internal_attach_property(pg, p);
+ }
+
+ return (internal_attach_entity(service, i));
+}
+
+/*
+ * Translate an instance element into an internal property tree, added to
+ * service. If apply is true, forbid subelements and set the enabled property
+ * to override.
+ */
+static int
+lxml_get_instance(entity_t *service, xmlNodePtr inst, int apply)
+{
+ entity_t *i;
+ pgroup_t *pg;
+ property_t *p;
+ xmlNodePtr cursor;
+ xmlChar *enabled;
+ int r;
+
+ /*
+ * Fetch its attributes, as appropriate.
+ */
+ i = internal_instance_new((char *)xmlGetProp(inst,
+ (xmlChar *)name_attr));
+
+ /*
+ * Note that this must be done before walking the children so that
+ * sc_fmri is set in case we enter lxml_get_dependent().
+ */
+ r = internal_attach_entity(service, i);
+ if (r != 0)
+ return (r);
+
+ enabled = xmlGetProp(inst, (xmlChar *)enabled_attr);
+
+ /*
+ * New general property group with enabled boolean property set.
+ */
+ pg = internal_pgroup_new();
+ (void) internal_attach_pgroup(i, pg);
+
+ pg->sc_pgroup_name = (char *)scf_pg_general;
+ pg->sc_pgroup_type = (char *)scf_group_framework;
+ pg->sc_pgroup_flags = 0;
+
+ p = internal_property_create(SCF_PROPERTY_ENABLED, SCF_TYPE_BOOLEAN, 1,
+ (uint64_t)(strcmp(true, (const char *)enabled) == 0 ? 1 : 0));
+
+ p->sc_property_override = apply;
+
+ (void) internal_attach_property(pg, p);
+
+ xmlFree(enabled);
+
+ /*
+ * Walk its child elements, as appropriate.
+ */
+ for (cursor = inst->xmlChildrenNode; cursor != NULL;
+ cursor = cursor->next) {
+ if (lxml_ignorable_block(cursor))
+ continue;
+
+ if (apply) {
+ semerr(gettext("Instance \"%s\" may not contain "
+ "elements in profiles.\n"), i->sc_name,
+ cursor->name);
+ return (-1);
+ }
+
+ switch (lxml_xlate_element(cursor->name)) {
+ case SC_RESTARTER:
+ (void) lxml_get_restarter(i, cursor);
+ break;
+ case SC_DEPENDENCY:
+ (void) lxml_get_dependency(i, cursor);
+ break;
+ case SC_DEPENDENT:
+ (void) lxml_get_dependent(i, cursor);
+ break;
+ case SC_METHOD_CONTEXT:
+ (void) lxml_get_entity_method_context(i, cursor);
+ break;
+ case SC_EXEC_METHOD:
+ (void) lxml_get_exec_method(i, cursor);
+ break;
+ case SC_PROPERTY_GROUP:
+ (void) lxml_get_pgroup(i, cursor);
+ break;
+ case SC_TEMPLATE:
+ (void) lxml_get_template(i, cursor);
+ break;
+ default:
+ uu_die(gettext(
+ "illegal element \"%s\" on instance \"%s\"\n"),
+ cursor->name, i->sc_name);
+ break;
+ }
+ }
+
+ return (0);
+}
+
+/* ARGSUSED1 */
+static int
+lxml_get_single_instance(entity_t *entity, xmlNodePtr si)
+{
+ pgroup_t *pg;
+ property_t *p;
+ int r;
+
+ pg = internal_pgroup_find_or_create(entity, (char *)scf_pg_general,
+ (char *)scf_group_framework);
+
+ p = internal_property_create(SCF_PROPERTY_SINGLE_INSTANCE,
+ SCF_TYPE_BOOLEAN, 1, (uint64_t)1);
+
+ r = internal_attach_property(pg, p);
+ if (r != 0) {
+ internal_property_free(p);
+ return (-1);
+ }
+
+ return (0);
+}
+
+/*
+ * Translate a service element into an internal instance/property tree, added
+ * to bundle. If apply is true, allow only instance subelements.
+ */
+static int
+lxml_get_service(bundle_t *bundle, xmlNodePtr svc, int apply)
+{
+ entity_t *s;
+ xmlNodePtr cursor;
+ xmlChar *type;
+ xmlChar *version;
+ int e;
+
+ /*
+ * Fetch attributes, as appropriate.
+ */
+ s = internal_service_new((char *)xmlGetProp(svc,
+ (xmlChar *)name_attr));
+
+ version = xmlGetProp(svc, (xmlChar *)"version");
+ s->sc_u.sc_service.sc_service_version = atol((const char *)version);
+ xmlFree(version);
+
+ type = xmlGetProp(svc, (xmlChar *)type_attr);
+ s->sc_u.sc_service.sc_service_type = lxml_xlate_service_type(type);
+ xmlFree(type);
+
+ /*
+ * Walk its child elements, as appropriate.
+ */
+ for (cursor = svc->xmlChildrenNode; cursor != NULL;
+ cursor = cursor->next) {
+ if (lxml_ignorable_block(cursor))
+ continue;
+
+ e = lxml_xlate_element(cursor->name);
+
+ if (apply && e != SC_INSTANCE) {
+ semerr(gettext("Service \"%s\" may not contain the "
+ "non-instance element \"%s\" in a profile.\n"),
+ s->sc_name, cursor->name);
+
+ return (-1);
+ }
+
+ switch (e) {
+ case SC_INSTANCE:
+ (void) lxml_get_instance(s, cursor, apply);
+ break;
+ case SC_TEMPLATE:
+ (void) lxml_get_template(s, cursor);
+ break;
+ case SC_STABILITY:
+ (void) lxml_get_entity_stability(s, cursor);
+ break;
+ case SC_DEPENDENCY:
+ (void) lxml_get_dependency(s, cursor);
+ break;
+ case SC_DEPENDENT:
+ (void) lxml_get_dependent(s, cursor);
+ break;
+ case SC_RESTARTER:
+ (void) lxml_get_restarter(s, cursor);
+ break;
+ case SC_EXEC_METHOD:
+ (void) lxml_get_exec_method(s, cursor);
+ break;
+ case SC_METHOD_CONTEXT:
+ (void) lxml_get_entity_method_context(s, cursor);
+ break;
+ case SC_PROPERTY_GROUP:
+ (void) lxml_get_pgroup(s, cursor);
+ break;
+ case SC_INSTANCE_CREATE_DEFAULT:
+ (void) lxml_get_default_instance(s, cursor);
+ break;
+ case SC_INSTANCE_SINGLE:
+ (void) lxml_get_single_instance(s, cursor);
+ break;
+ default:
+ uu_die(gettext(
+ "illegal element \"%s\" on service \"%s\"\n"),
+ cursor->name, s->sc_name);
+ break;
+ }
+ }
+
+ return (internal_attach_service(bundle, s));
+}
+
+#ifdef DEBUG
+void
+lxml_dump(int g, xmlNodePtr p)
+{
+ if (p && p->name) {
+ printf("%d %s\n", g, p->name);
+
+ for (p = p->xmlChildrenNode; p != NULL; p = p->next)
+ lxml_dump(g + 1, p);
+ }
+}
+#endif /* DEBUG */
+
+static int
+lxml_is_known_dtd(const xmlChar *dtdname)
+{
+ if (dtdname == NULL ||
+ strcmp(MANIFEST_DTD_PATH, (const char *)dtdname) != 0)
+ return (0);
+
+ return (1);
+}
+
+static int
+lxml_get_bundle(bundle_t *bundle, bundle_type_t bundle_type,
+ xmlNodePtr subbundle, int apply)
+{
+ xmlNodePtr cursor;
+ xmlChar *type;
+ int e;
+
+ /*
+ * 1. Get bundle attributes.
+ */
+ type = xmlGetProp(subbundle, (xmlChar *)"type");
+ bundle->sc_bundle_type = lxml_xlate_bundle_type(type);
+ if (bundle->sc_bundle_type != bundle_type &&
+ bundle_type != SVCCFG_UNKNOWN_BUNDLE) {
+ semerr(gettext("included bundle of different type.\n"));
+ return (-1);
+ }
+
+ xmlFree(type);
+
+ if (!apply) {
+ if (bundle->sc_bundle_type != SVCCFG_MANIFEST) {
+ semerr(gettext("document is not a manifest.\n"));
+ return (-1);
+ }
+ } else {
+ if (bundle->sc_bundle_type != SVCCFG_PROFILE) {
+ semerr(gettext("document is not a profile.\n"));
+ return (-1);
+ }
+ }
+
+ if ((bundle->sc_bundle_name = xmlGetProp(subbundle,
+ (xmlChar *)"name")) == NULL) {
+ semerr(gettext("service bundle lacks name attribute\n"));
+ return (-1);
+ }
+
+ /*
+ * 2. Get services, descend into each one and build state.
+ */
+ for (cursor = subbundle->xmlChildrenNode; cursor != NULL;
+ cursor = cursor->next) {
+ if (lxml_ignorable_block(cursor))
+ continue;
+
+ e = lxml_xlate_element(cursor->name);
+
+ switch (e) {
+ case SC_XI_INCLUDE:
+ continue;
+
+ case SC_SERVICE_BUNDLE:
+ if (lxml_get_bundle(bundle, bundle_type, cursor, apply))
+ return (-1);
+ break;
+ case SC_SERVICE:
+ (void) lxml_get_service(bundle, cursor, apply);
+ break;
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * Load an XML tree from filename and translate it into an internal service
+ * tree bundle. If apply is false, require that the bundle be of type
+ * manifest; otherwise, require that it be of type profile.
+ */
+int
+lxml_get_bundle_file(bundle_t *bundle, const char *filename, int apply)
+{
+ xmlDocPtr document;
+ xmlNodePtr cursor;
+ xmlDtdPtr dtd = NULL;
+ xmlValidCtxtPtr vcp;
+ boolean_t do_validate;
+ char *dtdpath = NULL;
+ int r;
+
+ /*
+ * Until libxml2 addresses DTD-based validation with XInclude, we don't
+ * validate service profiles (i.e. the apply path).
+ */
+ do_validate = (apply == 0) && (getenv("SVCCFG_NOVALIDATE") == NULL);
+ if (do_validate)
+ dtdpath = getenv("SVCCFG_DTD");
+
+ if (dtdpath != NULL)
+ xmlLoadExtDtdDefaultValue = 0;
+
+ if ((document = xmlReadFile(filename, NULL,
+ XML_PARSE_NOERROR | XML_PARSE_NOWARNING)) == NULL) {
+ semerr(gettext("couldn't parse document\n"));
+ return (-1);
+ }
+
+ /*
+ * Verify that this is a document type we understand.
+ */
+ if ((dtd = xmlGetIntSubset(document)) == NULL) {
+ semerr(gettext("document has no DTD\n"));
+ return (-1);
+ }
+
+ if (!lxml_is_known_dtd(dtd->SystemID)) {
+ semerr(gettext("document DTD unknown; not service bundle?\n"));
+ return (-1);
+ }
+
+ if ((cursor = xmlDocGetRootElement(document)) == NULL) {
+ semerr(gettext("document is empty\n"));
+ xmlFreeDoc(document);
+ return (-1);
+ }
+
+ if (xmlStrcmp(cursor->name, (const xmlChar *)"service_bundle") != 0) {
+ semerr(gettext("document is not a service bundle\n"));
+ xmlFreeDoc(document);
+ return (-1);
+ }
+
+
+ if (dtdpath != NULL) {
+ dtd = xmlParseDTD(NULL, (xmlChar *)dtdpath);
+ if (dtd == NULL) {
+ semerr(gettext("Could not parse DTD \"%s\".\n"),
+ dtdpath);
+ return (-1);
+ }
+
+ if (document->extSubset != NULL)
+ xmlFreeDtd(document->extSubset);
+
+ document->extSubset = dtd;
+ }
+
+ if (xmlXIncludeProcessFlags(document, XML_PARSE_XINCLUDE) == -1) {
+ semerr(gettext("couldn't handle XInclude statements "
+ "in document\n"));
+ return (-1);
+ }
+
+ if (do_validate) {
+ vcp = xmlNewValidCtxt();
+ if (vcp == NULL)
+ uu_die(gettext("could not allocate memory"));
+ vcp->warning = xmlParserValidityWarning;
+ vcp->error = xmlParserValidityError;
+
+ r = xmlValidateDocument(vcp, document);
+
+ xmlFreeValidCtxt(vcp);
+
+ if (r == 0) {
+ semerr(gettext("Document is not valid.\n"));
+ xmlFreeDoc(document);
+ return (-1);
+ }
+ }
+
+
+#ifdef DEBUG
+ lxml_dump(0, cursor);
+#endif /* DEBUG */
+
+ r = lxml_get_bundle(bundle, SVCCFG_UNKNOWN_BUNDLE, cursor, apply);
+
+ xmlFreeDoc(document);
+
+ return (r);
+}
+
+int
+lxml_inventory(const char *filename)
+{
+ bundle_t *b;
+ uu_list_walk_t *svcs, *insts;
+ entity_t *svc, *inst;
+
+ b = internal_bundle_new();
+
+ if (lxml_get_bundle_file(b, filename, 0) != 0) {
+ internal_bundle_free(b);
+ return (-1);
+ }
+
+ svcs = uu_list_walk_start(b->sc_bundle_services, 0);
+ if (svcs == NULL)
+ uu_die(gettext("Couldn't walk services"));
+
+ while ((svc = uu_list_walk_next(svcs)) != NULL) {
+ uu_list_t *inst_list;
+
+ inst_list = svc->sc_u.sc_service.sc_service_instances;
+ insts = uu_list_walk_start(inst_list, 0);
+ if (insts == NULL)
+ uu_die(gettext("Couldn't walk instances"));
+
+ while ((inst = uu_list_walk_next(insts)) != NULL)
+ (void) printf("svc:/%s:%s\n", svc->sc_name,
+ inst->sc_name);
+
+ uu_list_walk_end(insts);
+ }
+
+ uu_list_walk_end(svcs);
+
+ svcs = uu_list_walk_start(b->sc_bundle_services, 0);
+ while ((svc = uu_list_walk_next(svcs)) != NULL) {
+ (void) fputs("svc:/", stdout);
+ (void) puts(svc->sc_name);
+ }
+ uu_list_walk_end(svcs);
+
+ internal_bundle_free(b);
+
+ return (0);
+}
diff --git a/usr/src/cmd/svc/svcprop/Makefile b/usr/src/cmd/svc/svcprop/Makefile
new file mode 100644
index 0000000000..6c454aad37
--- /dev/null
+++ b/usr/src/cmd/svc/svcprop/Makefile
@@ -0,0 +1,55 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+
+PROG = svcprop
+OBJS = svcprop.o
+SRCS = $(OBJS:%.o=%.c)
+POFILES = $(OBJS:.o=.po)
+
+include ../../Makefile.cmd
+include ../Makefile.ctf
+
+LDLIBS += -lscf -luutil
+
+lint := LINTFLAGS = -ux
+
+.KEEP_STATE:
+
+all: $(PROG)
+
+$(PROG): $(OBJS)
+ $(LINK.c) -o $@ $(OBJS) $(LDLIBS) $(CTFMERGE_HOOK)
+ $(POST_PROCESS)
+
+install: all $(ROOTPROG)
+
+clean:
+ $(RM) $(OBJS)
+
+lint: lint_SRCS
+
+include ../../Makefile.targ
diff --git a/usr/src/cmd/svc/svcprop/svcprop.c b/usr/src/cmd/svc/svcprop/svcprop.c
new file mode 100644
index 0000000000..2f9f3ddb0b
--- /dev/null
+++ b/usr/src/cmd/svc/svcprop/svcprop.c
@@ -0,0 +1,1117 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * svcprop - report service configuration properties
+ */
+
+#include <locale.h>
+#include <libintl.h>
+#include <libscf.h>
+#include <libscf_priv.h>
+#include <libuutil.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <strings.h>
+#include <assert.h>
+
+#ifndef TEXT_DOMAIN
+#define TEXT_DOMAIN "SUNW_OST_OSCMD"
+#endif /* TEXT_DOMAIN */
+
+/*
+ * Error functions. These can change if the quiet (-q) option is used.
+ */
+static void (*warn)(const char *, ...) = uu_warn;
+static void (*die)(const char *, ...) = uu_die;
+
+/*
+ * Entity encapsulation. This allows me to treat services and instances
+ * similarly, and avoid duplicating process_ent().
+ */
+typedef struct {
+ char type; /* !=0: service, 0: instance */
+ union {
+ scf_service_t *svc;
+ scf_instance_t *inst;
+ } u;
+} scf_entityp_t;
+
+#define ENT_INSTANCE 0
+
+#define SCF_ENTITY_SET_TO_SERVICE(ent, s) { ent.type = 1; ent.u.svc = s; }
+
+#define SCF_ENTITY_SET_TO_INSTANCE(ent, i) \
+ { ent.type = ENT_INSTANCE; ent.u.inst = i; }
+
+#define scf_entity_get_pg(ent, name, pg) \
+ (ent.type ? scf_service_get_pg(ent.u.svc, name, pg) : \
+ scf_instance_get_pg(ent.u.inst, name, pg))
+
+#define scf_entity_to_fmri(ent, buf, buf_sz) \
+ (ent.type ? scf_service_to_fmri(ent.u.svc, buf, buf_sz) : \
+ scf_instance_to_fmri(ent.u.inst, buf, buf_sz))
+
+#define SCF_ENTITY_TYPE_NAME(ent) (ent.type ? "service" : "instance")
+
+/*
+ * Data structure for -p arguments. Since they may be name or name/name, we
+ * just track the components.
+ */
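+/*
+ * For example (illustrative values): "-p general/enabled" is stored with
+ * spn_comp1 = "general" and spn_comp2 = "enabled", while "-p general" is
+ * stored with spn_comp1 = "general" and spn_comp2 = NULL.
+ */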
+typedef struct svcprop_prop_node {
+ uu_list_node_t spn_list_node;
+ const char *spn_comp1;
+ const char *spn_comp2;
+} svcprop_prop_node_t;
+
+static uu_list_pool_t *prop_pool;
+static uu_list_t *prop_list;
+
+static scf_handle_t *hndl;
+static ssize_t max_scf_name_length;
+static ssize_t max_scf_value_length;
+static ssize_t max_scf_fmri_length;
+
+/* Options */
+static int quiet = 0; /* No output. Nothing found, exit(1) */
+static int types = 0; /* Display types of properties. */
+static int verbose = 0; /* Print not found errors to stderr. */
+static int fmris = 0; /* Display full FMRIs for properties. */
+static int wait = 0; /* Wait mode. */
+static char *snapshot = "running"; /* Snapshot to use. */
+static int Cflag = 0; /* C option supplied */
+static int cflag = 0; /* c option supplied */
+static int sflag = 0; /* s option supplied */
+static int return_code; /* main's return code */
+
+#define PRINT_NOPROP_ERRORS (!quiet || verbose)
+
+/*
+ * For unexpected libscf errors. The ending newline is necessary to keep
+ * uu_die() from appending the errno error.
+ */
+static void
+scfdie()
+{
+ die(gettext("Unexpected libscf error: %s. Exiting.\n"),
+ scf_strerror(scf_error()));
+}
+
+static void *
+safe_malloc(size_t sz)
+{
+ void *p;
+
+ p = malloc(sz);
+ if (p == NULL)
+ die(gettext("Could not allocate memory"));
+
+ return (p);
+}
+
+static void
+usage()
+{
+ (void) fprintf(stderr, gettext("Usage: %1$s [-fqtv] "
+ "[-C | -c | -s snapshot] "
+ "[-p [name/]name]... \n"
+ " {FMRI | pattern}...\n"
+ " %1$s -w [-fqtv] [-p [name/]name] "
+ "{FMRI | pattern}\n"), uu_getpname());
+ exit(UU_EXIT_USAGE);
+}
+
+/*
+ * Return an allocated copy of str, with the Bourne shell's metacharacters
+ * escaped by '\'.
+ *
+ * What about unicode?
+ */
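+/*
+ * For example (illustrative input), the string
+ *	can't stop
+ * would be returned as
+ *	can\'t\ stop
+ * and the empty string is returned as a pair of double quotes.
+ */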
+static char *
+quote_for_shell(const char *str)
+{
+ const char *sp;
+ char *dst, *dp;
+ size_t dst_len;
+
+ const char * const metachars = ";&()|^<>\n \t\\\"\'`";
+
+ if (str[0] == '\0')
+ return (strdup("\"\""));
+
+ dst_len = 0;
+ for (sp = str; *sp != '\0'; ++sp) {
+ ++dst_len;
+
+ if (strchr(metachars, *sp) != NULL)
+ ++dst_len;
+ }
+
+ if (sp - str == dst_len)
+ return (strdup(str));
+
+ dst = safe_malloc(dst_len + 1);
+
+ for (dp = dst, sp = str; *sp != '\0'; ++dp, ++sp) {
+ if (strchr(metachars, *sp) != NULL)
+ *dp++ = '\\';
+
+ *dp = *sp;
+ }
+ *dp = '\0';
+
+ return (dst);
+}
+
+static void
+print_value(scf_value_t *val)
+{
+ char *buf, *qbuf;
+ ssize_t bufsz, r;
+
+ bufsz = scf_value_get_as_string(val, NULL, 0) + 1;
+ if (bufsz - 1 < 0)
+ scfdie();
+
+ buf = safe_malloc(bufsz);
+
+ r = scf_value_get_as_string(val, buf, bufsz);
+ assert(r + 1 == bufsz);
+
+ qbuf = quote_for_shell(buf);
+ (void) fputs(qbuf, stdout);
+
+ free(qbuf);
+ free(buf);
+}
+
+/*
+ * Display a property's values on a line. If types is true, prepend
+ * identification (the FMRI if fmris is true, pg/prop otherwise) and the type
+ * of the property.
+ */
+static void
+display_prop(scf_propertygroup_t *pg, scf_property_t *prop)
+{
+ scf_value_t *val;
+ scf_iter_t *iter;
+ int ret, first;
+
+ if (types) {
+ scf_type_t ty;
+ char *buf;
+ size_t buf_sz;
+
+ if (fmris) {
+ buf_sz = max_scf_fmri_length + 1;
+ buf = safe_malloc(buf_sz);
+
+ if (scf_property_to_fmri(prop, buf, buf_sz) == -1)
+ scfdie();
+ (void) fputs(buf, stdout);
+
+ free(buf);
+ } else {
+ buf_sz = max_scf_name_length + 1;
+ buf = safe_malloc(buf_sz);
+
+ if (scf_pg_get_name(pg, buf, buf_sz) < 0)
+ scfdie();
+ (void) fputs(buf, stdout);
+ (void) putchar('/');
+
+ if (scf_property_get_name(prop, buf, buf_sz) < 0)
+ scfdie();
+ (void) fputs(buf, stdout);
+
+ free(buf);
+ }
+
+ (void) putchar(' ');
+
+ if (scf_property_type(prop, &ty) == -1)
+ scfdie();
+ (void) fputs(scf_type_to_string(ty), stdout);
+ (void) putchar(' ');
+ }
+
+ if ((iter = scf_iter_create(hndl)) == NULL ||
+ (val = scf_value_create(hndl)) == NULL)
+ scfdie();
+
+ if (scf_iter_property_values(iter, prop) == -1)
+ scfdie();
+
+ first = 1;
+ while ((ret = scf_iter_next_value(iter, val)) == 1) {
+ if (first)
+ first = 0;
+ else
+ (void) putchar(' ');
+ print_value(val);
+ }
+ if (ret == -1)
+ scfdie();
+
+ (void) putchar('\n');
+
+ scf_iter_destroy(iter);
+ (void) scf_value_destroy(val);
+}
+
+/*
+ * display_prop() all of the properties in the given property group. Force
+ * types to true so identification will be displayed.
+ */
+static void
+display_pg(scf_propertygroup_t *pg)
+{
+ scf_property_t *prop;
+ scf_iter_t *iter;
+ int ret;
+
+ types = 1; /* Always display types for whole propertygroups. */
+
+ if ((prop = scf_property_create(hndl)) == NULL ||
+ (iter = scf_iter_create(hndl)) == NULL)
+ scfdie();
+
+ if (scf_iter_pg_properties(iter, pg) == -1)
+ scfdie();
+
+ while ((ret = scf_iter_next_property(iter, prop)) == 1)
+ display_prop(pg, prop);
+ if (ret == -1)
+ scfdie();
+
+ scf_iter_destroy(iter);
+ scf_property_destroy(prop);
+}
+
+/*
+ * Common code to execute when a nonexistent property is encountered.
+ */
+static void
+noprop_common_action()
+{
+ if (!PRINT_NOPROP_ERRORS)
+ /* We're not printing errors, so we can cut out early. */
+ exit(UU_EXIT_FATAL);
+
+ return_code = UU_EXIT_FATAL;
+}
+
+/*
+ * Iterate the property groups of a service or an instance when no snapshot
+ * is specified.
+ */
+static int
+scf_iter_entity_pgs(scf_iter_t *iter, scf_entityp_t ent)
+{
+ int ret = 0;
+
+ if (ent.type) {
+ /*
+ * If we are displaying properties for a service,
+ * treat it as though it were a composed, current
+ * lookup. (implicit cflag) However, if a snapshot
+ * was specified, fail.
+ */
+ if (sflag)
+ die(gettext("Only instances have "
+ "snapshots.\n"));
+ ret = scf_iter_service_pgs(iter, ent.u.svc);
+ } else {
+ if (Cflag)
+ ret = scf_iter_instance_pgs(iter, ent.u.inst);
+ else
+ ret = scf_iter_instance_pgs_composed(iter, ent.u.inst,
+ NULL);
+ }
+ return (ret);
+}
+
+/*
+ * Return a snapshot for the supplied instance and snapshot name.
+ */
+static scf_snapshot_t *
+get_snapshot(const scf_instance_t *inst, const char *snapshot)
+{
+ scf_snapshot_t *snap = scf_snapshot_create(hndl);
+
+ if (snap == NULL)
+ scfdie();
+
+ if (scf_instance_get_snapshot(inst, snapshot, snap) == -1) {
+ switch (scf_error()) {
+ case SCF_ERROR_INVALID_ARGUMENT:
+ die(gettext("Invalid snapshot name.\n"));
+ /* NOTREACHED */
+
+ case SCF_ERROR_NOT_FOUND:
+ if (sflag == 0) {
+ scf_snapshot_destroy(snap);
+ snap = NULL;
+ } else
+ die(gettext("No such snapshot.\n"));
+ break;
+
+ default:
+ scfdie();
+ }
+ }
+
+ return (snap);
+}
+
+/*
+ * Entity (service or instance): If there are -p options,
+ * display_{pg,prop}() the named property groups and/or properties. Otherwise
+ * display_pg() all property groups.
+ */
+static void
+process_ent(scf_entityp_t ent)
+{
+ scf_snapshot_t *snap = NULL;
+ scf_propertygroup_t *pg;
+ scf_property_t *prop;
+ scf_iter_t *iter;
+ svcprop_prop_node_t *spn;
+ int ret, err;
+
+ if (uu_list_numnodes(prop_list) == 0) {
+ if (quiet)
+ return;
+
+ if ((pg = scf_pg_create(hndl)) == NULL ||
+ (iter = scf_iter_create(hndl)) == NULL)
+ scfdie();
+
+ if (cflag || Cflag || ent.type != ENT_INSTANCE) {
+ if (scf_iter_entity_pgs(iter, ent) == -1)
+ scfdie();
+ } else {
+ if (snapshot != NULL)
+ snap = get_snapshot(ent.u.inst, snapshot);
+
+ if (scf_iter_instance_pgs_composed(iter, ent.u.inst,
+ snap) == -1)
+ scfdie();
+ if (snap)
+ scf_snapshot_destroy(snap);
+ }
+
+ while ((ret = scf_iter_next_pg(iter, pg)) == 1)
+ display_pg(pg);
+ if (ret == -1)
+ scfdie();
+
+ /*
+ * In normal usage, i.e. against the running snapshot,
+ * we must iterate over the current non-persistent
+ * pg's.
+ */
+ if (sflag == 0 && snap != NULL) {
+ scf_iter_reset(iter);
+ if (scf_iter_instance_pgs_composed(iter, ent.u.inst,
+ NULL) == -1)
+ scfdie();
+ while ((ret = scf_iter_next_pg(iter, pg)) == 1) {
+ uint32_t flags;
+
+ if (scf_pg_get_flags(pg, &flags) == -1)
+ scfdie();
+ if (flags & SCF_PG_FLAG_NONPERSISTENT)
+ display_pg(pg);
+ }
+ }
+ if (ret == -1)
+ scfdie();
+
+ scf_iter_destroy(iter);
+ scf_pg_destroy(pg);
+
+ return;
+ }
+
+ if ((pg = scf_pg_create(hndl)) == NULL ||
+ (prop = scf_property_create(hndl)) == NULL)
+ scfdie();
+
+ if (ent.type == ENT_INSTANCE && snapshot != NULL)
+ snap = get_snapshot(ent.u.inst, snapshot);
+
+ for (spn = uu_list_first(prop_list);
+ spn != NULL;
+ spn = uu_list_next(prop_list, spn)) {
+ if (ent.type == ENT_INSTANCE) {
+ if (Cflag)
+ ret = scf_instance_get_pg(ent.u.inst,
+ spn->spn_comp1, pg);
+ else
+ ret = scf_instance_get_pg_composed(ent.u.inst,
+ snap, spn->spn_comp1, pg);
+ err = scf_error();
+
+ /*
+ * If we didn't find it in the specified snapshot, use
+ * the current values if the pg is nonpersistent.
+ */
+ if (ret == -1 && !Cflag && snap != NULL && err ==
+ SCF_ERROR_NOT_FOUND) {
+ ret = scf_instance_get_pg_composed(
+ ent.u.inst, NULL, spn->spn_comp1,
+ pg);
+
+ if (ret == 0) {
+ uint32_t flags;
+
+ if (scf_pg_get_flags(pg, &flags) == -1)
+ scfdie();
+ if ((flags & SCF_PG_FLAG_NONPERSISTENT)
+ == 0) {
+ ret = -1;
+ }
+ }
+ }
+ } else {
+ /*
+ * If we are displaying properties for a service,
+ * treat it as though it were a composed, current
+ * lookup. (implicit cflag) However, if a snapshot
+ * was specified, fail.
+ */
+ if (sflag)
+ die(gettext("Only instances have "
+ "snapshots.\n"));
+ ret = scf_entity_get_pg(ent, spn->spn_comp1, pg);
+ err = scf_error();
+ }
+ if (ret == -1) {
+ if (err != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ if (PRINT_NOPROP_ERRORS) {
+ char *buf;
+
+ buf = safe_malloc(max_scf_fmri_length + 1);
+ if (scf_entity_to_fmri(ent, buf,
+ max_scf_fmri_length + 1) == -1)
+ scfdie();
+
+ uu_warn(gettext("Couldn't find property group "
+ "`%s' for %s `%s'.\n"), spn->spn_comp1,
+ SCF_ENTITY_TYPE_NAME(ent), buf);
+
+ free(buf);
+ }
+
+ noprop_common_action();
+
+ continue;
+ }
+
+ if (spn->spn_comp2 == NULL) {
+ if (!quiet)
+ display_pg(pg);
+ continue;
+ }
+
+ if (scf_pg_get_property(pg, spn->spn_comp2, prop) == -1) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ if (PRINT_NOPROP_ERRORS) {
+ char *buf;
+
+ buf = safe_malloc(max_scf_fmri_length + 1);
+ if (scf_entity_to_fmri(ent, buf,
+ max_scf_fmri_length + 1) == -1)
+ scfdie();
+
+ /* FMRI syntax knowledge */
+ uu_warn(gettext("Couldn't find property "
+ "`%s/%s' for %s `%s'.\n"), spn->spn_comp1,
+ spn->spn_comp2, SCF_ENTITY_TYPE_NAME(ent),
+ buf);
+
+ free(buf);
+ }
+
+ noprop_common_action();
+
+ continue;
+ }
+
+ if (!quiet)
+ display_prop(pg, prop);
+ }
+
+ scf_property_destroy(prop);
+ scf_pg_destroy(pg);
+ if (snap)
+ scf_snapshot_destroy(snap);
+}
+
+/*
+ * Without -p options, just call display_pg(). Otherwise display_prop() the
+ * named properties of the property group.
+ */
+static void
+process_pg(scf_propertygroup_t *pg)
+{
+ scf_property_t *prop;
+ svcprop_prop_node_t *spn;
+
+ if (uu_list_first(prop_list) == NULL) {
+ if (quiet)
+ return;
+
+ display_pg(pg);
+ return;
+ }
+
+ prop = scf_property_create(hndl);
+ if (prop == NULL)
+ scfdie();
+
+ for (spn = uu_list_first(prop_list);
+ spn != NULL;
+ spn = uu_list_next(prop_list, spn)) {
+ if (spn->spn_comp2 != NULL) {
+ char *buf;
+
+ buf = safe_malloc(max_scf_fmri_length + 1);
+ if (scf_pg_to_fmri(pg, buf, max_scf_fmri_length + 1) ==
+ -1)
+ scfdie();
+
+ uu_xdie(UU_EXIT_USAGE, gettext("-p argument `%s/%s' "
+ "has too many components for property "
+ "group `%s'.\n"), spn->spn_comp1, spn->spn_comp2,
+ buf);
+
+ free(buf);
+ }
+
+ if (scf_pg_get_property(pg, spn->spn_comp1, prop) == 0) {
+ if (!quiet)
+ display_prop(pg, prop);
+ continue;
+ }
+
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ if (PRINT_NOPROP_ERRORS) {
+ char *buf;
+
+ buf = safe_malloc(max_scf_fmri_length + 1);
+ if (scf_pg_to_fmri(pg, buf, max_scf_fmri_length + 1) ==
+ -1)
+ scfdie();
+
+ uu_warn(gettext("Couldn't find property `%s' in "
+ "property group `%s'.\n"), spn->spn_comp1, buf);
+
+ free(buf);
+ }
+
+ noprop_common_action();
+ }
+}
+
+/*
+ * If there are -p options, show the error. Otherwise just call
+ * display_prop().
+ */
+static void
+process_prop(scf_propertygroup_t *pg, scf_property_t *prop)
+{
+ if (uu_list_first(prop_list) != NULL) {
+ uu_warn(gettext("The -p option cannot be used with property "
+ "operands.\n"));
+ usage();
+ }
+
+ if (quiet)
+ return;
+
+ display_prop(pg, prop);
+}
+
+/* Decode an operand & dispatch. */
+/* ARGSUSED */
+static int
+process_fmri(void *unused, scf_walkinfo_t *wip)
+{
+ scf_entityp_t ent;
+
+ /* Multiple matches imply multiple entities. */
+ if (wip->count > 1)
+ types = fmris = 1;
+
+ if (wip->prop != NULL) {
+ process_prop(wip->pg, wip->prop);
+ } else if (wip->pg != NULL) {
+ process_pg(wip->pg);
+ } else if (wip->inst != NULL) {
+ SCF_ENTITY_SET_TO_INSTANCE(ent, wip->inst);
+ process_ent(ent);
+ } else {
+ /* scf_walk_fmri() won't let this happen */
+ assert(wip->svc != NULL);
+ SCF_ENTITY_SET_TO_SERVICE(ent, wip->svc);
+ process_ent(ent);
+ }
+
+ return (0);
+}
+
+static void
+add_prop(char *property)
+{
+ svcprop_prop_node_t *p, *last;
+ char *slash;
+
+ const char * const invalid_component_emsg =
+ gettext("Invalid component name `%s'.\n");
+
+ /* FMRI syntax knowledge. */
+ slash = strchr(property, '/');
+ if (slash != NULL) {
+ if (strchr(slash + 1, '/') != NULL) {
+ uu_warn(gettext("-p argument `%s' has too many "
+ "components.\n"), property);
+ usage();
+ }
+ }
+
+ if (slash != NULL)
+ *slash = '\0';
+
+ p = safe_malloc(sizeof (svcprop_prop_node_t));
+ uu_list_node_init(p, &p->spn_list_node, prop_pool);
+
+ p->spn_comp1 = property;
+ p->spn_comp2 = (slash == NULL) ? NULL : slash + 1;
+
+ if (uu_check_name(p->spn_comp1, UU_NAME_DOMAIN) == -1)
+ uu_xdie(UU_EXIT_USAGE, invalid_component_emsg, p->spn_comp1);
+ if (p->spn_comp2 != NULL &&
+ uu_check_name(p->spn_comp2, UU_NAME_DOMAIN) == -1)
+ uu_xdie(UU_EXIT_USAGE, invalid_component_emsg, p->spn_comp2);
+
+ last = uu_list_last(prop_list);
+ if (last != NULL) {
+ if ((last->spn_comp2 == NULL) ^ (p->spn_comp2 == NULL)) {
+ /*
+ * The -p options have mixed numbers of components.
+ * If they both turn out to be valid, then the
+ * single-component ones will specify property groups,
+ * so we need to turn on types to keep the output of
+ * display_prop() consistent with display_pg().
+ */
+ types = 1;
+ }
+ }
+
+ (void) uu_list_insert_after(prop_list, NULL, p);
+}
+
+
+/*
+ * Wait for a property group or property change.
+ *
+ * Extract a pg and optionally a property name from fmri & prop_list.
+ * _scf_pg_wait() for the pg, and display_pg(pg) or display_prop(pg, prop)
+ * when it returns.
+ */
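+/*
+ * For example (illustrative FMRI), an invocation such as
+ *	svcprop -w -p restarter/state svc:/network/example:default
+ * blocks in _scf_pg_wait() until the named property group changes, then
+ * prints the new value of the named property.
+ */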
+/* ARGSUSED */
+static int
+do_wait(void *unused, scf_walkinfo_t *wip)
+{
+ scf_property_t *prop;
+ scf_propertygroup_t *lpg, *pg;
+ const char *propname;
+ svcprop_prop_node_t *p;
+
+ const char *emsg_not_found = gettext("Not found.\n");
+
+ if ((lpg = scf_pg_create(hndl)) == NULL ||
+ (prop = scf_property_create(hndl)) == NULL)
+ scfdie();
+
+ if (wip->prop != NULL) {
+ if (uu_list_numnodes(prop_list) > 0)
+ uu_xdie(UU_EXIT_USAGE, gettext("-p cannot be used with "
+ "property FMRIs.\n"));
+ pg = wip->pg;
+
+ assert(strrchr(wip->fmri, '/') != NULL);
+ propname = strrchr(wip->fmri, '/') + 1;
+
+ } else if (wip->pg != NULL) {
+ p = uu_list_first(prop_list);
+
+ if (p != NULL) {
+ if (p->spn_comp2 != NULL)
+ uu_xdie(UU_EXIT_USAGE, gettext("-p argument "
+ "\"%s/%s\" has too many components for "
+ "property group %s.\n"),
+ p->spn_comp1, p->spn_comp2, wip->fmri);
+
+ propname = p->spn_comp1;
+
+ if (scf_pg_get_property(wip->pg, propname, prop) !=
+ SCF_SUCCESS) {
+ switch (scf_error()) {
+ case SCF_ERROR_INVALID_ARGUMENT:
+ uu_xdie(UU_EXIT_USAGE,
+ gettext("Invalid property name "
+ "\"%s\".\n"), propname);
+
+ /* NOTREACHED */
+
+ case SCF_ERROR_NOT_FOUND:
+ die(emsg_not_found);
+
+ /* NOTREACHED */
+
+ default:
+ scfdie();
+ }
+ }
+ } else {
+ propname = NULL;
+ }
+
+ pg = wip->pg;
+
+ } else if (wip->inst != NULL) {
+
+ p = uu_list_first(prop_list);
+ if (p == NULL)
+ uu_xdie(UU_EXIT_USAGE,
+ gettext("Cannot wait for an instance.\n"));
+
+ if (scf_instance_get_pg(wip->inst, p->spn_comp1, lpg) !=
+ SCF_SUCCESS) {
+ switch (scf_error()) {
+ case SCF_ERROR_INVALID_ARGUMENT:
+ uu_xdie(UU_EXIT_USAGE, gettext("Invalid "
+ "property group name \"%s\".\n"),
+ p->spn_comp1);
+
+ case SCF_ERROR_NOT_FOUND:
+ die(emsg_not_found);
+
+ /* NOTREACHED */
+
+ default:
+ scfdie();
+ }
+ }
+
+ propname = p->spn_comp2;
+
+ if (propname != NULL) {
+ if (scf_pg_get_property(lpg, propname, prop) !=
+ SCF_SUCCESS) {
+ switch (scf_error()) {
+ case SCF_ERROR_INVALID_ARGUMENT:
+ uu_xdie(UU_EXIT_USAGE,
+ gettext("Invalid property name "
+ "\"%s\".\n"), propname);
+
+ case SCF_ERROR_NOT_FOUND:
+ die(emsg_not_found);
+
+ /* NOTREACHED */
+
+ default:
+ scfdie();
+ }
+ }
+ }
+
+ pg = lpg;
+
+ } else if (wip->svc != NULL) {
+
+ p = uu_list_first(prop_list);
+ if (p == NULL)
+ uu_xdie(UU_EXIT_USAGE,
+ gettext("Cannot wait for a service.\n"));
+
+ if (scf_service_get_pg(wip->svc, p->spn_comp1, lpg) !=
+ SCF_SUCCESS) {
+ switch (scf_error()) {
+ case SCF_ERROR_INVALID_ARGUMENT:
+ uu_xdie(UU_EXIT_USAGE, gettext("Invalid "
+ "property group name \"%s\".\n"),
+ p->spn_comp1);
+
+ case SCF_ERROR_NOT_FOUND:
+ die(emsg_not_found);
+
+ default:
+ scfdie();
+ }
+ }
+
+ propname = p->spn_comp2;
+
+ if (propname != NULL) {
+ if (scf_pg_get_property(lpg, propname, prop) !=
+ SCF_SUCCESS) {
+ switch (scf_error()) {
+ case SCF_ERROR_INVALID_ARGUMENT:
+ uu_xdie(UU_EXIT_USAGE,
+ gettext("Invalid property name "
+ "\"%s\".\n"), propname);
+
+ /* NOTREACHED */
+
+ case SCF_ERROR_NOT_FOUND:
+ die(emsg_not_found);
+
+ /* NOTREACHED */
+
+ default:
+ scfdie();
+ }
+ }
+ }
+
+ pg = lpg;
+
+ } else {
+ uu_xdie(UU_EXIT_USAGE, gettext("FMRI must specify an entity, "
+ "property group, or property.\n"));
+ }
+
+ for (;;) {
+ int ret;
+
+ ret = _scf_pg_wait(pg, -1);
+ if (ret != SCF_SUCCESS)
+ scfdie();
+
+ ret = scf_pg_update(pg);
+ if (ret < 0) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+
+ die(emsg_not_found);
+ }
+ if (ret == SCF_COMPLETE)
+ break;
+ }
+
+ if (propname != NULL) {
+ if (scf_pg_get_property(pg, propname, prop) == SCF_SUCCESS) {
+ if (!quiet)
+ display_prop(pg, prop);
+ } else {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ if (PRINT_NOPROP_ERRORS)
+ uu_warn(emsg_not_found);
+
+ return_code = UU_EXIT_FATAL;
+ }
+ } else {
+ if (!quiet)
+ display_pg(pg);
+ }
+
+ scf_property_destroy(prop);
+ scf_pg_destroy(lpg);
+
+ return (0);
+}
+
+/*
+ * These functions replace uu_warn() and uu_die() when the quiet (-q) option is
+ * used, so that their diagnostic output is suppressed.
+ */
+
+/*ARGSUSED*/
+static void
+quiet_warn(const char *fmt, ...)
+{
+ /* Do nothing */
+}
+
+/*ARGSUSED*/
+static void
+quiet_die(const char *fmt, ...)
+{
+ exit(UU_EXIT_FATAL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ int c;
+ scf_walk_callback callback;
+ int flags;
+ int err;
+
+ (void) setlocale(LC_ALL, "");
+ (void) textdomain(TEXT_DOMAIN);
+
+ return_code = UU_EXIT_OK;
+
+ (void) uu_setpname(argv[0]);
+
+ prop_pool = uu_list_pool_create("properties",
+ sizeof (svcprop_prop_node_t),
+ offsetof(svcprop_prop_node_t, spn_list_node), NULL, 0);
+ if (prop_pool == NULL)
+ uu_die("%s\n", uu_strerror(uu_error()));
+
+ prop_list = uu_list_create(prop_pool, NULL, 0);
+
+ while ((c = getopt(argc, argv, "Ccfp:qs:tvw")) != -1) {
+ switch (c) {
+ case 'C':
+ if (cflag || sflag || wait)
+ usage(); /* Not with -c, -s or -w */
+ Cflag++;
+ snapshot = NULL;
+ break;
+
+ case 'c':
+ if (Cflag || sflag || wait)
+ usage(); /* Not with -C, -s or -w */
+ cflag++;
+ snapshot = NULL;
+ break;
+
+ case 'f':
+ types = 1;
+ fmris = 1;
+ break;
+
+ case 'p':
+ add_prop(optarg);
+ break;
+
+ case 'q':
+ quiet = 1;
+ warn = quiet_warn;
+ die = quiet_die;
+ break;
+
+ case 's':
+ if (Cflag || cflag || wait)
+ usage(); /* Not with -C, -c or -w */
+ snapshot = optarg;
+ sflag++;
+ break;
+
+ case 't':
+ types = 1;
+ break;
+
+ case 'v':
+ verbose = 1;
+ break;
+
+ case 'w':
+ if (Cflag || cflag || sflag)
+ usage(); /* Not with -C, -c or -s */
+ wait = 1;
+ break;
+
+ case '?':
+ switch (optopt) {
+ case 'p':
+ usage();
+
+ default:
+ break;
+ }
+
+ /* FALLTHROUGH */
+
+ default:
+ usage();
+ }
+ }
+
+ if (optind == argc)
+ usage();
+
+ max_scf_name_length = scf_limit(SCF_LIMIT_MAX_NAME_LENGTH);
+ max_scf_value_length = scf_limit(SCF_LIMIT_MAX_VALUE_LENGTH);
+ max_scf_fmri_length = scf_limit(SCF_LIMIT_MAX_FMRI_LENGTH);
+ if (max_scf_name_length == -1 || max_scf_value_length == -1 ||
+ max_scf_fmri_length == -1)
+ scfdie();
+
+ hndl = scf_handle_create(SCF_VERSION);
+ if (hndl == NULL)
+ scfdie();
+ if (scf_handle_bind(hndl) == -1)
+ die(gettext("Could not connect to configuration repository: "
+ "%s.\n"), scf_strerror(scf_error()));
+
+ flags = SCF_WALK_PROPERTY | SCF_WALK_SERVICE | SCF_WALK_EXPLICIT;
+
+ if (wait) {
+ if (uu_list_numnodes(prop_list) > 1)
+ usage();
+
+ if (argc - optind > 1)
+ usage();
+
+ callback = do_wait;
+
+ } else {
+ callback = process_fmri;
+
+ flags |= SCF_WALK_MULTIPLE;
+ }
+
+ if ((err = scf_walk_fmri(hndl, argc - optind, argv + optind, flags,
+ callback, NULL, &return_code, warn)) != 0) {
+ warn(gettext("failed to iterate over instances: %s\n"),
+ scf_strerror(err));
+ return_code = UU_EXIT_FATAL;
+ }
+
+ scf_handle_destroy(hndl);
+
+ return (return_code);
+}
diff --git a/usr/src/cmd/svc/svcs/Makefile b/usr/src/cmd/svc/svcs/Makefile
new file mode 100644
index 0000000000..a59857ef69
--- /dev/null
+++ b/usr/src/cmd/svc/svcs/Makefile
@@ -0,0 +1,59 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+
+PROG = svcs
+OBJS = svcs.o explain.o
+SRCS = $(OBJS:%.o=%.c)
+POFILES = $(OBJS:.o=.po)
+
+include ../../Makefile.cmd
+include ../Makefile.ctf
+
+POFILE = $(PROG)_all.po
+LDLIBS += -lcontract -lscf -luutil -lumem
+
+lint := LINTFLAGS = -ux
+
+.KEEP_STATE:
+
+all: $(PROG)
+
+$(PROG): $(OBJS)
+ $(LINK.c) -o $@ $(OBJS) $(LDLIBS) $(CTFMERGE_HOOK)
+ $(POST_PROCESS)
+
+$(POFILE): $(POFILES)
+ cat $(POFILES) > $(POFILE)
+
+install: all $(ROOTPROG)
+
+clean:
+ $(RM) $(OBJS)
+
+lint: lint_SRCS
+
+include ../../Makefile.targ
diff --git a/usr/src/cmd/svc/svcs/explain.c b/usr/src/cmd/svc/svcs/explain.c
new file mode 100644
index 0000000000..c30840b438
--- /dev/null
+++ b/usr/src/cmd/svc/svcs/explain.c
@@ -0,0 +1,2122 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * Service state explanation. For select services, display a description, the
+ * state, and possibly why the service is in that state, what's causing it to
+ * be in that state, and what other services it is keeping offline (impact).
+ *
+ * Explaining states other than offline is easy. For maintenance and
+ * degraded, we just use the auxiliary state. For offline, we must determine
+ * which dependencies are unsatisfied and recurse. If a causal service is not
+ * offline, then a svcptr to it is added to the offline service's causes list.
+ * If a causal service is offline, then we recurse to determine its causes and
+ * merge them into the causes list of the service in question (see
+ * add_causes()). Note that by adding a self-pointing svcptr to the causes
+ * lists of services which are not offline or are offline for unknown reasons,
+ * we can always merge the unsatisfied dependency's causes into the
+ * dependent's list.
+ *
+ * Computing an impact list is more involved because the dependencies in the
+ * repository are unidirectional; it requires determining the causes of all
+ * offline services. For each unsatisfied dependency of an offline service,
+ * a svcptr to the dependent is added to the dependency's impact_dependents
+ * list (see add_causes()). determine_impact() uses the lists to build an
+ * impact list. The direct dependency is used so that a path from the
+ * affected service to the causal service can be constructed (see
+ * print_dependency_reasons()).
+ *
+ * Because we always need at least impact counts, we always run
+ * determine_causes() on all services.
+ *
+ * If no arguments are given, we must select the services which are causing
+ * other services to be offline. We do so by adding services which are not
+ * running for any reason other than another service to the g_causes list in
+ * determine_causes().
+ *
+ * Since all services must be examined, and their states may be consulted
+ * a lot, it is important that we only read volatile data (like states) from
+ * the repository once. add_instance() reads data for an instance from the
+ * repository into an inst_t and puts it into the "services" cache, which is
+ * organized as a hash table of svc_t's, each of which has a list of inst_t's.
+ */
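+
+/*
+ * Hypothetical example: if svc:/c:default is in maintenance, svc:/b:default
+ * is offline because it requires c, and svc:/a:default is offline because it
+ * requires b, then c's causes list contains only itself and c is added to
+ * g_causes; add_causes() merges c into b's causes and then into a's, while
+ * recording b in c's impact_dependents list and a in b's.
+ * determine_impact(c) then yields { b, a }, so explain() with no operands
+ * reports c with an impact of two services.
+ */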
+
+#include "svcs.h"
+
+#include <sys/stat.h>
+#include <sys/wait.h>
+
+#include <assert.h>
+#include <errno.h>
+#include <libintl.h>
+#include <libuutil.h>
+#include <libscf.h>
+#include <libscf_priv.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+
+
+#define DC_DISABLED "SMF-8000-05"
+#define DC_TEMPDISABLED "SMF-8000-1S"
+#define DC_RSTRINVALID "SMF-8000-2A"
+#define DC_RSTRABSENT "SMF-8000-3P"
+#define DC_UNINIT "SMF-8000-4D"
+#define DC_RSTRDEAD "SMF-8000-5H"
+#define DC_ADMINMAINT "SMF-8000-63"
+#define DC_REPTFAIL "SMF-8000-7Y"
+#define DC_METHFAIL "SMF-8000-8Q"
+#define DC_NONE "SMF-8000-9C"
+#define DC_UNKNOWN "SMF-8000-AR"
+#define DC_STARTING "SMF-8000-C4"
+#define DC_ADMINDEGR "SMF-8000-DX"
+#define DC_DEPABSENT "SMF-8000-E2"
+#define DC_DEPRUNNING "SMF-8000-FJ"
+#define DC_DEPOTHER "SMF-8000-GE"
+#define DC_DEPCYCLE "SMF-8000-HP"
+#define DC_INVALIDDEP "SMF-8000-JA"
+#define DC_STARTFAIL "SMF-8000-KS"
+#define DC_TOOQUICKLY "SMF-8000-L5"
+#define DC_INVALIDSTATE "SMF-8000-N3"
+#define DC_TRANSITION "SMF-8000-PH"
+
+#define DEFAULT_MAN_PATH "/usr/share/man"
+
+
+#define uu_list_append(lst, e) uu_list_insert_before(lst, NULL, e)
+
+#ifdef NDEBUG
+#define bad_error(func, err) abort()
+#else
+#define bad_error(func, err) \
+ (void) fprintf(stderr, "%s:%d: %s() failed with unknown error %d.\n", \
+ __FILE__, __LINE__, func, err); \
+ abort();
+#endif
+
+
+typedef struct {
+ const char *svcname;
+ const char *instname;
+
+ /* restarter pg properties */
+ char state[MAX_SCF_STATE_STRING_SZ];
+ char next_state[MAX_SCF_STATE_STRING_SZ];
+ struct timeval stime;
+ const char *aux_state;
+ int64_t start_method_waitstatus;
+
+ int enabled;
+ int temporary;
+ const char *restarter;
+ uu_list_t *dependencies; /* list of dependency_group's */
+
+ int active; /* In use? (cycle detection) */
+ int restarter_bad;
+ const char *summary;
+ uu_list_t *baddeps; /* list of dependency's */
+ uu_list_t *causes; /* list of svcptrs */
+ uu_list_t *impact_dependents; /* list of svcptrs */
+ uu_list_t *impact; /* list of svcptrs */
+
+ uu_list_node_t node;
+} inst_t;
+
+typedef struct service {
+ const char *svcname;
+ uu_list_t *instances;
+ struct service *next;
+} svc_t;
+
+struct svcptr {
+ inst_t *svcp;
+ inst_t *next_hop;
+ uu_list_node_t node;
+};
+
+struct dependency_group {
+ enum { DGG_REQALL, DGG_REQANY, DGG_OPTALL, DGG_EXCALL } grouping;
+ const char *type;
+ uu_list_t *entities; /* List of struct dependency's */
+ uu_list_node_t node;
+};
+
+struct dependency {
+ const char *fmri;
+ uu_list_node_t node;
+};
+
+/* Hash table of service names -> svc_t's */
+#define SVC_HASH_NBUCKETS 256
+#define SVC_HASH_MASK (SVC_HASH_NBUCKETS - 1)
+
+static svc_t **services;
+
+static uu_list_pool_t *insts, *svcptrs, *depgroups, *deps;
+static uu_list_t *g_causes; /* list of svcptrs */
+
+static scf_scope_t *g_local_scope;
+static scf_service_t *g_svc;
+static scf_instance_t *g_inst;
+static scf_snapshot_t *g_snap;
+static scf_propertygroup_t *g_pg;
+static scf_property_t *g_prop;
+static scf_value_t *g_val;
+static scf_iter_t *g_iter, *g_viter;
+static char *g_fmri, *g_value;
+static size_t g_fmri_sz, g_value_sz;
+static const char *g_msgbase = "http://sun.com/msg/";
+
+static char *emsg_nomem;
+static char *emsg_invalid_dep;
+
+extern scf_handle_t *h;
+
+/* ARGSUSED */
+static int
+svcptr_compare(struct svcptr *a, struct svcptr *b, void *data)
+{
+ return (b->svcp - a->svcp);
+}
+
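+/*
+ * Generic string hash (the same algorithm used for FMRIs in svcs.c); selects
+ * a bucket in the services hash table.
+ */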
+static uint32_t
+hash_name(const char *name)
+{
+ uint32_t h = 0, g;
+ const char *p;
+
+ for (p = name; *p != '\0'; ++p) {
+ h = (h << 4) + *p;
+ if ((g = (h & 0xf0000000)) != 0) {
+ h ^= (g >> 24);
+ h ^= g;
+ }
+ }
+
+ return (h);
+}
+
+
+static void
+x_init(void)
+{
+ emsg_nomem = gettext("Out of memory.\n");
+ emsg_invalid_dep =
+ gettext("svc:/%s:%s has invalid dependency \"%s\".\n");
+
+ services = calloc(SVC_HASH_NBUCKETS, sizeof (*services));
+ if (services == NULL)
+ uu_die(emsg_nomem);
+
+ insts = uu_list_pool_create("insts", sizeof (inst_t),
+ offsetof(inst_t, node), NULL, UU_LIST_POOL_DEBUG);
+ svcptrs = uu_list_pool_create("svcptrs", sizeof (struct svcptr),
+ offsetof(struct svcptr, node), (uu_compare_fn_t *)svcptr_compare,
+ UU_LIST_POOL_DEBUG);
+ depgroups = uu_list_pool_create("depgroups",
+ sizeof (struct dependency_group),
+ offsetof(struct dependency_group, node), NULL, UU_LIST_POOL_DEBUG);
+ deps = uu_list_pool_create("deps", sizeof (struct dependency),
+ offsetof(struct dependency, node), NULL, UU_LIST_POOL_DEBUG);
+ g_causes = uu_list_create(svcptrs, NULL, UU_LIST_DEBUG);
+ if (insts == NULL || svcptrs == NULL || depgroups == NULL ||
+ deps == NULL || g_causes == NULL)
+ uu_die(emsg_nomem);
+
+ if ((g_local_scope = scf_scope_create(h)) == NULL ||
+ (g_svc = scf_service_create(h)) == NULL ||
+ (g_inst = scf_instance_create(h)) == NULL ||
+ (g_snap = scf_snapshot_create(h)) == NULL ||
+ (g_pg = scf_pg_create(h)) == NULL ||
+ (g_prop = scf_property_create(h)) == NULL ||
+ (g_val = scf_value_create(h)) == NULL ||
+ (g_iter = scf_iter_create(h)) == NULL ||
+ (g_viter = scf_iter_create(h)) == NULL)
+ scfdie();
+
+ if (scf_handle_get_scope(h, SCF_SCOPE_LOCAL, g_local_scope) != 0)
+ scfdie();
+
+ g_fmri_sz = max_scf_fmri_length + 1;
+ g_fmri = safe_malloc(g_fmri_sz);
+
+ g_value_sz = max_scf_value_length + 1;
+ g_value = safe_malloc(g_value_sz);
+}
+
+/*
+ * Repository loading routines.
+ */
+
+/*
+ * Returns
+ * 0 - success
+ * ECANCELED - inst was deleted
+ * EINVAL - inst is invalid
+ */
+static int
+load_dependencies(inst_t *svcp, scf_instance_t *inst)
+{
+ scf_snapshot_t *snap;
+ struct dependency_group *dg;
+ struct dependency *d;
+ int r;
+
+ assert(svcp->dependencies == NULL);
+ svcp->dependencies = uu_list_create(depgroups, svcp, UU_LIST_DEBUG);
+ if (svcp->dependencies == NULL)
+ uu_die(emsg_nomem);
+
+ if (scf_instance_get_snapshot(inst, "running", g_snap) == 0) {
+ snap = g_snap;
+ } else {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ snap = NULL;
+ }
+
+ if (scf_iter_instance_pgs_typed_composed(g_iter, inst, snap,
+ SCF_GROUP_DEPENDENCY) != 0) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+ return (ECANCELED);
+ }
+
+ for (;;) {
+ r = scf_iter_next_pg(g_iter, g_pg);
+ if (r == 0)
+ break;
+ if (r != 1) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+ return (ECANCELED);
+ }
+
+ dg = safe_malloc(sizeof (*dg));
+ (void) memset(dg, 0, sizeof (*dg));
+ dg->entities = uu_list_create(deps, dg, UU_LIST_DEBUG);
+ if (dg->entities == NULL)
+ uu_die(emsg_nomem);
+
+ if (pg_get_single_val(g_pg, SCF_PROPERTY_GROUPING,
+ SCF_TYPE_ASTRING, g_value, g_value_sz, 0) != 0)
+ return (EINVAL);
+
+ if (strcmp(g_value, "require_all") == 0)
+ dg->grouping = DGG_REQALL;
+ else if (strcmp(g_value, "require_any") == 0)
+ dg->grouping = DGG_REQANY;
+ else if (strcmp(g_value, "optional_all") == 0)
+ dg->grouping = DGG_OPTALL;
+ else if (strcmp(g_value, "exclude_all") == 0)
+ dg->grouping = DGG_EXCALL;
+ else {
+ (void) fprintf(stderr, gettext("svc:/%s:%s has "
+ "dependency with unknown type \"%s\".\n"),
+ svcp->svcname, svcp->instname, g_value);
+ return (EINVAL);
+ }
+
+ if (pg_get_single_val(g_pg, SCF_PROPERTY_TYPE, SCF_TYPE_ASTRING,
+ g_value, g_value_sz, 0) != 0)
+ return (EINVAL);
+ dg->type = safe_strdup(g_value);
+
+ if (scf_pg_get_property(g_pg, SCF_PROPERTY_ENTITIES, g_prop) !=
+ 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ (void) fprintf(stderr, gettext("svc:/%s:%s has "
+ "dependency without an entities "
+ "property.\n"), svcp->svcname,
+ svcp->instname);
+ return (EINVAL);
+
+ case SCF_ERROR_DELETED:
+ return (ECANCELED);
+
+ default:
+ scfdie();
+ }
+ }
+
+ if (scf_iter_property_values(g_viter, g_prop) != 0) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+ return (ECANCELED);
+ }
+
+ for (;;) {
+ r = scf_iter_next_value(g_viter, g_val);
+ if (r == 0)
+ break;
+ if (r != 1) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+ return (ECANCELED);
+ }
+
+ d = safe_malloc(sizeof (*d));
+ d->fmri = safe_malloc(max_scf_fmri_length + 1);
+
+ if (scf_value_get_astring(g_val, (char *)d->fmri,
+ max_scf_fmri_length + 1) < 0)
+ scfdie();
+
+ uu_list_node_init(d, &d->node, deps);
+ (void) uu_list_append(dg->entities, d);
+ }
+
+ uu_list_node_init(dg, &dg->node, depgroups);
+ r = uu_list_append(svcp->dependencies, dg);
+ assert(r == 0);
+ }
+
+ return (0);
+}
+
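+/*
+ * Read an instance's restarter and (composed) general properties from the
+ * repository into an inst_t, load its dependencies if it is offline, and add
+ * it to the services cache.
+ */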
+static void
+add_instance(const char *svcname, const char *instname, scf_instance_t *inst)
+{
+ inst_t *instp;
+ svc_t *svcp;
+ int have_enabled = 0;
+ int i;
+ uint32_t h;
+
+ h = hash_name(svcname) & SVC_HASH_MASK;
+ for (svcp = services[h]; svcp != NULL; svcp = svcp->next) {
+ if (strcmp(svcp->svcname, svcname) == 0)
+ break;
+ }
+
+ if (svcp == NULL) {
+ svcp = safe_malloc(sizeof (*svcp));
+ svcp->svcname = safe_strdup(svcname);
+ svcp->instances = uu_list_create(insts, svcp, UU_LIST_DEBUG);
+ if (svcp->instances == NULL)
+ uu_die(emsg_nomem);
+ svcp->next = services[h];
+ services[h] = svcp;
+ }
+
+ instp = safe_malloc(sizeof (*instp));
+ (void) memset(instp, 0, sizeof (*instp));
+ instp->svcname = svcp->svcname;
+ instp->instname = safe_strdup(instname);
+ instp->impact_dependents =
+ uu_list_create(svcptrs, instp, UU_LIST_DEBUG);
+ if (instp->impact_dependents == NULL)
+ uu_die(emsg_nomem);
+
+ if (scf_instance_get_pg(inst, SCF_PG_RESTARTER, g_pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ return;
+
+ case SCF_ERROR_NOT_FOUND:
+ (void) fprintf(stderr, gettext("svc:/%s:%s has no "
+ "\"%s\" property group; ignoring.\n"),
+ instp->svcname, instp->instname, SCF_PG_RESTARTER);
+ return;
+
+ default:
+ scfdie();
+ }
+ }
+
+ if (pg_get_single_val(g_pg, SCF_PROPERTY_STATE, SCF_TYPE_ASTRING,
+ (void *)instp->state, sizeof (instp->state), 0) != 0)
+ return;
+
+ if (pg_get_single_val(g_pg, SCF_PROPERTY_NEXT_STATE, SCF_TYPE_ASTRING,
+ (void *)instp->next_state, sizeof (instp->next_state), 0) != 0)
+ return;
+
+ if (pg_get_single_val(g_pg, SCF_PROPERTY_STATE_TIMESTAMP,
+ SCF_TYPE_TIME, &instp->stime, 0, 0) != 0)
+ return;
+
+ if (pg_get_single_val(g_pg, SCF_PROPERTY_AUX_STATE, SCF_TYPE_ASTRING,
+ g_fmri, g_fmri_sz, 0) != 0)
+ return;
+ instp->aux_state = safe_strdup(g_fmri);
+
+ (void) pg_get_single_val(g_pg, SCF_PROPERTY_START_METHOD_WAITSTATUS,
+ SCF_TYPE_INTEGER, &instp->start_method_waitstatus, 0, 0);
+
+ if (scf_instance_get_pg(inst, SCF_PG_GENERAL_OVR, g_pg) == 0) {
+ if (pg_get_single_val(g_pg, SCF_PROPERTY_ENABLED,
+ SCF_TYPE_BOOLEAN, &instp->enabled, 0, 0) == 0)
+ have_enabled = 1;
+ } else {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_DELETED:
+ return;
+
+ default:
+ scfdie();
+ }
+ }
+
+ if (scf_instance_get_pg_composed(inst, NULL, SCF_PG_GENERAL, g_pg) !=
+ 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_DELETED:
+ case SCF_ERROR_NOT_FOUND:
+ return;
+
+ default:
+ scfdie();
+ }
+ }
+
+ if (pg_get_single_val(g_pg, SCF_PROPERTY_ENABLED, SCF_TYPE_BOOLEAN,
+ &i, 0, 0) != 0)
+ return;
+ if (!have_enabled) {
+ instp->enabled = i;
+ instp->temporary = 0;
+ } else {
+ instp->temporary = (instp->enabled != i);
+ }
+
+ if (pg_get_single_val(g_pg, SCF_PROPERTY_RESTARTER, SCF_TYPE_ASTRING,
+ g_fmri, g_fmri_sz, 0) == 0)
+ instp->restarter = safe_strdup(g_fmri);
+ else
+ instp->restarter = SCF_SERVICE_STARTD;
+
+ if (strcmp(instp->state, SCF_STATE_STRING_OFFLINE) == 0 &&
+ load_dependencies(instp, inst) != 0)
+ return;
+
+ uu_list_node_init(instp, &instp->node, insts);
+ i = uu_list_append(svcp->instances, instp);
+ assert(i == 0);
+}
+
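+/*
+ * Iterate over every service and instance in the local scope, caching each
+ * instance's state via add_instance().
+ */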
+static void
+load_services(void)
+{
+ scf_iter_t *siter, *iiter;
+ int r;
+ char *svcname, *instname;
+
+ if ((siter = scf_iter_create(h)) == NULL ||
+ (iiter = scf_iter_create(h)) == NULL)
+ scfdie();
+
+ svcname = safe_malloc(max_scf_name_length + 1);
+ instname = safe_malloc(max_scf_name_length + 1);
+
+ if (scf_iter_scope_services(siter, g_local_scope) != 0)
+ scfdie();
+
+ for (;;) {
+ r = scf_iter_next_service(siter, g_svc);
+ if (r == 0)
+ break;
+ if (r != 1)
+ scfdie();
+
+ if (scf_service_get_name(g_svc, svcname,
+ max_scf_name_length + 1) < 0) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+ continue;
+ }
+
+ if (scf_iter_service_instances(iiter, g_svc) != 0) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+ continue;
+ }
+
+ for (;;) {
+ r = scf_iter_next_instance(iiter, g_inst);
+ if (r == 0)
+ break;
+ if (r != 1) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+ break;
+ }
+
+ if (scf_instance_get_name(g_inst, instname,
+ max_scf_name_length + 1) < 0) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+ continue;
+ }
+
+ add_instance(svcname, instname, g_inst);
+ }
+ }
+
+ free(svcname);
+ free(instname);
+ scf_iter_destroy(siter);
+ scf_iter_destroy(iiter);
+}
+
+/*
+ * Dependency analysis routines.
+ */
+
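+/*
+ * Append a pointer to svcp to the given list of svcptrs, unless an entry for
+ * svcp is already present.
+ */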
+static void
+add_svcptr(uu_list_t *lst, inst_t *svcp)
+{
+ struct svcptr *spp;
+ uu_list_index_t idx;
+ int r;
+
+ spp = safe_malloc(sizeof (*spp));
+ spp->svcp = svcp;
+ spp->next_hop = NULL;
+
+ if (uu_list_find(lst, spp, NULL, &idx) != NULL) {
+ free(spp);
+ return;
+ }
+
+ uu_list_node_init(spp, &spp->node, svcptrs);
+ r = uu_list_append(lst, spp);
+ assert(r == 0);
+}
+
+static int determine_causes(inst_t *, void *);
+
+/*
+ * Determine the causes of src and add them to the causes list of dst.
+ * Returns ELOOP if src is active, and 0 otherwise.
+ */
+static int
+add_causes(inst_t *dst, inst_t *src)
+{
+ struct svcptr *spp, *copy;
+ uu_list_index_t idx;
+
+ if (determine_causes(src, (void *)1) != UU_WALK_NEXT) {
+ /* Dependency cycle. */
+ (void) fprintf(stderr, " svc:/%s:%s\n", dst->svcname,
+ dst->instname);
+ return (ELOOP);
+ }
+
+ add_svcptr(src->impact_dependents, dst);
+
+ for (spp = uu_list_first(src->causes);
+ spp != NULL;
+ spp = uu_list_next(src->causes, spp)) {
+ if (uu_list_find(dst->causes, spp, NULL, &idx) != NULL)
+ continue;
+
+ copy = safe_malloc(sizeof (*copy));
+ copy->svcp = spp->svcp;
+ copy->next_hop = src;
+ uu_list_node_init(copy, &copy->node, svcptrs);
+ uu_list_insert(dst->causes, copy, idx);
+
+ add_svcptr(g_causes, spp->svcp);
+ }
+
+ return (0);
+}
+
+static int
+inst_running(inst_t *ip)
+{
+ return (strcmp(ip->state, SCF_STATE_STRING_ONLINE) == 0 ||
+ strcmp(ip->state, SCF_STATE_STRING_DEGRADED) == 0);
+}
+
+static int
+inst_running_or_maint(inst_t *ip)
+{
+ return (inst_running(ip) ||
+ strcmp(ip->state, SCF_STATE_STRING_MAINT) == 0);
+}
+
+static svc_t *
+get_svc(const char *sn)
+{
+ uint32_t h;
+ svc_t *svcp;
+
+ h = hash_name(sn) & SVC_HASH_MASK;
+
+ for (svcp = services[h]; svcp != NULL; svcp = svcp->next) {
+ if (strcmp(svcp->svcname, sn) == 0)
+ break;
+ }
+
+ return (svcp);
+}
+
+/* ARGSUSED */
+static inst_t *
+get_inst(svc_t *svcp, const char *in)
+{
+ inst_t *instp;
+
+ for (instp = uu_list_first(svcp->instances);
+ instp != NULL;
+ instp = uu_list_next(svcp->instances, instp)) {
+ if (strcmp(instp->instname, in) == 0)
+ return (instp);
+ }
+
+ return (NULL);
+}
+
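+/*
+ * Look up the cached svc_t and inst_t for an FMRI.  Returns EINVAL if the
+ * FMRI cannot be parsed, ENOENT if the service (or the instance, when one is
+ * specified) is not in the cache, and 0 on success.
+ */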
+static int
+get_fmri(const char *fmri, svc_t **spp, inst_t **ipp)
+{
+ const char *sn, *in;
+ svc_t *sp;
+ inst_t *ip;
+
+ if (strlcpy(g_fmri, fmri, g_fmri_sz) >= g_fmri_sz)
+ return (EINVAL);
+
+ if (scf_parse_svc_fmri(g_fmri, NULL, &sn, &in, NULL, NULL) != 0)
+ return (EINVAL);
+
+ if (sn == NULL)
+ return (EINVAL);
+
+ sp = get_svc(sn);
+ if (sp == NULL)
+ return (ENOENT);
+
+ if (in != NULL) {
+ ip = get_inst(sp, in);
+ if (ip == NULL)
+ return (ENOENT);
+ }
+
+ if (spp != NULL)
+ *spp = sp;
+ if (ipp != NULL)
+ *ipp = ((in == NULL) ? NULL : ip);
+
+ return (0);
+}
+
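+/*
+ * Evaluate a require_all dependency group: each member which is not running
+ * contributes its causes to svcp, and members which do not exist are moved
+ * to svcp's baddeps list.  Returns ELOOP on a dependency cycle, 0 otherwise.
+ */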
+static int
+process_reqall(inst_t *svcp, struct dependency_group *dg)
+{
+ uu_list_walk_t *walk;
+ struct dependency *d;
+ int r, svcrunning;
+ svc_t *sp;
+ inst_t *ip;
+
+ walk = uu_list_walk_start(dg->entities, UU_WALK_ROBUST);
+ if (walk == NULL)
+ uu_die(emsg_nomem);
+
+ while ((d = uu_list_walk_next(walk)) != NULL) {
+ r = get_fmri(d->fmri, &sp, &ip);
+ switch (r) {
+ case EINVAL:
+ /* LINTED */
+ (void) fprintf(stderr, emsg_invalid_dep, svcp->svcname,
+ svcp->instname, d->fmri);
+ continue;
+
+ case ENOENT:
+ uu_list_remove(dg->entities, d);
+ r = uu_list_append(svcp->baddeps, d);
+ assert(r == 0);
+ continue;
+
+ case 0:
+ break;
+
+ default:
+ bad_error("get_fmri", r);
+ }
+
+ if (ip != NULL) {
+ if (inst_running(ip))
+ continue;
+ r = add_causes(svcp, ip);
+ if (r != 0) {
+ assert(r == ELOOP);
+ return (r);
+ }
+ continue;
+ }
+
+ svcrunning = 0;
+
+ for (ip = uu_list_first(sp->instances);
+ ip != NULL;
+ ip = uu_list_next(sp->instances, ip)) {
+ if (inst_running(ip))
+ svcrunning = 1;
+ }
+
+ if (!svcrunning) {
+ for (ip = uu_list_first(sp->instances);
+ ip != NULL;
+ ip = uu_list_next(sp->instances, ip)) {
+ r = add_causes(svcp, ip);
+ if (r != 0) {
+ assert(r == ELOOP);
+ uu_list_walk_end(walk);
+ return (r);
+ }
+ }
+ }
+ }
+
+ uu_list_walk_end(walk);
+ return (0);
+}
+
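+/*
+ * Evaluate a require_any dependency group.  If any member is running, the
+ * group is satisfied; otherwise each unsatisfied member contributes its
+ * causes to svcp, and members which do not exist are moved to baddeps.
+ * Returns ELOOP on a dependency cycle, 0 otherwise.
+ */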
+static int
+process_reqany(inst_t *svcp, struct dependency_group *dg)
+{
+ svc_t *sp;
+ inst_t *ip;
+ struct dependency *d;
+ int r;
+ uu_list_walk_t *walk;
+
+ for (d = uu_list_first(dg->entities);
+ d != NULL;
+ d = uu_list_next(dg->entities, d)) {
+ r = get_fmri(d->fmri, &sp, &ip);
+ switch (r) {
+ case 0:
+ break;
+
+ case EINVAL:
+ /* LINTED */
+ (void) fprintf(stderr, emsg_invalid_dep, svcp->svcname,
+ svcp->instname, d->fmri);
+ continue;
+
+ case ENOENT:
+ continue;
+
+ default:
+ bad_error("eval_svc_dep", r);
+ }
+
+ if (ip != NULL) {
+ if (inst_running(ip))
+ return (0);
+ continue;
+ }
+
+ for (ip = uu_list_first(sp->instances);
+ ip != NULL;
+ ip = uu_list_next(sp->instances, ip)) {
+ if (inst_running(ip))
+ return (0);
+ }
+ }
+
+ /*
+ * The dependency group is not satisfied. Add all unsatisfied members
+ * to the cause list.
+ */
+
+ walk = uu_list_walk_start(dg->entities, UU_WALK_ROBUST);
+ if (walk == NULL)
+ uu_die(emsg_nomem);
+
+ while ((d = uu_list_walk_next(walk)) != NULL) {
+ r = get_fmri(d->fmri, &sp, &ip);
+ switch (r) {
+ case 0:
+ break;
+
+ case ENOENT:
+ uu_list_remove(dg->entities, d);
+ r = uu_list_append(svcp->baddeps, d);
+ assert(r == 0);
+ continue;
+
+ case EINVAL:
+ /* Should have caught above. */
+ default:
+ bad_error("eval_svc_dep", r);
+ }
+
+ if (ip != NULL) {
+ if (inst_running(ip))
+ continue;
+ r = add_causes(svcp, ip);
+ if (r != 0) {
+ assert(r == ELOOP);
+ return (r);
+ }
+ continue;
+ }
+
+ for (ip = uu_list_first(sp->instances);
+ ip != NULL;
+ ip = uu_list_next(sp->instances, ip)) {
+ if (inst_running(ip))
+ continue;
+ r = add_causes(svcp, ip);
+ if (r != 0) {
+ assert(r == ELOOP);
+ return (r);
+ }
+ }
+ }
+
+ return (0);
+}
+
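+/*
+ * Evaluate an optional_all dependency group: only members which are enabled
+ * but neither running nor in maintenance contribute their causes to svcp.
+ * Returns ELOOP on a dependency cycle, 0 otherwise.
+ */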
+static int
+process_optall(inst_t *svcp, struct dependency_group *dg)
+{
+ uu_list_walk_t *walk;
+ struct dependency *d;
+ int r;
+ inst_t *ip;
+ svc_t *sp;
+
+ walk = uu_list_walk_start(dg->entities, UU_WALK_ROBUST);
+ if (walk == NULL)
+ uu_die(emsg_nomem);
+
+ while ((d = uu_list_walk_next(walk)) != NULL) {
+ r = get_fmri(d->fmri, &sp, &ip);
+
+ switch (r) {
+ case 0:
+ break;
+
+ case EINVAL:
+ /* LINTED */
+ (void) fprintf(stderr, emsg_invalid_dep, svcp->svcname,
+ svcp->instname, d->fmri);
+ continue;
+
+ case ENOENT:
+ continue;
+
+ default:
+ bad_error("get_fmri", r);
+ }
+
+ if (ip != NULL) {
+ if (ip->enabled && !inst_running_or_maint(ip)) {
+ r = add_causes(svcp, ip);
+ if (r != 0) {
+ assert(r == ELOOP);
+ uu_list_walk_end(walk);
+ return (r);
+ }
+ }
+ continue;
+ }
+
+ for (ip = uu_list_first(sp->instances);
+ ip != NULL;
+ ip = uu_list_next(sp->instances, ip)) {
+ if (ip->enabled && !inst_running_or_maint(ip)) {
+ r = add_causes(svcp, ip);
+ if (r != 0) {
+ assert(r == ELOOP);
+ uu_list_walk_end(walk);
+ return (r);
+ }
+ }
+ }
+ }
+
+ uu_list_walk_end(walk);
+ return (0);
+}
+
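+/*
+ * Evaluate an exclude_all dependency group: members which are running
+ * contribute their causes to svcp, since they must stop before svcp can
+ * start.  Returns ELOOP on a dependency cycle, 0 otherwise.
+ */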
+static int
+process_excall(inst_t *svcp, struct dependency_group *dg)
+{
+ struct dependency *d;
+ int r;
+ svc_t *sp;
+ inst_t *ip;
+
+ for (d = uu_list_first(dg->entities);
+ d != NULL;
+ d = uu_list_next(dg->entities, d)) {
+ r = get_fmri(d->fmri, &sp, &ip);
+
+ switch (r) {
+ case 0:
+ break;
+
+ case EINVAL:
+ /* LINTED */
+ (void) fprintf(stderr, emsg_invalid_dep, svcp->svcname,
+ svcp->instname, d->fmri);
+ continue;
+
+ case ENOENT:
+ continue;
+
+ default:
+ bad_error("eval_svc_dep", r);
+ }
+
+ if (ip != NULL) {
+ if (inst_running(ip)) {
+ r = add_causes(svcp, ip);
+ if (r != 0) {
+ assert(r == ELOOP);
+ return (r);
+ }
+ }
+ continue;
+ }
+
+ for (ip = uu_list_first(sp->instances);
+ ip != NULL;
+ ip = uu_list_next(sp->instances, ip)) {
+ if (inst_running(ip)) {
+ r = add_causes(svcp, ip);
+ if (r != 0) {
+ assert(r == ELOOP);
+ return (r);
+ }
+ }
+ }
+ }
+
+ return (0);
+}
+
+static int
+process_svc_dg(inst_t *svcp, struct dependency_group *dg)
+{
+ switch (dg->grouping) {
+ case DGG_REQALL:
+ return (process_reqall(svcp, dg));
+
+ case DGG_REQANY:
+ return (process_reqany(svcp, dg));
+
+ case DGG_OPTALL:
+ return (process_optall(svcp, dg));
+
+ case DGG_EXCALL:
+ return (process_excall(svcp, dg));
+
+ default:
+#ifndef NDEBUG
+ (void) fprintf(stderr,
+ "%s:%d: Unknown dependency grouping %d.\n", __FILE__,
+ __LINE__, dg->grouping);
+#endif
+ abort();
+ /* NOTREACHED */
+ }
+}
+
+/*
+ * Returns
+ * EINVAL - fmri is not a valid FMRI
+ * 0 - the file indicated by fmri is missing
+ * 1 - the file indicated by fmri is present
+ */
+static int
+eval_file_dep(const char *fmri)
+{
+ const char *path;
+ struct stat st;
+
+ if (strncmp(fmri, "file:", sizeof ("file:") - 1) != 0)
+ return (EINVAL);
+
+ path = fmri + (sizeof ("file:") - 1);
+
+ if (path[0] != '/')
+ return (EINVAL);
+
+ if (path[1] == '/') {
+ path += 2;
+ if (strncmp(path, "localhost/", sizeof ("localhost/") - 1) == 0)
+ path += sizeof ("localhost") - 1;
+ else if (path[0] != '/')
+ return (EINVAL);
+ }
+
+ return (stat(path, &st) == 0 ? 1 : 0);
+}
+
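+/*
+ * Evaluate a file ("path") dependency group by stat()ing each member: URIs
+ * for missing files (or for present files, in an exclude_all group) are
+ * moved to svcp's baddeps list.
+ */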
+static void
+process_file_dg(inst_t *svcp, struct dependency_group *dg)
+{
+ uu_list_walk_t *walk;
+ struct dependency *d, **deps;
+ int r, i = 0, any_satisfied = 0;
+
+ if (dg->grouping == DGG_REQANY) {
+ deps = calloc(uu_list_numnodes(dg->entities), sizeof (*deps));
+ if (deps == NULL)
+ uu_die(emsg_nomem);
+ }
+
+ walk = uu_list_walk_start(dg->entities, UU_WALK_ROBUST);
+ if (walk == NULL)
+ uu_die(emsg_nomem);
+
+ while ((d = uu_list_walk_next(walk)) != NULL) {
+ r = eval_file_dep(d->fmri);
+ if (r == EINVAL) {
+ /* LINTED */
+ (void) fprintf(stderr, emsg_invalid_dep, svcp->svcname,
+ svcp->instname, d->fmri);
+ continue;
+ }
+
+ assert(r == 0 || r == 1);
+
+ switch (dg->grouping) {
+ case DGG_REQALL:
+ case DGG_OPTALL:
+ if (r == 0) {
+ uu_list_remove(dg->entities, d);
+ r = uu_list_append(svcp->baddeps, d);
+ assert(r == 0);
+ }
+ break;
+
+ case DGG_REQANY:
+ if (r == 1)
+ any_satisfied = 1;
+ else
+ deps[i++] = d;
+ break;
+
+ case DGG_EXCALL:
+ if (r == 1) {
+ uu_list_remove(dg->entities, d);
+ r = uu_list_append(svcp->baddeps, d);
+ assert(r == 0);
+ }
+ break;
+
+ default:
+#ifndef NDEBUG
+ (void) fprintf(stderr, "%s:%d: Unknown grouping %d.\n",
+ __FILE__, __LINE__, dg->grouping);
+#endif
+ abort();
+ }
+ }
+
+ uu_list_walk_end(walk);
+
+ if (dg->grouping != DGG_REQANY)
+ return;
+
+ if (!any_satisfied) {
+ for (--i; i >= 0; --i) {
+ uu_list_remove(dg->entities, deps[i]);
+ r = uu_list_append(svcp->baddeps, deps[i]);
+ assert(r == 0);
+ }
+ }
+
+ free(deps);
+}
+
+/*
+ * Populate the causes list of svcp. This function should not return with
+ * causes empty.
+ */
+static int
+determine_causes(inst_t *svcp, void *canfailp)
+{
+ struct dependency_group *dg;
+ int r;
+
+ if (svcp->active) {
+ (void) fprintf(stderr, gettext("Dependency cycle detected:\n"
+ " svc:/%s:%s\n"), svcp->svcname, svcp->instname);
+ return ((int)canfailp != 0 ? UU_WALK_ERROR : UU_WALK_NEXT);
+ }
+
+ if (svcp->causes != NULL)
+ return (UU_WALK_NEXT);
+
+ svcp->causes = uu_list_create(svcptrs, svcp, UU_LIST_DEBUG);
+ svcp->baddeps = uu_list_create(deps, svcp, UU_LIST_DEBUG);
+ if (svcp->causes == NULL || svcp->baddeps == NULL)
+ uu_die(emsg_nomem);
+
+ if (inst_running(svcp) ||
+ strcmp(svcp->state, SCF_STATE_STRING_UNINIT) == 0) {
+ /*
+ * If we're running, add a self-pointer in case we're
+ * excluding another service.
+ */
+ add_svcptr(svcp->causes, svcp);
+ return (UU_WALK_NEXT);
+ }
+
+ if (strcmp(svcp->state, SCF_STATE_STRING_MAINT) == 0) {
+ add_svcptr(svcp->causes, svcp);
+ add_svcptr(g_causes, svcp);
+ return (UU_WALK_NEXT);
+ }
+
+ if (strcmp(svcp->state, SCF_STATE_STRING_DISABLED) == 0) {
+ add_svcptr(svcp->causes, svcp);
+ if (svcp->enabled)
+ add_svcptr(g_causes, svcp);
+
+ return (UU_WALK_NEXT);
+ }
+
+ if (strcmp(svcp->state, SCF_STATE_STRING_OFFLINE) != 0) {
+ (void) fprintf(stderr,
+ gettext("svc:/%s:%s has invalid state \"%s\".\n"),
+ svcp->svcname, svcp->instname, svcp->state);
+ add_svcptr(svcp->causes, svcp);
+ add_svcptr(g_causes, svcp);
+ return (UU_WALK_NEXT);
+ }
+
+ if (strcmp(svcp->next_state, SCF_STATE_STRING_NONE) != 0) {
+ add_svcptr(svcp->causes, svcp);
+ add_svcptr(g_causes, svcp);
+ return (UU_WALK_NEXT);
+ }
+
+ svcp->active = 1;
+
+ /*
+ * Dependency analysis can add elements to our baddeps list (absent
+ * dependency, unsatisfied file dependency), or to our cause list
+ * (unsatisfied dependency).
+ */
+ for (dg = uu_list_first(svcp->dependencies);
+ dg != NULL;
+ dg = uu_list_next(svcp->dependencies, dg)) {
+ if (strcmp(dg->type, "path") == 0) {
+ process_file_dg(svcp, dg);
+ } else if (strcmp(dg->type, "service") == 0) {
+ int r;
+
+ r = process_svc_dg(svcp, dg);
+ if (r != 0) {
+ assert(r == ELOOP);
+ svcp->active = 0;
+ return ((int)canfailp != 0 ?
+ UU_WALK_ERROR : UU_WALK_NEXT);
+ }
+ } else {
+ (void) fprintf(stderr, gettext("svc:/%s:%s has "
+ "dependency group with invalid type \"%s\".\n"),
+ svcp->svcname, svcp->instname, dg->type);
+ }
+ }
+
+ if (uu_list_numnodes(svcp->causes) == 0) {
+ if (uu_list_numnodes(svcp->baddeps) > 0) {
+ add_svcptr(g_causes, svcp);
+ add_svcptr(svcp->causes, svcp);
+ } else {
+ inst_t *restarter;
+
+ r = get_fmri(svcp->restarter, NULL, &restarter);
+ if (r == 0 && !inst_running(restarter)) {
+ r = add_causes(svcp, restarter);
+ if (r != 0) {
+ assert(r == ELOOP);
+ svcp->active = 0;
+ return ((int)canfailp != 0 ?
+ UU_WALK_ERROR : UU_WALK_NEXT);
+ }
+ } else {
+ svcp->restarter_bad = r;
+ add_svcptr(svcp->causes, svcp);
+ add_svcptr(g_causes, svcp);
+ }
+ }
+ }
+
+ assert(uu_list_numnodes(svcp->causes) > 0);
+
+ svcp->active = 0;
+ return (UU_WALK_NEXT);
+}
+
+static void
+determine_all_causes(void)
+{
+ svc_t *svcp;
+ int i;
+
+ for (i = 0; i < SVC_HASH_NBUCKETS; ++i) {
+ for (svcp = services[i]; svcp != NULL; svcp = svcp->next)
+ (void) uu_list_walk(svcp->instances,
+ (uu_walk_fn_t *)determine_causes, 0, 0);
+ }
+}
+
+/*
+ * Returns
+ * 0 - success
+ * ELOOP - dependency cycle detected
+ */
+static int
+determine_impact(inst_t *ip)
+{
+ struct svcptr *idsp, *spp, *copy;
+ uu_list_index_t idx;
+
+ if (ip->active) {
+ (void) fprintf(stderr, gettext("Dependency cycle detected:\n"
+ " svc:/%s:%s\n"), ip->svcname, ip->instname);
+ return (ELOOP);
+ }
+
+ if (ip->impact != NULL)
+ return (0);
+
+ ip->impact = uu_list_create(svcptrs, ip, UU_LIST_DEBUG);
+ if (ip->impact == NULL)
+ uu_die(emsg_nomem);
+ ip->active = 1;
+
+ for (idsp = uu_list_first(ip->impact_dependents);
+ idsp != NULL;
+ idsp = uu_list_next(ip->impact_dependents, idsp)) {
+ if (determine_impact(idsp->svcp) != 0) {
+ (void) fprintf(stderr, " svc:/%s:%s\n",
+ ip->svcname, ip->instname);
+ return (ELOOP);
+ }
+
+ add_svcptr(ip->impact, idsp->svcp);
+
+ for (spp = uu_list_first(idsp->svcp->impact);
+ spp != NULL;
+ spp = uu_list_next(idsp->svcp->impact, spp)) {
+ if (uu_list_find(ip->impact, spp, NULL, &idx) != NULL)
+ continue;
+
+ copy = safe_malloc(sizeof (*copy));
+ copy->svcp = spp->svcp;
+ copy->next_hop = NULL;
+ uu_list_node_init(copy, &copy->node, svcptrs);
+ uu_list_insert(ip->impact, copy, idx);
+ }
+ }
+
+ ip->active = 0;
+ return (0);
+}
+
+/*
+ * Printing routines.
+ */
+
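+/*
+ * Consult the msg/base property of SCF_SERVICE_STARTD to override the
+ * default knowledge-article URL prefix; if the property exists but has no
+ * usable value, disable the " See:" diagcode links entirely.
+ */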
+static void
+check_msgbase(void)
+{
+ if (scf_handle_decode_fmri(h, SCF_SERVICE_STARTD, NULL, NULL, g_inst,
+ NULL, NULL, SCF_DECODE_FMRI_EXACT) != 0) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ return;
+ }
+
+ if (scf_instance_get_pg_composed(g_inst, NULL, "msg", g_pg) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ case SCF_ERROR_DELETED:
+ return;
+
+ default:
+ scfdie();
+ }
+ }
+
+ if (scf_pg_get_property(g_pg, "base", g_prop) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ case SCF_ERROR_DELETED:
+ return;
+
+ default:
+ scfdie();
+ }
+ }
+
+ if (scf_property_get_value(g_prop, g_val) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ g_msgbase = NULL;
+ return;
+
+ case SCF_ERROR_DELETED:
+ return;
+
+ default:
+ scfdie();
+ }
+ }
+
+ if (scf_value_get_astring(g_val, g_value, g_value_sz) < 0) {
+ if (scf_error() != SCF_ERROR_TYPE_MISMATCH)
+ scfdie();
+ return;
+ }
+
+ g_msgbase = safe_strdup(g_value);
+}
+
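+/*
+ * Set ip->summary to a short localized phrase describing the instance's
+ * state, for use in the "Reason:" lines of dependent services.
+ */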
+static void
+determine_summary(inst_t *ip)
+{
+ if (ip->summary != NULL)
+ return;
+
+ if (inst_running(ip)) {
+ ip->summary = gettext("is running.");
+ return;
+ }
+
+ if (strcmp(ip->state, SCF_STATE_STRING_UNINIT) == 0) {
+ ip->summary = gettext("is uninitialized.");
+ } else if (strcmp(ip->state, SCF_STATE_STRING_DISABLED) == 0) {
+ if (!ip->temporary)
+ ip->summary = gettext("is disabled.");
+ else
+ ip->summary = gettext("is temporarily disabled.");
+ } else if (strcmp(ip->state, SCF_STATE_STRING_OFFLINE) == 0) {
+ if (uu_list_numnodes(ip->baddeps) != 0)
+ ip->summary = gettext("has missing dependencies.");
+ else if (strcmp(ip->next_state, SCF_STATE_STRING_ONLINE) == 0)
+ ip->summary = gettext("is starting.");
+ else
+ ip->summary = gettext("is offline.");
+ } else if (strcmp(ip->state, SCF_STATE_STRING_MAINT) == 0) {
+ if (strcmp(ip->aux_state, "administrative_request") == 0) {
+ ip->summary = gettext("was taken down for maintenace "
+ "by an administrator.");
+ } else if (strcmp(ip->aux_state, "dependency_cycle") == 0) {
+ ip->summary = gettext("completed a dependency cycle.");
+ } else if (strcmp(ip->aux_state, "fault_threshold_reached") ==
+ 0) {
+ ip->summary = gettext("is not running because "
+ "a method failed repeatedly.");
+ } else if (strcmp(ip->aux_state, "invalid_dependency") == 0) {
+ ip->summary = gettext("has an invalid dependency.");
+ } else if (strcmp(ip->aux_state, "invalid_restarter") == 0) {
+ ip->summary = gettext("has an invalid restarter.");
+ } else if (strcmp(ip->aux_state, "method_failed") == 0) {
+ ip->summary = gettext("is not running because "
+ "a method failed.");
+ } else if (strcmp(ip->aux_state, "none") == 0) {
+ ip->summary =
+ gettext("is not running for an unknown reason.");
+ } else if (strcmp(ip->aux_state, "restarting_too_quickly") ==
+ 0) {
+ ip->summary = gettext("was restarting too quickly.");
+ } else {
+ ip->summary = gettext("requires maintenance.");
+ }
+ } else {
+ ip->summary = gettext("is in an invalid state.");
+ }
+}
+
+static void
+print_method_failure(const inst_t *ip, const char **dcp)
+{
+ char buf[50];
+ int stat = ip->start_method_waitstatus;
+
+ if (stat != 0) {
+ if (WIFEXITED(stat)) {
+ if (WEXITSTATUS(stat) == SMF_EXIT_ERR_CONFIG) {
+ (void) strlcpy(buf, gettext(
+ "exited with $SMF_EXIT_ERR_CONFIG"),
+ sizeof (buf));
+ } else if (WEXITSTATUS(stat) == SMF_EXIT_ERR_FATAL) {
+ (void) strlcpy(buf, gettext(
+ "exited with $SMF_EXIT_ERR_FATAL"),
+ sizeof (buf));
+ } else {
+ (void) snprintf(buf, sizeof (buf),
+ gettext("exited with status %d"),
+ WEXITSTATUS(stat));
+ }
+ } else if (WIFSIGNALED(stat)) {
+ if (WCOREDUMP(stat)) {
+ if (strsignal(WTERMSIG(stat)) != NULL)
+ (void) snprintf(buf, sizeof (buf),
+ gettext("dumped core on %s (%d)"),
+ strsignal(WTERMSIG(stat)),
+ WTERMSIG(stat));
+ else
+ (void) snprintf(buf, sizeof (buf),
+ gettext("dumped core signal %d"),
+ WTERMSIG(stat));
+ } else {
+ if (strsignal(WTERMSIG(stat)) != NULL) {
+ (void) snprintf(buf, sizeof (buf),
+ gettext("died on %s (%d)"),
+ strsignal(WTERMSIG(stat)),
+ WTERMSIG(stat));
+ } else {
+ (void) snprintf(buf, sizeof (buf),
+ gettext("died on signal %d"),
+ WTERMSIG(stat));
+ }
+ }
+ } else {
+ goto fail;
+ }
+
+ if (strcmp(ip->aux_state, "fault_threshold_reached") != 0)
+ (void) printf(gettext("Reason: Start method %s.\n"),
+ buf);
+ else
+ (void) printf(gettext("Reason: "
+ "Start method failed repeatedly, last %s.\n"), buf);
+ *dcp = DC_STARTFAIL;
+ } else {
+fail:
+ if (strcmp(ip->aux_state, "fault_threshold_reached") == 0)
+ (void) puts(gettext(
+ "Reason: Method failed repeatedly."));
+ else
+ (void) puts(gettext("Reason: Method failed."));
+ *dcp = DC_METHFAIL;
+ }
+}
+
+static void
+print_dependency_reasons(const inst_t *svcp, int verbose)
+{
+ struct dependency *d;
+ struct svcptr *spp;
+ const char *dc;
+
+ /*
+ * If we couldn't determine why the service is offline, then baddeps
+ * will be empty and causes will have a pointer to self.
+ */
+ if (uu_list_numnodes(svcp->baddeps) == 0 &&
+ uu_list_numnodes(svcp->causes) == 1) {
+ spp = uu_list_first(svcp->causes);
+ if (spp->svcp == svcp) {
+ switch (svcp->restarter_bad) {
+ case 0:
+ (void) puts(gettext("Reason: Unknown."));
+ dc = DC_UNKNOWN;
+ break;
+
+ case EINVAL:
+ (void) printf(gettext("Reason: "
+ "Restarter \"%s\" is invalid.\n"),
+ svcp->restarter);
+ dc = DC_RSTRINVALID;
+ break;
+
+ case ENOENT:
+ (void) printf(gettext("Reason: "
+ "Restarter \"%s\" does not exist.\n"),
+ svcp->restarter);
+ dc = DC_RSTRABSENT;
+ break;
+
+ default:
+#ifndef NDEBUG
+ (void) fprintf(stderr, "%s:%d: Bad "
+ "restarter_bad value %d. Aborting.\n",
+ __FILE__, __LINE__, svcp->restarter_bad);
+#endif
+ abort();
+ }
+
+ if (g_msgbase)
+ (void) printf(gettext(" See: %s%s\n"),
+ g_msgbase, dc);
+ return;
+ }
+ }
+
+ for (d = uu_list_first(svcp->baddeps);
+ d != NULL;
+ d = uu_list_next(svcp->baddeps, d)) {
+ (void) printf(gettext("Reason: Dependency %s is absent.\n"),
+ d->fmri);
+ if (g_msgbase)
+ (void) printf(gettext(" See: %s%s\n"), g_msgbase,
+ DC_DEPABSENT);
+ }
+
+ for (spp = uu_list_first(svcp->causes);
+ spp != NULL && spp->svcp != svcp;
+ spp = uu_list_next(svcp->causes, spp)) {
+ determine_summary(spp->svcp);
+
+ if (inst_running(spp->svcp)) {
+ (void) printf(gettext("Reason: "
+ "Service svc:/%s:%s is running.\n"),
+ spp->svcp->svcname, spp->svcp->instname);
+ dc = DC_DEPRUNNING;
+ } else {
+ if (snprintf(NULL, 0,
+ gettext("Reason: Service svc:/%s:%s %s"),
+ spp->svcp->svcname, spp->svcp->instname,
+ spp->svcp->summary) <= 80) {
+ (void) printf(gettext(
+ "Reason: Service svc:/%s:%s %s\n"),
+ spp->svcp->svcname, spp->svcp->instname,
+ spp->svcp->summary);
+ } else {
+ (void) printf(gettext(
+ "Reason: Service svc:/%s:%s\n"
+ " %s\n"), spp->svcp->svcname,
+ spp->svcp->instname, spp->svcp->summary);
+ }
+
+ dc = DC_DEPOTHER;
+ }
+
+ if (g_msgbase != NULL)
+ (void) printf(gettext(" See: %s%s\n"), g_msgbase, dc);
+
+ if (verbose) {
+ inst_t *pp;
+ int indent;
+
+ (void) printf(gettext(" Path: svc:/%s:%s\n"),
+ svcp->svcname, svcp->instname);
+
+ indent = 1;
+ for (pp = spp->next_hop; ; ) {
+ struct svcptr *tmp;
+
+ (void) printf(gettext("%6s %*ssvc:/%s:%s\n"),
+ "", indent++ * 2, "", pp->svcname,
+ pp->instname);
+
+ if (pp == spp->svcp)
+ break;
+
+ /* set pp to next_hop of cause with same svcp */
+ tmp = uu_list_find(pp->causes, spp, NULL, NULL);
+ pp = tmp->next_hop;
+ }
+ }
+ }
+}
+
+static void
+print_reasons(const inst_t *svcp, int verbose)
+{
+ int r;
+ const char *dc = NULL;
+
+ if (strcmp(svcp->state, SCF_STATE_STRING_ONLINE) == 0)
+ return;
+
+ if (strcmp(svcp->state, SCF_STATE_STRING_UNINIT) == 0) {
+ inst_t *rsp;
+
+ r = get_fmri(svcp->restarter, NULL, &rsp);
+ switch (r) {
+ case 0:
+ if (rsp != NULL)
+ break;
+ /* FALLTHROUGH */
+
+ case EINVAL:
+ (void) printf(gettext("Reason: "
+ "Restarter \"%s\" is invalid.\n"), svcp->restarter);
+ dc = DC_RSTRINVALID;
+ goto diagcode;
+
+ case ENOENT:
+ (void) printf(gettext("Reason: "
+ "Restarter \"%s\" does not exist.\n"),
+ svcp->restarter);
+ dc = DC_RSTRABSENT;
+ goto diagcode;
+
+ default:
+ bad_error("get_fmri", r);
+ }
+
+ if (inst_running(rsp)) {
+ (void) printf(gettext("Reason: Restarter %s "
+ "has not initialized service state.\n"),
+ svcp->restarter);
+ dc = DC_UNINIT;
+ } else {
+ (void) printf(gettext(
+ "Reason: Restarter %s is not running.\n"),
+ svcp->restarter);
+ dc = DC_RSTRDEAD;
+ }
+
+ } else if (strcmp(svcp->state, SCF_STATE_STRING_DISABLED) == 0) {
+ if (!svcp->temporary) {
+ (void) puts(gettext(
+ "Reason: Disabled by an administrator."));
+ dc = DC_DISABLED;
+ } else {
+ (void) puts(gettext("Reason: "
+ "Temporarily disabled by an administrator."));
+ dc = DC_TEMPDISABLED;
+ }
+
+ } else if (strcmp(svcp->state, SCF_STATE_STRING_MAINT) == 0) {
+ if (strcmp(svcp->aux_state, "administrative_request") == 0) {
+ (void) puts(gettext("Reason: "
+ "Maintenance requested by an administrator."));
+ dc = DC_ADMINMAINT;
+ } else if (strcmp(svcp->aux_state, "dependency_cycle") == 0) {
+ (void) puts(gettext(
+ "Reason: Completes a dependency cycle."));
+ dc = DC_DEPCYCLE;
+ } else if (strcmp(svcp->aux_state, "fault_threshold_reached") ==
+ 0) {
+ print_method_failure(svcp, &dc);
+ } else if (strcmp(svcp->aux_state, "invalid_dependency") == 0) {
+ (void) puts(gettext("Reason: Has invalid dependency."));
+ dc = DC_INVALIDDEP;
+ } else if (strcmp(svcp->aux_state, "invalid_restarter") == 0) {
+ (void) printf(gettext("Reason: Restarter \"%s\" is "
+ "invalid.\n"), svcp->restarter);
+ dc = DC_RSTRINVALID;
+ } else if (strcmp(svcp->aux_state, "method_failed") == 0) {
+ print_method_failure(svcp, &dc);
+ } else if (strcmp(svcp->aux_state, "restarting_too_quickly") ==
+ 0) {
+ (void) puts(gettext("Reason: Restarting too quickly."));
+ dc = DC_TOOQUICKLY;
+ } else if (strcmp(svcp->aux_state, "none") == 0) {
+ (void) printf(gettext(
+ "Reason: Restarter %s gave no explanation.\n"),
+ svcp->restarter);
+ dc = DC_NONE;
+ } else {
+ (void) puts(gettext("Reason: Unknown."));
+ dc = DC_UNKNOWN;
+ }
+
+ } else if (strcmp(svcp->state, SCF_STATE_STRING_OFFLINE) == 0) {
+ if (strcmp(svcp->next_state, SCF_STATE_STRING_ONLINE) == 0) {
+ (void) puts(gettext(
+ "Reason: Start method is running."));
+ dc = DC_STARTING;
+ } else if (strcmp(svcp->next_state, SCF_STATE_STRING_NONE) ==
+ 0) {
+ print_dependency_reasons(svcp, verbose);
+ /* Function prints diagcodes. */
+ return;
+ } else {
+ (void) printf(gettext(
+ "Reason: Transitioning to state %s.\n"),
+ svcp->next_state);
+ dc = DC_TRANSITION;
+ }
+
+ } else if (strcmp(svcp->state, SCF_STATE_STRING_DEGRADED) == 0) {
+ (void) puts(gettext("Reason: Degraded by an administrator."));
+ dc = DC_ADMINDEGR;
+
+ } else {
+ (void) printf(gettext("Reason: Not in valid state (%s).\n"),
+ svcp->state);
+ dc = DC_INVALIDSTATE;
+ }
+
+diagcode:
+ if (g_msgbase != NULL)
+ (void) printf(gettext(" See: %s%s\n"), g_msgbase, dc);
+}
+
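+/*
+ * Print a " See:" line for the manual page described by the current template
+ * property group: title(section) normally, or a full man -M command line in
+ * verbose mode.
+ */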
+static void
+print_manpage(int verbose)
+{
+ static char *title = NULL;
+ static char *section = NULL;
+
+ if (title == NULL) {
+ title = safe_malloc(g_value_sz);
+ section = safe_malloc(g_value_sz);
+ }
+
+ if (pg_get_single_val(g_pg, SCF_PROPERTY_TM_TITLE, SCF_TYPE_ASTRING,
+ (void *)title, g_value_sz, 0) != 0)
+ return;
+
+ if (pg_get_single_val(g_pg, SCF_PROPERTY_TM_SECTION,
+ SCF_TYPE_ASTRING, (void *)section, g_value_sz, 0) != 0)
+ return;
+
+ if (!verbose) {
+ (void) printf(gettext(" See: %s(%s)\n"), title, section);
+ return;
+ }
+
+ if (pg_get_single_val(g_pg, SCF_PROPERTY_TM_MANPATH, SCF_TYPE_ASTRING,
+ (void *)g_value, g_value_sz, 0) != 0)
+ return;
+
+ if (strcmp(g_value, ":default") == 0) {
+ assert(sizeof (DEFAULT_MAN_PATH) < g_value_sz);
+ (void) strcpy(g_value, DEFAULT_MAN_PATH);
+ }
+
+ (void) printf(gettext(" See: man -M %s -s %s %s\n"), g_value,
+ section, title);
+}
+
+static void
+print_doclink()
+{
+ static char *uri = NULL;
+
+ if (uri == NULL) {
+ uri = safe_malloc(g_value_sz);
+ }
+
+ if (pg_get_single_val(g_pg, SCF_PROPERTY_TM_URI, SCF_TYPE_ASTRING,
+ (void *)uri, g_value_sz, 0) != 0)
+ return;
+
+ (void) printf(gettext(" See: %s\n"), uri);
+}
+
+
+/*
+ * Returns
+ * 0 - success
+ * 1 - inst was deleted
+ */
+static int
+print_docs(scf_instance_t *inst, int verbose)
+{
+ scf_snapshot_t *snap;
+ int r;
+
+ if (scf_instance_get_snapshot(inst, "running", g_snap) != 0) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ break;
+
+ case SCF_ERROR_DELETED:
+ return (1);
+
+ default:
+ scfdie();
+ }
+
+ snap = NULL;
+ } else {
+ snap = g_snap;
+ }
+
+ if (scf_iter_instance_pgs_typed_composed(g_iter, inst, snap,
+ SCF_GROUP_TEMPLATE) != 0) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+
+ return (1);
+ }
+
+ for (;;) {
+ r = scf_iter_next_pg(g_iter, g_pg);
+ if (r == 0)
+ break;
+ if (r != 1) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+
+ return (1);
+ }
+
+ if (scf_pg_get_name(g_pg, g_fmri, g_fmri_sz) < 0) {
+ if (scf_error() != SCF_ERROR_DELETED)
+ scfdie();
+
+ continue;
+ }
+
+ if (strncmp(g_fmri, SCF_PG_TM_MAN_PREFIX,
+ strlen(SCF_PG_TM_MAN_PREFIX)) == 0) {
+ print_manpage(verbose);
+ continue;
+ }
+
+ if (strncmp(g_fmri, SCF_PG_TM_DOC_PREFIX,
+ strlen(SCF_PG_TM_DOC_PREFIX)) == 0) {
+ print_doclink();
+ continue;
+ }
+ }
+ return (0);
+}
+
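+/*
+ * Print " See:" lines for the instance's log files, taken from the
+ * SCF_PROPERTY_ALT_LOGFILE and SCF_PROPERTY_LOGFILE properties of its
+ * restarter property group.
+ */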
+static void
+print_logs(scf_instance_t *inst)
+{
+ if (scf_instance_get_pg(inst, SCF_PG_RESTARTER, g_pg) != 0)
+ return;
+
+ if (pg_get_single_val(g_pg, SCF_PROPERTY_ALT_LOGFILE,
+ SCF_TYPE_ASTRING, (void *)g_value, g_value_sz, 0) == 0)
+ (void) printf(gettext(" See: %s\n"), g_value);
+
+ if (pg_get_single_val(g_pg, SCF_PROPERTY_LOGFILE,
+ SCF_TYPE_ASTRING, (void *)g_value, g_value_sz, 0) == 0)
+ (void) printf(gettext(" See: %s\n"), g_value);
+}
+
+static int first = 1;
+
+/*
+ * Explain why the given service is in the state it's in.
+ */
+static void
+print_service(inst_t *svcp, int verbose)
+{
+ struct svcptr *spp;
+ time_t stime;
+ char *timebuf;
+ size_t tbsz;
+ struct tm *tmp;
+ int deleted = 0;
+
+ if (first)
+ first = 0;
+ else
+ (void) putchar('\n');
+
+ (void) printf(gettext("svc:/%s:%s"), svcp->svcname, svcp->instname);
+
+ if (scf_scope_get_service(g_local_scope, svcp->svcname, g_svc) != 0) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+ deleted = 1;
+ } else if (scf_service_get_instance(g_svc, svcp->instname, g_inst) !=
+ 0) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+ deleted = 1;
+ }
+
+ if (!deleted) {
+ if (inst_get_single_val(g_inst, SCF_PG_TM_COMMON_NAME, locale,
+ SCF_TYPE_USTRING, g_value, g_value_sz, 0, 0, 1) == 0)
+ /* EMPTY */;
+ else if (inst_get_single_val(g_inst, SCF_PG_TM_COMMON_NAME, "C",
+ SCF_TYPE_USTRING, g_value, g_value_sz, 0, 0, 1) != 0)
+ (void) strcpy(g_value, "?");
+
+ (void) printf(gettext(" (%s)\n"), g_value);
+ } else {
+ (void) putchar('\n');
+ }
+
+ stime = svcp->stime.tv_sec;
+ tmp = localtime(&stime);
+
+ for (tbsz = 50; ; tbsz *= 2) {
+ timebuf = safe_malloc(tbsz);
+ if (strftime(timebuf, tbsz, NULL, tmp) != 0)
+ break;
+ free(timebuf);
+ }
+
+ (void) printf(gettext(" State: %s since %s\n"), svcp->state, timebuf);
+
+ free(timebuf);
+
+ /* Reasons */
+ print_reasons(svcp, verbose);
+
+ if (!deleted)
+ deleted = print_docs(g_inst, verbose);
+ if (!deleted)
+ print_logs(g_inst);
+
+ (void) determine_impact(svcp);
+
+ switch (uu_list_numnodes(svcp->impact)) {
+ case 0:
+ if (inst_running(svcp))
+ (void) puts(gettext("Impact: None."));
+ else
+ (void) puts(gettext(
+ "Impact: This service is not running."));
+ break;
+
+ case 1:
+ if (!verbose)
+ (void) puts(gettext("Impact: 1 dependent service "
+ "is not running. (Use -v for list.)"));
+ else
+ (void) puts(gettext(
+ "Impact: 1 dependent service is not running:"));
+ break;
+
+ default:
+ if (!verbose)
+ (void) printf(gettext("Impact: %d dependent services "
+ "are not running. (Use -v for list.)\n"),
+ uu_list_numnodes(svcp->impact));
+ else
+ (void) printf(gettext(
+ "Impact: %d dependent services are not running:\n"),
+ uu_list_numnodes(svcp->impact));
+ }
+
+ if (verbose) {
+ for (spp = uu_list_first(svcp->impact);
+ spp != NULL;
+ spp = uu_list_next(svcp->impact, spp))
+ (void) printf(gettext(" svc:/%s:%s\n"),
+ spp->svcp->svcname, spp->svcp->instname);
+ }
+}
+
+/*
+ * Top level routine.
+ */
+
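+/*
+ * qsort() comparator: order instances by decreasing impact (number of
+ * dependent instances which are not running).
+ */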
+static int
+impact_compar(const void *a, const void *b)
+{
+ int n, m;
+
+ n = uu_list_numnodes((*(inst_t **)a)->impact);
+ m = uu_list_numnodes((*(inst_t **)b)->impact);
+
+ return (m - n);
+}
+
+static int
+print_service_cb(void *verbose, scf_walkinfo_t *wip)
+{
+ int r;
+ inst_t *ip;
+
+ assert(wip->pg == NULL);
+
+ r = get_fmri(wip->fmri, NULL, &ip);
+ assert(r != EINVAL);
+ if (r == ENOENT)
+ return (0);
+
+ assert(r == 0);
+ assert(ip != NULL);
+
+ print_service(ip, (int)verbose);
+
+ return (0);
+}
+
+void
+explain(int verbose, int argc, char **argv)
+{
+ /* Initialize globals. */
+ x_init();
+
+ /* Walk the graph and populate services with inst_t's */
+ load_services();
+
+ /* Populate causes for services. */
+ determine_all_causes();
+
+ if (argc > 0) {
+ scf_error_t err;
+
+ check_msgbase();
+
+ /* Call print_service() for each operand. */
+
+ err = scf_walk_fmri(h, argc, argv, SCF_WALK_MULTIPLE,
+ print_service_cb, (void *)verbose, &exit_status, uu_warn);
+ if (err != 0) {
+ uu_warn(gettext(
+ "failed to iterate over instances: %s\n"),
+ scf_strerror(err));
+ exit_status = UU_EXIT_FATAL;
+ }
+ } else {
+ struct svcptr *spp;
+ int n, i;
+ inst_t **ary;
+
+ /* Sort g_causes. */
+
+ n = uu_list_numnodes(g_causes);
+ if (n == 0)
+ return;
+
+ check_msgbase();
+
+ ary = calloc(n, sizeof (*ary));
+ if (ary == NULL)
+ uu_die(emsg_nomem);
+
+ i = 0;
+ for (spp = uu_list_first(g_causes);
+ spp != NULL;
+ spp = uu_list_next(g_causes, spp)) {
+ (void) determine_impact(spp->svcp);
+ ary[i++] = spp->svcp;
+ }
+
+ qsort(ary, n, sizeof (*ary), impact_compar);
+
+ /* Call print_service() for each service. */
+
+ for (i = 0; i < n; ++i)
+ print_service(ary[i], verbose);
+ }
+}
diff --git a/usr/src/cmd/svc/svcs/svcs.c b/usr/src/cmd/svc/svcs/svcs.c
new file mode 100644
index 0000000000..27c013f7ab
--- /dev/null
+++ b/usr/src/cmd/svc/svcs/svcs.c
@@ -0,0 +1,2919 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * svcs - display attributes of service instances
+ *
+ * We have two output formats and six instance selection mechanisms. The
+ * primary output format is a line of attributes (selected by -o), possibly
+ * followed by process description lines (if -p is specified), for each
+ * instance selected. The columns available to display are described by the
+ * struct column columns array. The columns to actually display are kept in
+ * the opt_columns array as indices into the columns array. The selection
+ * mechanisms available for this format are service FMRIs (selects all child
+ * instances), instance FMRIs, instance FMRI glob patterns, instances with
+ * a certain restarter (-R), dependencies of instances (-d), and dependents of
+ * instances (-D). Since the lines must be sorted (per -sS), we'll just stick
+ * each into a data structure and print them in order when we're done. To
+ * avoid listing the same instance twice (when -d and -D aren't given), we'll
+ * use a hash table of FMRIs to record that we've listed (added to the tree)
+ * an instance.
+ *
+ * The secondary output format (-l "long") is a paragraph of text for the
+ * services or instances selected. Not needing to be sorted, it's implemented
+ * by just calling print_detailed() for each FMRI given.
+ */
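+
+/*
+ * Hypothetical example: "svcs -o state,fmri network/ssh" would print one
+ * sorted line per instance of network/ssh showing the STATE and FMRI
+ * columns, while "svcs -l network/ssh:default" would print a paragraph of
+ * attributes for that one instance.
+ */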
+
+#include "svcs.h"
+
+/* Get the byteorder macros to ease sorting. */
+#include <sys/types.h>
+#include <netinet/in.h>
+#include <inttypes.h>
+
+#include <sys/contract.h>
+#include <sys/ctfs.h>
+#include <sys/stat.h>
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <fnmatch.h>
+#include <libcontract.h>
+#include <libcontract_priv.h>
+#include <libintl.h>
+#include <libscf.h>
+#include <libscf_priv.h>
+#include <libuutil.h>
+#include <locale.h>
+#include <procfs.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <time.h>
+
+
+#ifndef TEXT_DOMAIN
+#define TEXT_DOMAIN "SUNW_OST_OSCMD"
+#endif /* TEXT_DOMAIN */
+
+#define LEGACY_SCHEME "lrc:"
+#define LEGACY_UNKNOWN "unknown"
+
+/* Flags for pg_get_single_val() */
+#define EMPTY_OK 0x01
+#define MULTI_OK 0x02
+
+
+/*
+ * An AVL-storable node for output lines and the keys to sort them by.
+ */
+struct avl_string {
+ uu_avl_node_t node;
+ char *key;
+ char *str;
+};
+
+/*
+ * For lists of parsed restarter FMRIs.
+ */
+struct pfmri_list {
+ const char *scope;
+ const char *service;
+ const char *instance;
+ struct pfmri_list *next;
+};
+
+
+/*
+ * Globals
+ */
+scf_handle_t *h;
+static scf_propertygroup_t *g_pg;
+static scf_property_t *g_prop;
+static scf_value_t *g_val;
+
+static size_t line_sz; /* Bytes in the header line. */
+static size_t sortkey_sz; /* Bytes in sort keys. */
+static uu_avl_pool_t *lines_pool;
+static uu_avl_t *lines; /* Output lines. */
+int exit_status;
+ssize_t max_scf_name_length;
+ssize_t max_scf_value_length;
+ssize_t max_scf_fmri_length;
+static time_t now;
+static struct pfmri_list *restarters = NULL;
+static int first_paragraph = 1; /* For -l mode. */
+static char *common_name_buf; /* Sized for maximal length value. */
+char *locale; /* Current locale. */
+
+/* Options */
+static int *opt_columns = NULL; /* Indices into columns to display. */
+static int opt_cnum = 0;
+static int opt_processes = 0; /* Print processes? */
+static int *opt_sort = NULL; /* Indices into columns to sort. */
+static int opt_snum = 0;
+static int opt_nstate_shown = 0; /* Will nstate be shown? */
+static int opt_verbose = 0;
+
+/* Minimize string constants. */
+static const char * const scf_property_state = SCF_PROPERTY_STATE;
+static const char * const scf_property_next_state = SCF_PROPERTY_NEXT_STATE;
+static const char * const scf_property_contract = SCF_PROPERTY_CONTRACT;
+
+
+/*
+ * Utility functions
+ */
+
+/*
+ * For unexpected libscf errors. The ending newline is necessary to keep
+ * uu_die() from appending the errno error.
+ */
+#ifndef NDEBUG
+void
+do_scfdie(const char *file, int line)
+{
+ uu_die(gettext("%s:%d: Unexpected libscf error: %s. Exiting.\n"),
+ file, line, scf_strerror(scf_error()));
+}
+#else
+void
+scfdie(void)
+{
+ uu_die(gettext("Unexpected libscf error: %s. Exiting.\n"),
+ scf_strerror(scf_error()));
+}
+#endif
+
+void *
+safe_malloc(size_t sz)
+{
+ void *ptr;
+
+ ptr = malloc(sz);
+ if (ptr == NULL)
+ uu_die(gettext("Out of memory"));
+
+ return (ptr);
+}
+
+char *
+safe_strdup(const char *str)
+{
+ char *cp;
+
+ cp = strdup(str);
+ if (cp == NULL)
+ uu_die(gettext("Out of memory.\n"));
+
+ return (cp);
+}
+
+static void
+sanitize_locale(char *locale)
+{
+ for (; *locale != '\0'; locale++)
+ if (!isalnum(*locale))
+ *locale = '_';
+}
+
+/*
+ * FMRI hashtable. For uniquifying listings.
+ */
+
+struct ht_elem {
+ const char *fmri;
+ struct ht_elem *next;
+};
+
+static struct ht_elem **ht_buckets;
+static uint_t ht_buckets_num;
+static uint_t ht_num;
+
+static void
+ht_init()
+{
+ ht_buckets_num = 8;
+ ht_buckets = safe_malloc(sizeof (*ht_buckets) * ht_buckets_num);
+ bzero(ht_buckets, sizeof (*ht_buckets) * ht_buckets_num);
+ ht_num = 0;
+}
+
+static uint_t
+ht_hash_fmri(const char *fmri)
+{
+ uint_t h = 0, g;
+ const char *p, *k;
+
+ /* All FMRIs begin with svc:/, so skip that part. */
+ assert(strncmp(fmri, "svc:/", sizeof ("svc:/") - 1) == 0);
+ k = fmri + sizeof ("svc:/") - 1;
+
+ /*
+ * Generic hash function from uts/common/os/modhash.c.
+ */
+ for (p = k; *p != '\0'; ++p) {
+ h = (h << 4) + *p;
+ if ((g = (h & 0xf0000000)) != 0) {
+ h ^= (g >> 24);
+ h ^= g;
+ }
+ }
+
+ return (h);
+}
+
+static void
+ht_grow()
+{
+ uint_t new_ht_buckets_num;
+ struct ht_elem **new_ht_buckets;
+ int i;
+
+ new_ht_buckets_num = ht_buckets_num * 2;
+ assert(new_ht_buckets_num > ht_buckets_num);
+ new_ht_buckets =
+ safe_malloc(sizeof (*new_ht_buckets) * new_ht_buckets_num);
+ bzero(new_ht_buckets, sizeof (*new_ht_buckets) * new_ht_buckets_num);
+
+ for (i = 0; i < ht_buckets_num; ++i) {
+ struct ht_elem *elem, *next;
+
+ for (elem = ht_buckets[i]; elem != NULL; elem = next) {
+ uint_t h;
+
+ next = elem->next;
+
+ h = ht_hash_fmri(elem->fmri);
+
+ elem->next =
+ new_ht_buckets[h & (new_ht_buckets_num - 1)];
+ new_ht_buckets[h & (new_ht_buckets_num - 1)] = elem;
+ }
+ }
+
+ free(ht_buckets);
+
+ ht_buckets = new_ht_buckets;
+ ht_buckets_num = new_ht_buckets_num;
+}
+
+/*
+ * Add an FMRI to the hash table. Returns 1 if it was already there,
+ * 0 otherwise.
+ */
+static int
+ht_add(const char *fmri)
+{
+ uint_t h;
+ struct ht_elem *elem;
+
+ h = ht_hash_fmri(fmri);
+
+ elem = ht_buckets[h & (ht_buckets_num - 1)];
+
+ for (; elem != NULL; elem = elem->next) {
+ if (strcmp(elem->fmri, fmri) == 0)
+ return (1);
+ }
+
+ /* Grow when average chain length is over 3. */
+ if (ht_num > 3 * ht_buckets_num)
+ ht_grow();
+
+ ++ht_num;
+
+ elem = safe_malloc(sizeof (*elem));
+	elem->fmri = safe_strdup(fmri);
+ elem->next = ht_buckets[h & (ht_buckets_num - 1)];
+ ht_buckets[h & (ht_buckets_num - 1)] = elem;
+
+ return (0);
+}
+
+
+
+/*
+ * Convenience libscf wrapper functions.
+ */
+
+/*
+ * Get the single value of the named property in the given property group,
+ * which must have type ty, and put it in *vp. If ty is SCF_TYPE_ASTRING, vp
+ * is taken to be a char **, and sz is the size of the buffer. sz is unused
+ * otherwise. Return 0 on success, -1 if the property doesn't exist, has the
+ * wrong type, or doesn't have a single value. If flags has EMPTY_OK, don't
+ * complain if the property has no values (but return nonzero). If flags has
+ * MULTI_OK and the property has multiple values, succeed with E2BIG.
+ */
+int
+pg_get_single_val(scf_propertygroup_t *pg, const char *propname, scf_type_t ty,
+ void *vp, size_t sz, uint_t flags)
+{
+ char *buf;
+ size_t buf_sz;
+ int ret = -1, r;
+ boolean_t multi = B_FALSE;
+
+ assert((flags & ~(EMPTY_OK | MULTI_OK)) == 0);
+
+ if (scf_pg_get_property(pg, propname, g_prop) == -1) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ goto out;
+ }
+
+ if (scf_property_is_type(g_prop, ty) != SCF_SUCCESS) {
+ if (scf_error() == SCF_ERROR_TYPE_MISMATCH)
+ goto misconfigured;
+ scfdie();
+ }
+
+ if (scf_property_get_value(g_prop, g_val) != SCF_SUCCESS) {
+ switch (scf_error()) {
+ case SCF_ERROR_NOT_FOUND:
+ if (flags & EMPTY_OK)
+ goto out;
+ goto misconfigured;
+
+ case SCF_ERROR_CONSTRAINT_VIOLATED:
+ if (flags & MULTI_OK) {
+ multi = B_TRUE;
+ break;
+ }
+ goto misconfigured;
+
+ default:
+ scfdie();
+ }
+ }
+
+ switch (ty) {
+ case SCF_TYPE_ASTRING:
+ r = scf_value_get_astring(g_val, vp, sz) > 0 ? SCF_SUCCESS : -1;
+ break;
+
+ case SCF_TYPE_BOOLEAN:
+ r = scf_value_get_boolean(g_val, (uint8_t *)vp);
+ break;
+
+ case SCF_TYPE_COUNT:
+ r = scf_value_get_count(g_val, (uint64_t *)vp);
+ break;
+
+ case SCF_TYPE_INTEGER:
+ r = scf_value_get_integer(g_val, (int64_t *)vp);
+ break;
+
+ case SCF_TYPE_TIME: {
+ int64_t sec;
+ int32_t ns;
+ r = scf_value_get_time(g_val, &sec, &ns);
+ ((struct timeval *)vp)->tv_sec = sec;
+ ((struct timeval *)vp)->tv_usec = ns / 1000;
+ break;
+ }
+
+ case SCF_TYPE_USTRING:
+ r = scf_value_get_ustring(g_val, vp, sz) > 0 ? SCF_SUCCESS : -1;
+ break;
+
+ default:
+#ifndef NDEBUG
+ uu_warn("%s:%d: Unknown type %d.\n", __FILE__, __LINE__, ty);
+#endif
+ abort();
+ }
+ if (r != SCF_SUCCESS)
+ scfdie();
+
+ ret = multi ? E2BIG : 0;
+ goto out;
+
+misconfigured:
+ buf_sz = max_scf_fmri_length + 1;
+ buf = safe_malloc(buf_sz);
+ if (scf_property_to_fmri(g_prop, buf, buf_sz) == -1)
+ scfdie();
+
+ uu_warn(gettext("Property \"%s\" is misconfigured.\n"), buf);
+
+ free(buf);
+
+out:
+ return (ret);
+}
+
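+/*
+ * Return the "running" snapshot of inst, or NULL if it does not exist.
+ */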
+static scf_snapshot_t *
+get_running_snapshot(scf_instance_t *inst)
+{
+ scf_snapshot_t *snap;
+
+ snap = scf_snapshot_create(h);
+ if (snap == NULL)
+ scfdie();
+
+ if (scf_instance_get_snapshot(inst, "running", snap) == 0)
+ return (snap);
+
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ scf_snapshot_destroy(snap);
+ return (NULL);
+}
+
+/*
+ * As pg_get_single_val(), except look the property group up in an
+ * instance. If "use_running" is set, and the running snapshot exists,
+ * do a composed lookup there. Otherwise, do an (optionally composed)
+ * lookup on the current values. Note that lookups using snapshots are
+ * always composed.
+ */
+int
+inst_get_single_val(scf_instance_t *inst, const char *pgname,
+ const char *propname, scf_type_t ty, void *vp, size_t sz, uint_t flags,
+ int use_running, int composed)
+{
+ scf_snapshot_t *snap = NULL;
+ int r;
+
+ if (use_running)
+ snap = get_running_snapshot(inst);
+ if (composed || use_running)
+ r = scf_instance_get_pg_composed(inst, snap, pgname, g_pg);
+ else
+ r = scf_instance_get_pg(inst, pgname, g_pg);
+ if (snap)
+ scf_snapshot_destroy(snap);
+ if (r == -1)
+ return (-1);
+
+ r = pg_get_single_val(g_pg, propname, ty, vp, sz, flags);
+
+ return (r);
+}
+
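+/*
+ * Return 1 if the instance is enabled, 0 if it is disabled, and -1 if the
+ * enabled property could not be read.  "temp" selects the temporary
+ * (general_ovr) property group rather than the persistent (general) one.
+ */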
+static int
+instance_enabled(scf_instance_t *inst, boolean_t temp)
+{
+ uint8_t b;
+
+ if (inst_get_single_val(inst,
+ temp ? SCF_PG_GENERAL_OVR : SCF_PG_GENERAL, SCF_PROPERTY_ENABLED,
+ SCF_TYPE_BOOLEAN, &b, 0, 0, 0, 0) != 0)
+ return (-1);
+
+ return (b ? 1 : 0);
+}
+
+/*
+ * Get a string property from the restarter property group of the given
+ * instance. Return an empty string on normal problems.
+ */
+static void
+get_restarter_string_prop(scf_instance_t *inst, const char *pname,
+ char *buf, size_t buf_sz)
+{
+ if (inst_get_single_val(inst, SCF_PG_RESTARTER, pname,
+ SCF_TYPE_ASTRING, buf, buf_sz, 0, 0, 1) != 0)
+ *buf = '\0';
+}
+
+static int
+get_restarter_time_prop(scf_instance_t *inst, const char *pname,
+ struct timeval *tvp, int ok_if_empty)
+{
+ int r;
+
+ r = inst_get_single_val(inst, SCF_PG_RESTARTER, pname, SCF_TYPE_TIME,
+ tvp, NULL, ok_if_empty ? EMPTY_OK : 0, 0, 1);
+
+ return (r == 0 ? 0 : -1);
+}
+
+static int
+get_restarter_count_prop(scf_instance_t *inst, const char *pname, uint64_t *cp,
+ uint_t flags)
+{
+ return (inst_get_single_val(inst, SCF_PG_RESTARTER, pname,
+ SCF_TYPE_COUNT, cp, 0, flags, 0, 1));
+}
+
+
+/*
+ * Generic functions
+ */
+
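+/*
+ * Append the member process IDs of the contracts named by the count-typed
+ * property "pname" in pg to *pidsp, growing the array as needed and
+ * updating *np.  Returns 0 on success, ENOENT if the property does not
+ * exist, and EINVAL if it is not of type count.
+ */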
+static int
+propvals_to_pids(scf_propertygroup_t *pg, const char *pname, pid_t **pidsp,
+ uint_t *np, scf_property_t *prop, scf_value_t *val, scf_iter_t *iter)
+{
+ scf_type_t ty;
+ int r, fd, err;
+ uint64_t c;
+ ct_stathdl_t ctst;
+ pid_t *pids;
+ uint_t m;
+
+ if (scf_pg_get_property(pg, pname, prop) != 0) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ return (ENOENT);
+ }
+
+ if (scf_property_type(prop, &ty) != 0)
+ scfdie();
+
+ if (ty != SCF_TYPE_COUNT)
+ return (EINVAL);
+
+ if (scf_iter_property_values(iter, prop) != 0)
+ scfdie();
+
+ for (;;) {
+ r = scf_iter_next_value(iter, val);
+ if (r == -1)
+ scfdie();
+ if (r == 0)
+ break;
+
+ if (scf_value_get_count(val, &c) != 0)
+ scfdie();
+
+ fd = contract_open(c, NULL, "status", O_RDONLY);
+ if (fd < 0)
+ continue;
+
+ err = ct_status_read(fd, CTD_ALL, &ctst);
+ if (err != 0) {
+ uu_warn(gettext("Could not read status of contract "
+ "%ld: %s.\n"), c, strerror(err));
+ (void) close(fd);
+ continue;
+ }
+
+ (void) close(fd);
+
+ r = ct_pr_status_get_members(ctst, &pids, &m);
+ assert(r == 0);
+
+ if (m == 0) {
+ ct_status_free(ctst);
+ continue;
+ }
+
+		*pidsp = realloc(*pidsp, (*np + m) * sizeof (**pidsp));
+ if (*pidsp == NULL)
+ uu_die(gettext("Out of memory"));
+
+ bcopy(pids, *pidsp + *np, m * sizeof (*pids));
+ *np += m;
+
+ ct_status_free(ctst);
+ }
+
+ return (0);
+}
+
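+/*
+ * Collect the PIDs of the processes in inst's contract and transient
+ * contract into *pids.  Returns 0 on success, -1 if the instance has no
+ * restarter property group.
+ */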
+static int
+instance_processes(scf_instance_t *inst, pid_t **pids, uint_t *np)
+{
+ scf_iter_t *iter;
+ int ret;
+
+ if ((iter = scf_iter_create(h)) == NULL)
+ scfdie();
+
+ if (scf_instance_get_pg(inst, SCF_PG_RESTARTER, g_pg) == 0) {
+ *pids = NULL;
+ *np = 0;
+
+ (void) propvals_to_pids(g_pg, scf_property_contract, pids, np,
+ g_prop, g_val, iter);
+
+ (void) propvals_to_pids(g_pg, SCF_PROPERTY_TRANSIENT_CONTRACT,
+ pids, np, g_prop, g_val, iter);
+
+ ret = 0;
+ } else {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ ret = -1;
+ }
+
+ scf_iter_destroy(iter);
+
+ return (ret);
+}
+
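+/*
+ * Read the psinfo_t for pid from /proc.  Returns 0 on success and -1 if
+ * the process no longer exists.
+ */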
+static int
+get_psinfo(pid_t pid, psinfo_t *psip)
+{
+ char path[100];
+ int fd;
+
+ (void) snprintf(path, sizeof (path), "/proc/%lu/psinfo", pid);
+
+ fd = open64(path, O_RDONLY);
+ if (fd < 0)
+ return (-1);
+
+ if (read(fd, psip, sizeof (*psip)) < 0)
+ uu_die(gettext("Could not read info for process %lu"), pid);
+
+ (void) close(fd);
+
+ return (0);
+}
+
+
+
+/*
+ * Column sprint and sortkey functions
+ */
+
+struct column {
+ const char *name;
+ int width;
+
+ /*
+ * This function should write the value for the column into buf, and
+ * grow or allocate buf accordingly. It should always write at least
+ * width bytes, blanking unused bytes with spaces. If the field is
+ * greater than the column width we allow it to overlap other columns.
+ * In particular, it shouldn't write any null bytes. (Though an extra
+ * null byte past the end is currently tolerated.) If the property
+ * group is non-NULL, then we are dealing with a legacy service.
+ */
+ void (*sprint)(char **, scf_walkinfo_t *);
+
+ int sortkey_width;
+
+ /*
+ * This function should write sortkey_width bytes into buf which will
+ * cause memcmp() to sort it properly. (Unlike sprint() above,
+ * however, an extra null byte may overrun the buffer.) The second
+ * argument controls whether the results are sorted in forward or
+ * reverse order.
+ */
+ void (*get_sortkey)(char *, int, scf_walkinfo_t *);
+};
+
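+/*
+ * Complement each byte of a sort key so that memcmp() orders it in the
+ * opposite direction.
+ */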
+static void
+reverse_bytes(char *buf, size_t len)
+{
+ int i;
+
+ for (i = 0; i < len; ++i)
+ buf[i] = ~buf[i];
+}
+
+/* CTID */
+#define CTID_COLUMN_WIDTH 6
+
+static void
+sprint_ctid(char **buf, scf_walkinfo_t *wip)
+{
+ int r;
+ uint64_t c;
+ size_t newsize = (*buf ? strlen(*buf) : 0) + CTID_COLUMN_WIDTH + 2;
+ char *newbuf = safe_malloc(newsize);
+
+ if (wip->pg != NULL)
+ r = pg_get_single_val(wip->pg, scf_property_contract,
+ SCF_TYPE_COUNT, &c, 0, EMPTY_OK | MULTI_OK);
+ else
+ r = get_restarter_count_prop(wip->inst, scf_property_contract,
+ &c, EMPTY_OK | MULTI_OK);
+
+ if (r == 0)
+ (void) snprintf(newbuf, newsize, "%s%*lu ",
+ *buf ? *buf : "", CTID_COLUMN_WIDTH, (ctid_t)c);
+ else if (r == E2BIG)
+ (void) snprintf(newbuf, newsize, "%s%*lu* ",
+ *buf ? *buf : "", CTID_COLUMN_WIDTH - 1, (ctid_t)c);
+ else
+ (void) snprintf(newbuf, newsize, "%s%*s ",
+ *buf ? *buf : "", CTID_COLUMN_WIDTH, "-");
+ if (*buf)
+ free(*buf);
+ *buf = newbuf;
+}
+
+#define CTID_SORTKEY_WIDTH (sizeof (uint64_t))
+
+static void
+sortkey_ctid(char *buf, int reverse, scf_walkinfo_t *wip)
+{
+ int r;
+ uint64_t c;
+
+ if (wip->pg != NULL)
+ r = pg_get_single_val(wip->pg, scf_property_contract,
+ SCF_TYPE_COUNT, &c, 0, EMPTY_OK);
+ else
+ r = get_restarter_count_prop(wip->inst, scf_property_contract,
+ &c, EMPTY_OK);
+
+ if (r == 0) {
+ /*
+ * Use the id itself, but it must be big-endian for this to
+ * work.
+ */
+ c = BE_64(c);
+
+ bcopy(&c, buf, CTID_SORTKEY_WIDTH);
+ } else {
+ bzero(buf, CTID_SORTKEY_WIDTH);
+ }
+
+ if (reverse)
+ reverse_bytes(buf, CTID_SORTKEY_WIDTH);
+}
+
+/* DESC */
+#define DESC_COLUMN_WIDTH 100
+
+static void
+sprint_desc(char **buf, scf_walkinfo_t *wip)
+{
+ char *x;
+ size_t newsize;
+ char *newbuf;
+
+ if (common_name_buf == NULL)
+ common_name_buf = safe_malloc(max_scf_value_length + 1);
+
+ bzero(common_name_buf, max_scf_value_length + 1);
+
+ if (wip->pg != NULL) {
+ common_name_buf[0] = '-';
+ } else if (inst_get_single_val(wip->inst, SCF_PG_TM_COMMON_NAME, locale,
+ SCF_TYPE_USTRING, common_name_buf, max_scf_value_length, 0,
+ 1, 1) == -1 &&
+ inst_get_single_val(wip->inst, SCF_PG_TM_COMMON_NAME, "C",
+ SCF_TYPE_USTRING, common_name_buf, max_scf_value_length, 0,
+ 1, 1) == -1) {
+ common_name_buf[0] = '-';
+ }
+
+ /*
+ * Collapse multi-line tm_common_name values into a single line.
+ */
+ for (x = common_name_buf; *x != '\0'; x++)
+ if (*x == '\n')
+ *x = ' ';
+
+ if (strlen(common_name_buf) > DESC_COLUMN_WIDTH)
+ newsize = (*buf ? strlen(*buf) : 0) +
+ strlen(common_name_buf) + 1;
+ else
+ newsize = (*buf ? strlen(*buf) : 0) + DESC_COLUMN_WIDTH + 1;
+ newbuf = safe_malloc(newsize);
+ (void) snprintf(newbuf, newsize, "%s%-*s ", *buf ? *buf : "",
+ DESC_COLUMN_WIDTH, common_name_buf);
+ if (*buf)
+ free(*buf);
+ *buf = newbuf;
+}
+
+/* ARGSUSED */
+static void
+sortkey_desc(char *buf, int reverse, scf_walkinfo_t *wip)
+{
+ bzero(buf, DESC_COLUMN_WIDTH);
+}
+
+/* State columns (STATE, NSTATE, S, N, SN, STA, NSTA) */
+
+static char
+state_to_char(const char *state)
+{
+ if (strcmp(state, SCF_STATE_STRING_UNINIT) == 0)
+ return ('u');
+
+ if (strcmp(state, SCF_STATE_STRING_OFFLINE) == 0)
+ return ('0');
+
+ if (strcmp(state, SCF_STATE_STRING_ONLINE) == 0)
+ return ('1');
+
+ if (strcmp(state, SCF_STATE_STRING_MAINT) == 0)
+ return ('m');
+
+ if (strcmp(state, SCF_STATE_STRING_DISABLED) == 0)
+ return ('d');
+
+ if (strcmp(state, SCF_STATE_STRING_DEGRADED) == 0)
+ return ('D');
+
+ if (strcmp(state, SCF_STATE_STRING_LEGACY) == 0)
+ return ('L');
+
+ return ('?');
+}
+
+/* Return true if inst is transitioning. */
+static int
+transitioning(scf_instance_t *inst)
+{
+ char nstate_name[MAX_SCF_STATE_STRING_SZ];
+
+ get_restarter_string_prop(inst, scf_property_next_state, nstate_name,
+ sizeof (nstate_name));
+
+ return (state_to_char(nstate_name) != '?');
+}
+
+/* ARGSUSED */
+static void
+sortkey_states(const char *pname, char *buf, int reverse, scf_walkinfo_t *wip)
+{
+ char state_name[MAX_SCF_STATE_STRING_SZ];
+
+ /*
+ * Lower numbers are printed first, so these are arranged from least
+ * interesting ("legacy run") to most interesting (unknown).
+ */
+ if (wip->pg == NULL) {
+ get_restarter_string_prop(wip->inst, pname, state_name,
+ sizeof (state_name));
+
+ if (strcmp(state_name, SCF_STATE_STRING_ONLINE) == 0)
+ *buf = 2;
+ else if (strcmp(state_name, SCF_STATE_STRING_DEGRADED) == 0)
+ *buf = 3;
+ else if (strcmp(state_name, SCF_STATE_STRING_OFFLINE) == 0)
+ *buf = 4;
+ else if (strcmp(state_name, SCF_STATE_STRING_MAINT) == 0)
+ *buf = 5;
+ else if (strcmp(state_name, SCF_STATE_STRING_DISABLED) == 0)
+ *buf = 1;
+ else if (strcmp(state_name, SCF_STATE_STRING_UNINIT) == 0)
+ *buf = 6;
+ else
+ *buf = 7;
+ } else
+ *buf = 0;
+
+ if (reverse)
+ *buf = 255 - *buf;
+}
+
+static void
+sprint_state(char **buf, scf_walkinfo_t *wip)
+{
+ char state_name[MAX_SCF_STATE_STRING_SZ + 1];
+ size_t newsize;
+ char *newbuf;
+
+ if (wip->pg == NULL) {
+ get_restarter_string_prop(wip->inst, scf_property_state,
+ state_name, sizeof (state_name));
+
+ /* Don't print blank fields, to ease parsing. */
+ if (state_name[0] == '\0') {
+ state_name[0] = '-';
+ state_name[1] = '\0';
+ }
+
+ if (!opt_nstate_shown && transitioning(wip->inst)) {
+ /* Append an asterisk if nstate is valid. */
+ (void) strcat(state_name, "*");
+ }
+ } else
+ (void) strcpy(state_name, SCF_STATE_STRING_LEGACY);
+
+ newsize = (*buf ? strlen(*buf) : 0) + MAX_SCF_STATE_STRING_SZ + 2;
+ newbuf = safe_malloc(newsize);
+ (void) snprintf(newbuf, newsize, "%s%-*s ", *buf ? *buf : "",
+ MAX_SCF_STATE_STRING_SZ + 1, state_name);
+
+ if (*buf)
+ free(*buf);
+ *buf = newbuf;
+}
+
+static void
+sortkey_state(char *buf, int reverse, scf_walkinfo_t *wip)
+{
+ sortkey_states(scf_property_state, buf, reverse, wip);
+}
+
+static void
+sprint_nstate(char **buf, scf_walkinfo_t *wip)
+{
+ char next_state_name[MAX_SCF_STATE_STRING_SZ];
+ boolean_t blank = 0;
+ size_t newsize;
+ char *newbuf;
+
+ if (wip->pg == NULL) {
+ get_restarter_string_prop(wip->inst, scf_property_next_state,
+ next_state_name, sizeof (next_state_name));
+
+ /* Don't print blank fields, to ease parsing. */
+ if (next_state_name[0] == '\0' ||
+ strcmp(next_state_name, SCF_STATE_STRING_NONE) == 0)
+ blank = 1;
+ } else
+ blank = 1;
+
+ if (blank) {
+ next_state_name[0] = '-';
+ next_state_name[1] = '\0';
+ }
+
+ newsize = (*buf ? strlen(*buf) : 0) + MAX_SCF_STATE_STRING_SZ + 1;
+ newbuf = safe_malloc(newsize);
+ (void) snprintf(newbuf, newsize, "%s%-*s ", *buf ? *buf : "",
+ MAX_SCF_STATE_STRING_SZ - 1, next_state_name);
+ if (*buf)
+ free(*buf);
+ *buf = newbuf;
+}
+
+static void
+sortkey_nstate(char *buf, int reverse, scf_walkinfo_t *wip)
+{
+ sortkey_states(scf_property_next_state, buf, reverse, wip);
+}
+
+static void
+sprint_s(char **buf, scf_walkinfo_t *wip)
+{
+ char tmp[3];
+ char state_name[MAX_SCF_STATE_STRING_SZ];
+ size_t newsize = (*buf ? strlen(*buf) : 0) + 4;
+ char *newbuf = safe_malloc(newsize);
+
+ if (wip->pg == NULL) {
+ get_restarter_string_prop(wip->inst, scf_property_state,
+ state_name, sizeof (state_name));
+ tmp[0] = state_to_char(state_name);
+
+ if (!opt_nstate_shown && transitioning(wip->inst))
+ tmp[1] = '*';
+ else
+ tmp[1] = ' ';
+ } else {
+ tmp[0] = 'L';
+ tmp[1] = ' ';
+ }
+	tmp[2] = '\0';
+ (void) snprintf(newbuf, newsize, "%s%-*s", *buf ? *buf : "",
+ 3, tmp);
+ if (*buf)
+ free(*buf);
+ *buf = newbuf;
+}
+
+static void
+sprint_n(char **buf, scf_walkinfo_t *wip)
+{
+ char tmp[2];
+ size_t newsize = (*buf ? strlen(*buf) : 0) + 3;
+ char *newbuf = safe_malloc(newsize);
+ char nstate_name[MAX_SCF_STATE_STRING_SZ];
+
+ if (wip->pg == NULL) {
+ get_restarter_string_prop(wip->inst, scf_property_next_state,
+ nstate_name, sizeof (nstate_name));
+
+ if (strcmp(nstate_name, SCF_STATE_STRING_NONE) == 0)
+ tmp[0] = '-';
+ else
+ tmp[0] = state_to_char(nstate_name);
+ } else
+ tmp[0] = '-';
+	tmp[1] = '\0';
+
+ (void) snprintf(newbuf, newsize, "%s%-*s ", *buf ? *buf : "",
+ 2, tmp);
+ if (*buf)
+ free(*buf);
+ *buf = newbuf;
+}
+
+static void
+sprint_sn(char **buf, scf_walkinfo_t *wip)
+{
+ char tmp[3];
+ size_t newsize = (*buf ? strlen(*buf) : 0) + 4;
+ char *newbuf = safe_malloc(newsize);
+ char nstate_name[MAX_SCF_STATE_STRING_SZ];
+ char state_name[MAX_SCF_STATE_STRING_SZ];
+
+ if (wip->pg == NULL) {
+ get_restarter_string_prop(wip->inst, scf_property_state,
+ state_name, sizeof (state_name));
+ get_restarter_string_prop(wip->inst, scf_property_next_state,
+ nstate_name, sizeof (nstate_name));
+ tmp[0] = state_to_char(state_name);
+
+ if (strcmp(nstate_name, SCF_STATE_STRING_NONE) == 0)
+ tmp[1] = '-';
+ else
+ tmp[1] = state_to_char(nstate_name);
+ } else {
+ tmp[0] = 'L';
+ tmp[1] = '-';
+ }
+
+	tmp[2] = '\0';
+ (void) snprintf(newbuf, newsize, "%s%-*s ", *buf ? *buf : "",
+ 3, tmp);
+ if (*buf)
+ free(*buf);
+ *buf = newbuf;
+}
+
+/* ARGSUSED */
+static void
+sortkey_sn(char *buf, int reverse, scf_walkinfo_t *wip)
+{
+ sortkey_state(buf, reverse, wip);
+ sortkey_nstate(buf + 1, reverse, wip);
+}
+
+static const char *
+state_abbrev(const char *state)
+{
+ if (strcmp(state, SCF_STATE_STRING_UNINIT) == 0)
+ return ("UN");
+ if (strcmp(state, SCF_STATE_STRING_OFFLINE) == 0)
+ return ("OFF");
+ if (strcmp(state, SCF_STATE_STRING_ONLINE) == 0)
+ return ("ON");
+ if (strcmp(state, SCF_STATE_STRING_MAINT) == 0)
+ return ("MNT");
+ if (strcmp(state, SCF_STATE_STRING_DISABLED) == 0)
+ return ("DIS");
+ if (strcmp(state, SCF_STATE_STRING_DEGRADED) == 0)
+ return ("DGD");
+ if (strcmp(state, SCF_STATE_STRING_LEGACY) == 0)
+ return ("LRC");
+
+ return ("?");
+}
+
+static void
+sprint_sta(char **buf, scf_walkinfo_t *wip)
+{
+ char state_name[MAX_SCF_STATE_STRING_SZ];
+ char sta[5];
+ size_t newsize = (*buf ? strlen(*buf) : 0) + 6;
+ char *newbuf = safe_malloc(newsize);
+
+ if (wip->pg == NULL)
+ get_restarter_string_prop(wip->inst, scf_property_state,
+ state_name, sizeof (state_name));
+ else
+ (void) strcpy(state_name, SCF_STATE_STRING_LEGACY);
+
+ (void) strcpy(sta, state_abbrev(state_name));
+
+ if (wip->pg == NULL && !opt_nstate_shown && transitioning(wip->inst))
+ (void) strcat(sta, "*");
+
+ (void) snprintf(newbuf, newsize, "%s%-4s ", *buf ? *buf : "", sta);
+ if (*buf)
+ free(*buf);
+ *buf = newbuf;
+}
+
+static void
+sprint_nsta(char **buf, scf_walkinfo_t *wip)
+{
+ char state_name[MAX_SCF_STATE_STRING_SZ];
+ size_t newsize = (*buf ? strlen(*buf) : 0) + 6;
+ char *newbuf = safe_malloc(newsize);
+
+ if (wip->pg == NULL)
+ get_restarter_string_prop(wip->inst, scf_property_next_state,
+ state_name, sizeof (state_name));
+ else
+ (void) strcpy(state_name, SCF_STATE_STRING_NONE);
+
+ if (strcmp(state_name, SCF_STATE_STRING_NONE) == 0)
+ (void) snprintf(newbuf, newsize, "%s%-4s ", *buf ? *buf : "",
+ "-");
+ else
+ (void) snprintf(newbuf, newsize, "%s%-4s ", *buf ? *buf : "",
+ state_abbrev(state_name));
+ if (*buf)
+ free(*buf);
+ *buf = newbuf;
+}
+
+/* FMRI */
+#define FMRI_COLUMN_WIDTH 50
+static void
+sprint_fmri(char **buf, scf_walkinfo_t *wip)
+{
+ char *fmri_buf = safe_malloc(max_scf_fmri_length + 1);
+ size_t newsize;
+ char *newbuf;
+
+ if (wip->pg == NULL) {
+ if (scf_instance_to_fmri(wip->inst, fmri_buf,
+ max_scf_fmri_length + 1) == -1)
+ scfdie();
+ } else {
+ (void) strcpy(fmri_buf, LEGACY_SCHEME);
+ if (pg_get_single_val(wip->pg, SCF_LEGACY_PROPERTY_NAME,
+ SCF_TYPE_ASTRING, fmri_buf + sizeof (LEGACY_SCHEME) - 1,
+ max_scf_fmri_length + 1 - (sizeof (LEGACY_SCHEME) - 1),
+ 0) != 0)
+ (void) strcat(fmri_buf, LEGACY_UNKNOWN);
+ }
+
+ if (strlen(fmri_buf) > FMRI_COLUMN_WIDTH)
+ newsize = (*buf ? strlen(*buf) : 0) + strlen(fmri_buf) + 2;
+ else
+ newsize = (*buf ? strlen(*buf) : 0) + FMRI_COLUMN_WIDTH + 2;
+ newbuf = safe_malloc(newsize);
+ (void) snprintf(newbuf, newsize, "%s%-*s ", *buf ? *buf : "",
+ FMRI_COLUMN_WIDTH, fmri_buf);
+ free(fmri_buf);
+ if (*buf)
+ free(*buf);
+ *buf = newbuf;
+}
+
+static void
+sortkey_fmri(char *buf, int reverse, scf_walkinfo_t *wip)
+{
+ char *tmp = NULL;
+
+ sprint_fmri(&tmp, wip);
+ bcopy(tmp, buf, FMRI_COLUMN_WIDTH);
+ free(tmp);
+ if (reverse)
+ reverse_bytes(buf, FMRI_COLUMN_WIDTH);
+}
+
+/* Component columns */
+#define COMPONENT_COLUMN_WIDTH 20
+static void
+sprint_scope(char **buf, scf_walkinfo_t *wip)
+{
+ char *scope_buf = safe_malloc(max_scf_name_length + 1);
+ size_t newsize = (*buf ? strlen(*buf) : 0) + COMPONENT_COLUMN_WIDTH + 2;
+ char *newbuf = safe_malloc(newsize);
+
+ assert(wip->scope != NULL);
+
+ if (scf_scope_get_name(wip->scope, scope_buf, max_scf_name_length) < 0)
+ scfdie();
+
+ (void) snprintf(newbuf, newsize, "%s%-*s ", *buf ? *buf : "",
+ COMPONENT_COLUMN_WIDTH, scope_buf);
+ if (*buf)
+ free(*buf);
+ *buf = newbuf;
+ free(scope_buf);
+}
+
+static void
+sortkey_scope(char *buf, int reverse, scf_walkinfo_t *wip)
+{
+ char *tmp = NULL;
+
+ sprint_scope(&tmp, wip);
+ bcopy(tmp, buf, COMPONENT_COLUMN_WIDTH);
+ free(tmp);
+ if (reverse)
+ reverse_bytes(buf, COMPONENT_COLUMN_WIDTH);
+}
+
+static void
+sprint_service(char **buf, scf_walkinfo_t *wip)
+{
+ char *svc_buf = safe_malloc(max_scf_name_length + 1);
+ char *newbuf;
+ size_t newsize;
+
+ if (wip->pg == NULL) {
+ if (scf_service_get_name(wip->svc, svc_buf,
+ max_scf_name_length + 1) < 0)
+ scfdie();
+ } else {
+ if (pg_get_single_val(wip->pg, "name", SCF_TYPE_ASTRING,
+ svc_buf, max_scf_name_length + 1, EMPTY_OK) != 0)
+ (void) strcpy(svc_buf, LEGACY_UNKNOWN);
+ }
+
+
+ if (strlen(svc_buf) > COMPONENT_COLUMN_WIDTH)
+ newsize = (*buf ? strlen(*buf) : 0) + strlen(svc_buf) + 2;
+ else
+ newsize = (*buf ? strlen(*buf) : 0) +
+ COMPONENT_COLUMN_WIDTH + 2;
+ newbuf = safe_malloc(newsize);
+ (void) snprintf(newbuf, newsize, "%s%-*s ", *buf ? *buf : "",
+ COMPONENT_COLUMN_WIDTH, svc_buf);
+ free(svc_buf);
+ if (*buf)
+ free(*buf);
+ *buf = newbuf;
+}
+
+static void
+sortkey_service(char *buf, int reverse, scf_walkinfo_t *wip)
+{
+ char *tmp = NULL;
+
+ sprint_service(&tmp, wip);
+ bcopy(tmp, buf, COMPONENT_COLUMN_WIDTH);
+ free(tmp);
+ if (reverse)
+ reverse_bytes(buf, COMPONENT_COLUMN_WIDTH);
+}
+
+/* INST */
+static void
+sprint_instance(char **buf, scf_walkinfo_t *wip)
+{
+ char *tmp = safe_malloc(max_scf_name_length + 1);
+ size_t newsize = (*buf ? strlen(*buf) : 0) + COMPONENT_COLUMN_WIDTH + 2;
+ char *newbuf = safe_malloc(newsize);
+
+ if (wip->pg == NULL) {
+ if (scf_instance_get_name(wip->inst, tmp,
+ max_scf_name_length + 1) < 0)
+ scfdie();
+ } else {
+ tmp[0] = '-';
+ tmp[1] = '\0';
+ }
+
+ (void) snprintf(newbuf, newsize, "%s%-*s ", *buf ? *buf : "",
+ COMPONENT_COLUMN_WIDTH, tmp);
+ if (*buf)
+ free(*buf);
+ *buf = newbuf;
+ free(tmp);
+}
+
+static void
+sortkey_instance(char *buf, int reverse, scf_walkinfo_t *wip)
+{
+ char *tmp = NULL;
+
+ sprint_instance(&tmp, wip);
+ bcopy(tmp, buf, COMPONENT_COLUMN_WIDTH);
+ free(tmp);
+ if (reverse)
+ reverse_bytes(buf, COMPONENT_COLUMN_WIDTH);
+}
+
+/* STIME */
+#define STIME_COLUMN_WIDTH 8
+#define FORMAT_TIME "%k:%M:%S"
+#define FORMAT_DATE "%b_%d "
+#define FORMAT_YEAR "%Y "
+
+static void
+sprint_stime(char **buf, scf_walkinfo_t *wip)
+{
+ int r;
+ struct timeval tv;
+ time_t then;
+ struct tm *tm;
+ char st_buf[STIME_COLUMN_WIDTH + 1];
+ size_t newsize = (*buf ? strlen(*buf) : 0) + STIME_COLUMN_WIDTH + 2;
+ char *newbuf = safe_malloc(newsize);
+
+ if (wip->pg == NULL) {
+ r = get_restarter_time_prop(wip->inst,
+ SCF_PROPERTY_STATE_TIMESTAMP, &tv, 0);
+ } else {
+ r = pg_get_single_val(wip->pg, SCF_PROPERTY_STATE_TIMESTAMP,
+ SCF_TYPE_TIME, &tv, NULL, 0);
+ }
+
+	if (r != 0) {
+		(void) snprintf(newbuf, newsize, "%s%-*s", *buf ? *buf : "",
+		    STIME_COLUMN_WIDTH + 1, "?");
+		if (*buf)
+			free(*buf);
+		*buf = newbuf;
+		return;
+	}
+
+ then = (time_t)tv.tv_sec;
+
+ tm = localtime(&then);
+ /*
+	 * Print the time if started within the past 24 hours, the date if
+	 * within the past 12 months, and the year if started more than
+	 * 12 months ago.
+ */
+ if (now - then < 24 * 60 * 60)
+ (void) strftime(st_buf, sizeof (st_buf), gettext(FORMAT_TIME),
+ tm);
+ else if (now - then < 12 * 30 * 24 * 60 * 60)
+ (void) strftime(st_buf, sizeof (st_buf), gettext(FORMAT_DATE),
+ tm);
+ else
+ (void) strftime(st_buf, sizeof (st_buf), gettext(FORMAT_YEAR),
+ tm);
+
+ (void) snprintf(newbuf, newsize, "%s%-*s ", *buf ? *buf : "",
+ STIME_COLUMN_WIDTH + 1, st_buf);
+ if (*buf)
+ free(*buf);
+ *buf = newbuf;
+}
+
+#define STIME_SORTKEY_WIDTH (sizeof (uint64_t) + sizeof (uint32_t))
+
+/* ARGSUSED */
+static void
+sortkey_stime(char *buf, int reverse, scf_walkinfo_t *wip)
+{
+ struct timeval tv;
+ int r;
+
+ if (wip->pg == NULL)
+ r = get_restarter_time_prop(wip->inst,
+ SCF_PROPERTY_STATE_TIMESTAMP, &tv, 0);
+ else
+ r = pg_get_single_val(wip->pg, SCF_PROPERTY_STATE_TIMESTAMP,
+ SCF_TYPE_TIME, &tv, NULL, 0);
+
+ if (r == 0) {
+ int64_t sec;
+ int32_t us;
+
+ /* Stick it straight into the buffer. */
+ sec = tv.tv_sec;
+ us = tv.tv_usec;
+
+ sec = BE_64(sec);
+ us = BE_32(us);
+ bcopy(&sec, buf, sizeof (sec));
+ bcopy(&us, buf + sizeof (sec), sizeof (us));
+ } else {
+ bzero(buf, STIME_SORTKEY_WIDTH);
+ }
+
+ if (reverse)
+ reverse_bytes(buf, STIME_SORTKEY_WIDTH);
+}
+
+
+/*
+ * Information about columns which can be displayed. If you add something,
+ * check MAX_COLUMN_NAME_LENGTH_STR & update description_of_column() below.
+ */
+static const struct column columns[] = {
+ { "CTID", CTID_COLUMN_WIDTH, sprint_ctid,
+ CTID_SORTKEY_WIDTH, sortkey_ctid },
+ { "DESC", DESC_COLUMN_WIDTH, sprint_desc,
+ DESC_COLUMN_WIDTH, sortkey_desc },
+ { "FMRI", FMRI_COLUMN_WIDTH, sprint_fmri,
+ FMRI_COLUMN_WIDTH, sortkey_fmri },
+ { "INST", COMPONENT_COLUMN_WIDTH, sprint_instance,
+ COMPONENT_COLUMN_WIDTH, sortkey_instance },
+ { "N", 1, sprint_n, 1, sortkey_nstate },
+ { "NSTA", 4, sprint_nsta, 1, sortkey_nstate },
+ { "NSTATE", MAX_SCF_STATE_STRING_SZ - 1, sprint_nstate,
+ 1, sortkey_nstate },
+ { "S", 2, sprint_s, 1, sortkey_state },
+ { "SCOPE", COMPONENT_COLUMN_WIDTH, sprint_scope,
+ COMPONENT_COLUMN_WIDTH, sortkey_scope },
+ { "SN", 2, sprint_sn, 2, sortkey_sn },
+ { "SVC", COMPONENT_COLUMN_WIDTH, sprint_service,
+ COMPONENT_COLUMN_WIDTH, sortkey_service },
+ { "STA", 4, sprint_sta, 1, sortkey_state },
+ { "STATE", MAX_SCF_STATE_STRING_SZ - 1 + 1, sprint_state,
+ 1, sortkey_state },
+ { "STIME", STIME_COLUMN_WIDTH, sprint_stime,
+ STIME_SORTKEY_WIDTH, sortkey_stime },
+};
+
+#define MAX_COLUMN_NAME_LENGTH_STR "6"
+
+static const int ncolumns = sizeof (columns) / sizeof (columns[0]);
+
+/*
+ * Necessary thanks to gettext() & xgettext.
+ */
+static const char *
+description_of_column(int c)
+{
+ const char *s = NULL;
+
+ switch (c) {
+ case 0:
+ s = gettext("contract ID for service (see contract(4))");
+ break;
+ case 1:
+ s = gettext("human-readable description of the service");
+ break;
+ case 2:
+ s = gettext("Fault Managed Resource Identifier for service");
+ break;
+ case 3:
+ s = gettext("portion of the FMRI indicating service instance");
+ break;
+ case 4:
+ s = gettext("abbreviation for next state (if in transition)");
+ break;
+ case 5:
+ s = gettext("abbreviation for next state (if in transition)");
+ break;
+ case 6:
+ s = gettext("name for next state (if in transition)");
+ break;
+ case 7:
+ s = gettext("abbreviation for current state");
+ break;
+ case 8:
+ s = gettext("name for scope associated with service");
+ break;
+ case 9:
+ s = gettext("abbreviation for current state and next state");
+ break;
+ case 10:
+ s = gettext("portion of the FMRI representing service name");
+ break;
+ case 11:
+ s = gettext("abbreviation for current state");
+ break;
+ case 12:
+ s = gettext("name for current state");
+ break;
+ case 13:
+ s = gettext("time of last state change");
+ break;
+ }
+
+ assert(s != NULL);
+ return (s);
+}
+
+
+static void
+print_usage(const char *progname, FILE *f, boolean_t do_exit)
+{
+ (void) fprintf(f, gettext(
+ "Usage: %1$s [-aHpv] [-o col[,col ... ]] [-R restarter] "
+ "[-sS col] [<service> ...]\n"
+ " %1$s -d | -D [-Hpv] [-o col[,col ... ]] [-sS col] "
+ "[<service> ...]\n"
+ " %1$s -l <service> ...\n"
+ " %1$s -x [-v] [<service> ...]\n"
+ " %1$s -?\n"), progname);
+
+ if (do_exit)
+ exit(UU_EXIT_USAGE);
+}
+
+#define argserr(progname) print_usage(progname, stderr, B_TRUE)
+
+static void
+print_help(const char *progname)
+{
+ int i;
+
+ print_usage(progname, stdout, B_FALSE);
+
+ (void) printf(gettext("\n"
+ "\t-a list all service instances rather than "
+ "only those that are enabled\n"
+ "\t-d list dependencies of the specified service(s)\n"
+ "\t-D list dependents of the specified service(s)\n"
+ "\t-H omit header line from output\n"
+ "\t-l list detailed information about the specified service(s)\n"
+ "\t-o list only the specified columns in the output\n"
+ "\t-p list process IDs and names associated with each service\n"
+ "\t-R list only those services with the specified restarter\n"
+ "\t-s sort output in ascending order by the specified column(s)\n"
+ "\t-S sort output in descending order by the specified column(s)\n"
+ "\t-v list verbose information appropriate to the type of output\n"
+ "\t-x explain the status of services that might require maintenance,\n"
+ "\t or explain the status of the specified service(s)\n"
+ "\n\t"
+ "Services can be specified using an FMRI, abbreviation, or fnmatch(5)\n"
+ "\tpattern, as shown in these examples for svc:/network/smtp:sendmail\n"
+ "\n"
+ "\t%1$s [opts] svc:/network/smtp:sendmail\n"
+ "\t%1$s [opts] network/smtp:sendmail\n"
+ "\t%1$s [opts] network/*mail\n"
+ "\t%1$s [opts] network/smtp\n"
+ "\t%1$s [opts] smtp:sendmail\n"
+ "\t%1$s [opts] smtp\n"
+ "\t%1$s [opts] sendmail\n"
+ "\n\t"
+ "Columns for output or sorting can be specified using these names:\n"
+ "\n"), progname);
+
+ for (i = 0; i < ncolumns; i++) {
+ (void) printf("\t%-" MAX_COLUMN_NAME_LENGTH_STR "s %s\n",
+ columns[i].name, description_of_column(i));
+ }
+}
+
+
+/*
+ * A getsubopt()-like function which returns an index into the columns table.
+ * On success, *optionp is set to point to the next sub-option, or the
+ * terminating null if there are none.
+ */
+static int
+getcolumnopt(char **optionp)
+{
+ char *str = *optionp, *cp;
+ int i;
+
+ assert(optionp != NULL);
+ assert(*optionp != NULL);
+
+ cp = strchr(*optionp, ',');
+ if (cp != NULL)
+ *cp = '\0';
+
+ for (i = 0; i < ncolumns; ++i) {
+ if (strcasecmp(str, columns[i].name) == 0) {
+ if (cp != NULL)
+ *optionp = cp + 1;
+ else
+ *optionp = strchr(*optionp, '\0');
+
+ return (i);
+ }
+ }
+
+ return (-1);
+}
+
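+/*
+ * Print the header line for the columns selected with -o (or the default
+ * set), trimming trailing whitespace.
+ */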
+static void
+print_header()
+{
+ int i;
+ char *line_buf, *cp;
+
+ line_buf = safe_malloc(line_sz);
+ cp = line_buf;
+ for (i = 0; i < opt_cnum; ++i) {
+ const struct column * const colp = &columns[opt_columns[i]];
+
+ (void) snprintf(cp, colp->width + 1, "%-*s", colp->width,
+ colp->name);
+ cp += colp->width;
+ *cp++ = ' ';
+ }
+
+ /* Trim the trailing whitespace */
+ --cp;
+ while (*cp == ' ')
+ --cp;
+ *(cp+1) = '\0';
+ (void) puts(line_buf);
+
+ free(line_buf);
+}
+
+
+
+/*
+ * Long listing (-l) functions.
+ */
+
+static int
+pidcmp(const void *l, const void *r)
+{
+ pid_t lp = *(pid_t *)l, rp = *(pid_t *)r;
+
+ if (lp < rp)
+ return (-1);
+ if (lp > rp)
+ return (1);
+ return (0);
+}
+
+/*
+ * This is the strlen() of the longest label ("description"), plus intercolumn
+ * space.
+ */
+#define DETAILED_WIDTH (11 + 2)
+
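+/*
+ * For -l with -p: print a "process" line for each PID in the instance's
+ * contracts, along with its arguments from /proc.
+ */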
+static void
+detailed_list_processes(scf_instance_t *inst)
+{
+ uint64_t c;
+ pid_t *pids;
+ uint_t i, n;
+ psinfo_t psi;
+
+ if (get_restarter_count_prop(inst, scf_property_contract, &c,
+ EMPTY_OK) != 0)
+ return;
+
+ if (instance_processes(inst, &pids, &n) != 0)
+ return;
+
+ qsort(pids, n, sizeof (*pids), pidcmp);
+
+ for (i = 0; i < n; ++i) {
+ (void) printf("%-*s%lu", DETAILED_WIDTH, gettext("process"),
+ pids[i]);
+
+ if (get_psinfo(pids[i], &psi) == 0)
+ (void) printf(" %.*s", PRARGSZ, psi.pr_psargs);
+
+ (void) putchar('\n');
+ }
+
+ free(pids);
+}
+
+/*
+ * Determines the state of a dependency. If the FMRI specifies a file, then we
+ * fake up a state based on whether we can access the file.
+ */
+static void
+get_fmri_state(char *fmri, char *state, size_t state_sz)
+{
+ char *lfmri;
+ const char *svc_name, *inst_name, *pg_name, *path;
+ scf_service_t *svc;
+ scf_instance_t *inst;
+ scf_iter_t *iter;
+
+ lfmri = safe_strdup(fmri);
+
+ /*
+ * Check for file:// dependencies
+ */
+ if (scf_parse_file_fmri(lfmri, NULL, &path) == SCF_SUCCESS) {
+ struct stat64 statbuf;
+ const char *msg;
+
+ if (stat64(path, &statbuf) == 0)
+ msg = "online";
+ else if (errno == ENOENT)
+ msg = "absent";
+ else
+ msg = "unknown";
+
+ (void) strlcpy(state, msg, state_sz);
+ return;
+ }
+
+ /*
+ * scf_parse_file_fmri() may have overwritten part of the string, so
+ * copy it back.
+ */
+ (void) strcpy(lfmri, fmri);
+
+ if (scf_parse_svc_fmri(lfmri, NULL, &svc_name, &inst_name,
+ &pg_name, NULL) != SCF_SUCCESS) {
+ free(lfmri);
+ (void) strlcpy(state, "invalid", state_sz);
+ return;
+ }
+
+ free(lfmri);
+
+ if (svc_name == NULL || pg_name != NULL) {
+ (void) strlcpy(state, "invalid", state_sz);
+ return;
+ }
+
+ if (inst_name != NULL) {
+ /* instance: get state */
+ inst = scf_instance_create(h);
+ if (inst == NULL)
+ scfdie();
+
+ if (scf_handle_decode_fmri(h, fmri, NULL, NULL, inst, NULL,
+ NULL, SCF_DECODE_FMRI_EXACT) == SCF_SUCCESS)
+ get_restarter_string_prop(inst, scf_property_state,
+ state, state_sz);
+ else {
+ switch (scf_error()) {
+ case SCF_ERROR_INVALID_ARGUMENT:
+ (void) strlcpy(state, "invalid", state_sz);
+ break;
+ case SCF_ERROR_NOT_FOUND:
+ (void) strlcpy(state, "absent", state_sz);
+ break;
+
+ default:
+ scfdie();
+ }
+ }
+
+ scf_instance_destroy(inst);
+ return;
+ }
+
+ /*
+ * service: If only one instance, use that state. Otherwise, say
+ * "multiple".
+ */
+ if ((svc = scf_service_create(h)) == NULL ||
+ (inst = scf_instance_create(h)) == NULL ||
+ (iter = scf_iter_create(h)) == NULL)
+ scfdie();
+
+ if (scf_handle_decode_fmri(h, fmri, NULL, svc, NULL, NULL, NULL,
+ SCF_DECODE_FMRI_EXACT) != SCF_SUCCESS) {
+ switch (scf_error()) {
+ case SCF_ERROR_INVALID_ARGUMENT:
+ (void) strlcpy(state, "invalid", state_sz);
+ goto out;
+ case SCF_ERROR_NOT_FOUND:
+ (void) strlcpy(state, "absent", state_sz);
+ goto out;
+
+ default:
+ scfdie();
+ }
+ }
+
+ if (scf_iter_service_instances(iter, svc) != SCF_SUCCESS)
+ scfdie();
+
+ switch (scf_iter_next_instance(iter, inst)) {
+ case 0:
+ (void) strlcpy(state, "absent", state_sz);
+ goto out;
+
+ case 1:
+ break;
+
+ default:
+ scfdie();
+ }
+
+ /* Get the state in case this is the only instance. */
+ get_restarter_string_prop(inst, scf_property_state, state, state_sz);
+
+ switch (scf_iter_next_instance(iter, inst)) {
+ case 0:
+ break;
+
+ case 1:
+ /* Nope, multiple instances. */
+ (void) strlcpy(state, "multiple", state_sz);
+ goto out;
+
+ default:
+ scfdie();
+ }
+
+out:
+ scf_iter_destroy(iter);
+ scf_instance_destroy(inst);
+ scf_service_destroy(svc);
+}
+
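+/*
+ * Print a "dependency" line for the given dependency property group:
+ * the grouping and restart_on values followed by each entity FMRI and
+ * its current state.
+ */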
+static void
+print_detailed_dependency(scf_propertygroup_t *pg)
+{
+ scf_property_t *eprop;
+ scf_iter_t *iter;
+ scf_type_t ty;
+ char *val_buf;
+ int i;
+
+ if ((eprop = scf_property_create(h)) == NULL ||
+ (iter = scf_iter_create(h)) == NULL)
+ scfdie();
+
+ val_buf = safe_malloc(max_scf_value_length + 1);
+
+ if (scf_pg_get_property(pg, SCF_PROPERTY_ENTITIES, eprop) !=
+ SCF_SUCCESS ||
+ scf_property_type(eprop, &ty) != SCF_SUCCESS ||
+ ty != SCF_TYPE_FMRI)
+ return;
+
+ (void) printf("%-*s", DETAILED_WIDTH, gettext("dependency"));
+
+ /* Print the grouping */
+ if (pg_get_single_val(pg, SCF_PROPERTY_GROUPING, SCF_TYPE_ASTRING,
+ val_buf, max_scf_value_length + 1, 0) == 0)
+ (void) fputs(val_buf, stdout);
+ else
+ (void) putchar('?');
+
+ (void) putchar('/');
+
+ if (pg_get_single_val(pg, SCF_PROPERTY_RESTART_ON, SCF_TYPE_ASTRING,
+ val_buf, max_scf_value_length + 1, 0) == 0)
+ (void) fputs(val_buf, stdout);
+ else
+ (void) putchar('?');
+
+ /* Print the dependency entities. */
+ if (scf_iter_property_values(iter, eprop) == -1)
+ scfdie();
+
+ while ((i = scf_iter_next_value(iter, g_val)) == 1) {
+ char state[MAX_SCF_STATE_STRING_SZ];
+
+ if (scf_value_get_astring(g_val, val_buf,
+ max_scf_value_length + 1) < 0)
+ scfdie();
+
+ (void) putchar(' ');
+ (void) fputs(val_buf, stdout);
+
+ /* Print the state. */
+ state[0] = '-';
+ state[1] = '\0';
+
+ get_fmri_state(val_buf, state, sizeof (state));
+
+ (void) printf(" (%s)", state);
+ }
+ if (i == -1)
+ scfdie();
+
+ (void) putchar('\n');
+
+ free(val_buf);
+ scf_iter_destroy(iter);
+ scf_property_destroy(eprop);
+}
+
+/* ARGSUSED */
+static int
+print_detailed(void *unused, scf_walkinfo_t *wip)
+{
+ scf_snapshot_t *snap;
+ scf_propertygroup_t *rpg;
+ scf_iter_t *pg_iter;
+
+ char *buf;
+ char *timebuf;
+ size_t tbsz;
+ int ret;
+ uint64_t c;
+ int temp, perm;
+ struct timeval tv;
+ time_t stime;
+ struct tm *tmp;
+
+ const char * const fmt = "%-*s%s\n";
+
+ assert(wip->pg == NULL);
+
+ rpg = scf_pg_create(h);
+ if (rpg == NULL)
+ scfdie();
+
+ if (first_paragraph)
+ first_paragraph = 0;
+ else
+ (void) putchar('\n');
+
+ buf = safe_malloc(max_scf_fmri_length + 1);
+
+ if (scf_instance_to_fmri(wip->inst, buf, max_scf_fmri_length + 1) != -1)
+ (void) printf(fmt, DETAILED_WIDTH, "fmri", buf);
+
+ if (common_name_buf == NULL)
+ common_name_buf = safe_malloc(max_scf_value_length + 1);
+
+ if (inst_get_single_val(wip->inst, SCF_PG_TM_COMMON_NAME, locale,
+ SCF_TYPE_USTRING, common_name_buf, max_scf_value_length, 0, 1, 1)
+ == 0)
+ (void) printf(fmt, DETAILED_WIDTH, gettext("name"),
+ common_name_buf);
+ else if (inst_get_single_val(wip->inst, SCF_PG_TM_COMMON_NAME, "C",
+ SCF_TYPE_USTRING, common_name_buf, max_scf_value_length, 0, 1, 1)
+ == 0)
+ (void) printf(fmt, DETAILED_WIDTH, gettext("name"),
+ common_name_buf);
+
+ /*
+ * Synthesize an 'enabled' property that hides the enabled_ovr
+ * implementation from the user. If the service has been temporarily
+ * set to a state other than its permanent value, alert the user with
+ * a '(temporary)' message.
+ */
+ perm = instance_enabled(wip->inst, B_FALSE);
+ temp = instance_enabled(wip->inst, B_TRUE);
+ if (temp != -1) {
+ if (temp != perm)
+ (void) printf(gettext("%-*s%s (temporary)\n"),
+ DETAILED_WIDTH, gettext("enabled"),
+ temp ? gettext("true") : gettext("false"));
+ else
+ (void) printf(fmt, DETAILED_WIDTH,
+ gettext("enabled"), temp ? gettext("true") :
+ gettext("false"));
+ } else if (perm != -1) {
+ (void) printf(fmt, DETAILED_WIDTH, gettext("enabled"),
+ perm ? gettext("true") : gettext("false"));
+ }
+
+ /*
+ * Property values may be longer than max_scf_fmri_length, but these
+ * shouldn't be, so we'll just reuse buf. The user can use svcprop if
+ * he suspects something fishy.
+ */
+ if (scf_instance_get_pg(wip->inst, SCF_PG_RESTARTER, rpg) != 0) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ scf_pg_destroy(rpg);
+ rpg = NULL;
+ }
+
+ if (rpg) {
+ if (pg_get_single_val(rpg, scf_property_state, SCF_TYPE_ASTRING,
+ buf, max_scf_fmri_length + 1, 0) == 0)
+ (void) printf(fmt, DETAILED_WIDTH, gettext("state"),
+ buf);
+
+ if (pg_get_single_val(rpg, scf_property_next_state,
+ SCF_TYPE_ASTRING, buf, max_scf_fmri_length + 1, 0) == 0)
+ (void) printf(fmt, DETAILED_WIDTH,
+ gettext("next_state"), buf);
+
+ if (pg_get_single_val(rpg, SCF_PROPERTY_STATE_TIMESTAMP,
+ SCF_TYPE_TIME, &tv, NULL, 0) == 0) {
+ stime = tv.tv_sec;
+ tmp = localtime(&stime);
+ for (tbsz = 50; ; tbsz *= 2) {
+ timebuf = safe_malloc(tbsz);
+ if (strftime(timebuf, tbsz, NULL, tmp) != 0)
+ break;
+ free(timebuf);
+ }
+ (void) printf(fmt, DETAILED_WIDTH,
+ gettext("state_time"),
+ timebuf);
+ free(timebuf);
+ }
+
+ }
+
+	if (rpg != NULL && pg_get_single_val(rpg, SCF_PROPERTY_ALT_LOGFILE,
+	    SCF_TYPE_ASTRING, buf, max_scf_fmri_length + 1, 0) == 0)
+		(void) printf(fmt, DETAILED_WIDTH, gettext("alt_logfile"),
+		    buf);
+
+	if (rpg != NULL && pg_get_single_val(rpg, SCF_PROPERTY_LOGFILE,
+	    SCF_TYPE_ASTRING, buf, max_scf_fmri_length + 1, 0) == 0)
+		(void) printf(fmt, DETAILED_WIDTH, gettext("logfile"), buf);
+
+ if (inst_get_single_val(wip->inst, SCF_PG_GENERAL,
+ SCF_PROPERTY_RESTARTER, SCF_TYPE_ASTRING, buf,
+ max_scf_fmri_length + 1, 0, 0, 1) == 0)
+ (void) printf(fmt, DETAILED_WIDTH, gettext("restarter"), buf);
+ else
+ (void) printf(fmt, DETAILED_WIDTH, gettext("restarter"),
+ SCF_SERVICE_STARTD);
+
+ free(buf);
+
+ if (rpg) {
+ scf_iter_t *iter;
+
+ if ((iter = scf_iter_create(h)) == NULL)
+ scfdie();
+
+ if (scf_pg_get_property(rpg, scf_property_contract, g_prop) ==
+ 0) {
+ if (scf_property_is_type(g_prop, SCF_TYPE_COUNT) == 0) {
+ (void) printf("%-*s", DETAILED_WIDTH,
+ "contract_id");
+
+ if (scf_iter_property_values(iter, g_prop) != 0)
+ scfdie();
+
+ for (;;) {
+ ret = scf_iter_next_value(iter, g_val);
+ if (ret == -1)
+ scfdie();
+ if (ret == 0)
+ break;
+
+ if (scf_value_get_count(g_val, &c) != 0)
+ scfdie();
+ (void) printf("%lu ", (ctid_t)c);
+ }
+
+ (void) putchar('\n');
+ } else {
+ if (scf_error() != SCF_ERROR_TYPE_MISMATCH)
+ scfdie();
+ }
+ } else {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+ }
+
+ scf_iter_destroy(iter);
+ } else {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+ }
+
+ scf_pg_destroy(rpg);
+
+ /* Dependencies. */
+ if ((pg_iter = scf_iter_create(h)) == NULL)
+ scfdie();
+
+ snap = get_running_snapshot(wip->inst);
+
+ if (scf_iter_instance_pgs_typed_composed(pg_iter, wip->inst, snap,
+ SCF_GROUP_DEPENDENCY) != SCF_SUCCESS)
+ scfdie();
+
+ while ((ret = scf_iter_next_pg(pg_iter, g_pg)) == 1)
+ print_detailed_dependency(g_pg);
+ if (ret == -1)
+ scfdie();
+
+ scf_snapshot_destroy(snap);
+ scf_iter_destroy(pg_iter);
+
+ if (opt_processes)
+ detailed_list_processes(wip->inst);
+
+ return (0);
+}
+
+/*
+ * Append a one-lined description of each process in inst's contract(s) and
+ * return the augmented string.
+ */
+static char *
+add_processes(char *line, scf_instance_t *inst, scf_propertygroup_t *lpg)
+{
+ pid_t *pids = NULL;
+ uint_t i, n = 0;
+
+ if (lpg == NULL) {
+ if (instance_processes(inst, &pids, &n) != 0)
+ return (line);
+ } else {
+ scf_iter_t *iter;
+
+ if ((iter = scf_iter_create(h)) == NULL)
+ scfdie();
+
+ (void) propvals_to_pids(lpg, scf_property_contract, &pids, &n,
+ g_prop, g_val, iter);
+
+ scf_iter_destroy(iter);
+ }
+
+ if (n == 0)
+ return (line);
+
+ qsort(pids, n, sizeof (*pids), pidcmp);
+
+ for (i = 0; i < n; ++i) {
+ char *cp, stime[9];
+ psinfo_t psi;
+ struct tm *tm;
+ int len = 1 + 15 + 8 + 3 + 6 + 1 + PRFNSZ;
+
+ if (get_psinfo(pids[i], &psi) != 0)
+ continue;
+
+ line = realloc(line, strlen(line) + len);
+ if (line == NULL)
+ uu_die(gettext("Out of memory.\n"));
+
+ cp = strchr(line, '\0');
+
+ tm = localtime(&psi.pr_start.tv_sec);
+
+ /*
+		 * Print the time if started within the past 24 hours, the
+		 * date if within the past 12 months, and the year if started
+		 * more than 12 months ago.
+ */
+ if (now - psi.pr_start.tv_sec < 24 * 60 * 60)
+ (void) strftime(stime, sizeof (stime), gettext(FORMAT_TIME),
+ tm);
+ else if (now - psi.pr_start.tv_sec < 12 * 30 * 24 * 60 * 60)
+ (void) strftime(stime, sizeof (stime), gettext(FORMAT_DATE),
+ tm);
+ else
+ (void) strftime(stime, sizeof (stime), gettext(FORMAT_YEAR),
+ tm);
+
+ (void) snprintf(cp, len, "\n %-8s %6ld %.*s",
+ stime, pids[i], PRFNSZ, psi.pr_fname);
+ }
+
+ free(pids);
+
+ return (line);
+}
+
+/*ARGSUSED*/
+static int
+list_instance(void *unused, scf_walkinfo_t *wip)
+{
+ struct avl_string *lp;
+ char *cp;
+ int i;
+ uu_avl_index_t idx;
+
+ /*
+ * If the user has specified a restarter, check for a match first
+ */
+ if (restarters != NULL) {
+ struct pfmri_list *rest;
+ int match;
+ char *restarter_fmri;
+ const char *scope_name, *svc_name, *inst_name, *pg_name;
+
+ /* legacy services don't have restarters */
+ if (wip->pg != NULL)
+ return (0);
+
+ restarter_fmri = safe_malloc(max_scf_fmri_length + 1);
+
+ if (inst_get_single_val(wip->inst, SCF_PG_GENERAL,
+ SCF_PROPERTY_RESTARTER, SCF_TYPE_ASTRING, restarter_fmri,
+ max_scf_fmri_length + 1, 0, 0, 1) != 0)
+ (void) strcpy(restarter_fmri, SCF_SERVICE_STARTD);
+
+ if (scf_parse_svc_fmri(restarter_fmri, &scope_name, &svc_name,
+ &inst_name, &pg_name, NULL) != SCF_SUCCESS) {
+ free(restarter_fmri);
+ return (0);
+ }
+
+ match = 0;
+ for (rest = restarters; rest != NULL; rest = rest->next) {
+ if (strcmp(rest->scope, scope_name) == 0 &&
+ strcmp(rest->service, svc_name) == 0 &&
+ strcmp(rest->instance, inst_name) == 0)
+ match = 1;
+ }
+
+ free(restarter_fmri);
+
+ if (!match)
+ return (0);
+ }
+
+ if (wip->pg == NULL && ht_buckets != NULL && ht_add(wip->fmri)) {
+ /* It was already there. */
+ return (0);
+ }
+
+ lp = safe_malloc(sizeof (*lp));
+
+ lp->str = NULL;
+ for (i = 0; i < opt_cnum; ++i) {
+ columns[opt_columns[i]].sprint(&lp->str, wip);
+ }
+ cp = lp->str + strlen(lp->str);
+ cp--;
+ while (*cp == ' ')
+ cp--;
+ *(cp+1) = '\0';
+
+ /* If we're supposed to list the processes, too, do that now. */
+ if (opt_processes)
+ lp->str = add_processes(lp->str, wip->inst, wip->pg);
+
+ /* Create the sort key. */
+ cp = lp->key = safe_malloc(sortkey_sz);
+ for (i = 0; i < opt_snum; ++i) {
+ int j = opt_sort[i] & 0xff;
+
+ assert(columns[j].get_sortkey != NULL);
+ columns[j].get_sortkey(cp, opt_sort[i] & ~0xff, wip);
+ cp += columns[j].sortkey_width;
+ }
+
+ /* Insert into AVL tree. */
+ uu_avl_node_init(lp, &lp->node, lines_pool);
+ (void) uu_avl_find(lines, lp, NULL, &idx);
+ uu_avl_insert(lines, lp, idx);
+
+ return (0);
+}
+
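+/*
+ * As list_instance(), but skip instances which are neither permanently nor
+ * temporarily enabled.  Legacy services (wip->pg != NULL) are always listed.
+ */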
+static int
+list_if_enabled(void *unused, scf_walkinfo_t *wip)
+{
+ if (wip->pg != NULL ||
+ instance_enabled(wip->inst, B_FALSE) == 1 ||
+ instance_enabled(wip->inst, B_TRUE) == 1)
+ return (list_instance(unused, wip));
+
+ return (0);
+}
+
+/*
+ * Service FMRI selection: Lookup and call list_instance() for the instances.
+ * Instance FMRI selection: Lookup and call list_instance().
+ *
+ * Note: This is shoehorned into a walk_dependencies() callback prototype so
+ * it can be used in list_dependencies.
+ */
+static int
+list_svc_or_inst_fmri(void *complain, scf_walkinfo_t *wip)
+{
+ char *fmri;
+ const char *svc_name, *inst_name, *pg_name, *save;
+ scf_iter_t *iter;
+ int ret;
+
+ fmri = safe_strdup(wip->fmri);
+
+ if (scf_parse_svc_fmri(fmri, NULL, &svc_name, &inst_name, &pg_name,
+ NULL) != SCF_SUCCESS) {
+ if (complain)
+ uu_warn(gettext("FMRI \"%s\" is invalid.\n"),
+ wip->fmri);
+ exit_status = UU_EXIT_FATAL;
+ free(fmri);
+ return (0);
+ }
+
+ /*
+ * Yes, this invalidates *_name, but we only care whether they're NULL
+ * or not.
+ */
+ free(fmri);
+
+ if (svc_name == NULL || pg_name != NULL) {
+ if (complain)
+ uu_warn(gettext("FMRI \"%s\" does not designate a "
+ "service or instance.\n"), wip->fmri);
+ return (0);
+ }
+
+ if (inst_name != NULL) {
+ /* instance */
+ if (scf_handle_decode_fmri(h, wip->fmri, wip->scope, wip->svc,
+ wip->inst, NULL, NULL, 0) != SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ if (complain)
+ uu_warn(gettext(
+ "Instance \"%s\" does not exist.\n"),
+ wip->fmri);
+ return (0);
+ }
+
+ return (list_instance(NULL, wip));
+ }
+
+ /* service: Walk the instances. */
+ if (scf_handle_decode_fmri(h, wip->fmri, wip->scope, wip->svc, NULL,
+ NULL, NULL, 0) != SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ if (complain)
+ uu_warn(gettext("Service \"%s\" does not exist.\n"),
+ wip->fmri);
+
+ exit_status = UU_EXIT_FATAL;
+
+ return (0);
+ }
+
+ iter = scf_iter_create(h);
+ if (iter == NULL)
+ scfdie();
+
+ if (scf_iter_service_instances(iter, wip->svc) != SCF_SUCCESS)
+ scfdie();
+
+ if ((fmri = malloc(max_scf_fmri_length + 1)) == NULL) {
+ scf_iter_destroy(iter);
+ exit_status = UU_EXIT_FATAL;
+ return (0);
+ }
+
+ save = wip->fmri;
+ wip->fmri = fmri;
+ while ((ret = scf_iter_next_instance(iter, wip->inst)) == 1) {
+ if (scf_instance_to_fmri(wip->inst, fmri,
+ max_scf_fmri_length + 1) <= 0)
+ scfdie();
+ (void) list_instance(NULL, wip);
+ }
+ free(fmri);
+ wip->fmri = save;
+ if (ret == -1)
+ scfdie();
+
+ exit_status = UU_EXIT_OK;
+
+ scf_iter_destroy(iter);
+
+ return (0);
+}
+
+/*
+ * Dependency selection: Straightforward since each instance lists the
+ * services it depends on.
+ */
+
+static void
+walk_dependencies(scf_walkinfo_t *wip, scf_walk_callback callback, void *data)
+{
+ scf_snapshot_t *snap;
+ scf_iter_t *iter, *viter;
+ int ret, vret;
+ char *dep;
+
+ assert(wip->inst != NULL);
+
+ if ((iter = scf_iter_create(h)) == NULL ||
+ (viter = scf_iter_create(h)) == NULL)
+ scfdie();
+
+ snap = get_running_snapshot(wip->inst);
+
+ if (scf_iter_instance_pgs_typed_composed(iter, wip->inst, snap,
+ SCF_GROUP_DEPENDENCY) != SCF_SUCCESS)
+ scfdie();
+
+ dep = safe_malloc(max_scf_value_length + 1);
+
+ while ((ret = scf_iter_next_pg(iter, g_pg)) == 1) {
+ scf_type_t ty;
+
+ /* Ignore exclude_any dependencies. */
+ if (scf_pg_get_property(g_pg, SCF_PROPERTY_GROUPING, g_prop) !=
+ SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ continue;
+ }
+
+ if (scf_property_type(g_prop, &ty) != SCF_SUCCESS)
+ scfdie();
+
+ if (ty != SCF_TYPE_ASTRING)
+ continue;
+
+ if (scf_property_get_value(g_prop, g_val) != SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_CONSTRAINT_VIOLATED)
+ scfdie();
+
+ continue;
+ }
+
+ if (scf_value_get_astring(g_val, dep,
+ max_scf_value_length + 1) < 0)
+ scfdie();
+
+ if (strcmp(dep, SCF_DEP_EXCLUDE_ALL) == 0)
+ continue;
+
+ if (scf_pg_get_property(g_pg, SCF_PROPERTY_ENTITIES, g_prop) !=
+ SCF_SUCCESS) {
+ if (scf_error() != SCF_ERROR_NOT_FOUND)
+ scfdie();
+
+ continue;
+ }
+
+ if (scf_iter_property_values(viter, g_prop) != SCF_SUCCESS)
+ scfdie();
+
+ while ((vret = scf_iter_next_value(viter, g_val)) == 1) {
+ if (scf_value_get_astring(g_val, dep,
+ max_scf_value_length + 1) < 0)
+ scfdie();
+
+ wip->fmri = dep;
+ if (callback(data, wip) != 0)
+ goto out;
+ }
+ if (vret == -1)
+ scfdie();
+ }
+ if (ret == -1)
+ scfdie();
+
+out:
+ scf_iter_destroy(viter);
+ scf_iter_destroy(iter);
+ scf_snapshot_destroy(snap);
+}
+
+static int
+list_dependencies(void *data, scf_walkinfo_t *wip)
+{
+ walk_dependencies(wip, list_svc_or_inst_fmri, data);
+ return (0);
+}
+
+
+/*
+ * Dependent selection: The "providing" service's or instance's FMRI is parsed
+ * into the provider_* variables, the instances are walked, and any instance
+ * which lists an FMRI which parses to these components is selected. This is
+ * inefficient in the face of multiple operands, but that should be uncommon.
+ */
+
+static char *provider_scope;
+static char *provider_svc;
+static char *provider_inst; /* NULL for services */
+
+/*ARGSUSED*/
+static int
+check_against_provider(void *arg, scf_walkinfo_t *wip)
+{
+ char *cfmri;
+ const char *scope_name, *svc_name, *inst_name, *pg_name;
+ int *matchp = arg;
+
+ cfmri = safe_strdup(wip->fmri);
+
+ if (scf_parse_svc_fmri(cfmri, &scope_name, &svc_name, &inst_name,
+ &pg_name, NULL) != SCF_SUCCESS) {
+ free(cfmri);
+ return (0);
+ }
+
+ if (svc_name == NULL || pg_name != NULL) {
+ free(cfmri);
+ return (0);
+ }
+
+ /*
+ * If the user has specified an instance, then also match dependencies
+ * on the service itself.
+ */
+ *matchp = (strcmp(provider_scope, scope_name) == 0 &&
+ strcmp(provider_svc, svc_name) == 0 &&
+ (provider_inst == NULL ? (inst_name == NULL) :
+ (inst_name == NULL || strcmp(provider_inst, inst_name) == 0)));
+
+ free(cfmri);
+
+ /* Stop on matches. */
+ return (*matchp);
+}
+
+static int
+list_if_dependent(void *unused, scf_walkinfo_t *wip)
+{
+ /* Only proceed if this instance depends on provider_*. */
+ int match = 0;
+
+ (void) walk_dependencies(wip, check_against_provider, &match);
+
+ if (match)
+ return (list_instance(unused, wip));
+
+ return (0);
+}
+
+/*ARGSUSED*/
+static int
+list_dependents(void *unused, scf_walkinfo_t *wip)
+{
+ char *save;
+ int ret;
+
+ if (scf_scope_get_name(wip->scope, provider_scope,
+ max_scf_fmri_length) <= 0 ||
+ scf_service_get_name(wip->svc, provider_svc,
+ max_scf_fmri_length) <= 0)
+ scfdie();
+
+ save = provider_inst;
+ if (wip->inst == NULL)
+ provider_inst = NULL;
+ else if (scf_instance_get_name(wip->inst, provider_inst,
+ max_scf_fmri_length) <= 0)
+ scfdie();
+
+ ret = scf_walk_fmri(h, 0, NULL, 0, list_if_dependent, NULL, NULL,
+ uu_warn);
+
+ provider_inst = save;
+
+ return (ret);
+}
+
+/*
+ * main() & helpers
+ */
+
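+/*
+ * Record a sort column given by -s/-S.  The column index is stored in
+ * opt_sort, with 0x100 or'ed in for reverse (descending) order, and
+ * sortkey_sz is grown by the column's sort key width.
+ */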
+static void
+add_sort_column(const char *col, int reverse)
+{
+ int i;
+
+ ++opt_snum;
+
+ opt_sort = realloc(opt_sort, opt_snum * sizeof (*opt_sort));
+ if (opt_sort == NULL)
+ uu_die(gettext("Too many sort criteria: out of memory.\n"));
+
+ for (i = 0; i < ncolumns; ++i) {
+ if (strcasecmp(col, columns[i].name) == 0)
+ break;
+ }
+
+ if (i < ncolumns)
+ opt_sort[opt_snum - 1] = (reverse ? i | 0x100 : i);
+ else
+ uu_die(gettext("Unrecognized sort column \"%s\".\n"), col);
+
+ sortkey_sz += columns[i].sortkey_width;
+}
+
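+/*
+ * Add the instance named by the -R option to the restarters list used to
+ * filter listed instances.
+ */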
+static void
+add_restarter(const char *fmri)
+{
+ char *cfmri;
+ const char *pg_name;
+ struct pfmri_list *rest;
+
+ cfmri = safe_strdup(fmri);
+ rest = safe_malloc(sizeof (*rest));
+
+ if (scf_parse_svc_fmri(cfmri, &rest->scope, &rest->service,
+ &rest->instance, &pg_name, NULL) != SCF_SUCCESS)
+ uu_die(gettext("Restarter FMRI \"%s\" is invalid.\n"), fmri);
+
+ if (rest->instance == NULL || pg_name != NULL)
+ uu_die(gettext("Restarter FMRI \"%s\" does not designate an "
+ "instance.\n"), fmri);
+
+ rest->next = restarters;
+ restarters = rest;
+}
+
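+/*
+ * uu_avl comparator: order output lines by their precomputed,
+ * memcmp()-able sort keys.
+ */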
+/* ARGSUSED */
+static int
+line_cmp(const void *l_arg, const void *r_arg, void *private)
+{
+ const struct avl_string *l = l_arg;
+ const struct avl_string *r = r_arg;
+
+ return (memcmp(l->key, r->key, sortkey_sz));
+}
+
+/* ARGSUSED */
+static int
+print_line(void *e, void *private)
+{
+ struct avl_string *lp = e;
+
+ (void) puts(lp->str);
+
+ return (UU_WALK_NEXT);
+}
+
+int
+main(int argc, char **argv)
+{
+ char opt, opt_mode;
+ int i, n;
+ char *columns_str = NULL;
+ char *cp;
+ const char *progname;
+ int err;
+
+ int show_all = 0;
+ int show_header = 1;
+
+ const char * const options = "aHpvo:R:s:S:dDl?x";
+
+ (void) setlocale(LC_ALL, "");
+
+ locale = setlocale(LC_MESSAGES, "");
+ if (locale) {
+ locale = safe_strdup(locale);
+ sanitize_locale(locale);
+ }
+
+ (void) textdomain(TEXT_DOMAIN);
+ progname = uu_setpname(argv[0]);
+
+ exit_status = UU_EXIT_OK;
+
+ max_scf_name_length = scf_limit(SCF_LIMIT_MAX_NAME_LENGTH);
+ max_scf_value_length = scf_limit(SCF_LIMIT_MAX_VALUE_LENGTH);
+ max_scf_fmri_length = scf_limit(SCF_LIMIT_MAX_FMRI_LENGTH);
+
+ if (max_scf_name_length == -1 || max_scf_value_length == -1 ||
+ max_scf_fmri_length == -1)
+ scfdie();
+
+ now = time(NULL);
+ assert(now != -1);
+
+ /*
+ * opt_mode is the mode of operation. 0 for plain, 'd' for
+ * dependencies, 'D' for dependents, and 'l' for detailed (long). We
+ * need to know now so we know which options are valid.
+ */
+ opt_mode = 0;
+ while ((opt = getopt(argc, argv, options)) != -1) {
+ switch (opt) {
+ case '?':
+ if (optopt == '?') {
+ print_help(progname);
+ return (UU_EXIT_OK);
+ } else {
+ argserr(progname);
+ /* NOTREACHED */
+ }
+
+ case 'd':
+ case 'D':
+ case 'l':
+ if (opt_mode != 0)
+ argserr(progname);
+
+ opt_mode = opt;
+ break;
+
+ case 'x':
+ if (opt_mode != 0)
+ argserr(progname);
+
+ opt_mode = opt;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ sortkey_sz = 0;
+
+ optind = 1; /* Reset getopt() */
+ while ((opt = getopt(argc, argv, options)) != -1) {
+ switch (opt) {
+ case 'a':
+ if (opt_mode != 0)
+ argserr(progname);
+ show_all = 1;
+ break;
+
+ case 'H':
+ if (opt_mode == 'l' || opt_mode == 'x')
+ argserr(progname);
+ show_header = 0;
+ break;
+
+ case 'p':
+ if (opt_mode == 'x')
+ argserr(progname);
+ opt_processes = 1;
+ break;
+
+ case 'v':
+ if (opt_mode == 'l')
+ argserr(progname);
+ opt_verbose = 1;
+ break;
+
+ case 'o':
+ if (opt_mode == 'l' || opt_mode == 'x')
+ argserr(progname);
+ columns_str = optarg;
+ break;
+
+ case 'R':
+			if (opt_mode != 0)
+ argserr(progname);
+
+ add_restarter(optarg);
+ break;
+
+ case 's':
+ case 'S':
+ if (opt_mode != 0)
+ argserr(progname);
+
+ add_sort_column(optarg, optopt == 'S');
+ break;
+
+ case 'd':
+ case 'D':
+ case 'l':
+ case 'x':
+ assert(opt_mode == optopt);
+ break;
+
+ case '?':
+ argserr(progname);
+ /* NOTREACHED */
+
+ default:
+ assert(0);
+ abort();
+ }
+ }
+
+	/*
+	 * -a is only meaningful when no FMRI arguments are given.
+	 */
+ if (show_all && optind != argc)
+ uu_warn(gettext("-a ignored when used with arguments.\n"));
+
+ h = scf_handle_create(SCF_VERSION);
+ if (h == NULL)
+ scfdie();
+
+ if (scf_handle_bind(h) == -1)
+ uu_die(gettext("Could not bind to repository server: %s. "
+ "Exiting.\n"), scf_strerror(scf_error()));
+
+ if ((g_pg = scf_pg_create(h)) == NULL ||
+ (g_prop = scf_property_create(h)) == NULL ||
+ (g_val = scf_value_create(h)) == NULL)
+ scfdie();
+
+ argc -= optind;
+ argv += optind;
+
+ /*
+ * If we're in long mode, take care of it now before we deal with the
+ * sorting and the columns, since we won't use them anyway.
+ */
+ if (opt_mode == 'l') {
+ if (argc == 0)
+ argserr(progname);
+
+ if ((err = scf_walk_fmri(h, argc, argv, SCF_WALK_MULTIPLE,
+ print_detailed, NULL, &exit_status, uu_warn)) != 0) {
+ uu_warn(gettext("failed to iterate over "
+ "instances: %s\n"), scf_strerror(err));
+ exit_status = UU_EXIT_FATAL;
+ }
+
+ return (exit_status);
+ }
+
+ if (opt_mode == 'x') {
+ explain(opt_verbose, argc, argv);
+
+ return (exit_status);
+ }
+
+ if (opt_snum == 0) {
+ /* Default sort. */
+ add_sort_column("state", 0);
+ add_sort_column("stime", 0);
+ add_sort_column("fmri", 0);
+ }
+
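+	/* No -o was given; use the default column set (wider with -v). */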
+ if (columns_str == NULL) {
+ if (!opt_verbose)
+ columns_str = safe_strdup("state,stime,fmri");
+ else
+ columns_str =
+ safe_strdup("state,nstate,stime,ctid,fmri");
+ }
+
+ /* Decode columns_str into opt_columns. */
+ line_sz = 0;
+
+ opt_cnum = 1;
+ for (cp = columns_str; *cp != '\0'; ++cp)
+ if (*cp == ',')
+ ++opt_cnum;
+
+ opt_columns = malloc(opt_cnum * sizeof (*opt_columns));
+ if (opt_columns == NULL)
+		uu_die(gettext("Too many columns: out of memory.\n"));
+
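+	/*
+	 * Map each comma-separated name onto its index in columns[], note
+	 * whether any next-state column was requested, and total the column
+	 * widths to size the output line.
+	 */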
+ for (n = 0; *columns_str != '\0'; ++n) {
+ i = getcolumnopt(&columns_str);
+ if (i == -1)
+ uu_die(gettext("Unknown column \"%s\".\n"),
+ columns_str);
+
+ if (strcmp(columns[i].name, "N") == 0 ||
+ strcmp(columns[i].name, "SN") == 0 ||
+ strcmp(columns[i].name, "NSTA") == 0 ||
+ strcmp(columns[i].name, "NSTATE") == 0)
+ opt_nstate_shown = 1;
+
+ opt_columns[n] = i;
+ line_sz += columns[i].width + 1;
+ }
+
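+	/*
+	 * Output lines are collected in an AVL tree ordered by their sort
+	 * keys and printed once the walk completes.
+	 */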
+ if ((lines_pool = uu_avl_pool_create("lines_pool",
+ sizeof (struct avl_string), offsetof(struct avl_string, node),
+ line_cmp, UU_AVL_DEBUG)) == NULL ||
+ (lines = uu_avl_create(lines_pool, NULL, 0)) == NULL)
+ uu_die(gettext("Unexpected libuutil error: %s. Exiting.\n"),
+ uu_strerror(uu_error()));
+
+ switch (opt_mode) {
+ case 0:
+ ht_init();
+
+ /* Always show all FMRIs when given arguments or restarters */
+ if (argc != 0 || restarters != NULL)
+ show_all = 1;
+
+ if ((err = scf_walk_fmri(h, argc, argv,
+ SCF_WALK_MULTIPLE | SCF_WALK_LEGACY,
+ show_all ? list_instance : list_if_enabled, NULL,
+ &exit_status, uu_warn)) != 0) {
+ uu_warn(gettext("failed to iterate over "
+ "instances: %s\n"), scf_strerror(err));
+ exit_status = UU_EXIT_FATAL;
+ }
+ break;
+
+ case 'd':
+ if (argc == 0)
+ argserr(progname);
+
+ if ((err = scf_walk_fmri(h, argc, argv,
+ SCF_WALK_MULTIPLE, list_dependencies, NULL,
+ &exit_status, uu_warn)) != 0) {
+ uu_warn(gettext("failed to iterate over "
+ "instances: %s\n"), scf_strerror(err));
+ exit_status = UU_EXIT_FATAL;
+ }
+ break;
+
+ case 'D':
+ if (argc == 0)
+ argserr(progname);
+
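+		/*
+		 * These buffers hold the scope, service, and instance names
+		 * of the instance whose dependents are being listed.
+		 */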
+ provider_scope = safe_malloc(max_scf_fmri_length);
+ provider_svc = safe_malloc(max_scf_fmri_length);
+ provider_inst = safe_malloc(max_scf_fmri_length);
+
+ if ((err = scf_walk_fmri(h, argc, argv,
+ SCF_WALK_MULTIPLE | SCF_WALK_SERVICE,
+ list_dependents, NULL, &exit_status, uu_warn)) != 0) {
+ uu_warn(gettext("failed to iterate over "
+ "instances: %s\n"), scf_strerror(err));
+ exit_status = UU_EXIT_FATAL;
+ }
+
+ free(provider_scope);
+ free(provider_svc);
+ free(provider_inst);
+ break;
+
+ default:
+ assert(0);
+ abort();
+ }
+
+ if (show_header)
+ print_header();
+
+ (void) uu_avl_walk(lines, print_line, NULL, 0);
+
+ return (exit_status);
+}
diff --git a/usr/src/cmd/svc/svcs/svcs.h b/usr/src/cmd/svc/svcs/svcs.h
new file mode 100644
index 0000000000..5f0cb83360
--- /dev/null
+++ b/usr/src/cmd/svc/svcs/svcs.h
@@ -0,0 +1,66 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SVCS_H
+#define _SVCS_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <libscf.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern ssize_t max_scf_fmri_length;
+extern ssize_t max_scf_name_length;
+extern ssize_t max_scf_value_length;
+extern char *locale;
+extern int exit_status;
+
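+/*
+ * scfdie() reports an unexpected libscf error and exits; in debug builds it
+ * also reports the file and line of the caller.
+ */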
+#ifndef NDEBUG
+#define scfdie() do_scfdie(__FILE__, __LINE__)
+
+void do_scfdie(const char *, int);
+#else
+void scfdie(void);
+#endif
+
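+/* Allocation routines which do not return on failure. */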
+void *safe_malloc(size_t);
+char *safe_strdup(const char *);
+
+int pg_get_single_val(scf_propertygroup_t *, const char *, scf_type_t, void *,
+ size_t, uint_t);
+int inst_get_single_val(scf_instance_t *, const char *, const char *,
+ scf_type_t, void *, size_t, uint_t, int, int);
+
+void explain(int, int, char **);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SVCS_H */