Mirror of https://gitea.com/Lydanne/buildx.git (synced 2025-07-15 16:07:11 +08:00)
vendor: initial vendor
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
vendor/github.com/docker/cli/AUTHORS (new file, generated, vendored)
@@ -0,0 +1,709 @@
# This file lists all individuals having contributed content to the repository.
# For how it is generated, see `scripts/docs/generate-authors.sh`.

Aanand Prasad <aanand.prasad@gmail.com>
|
||||
Aaron L. Xu <liker.xu@foxmail.com>
|
||||
Aaron Lehmann <aaron.lehmann@docker.com>
|
||||
Aaron.L.Xu <likexu@harmonycloud.cn>
|
||||
Abdur Rehman <abdur_rehman@mentor.com>
|
||||
Abhinandan Prativadi <abhi@docker.com>
|
||||
Abin Shahab <ashahab@altiscale.com>
|
||||
Ace Tang <aceapril@126.com>
|
||||
Addam Hardy <addam.hardy@gmail.com>
|
||||
Adolfo Ochagavía <aochagavia92@gmail.com>
|
||||
Adrien Duermael <adrien@duermael.com>
|
||||
Adrien Folie <folie.adrien@gmail.com>
|
||||
Ahmet Alp Balkan <ahmetb@microsoft.com>
|
||||
Aidan Feldman <aidan.feldman@gmail.com>
|
||||
Aidan Hobson Sayers <aidanhs@cantab.net>
|
||||
AJ Bowen <aj@gandi.net>
|
||||
Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
|
||||
Akim Demaille <akim.demaille@docker.com>
|
||||
Alan Thompson <cloojure@gmail.com>
|
||||
Albert Callarisa <shark234@gmail.com>
|
||||
Aleksa Sarai <asarai@suse.de>
|
||||
Alessandro Boch <aboch@tetrationanalytics.com>
|
||||
Alex Mavrogiannis <alex.mavrogiannis@docker.com>
|
||||
Alex Mayer <amayer5125@gmail.com>
|
||||
Alexander Boyd <alex@opengroove.org>
|
||||
Alexander Larsson <alexl@redhat.com>
|
||||
Alexander Morozov <lk4d4@docker.com>
|
||||
Alexander Ryabov <i@sepa.spb.ru>
|
||||
Alexandre González <agonzalezro@gmail.com>
|
||||
Alfred Landrum <alfred.landrum@docker.com>
|
||||
Alicia Lauerman <alicia@eta.im>
|
||||
Allen Sun <allensun.shl@alibaba-inc.com>
|
||||
Alvin Deng <alvin.q.deng@utexas.edu>
|
||||
Amen Belayneh <amenbelayneh@gmail.com>
|
||||
Amir Goldstein <amir73il@aquasec.com>
|
||||
Amit Krishnan <amit.krishnan@oracle.com>
|
||||
Amit Shukla <amit.shukla@docker.com>
|
||||
Amy Lindburg <amy.lindburg@docker.com>
|
||||
Anda Xu <anda.xu@docker.com>
|
||||
Andrea Luzzardi <aluzzardi@gmail.com>
|
||||
Andreas Köhler <andi5.py@gmx.net>
|
||||
Andrew France <andrew@avito.co.uk>
|
||||
Andrew Hsu <andrewhsu@docker.com>
|
||||
Andrew Macpherson <hopscotch23@gmail.com>
|
||||
Andrew McDonnell <bugs@andrewmcdonnell.net>
|
||||
Andrew Po <absourd.noise@gmail.com>
|
||||
Andrey Petrov <andrey.petrov@shazow.net>
|
||||
André Martins <aanm90@gmail.com>
|
||||
Andy Goldstein <agoldste@redhat.com>
|
||||
Andy Rothfusz <github@developersupport.net>
|
||||
Anil Madhavapeddy <anil@recoil.org>
|
||||
Ankush Agarwal <ankushagarwal11@gmail.com>
|
||||
Anne Henmi <anne.henmi@docker.com>
|
||||
Anton Polonskiy <anton.polonskiy@gmail.com>
|
||||
Antonio Murdaca <antonio.murdaca@gmail.com>
|
||||
Antonis Kalipetis <akalipetis@gmail.com>
|
||||
Anusha Ragunathan <anusha.ragunathan@docker.com>
|
||||
Arash Deshmeh <adeshmeh@ca.ibm.com>
|
||||
Arnaud Porterie <arnaud.porterie@docker.com>
|
||||
Ashwini Oruganti <ashwini.oruganti@gmail.com>
|
||||
Azat Khuyiyakhmetov <shadow_uz@mail.ru>
|
||||
Bardia Keyoumarsi <bkeyouma@ucsc.edu>
|
||||
Barnaby Gray <barnaby@pickle.me.uk>
|
||||
Bastiaan Bakker <bbakker@xebia.com>
|
||||
BastianHofmann <bastianhofmann@me.com>
|
||||
Ben Bonnefoy <frenchben@docker.com>
|
||||
Ben Creasy <ben@bencreasy.com>
|
||||
Ben Firshman <ben@firshman.co.uk>
|
||||
Benjamin Boudreau <boudreau.benjamin@gmail.com>
|
||||
Benoit Sigoure <tsunanet@gmail.com>
|
||||
Bhumika Bayani <bhumikabayani@gmail.com>
|
||||
Bill Wang <ozbillwang@gmail.com>
|
||||
Bin Liu <liubin0329@gmail.com>
|
||||
Bingshen Wang <bingshen.wbs@alibaba-inc.com>
|
||||
Boaz Shuster <ripcurld.github@gmail.com>
|
||||
Bogdan Anton <contact@bogdananton.ro>
|
||||
Boris Pruessmann <boris@pruessmann.org>
|
||||
Bradley Cicenas <bradley.cicenas@gmail.com>
|
||||
Brandon Mitchell <git@bmitch.net>
|
||||
Brandon Philips <brandon.philips@coreos.com>
|
||||
Brent Salisbury <brent.salisbury@docker.com>
|
||||
Bret Fisher <bret@bretfisher.com>
|
||||
Brian (bex) Exelbierd <bexelbie@redhat.com>
|
||||
Brian Goff <cpuguy83@gmail.com>
|
||||
Bryan Bess <squarejaw@bsbess.com>
|
||||
Bryan Boreham <bjboreham@gmail.com>
|
||||
Bryan Murphy <bmurphy1976@gmail.com>
|
||||
bryfry <bryon.fryer@gmail.com>
|
||||
Cameron Spear <cameronspear@gmail.com>
|
||||
Cao Weiwei <cao.weiwei30@zte.com.cn>
|
||||
Carlo Mion <mion00@gmail.com>
|
||||
Carlos Alexandro Becker <caarlos0@gmail.com>
|
||||
Ce Gao <ce.gao@outlook.com>
|
||||
Cedric Davies <cedricda@microsoft.com>
|
||||
Cezar Sa Espinola <cezarsa@gmail.com>
|
||||
Chad Faragher <wyckster@hotmail.com>
|
||||
Chao Wang <wangchao.fnst@cn.fujitsu.com>
|
||||
Charles Chan <charleswhchan@users.noreply.github.com>
|
||||
Charles Law <claw@conduce.com>
|
||||
Charles Smith <charles.smith@docker.com>
|
||||
Charlie Drage <charlie@charliedrage.com>
|
||||
ChaYoung You <yousbe@gmail.com>
|
||||
Chen Chuanliang <chen.chuanliang@zte.com.cn>
|
||||
Chen Hanxiao <chenhanxiao@cn.fujitsu.com>
|
||||
Chen Mingjie <chenmingjie0828@163.com>
|
||||
Chen Qiu <cheney-90@hotmail.com>
|
||||
Chris Gavin <chris@chrisgavin.me>
|
||||
Chris Gibson <chris@chrisg.io>
|
||||
Chris McKinnel <chrismckinnel@gmail.com>
|
||||
Chris Snow <chsnow123@gmail.com>
|
||||
Chris Weyl <cweyl@alumni.drew.edu>
|
||||
Christian Persson <saser@live.se>
|
||||
Christian Stefanescu <st.chris@gmail.com>
|
||||
Christophe Robin <crobin@nekoo.com>
|
||||
Christophe Vidal <kriss@krizalys.com>
|
||||
Christopher Biscardi <biscarch@sketcht.com>
|
||||
Christopher Crone <christopher.crone@docker.com>
|
||||
Christopher Jones <tophj@linux.vnet.ibm.com>
|
||||
Christy Norman <christy@linux.vnet.ibm.com>
|
||||
Chun Chen <ramichen@tencent.com>
|
||||
Clinton Kitson <clintonskitson@gmail.com>
|
||||
Coenraad Loubser <coenraad@wish.org.za>
|
||||
Colin Hebert <hebert.colin@gmail.com>
|
||||
Collin Guarino <collin.guarino@gmail.com>
|
||||
Colm Hally <colmhally@gmail.com>
|
||||
Corey Farrell <git@cfware.com>
|
||||
Corey Quon <corey.quon@docker.com>
|
||||
Craig Wilhite <crwilhit@microsoft.com>
|
||||
Cristian Staretu <cristian.staretu@gmail.com>
|
||||
Daehyeok Mun <daehyeok@gmail.com>
|
||||
Dafydd Crosby <dtcrsby@gmail.com>
|
||||
dalanlan <dalanlan925@gmail.com>
|
||||
Damien Nadé <github@livna.org>
|
||||
Dan Cotora <dan@bluevision.ro>
|
||||
Daniel Dao <dqminh@cloudflare.com>
|
||||
Daniel Farrell <dfarrell@redhat.com>
|
||||
Daniel Gasienica <daniel@gasienica.ch>
|
||||
Daniel Goosen <daniel.goosen@surveysampling.com>
|
||||
Daniel Hiltgen <daniel.hiltgen@docker.com>
|
||||
Daniel J Walsh <dwalsh@redhat.com>
|
||||
Daniel Nephin <dnephin@docker.com>
|
||||
Daniel Norberg <dano@spotify.com>
|
||||
Daniel Watkins <daniel@daniel-watkins.co.uk>
|
||||
Daniel Zhang <jmzwcn@gmail.com>
|
||||
Danny Berger <dpb587@gmail.com>
|
||||
Darren Shepherd <darren.s.shepherd@gmail.com>
|
||||
Darren Stahl <darst@microsoft.com>
|
||||
Dattatraya Kumbhar <dattatraya.kumbhar@gslab.com>
|
||||
Dave Goodchild <buddhamagnet@gmail.com>
|
||||
Dave Henderson <dhenderson@gmail.com>
|
||||
Dave Tucker <dt@docker.com>
|
||||
David Beitey <david@davidjb.com>
|
||||
David Calavera <david.calavera@gmail.com>
|
||||
David Cramer <davcrame@cisco.com>
|
||||
David Dooling <dooling@gmail.com>
|
||||
David Gageot <david@gageot.net>
|
||||
David Lechner <david@lechnology.com>
|
||||
David Sheets <dsheets@docker.com>
|
||||
David Williamson <david.williamson@docker.com>
|
||||
David Xia <dxia@spotify.com>
|
||||
David Young <yangboh@cn.ibm.com>
|
||||
Deng Guangxing <dengguangxing@huawei.com>
|
||||
Denis Defreyne <denis@soundcloud.com>
|
||||
Denis Gladkikh <denis@gladkikh.email>
|
||||
Denis Ollier <larchunix@users.noreply.github.com>
|
||||
Dennis Docter <dennis@d23.nl>
|
||||
Derek McGowan <derek@mcgstyle.net>
|
||||
Deshi Xiao <dxiao@redhat.com>
|
||||
Dharmit Shah <shahdharmit@gmail.com>
|
||||
Dhawal Yogesh Bhanushali <dbhanushali@vmware.com>
|
||||
Dieter Reuter <dieter.reuter@me.com>
|
||||
Dima Stopel <dima@twistlock.com>
|
||||
Dimitry Andric <d.andric@activevideo.com>
|
||||
Ding Fei <dingfei@stars.org.cn>
|
||||
Diogo Monica <diogo@docker.com>
|
||||
Dmitry Gusev <dmitry.gusev@gmail.com>
|
||||
Dmitry Smirnov <onlyjob@member.fsf.org>
|
||||
Dmitry V. Krivenok <krivenok.dmitry@gmail.com>
|
||||
Don Kjer <don.kjer@gmail.com>
|
||||
Dong Chen <dongluo.chen@docker.com>
|
||||
Doug Davis <dug@us.ibm.com>
|
||||
Drew Erny <drew.erny@docker.com>
|
||||
Ed Costello <epc@epcostello.com>
|
||||
Elango Sivanandam <elango.siva@docker.com>
|
||||
Eli Uriegas <eli.uriegas@docker.com>
|
||||
Eli Uriegas <seemethere101@gmail.com>
|
||||
Elias Faxö <elias.faxo@tre.se>
|
||||
Elliot Luo <956941328@qq.com>
|
||||
Eric Curtin <ericcurtin17@gmail.com>
|
||||
Eric G. Noriega <enoriega@vizuri.com>
|
||||
Eric Rosenberg <ehaydenr@gmail.com>
|
||||
Eric Sage <eric.david.sage@gmail.com>
|
||||
Eric-Olivier Lamey <eo@lamey.me>
|
||||
Erica Windisch <erica@windisch.us>
|
||||
Erik Hollensbe <github@hollensbe.org>
|
||||
Erik St. Martin <alakriti@gmail.com>
|
||||
Essam A. Hassan <es.hassan187@gmail.com>
|
||||
Ethan Haynes <ethanhaynes@alumni.harvard.edu>
|
||||
Euan Kemp <euank@euank.com>
|
||||
Eugene Yakubovich <eugene.yakubovich@coreos.com>
|
||||
Evan Allrich <evan@unguku.com>
|
||||
Evan Hazlett <ejhazlett@gmail.com>
|
||||
Evan Krall <krall@yelp.com>
|
||||
Evelyn Xu <evelynhsu21@gmail.com>
|
||||
Everett Toews <everett.toews@rackspace.com>
|
||||
Fabio Falci <fabiofalci@gmail.com>
|
||||
Fabrizio Soppelsa <fsoppelsa@mirantis.com>
|
||||
Felix Hupfeld <felix@quobyte.com>
|
||||
Felix Rabe <felix@rabe.io>
|
||||
Filip Jareš <filipjares@gmail.com>
|
||||
Flavio Crisciani <flavio.crisciani@docker.com>
|
||||
Florian Klein <florian.klein@free.fr>
|
||||
Foysal Iqbal <foysal.iqbal.fb@gmail.com>
|
||||
François Scala <francois.scala@swiss-as.com>
|
||||
Fred Lifton <fred.lifton@docker.com>
|
||||
Frederic Hemberger <mail@frederic-hemberger.de>
|
||||
Frederick F. Kautz IV <fkautz@redhat.com>
|
||||
Frederik Nordahl Jul Sabroe <frederikns@gmail.com>
|
||||
Frieder Bluemle <frieder.bluemle@gmail.com>
|
||||
Gabriel Nicolas Avellaneda <avellaneda.gabriel@gmail.com>
|
||||
Gaetan de Villele <gdevillele@gmail.com>
|
||||
Gang Qiao <qiaohai8866@gmail.com>
|
||||
Gary Schaetz <gary@schaetzkc.com>
|
||||
Genki Takiuchi <genki@s21g.com>
|
||||
George MacRorie <gmacr31@gmail.com>
|
||||
George Xie <georgexsh@gmail.com>
|
||||
Gianluca Borello <g.borello@gmail.com>
|
||||
Gildas Cuisinier <gildas.cuisinier@gcuisinier.net>
|
||||
Gou Rao <gou@portworx.com>
|
||||
Grant Reaber <grant.reaber@gmail.com>
|
||||
Greg Pflaum <gpflaum@users.noreply.github.com>
|
||||
Guilhem Lettron <guilhem+github@lettron.fr>
|
||||
Guillaume J. Charmes <guillaume.charmes@docker.com>
|
||||
Guillaume Le Floch <glfloch@gmail.com>
|
||||
gwx296173 <gaojing3@huawei.com>
|
||||
Günther Jungbluth <gunther@gameslabs.net>
|
||||
Hakan Özler <hakan.ozler@kodcu.com>
|
||||
Hao Zhang <21521210@zju.edu.cn>
|
||||
Harald Albers <github@albersweb.de>
|
||||
Harold Cooper <hrldcpr@gmail.com>
|
||||
Harry Zhang <harryz@hyper.sh>
|
||||
He Simei <hesimei@zju.edu.cn>
|
||||
Helen Xie <chenjg@harmonycloud.cn>
|
||||
Henning Sprang <henning.sprang@gmail.com>
|
||||
Henry N <henrynmail-github@yahoo.de>
|
||||
Hernan Garcia <hernandanielg@gmail.com>
|
||||
Hongbin Lu <hongbin034@gmail.com>
|
||||
Hu Keping <hukeping@huawei.com>
|
||||
Huayi Zhang <irachex@gmail.com>
|
||||
huqun <huqun@zju.edu.cn>
|
||||
Huu Nguyen <huu@prismskylabs.com>
|
||||
Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com>
|
||||
Ian Campbell <ian.campbell@docker.com>
|
||||
Ian Philpot <ian.philpot@microsoft.com>
|
||||
Ignacio Capurro <icapurrofagian@gmail.com>
|
||||
Ilya Dmitrichenko <errordeveloper@gmail.com>
|
||||
Ilya Khlopotov <ilya.khlopotov@gmail.com>
|
||||
Ilya Sotkov <ilya@sotkov.com>
|
||||
Ioan Eugen Stan <eu@ieugen.ro>
|
||||
Isabel Jimenez <contact.isabeljimenez@gmail.com>
|
||||
Ivan Grcic <igrcic@gmail.com>
|
||||
Ivan Markin <sw@nogoegst.net>
|
||||
Jacob Atzen <jacob@jacobatzen.dk>
|
||||
Jacob Tomlinson <jacob@tom.linson.uk>
|
||||
Jaivish Kothari <janonymous.codevulture@gmail.com>
|
||||
Jake Lambert <jake.lambert@volusion.com>
|
||||
Jake Sanders <jsand@google.com>
|
||||
James Nesbitt <james.nesbitt@wunderkraut.com>
|
||||
James Turnbull <james@lovedthanlost.net>
|
||||
Jamie Hannaford <jamie@limetree.org>
|
||||
Jan Koprowski <jan.koprowski@gmail.com>
|
||||
Jan Pazdziora <jpazdziora@redhat.com>
|
||||
Jan-Jaap Driessen <janjaapdriessen@gmail.com>
|
||||
Jana Radhakrishnan <mrjana@docker.com>
|
||||
Jared Hocutt <jaredh@netapp.com>
|
||||
Jasmine Hegman <jasmine@jhegman.com>
|
||||
Jason Heiss <jheiss@aput.net>
|
||||
Jason Plum <jplum@devonit.com>
|
||||
Jay Kamat <github@jgkamat.33mail.com>
|
||||
Jean Rouge <rougej+github@gmail.com>
|
||||
Jean-Christophe Sirot <jean-christophe.sirot@docker.com>
|
||||
Jean-Pierre Huynh <jean-pierre.huynh@ounet.fr>
|
||||
Jeff Lindsay <progrium@gmail.com>
|
||||
Jeff Nickoloff <jeff.nickoloff@gmail.com>
|
||||
Jeff Silberman <jsilberm@gmail.com>
|
||||
Jeremy Chambers <jeremy@thehipbot.com>
|
||||
Jeremy Unruh <jeremybunruh@gmail.com>
|
||||
Jeremy Yallop <yallop@docker.com>
|
||||
Jeroen Franse <jeroenfranse@gmail.com>
|
||||
Jesse Adametz <jesseadametz@gmail.com>
|
||||
Jessica Frazelle <jessfraz@google.com>
|
||||
Jezeniel Zapanta <jpzapanta22@gmail.com>
|
||||
Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
|
||||
Jie Luo <luo612@zju.edu.cn>
|
||||
Jilles Oldenbeuving <ojilles@gmail.com>
|
||||
Jim Galasyn <jim.galasyn@docker.com>
|
||||
Jimmy Leger <jimmy.leger@gmail.com>
|
||||
Jimmy Song <rootsongjc@gmail.com>
|
||||
jimmyxian <jimmyxian2004@yahoo.com.cn>
|
||||
Joao Fernandes <joao.fernandes@docker.com>
|
||||
Joe Doliner <jdoliner@pachyderm.io>
|
||||
Joe Gordon <joe.gordon0@gmail.com>
|
||||
Joel Handwell <joelhandwell@gmail.com>
|
||||
Joey Geiger <jgeiger@gmail.com>
|
||||
Joffrey F <joffrey@docker.com>
|
||||
Johan Euphrosine <proppy@google.com>
|
||||
Johannes 'fish' Ziemke <github@freigeist.org>
|
||||
John Feminella <jxf@jxf.me>
|
||||
John Harris <john@johnharris.io>
|
||||
John Howard (VM) <John.Howard@microsoft.com>
|
||||
John Laswell <john.n.laswell@gmail.com>
|
||||
John Maguire <jmaguire@duosecurity.com>
|
||||
John Mulhausen <john@docker.com>
|
||||
John Starks <jostarks@microsoft.com>
|
||||
John Stephens <johnstep@docker.com>
|
||||
John Tims <john.k.tims@gmail.com>
|
||||
John V. Martinez <jvmatl@gmail.com>
|
||||
John Willis <john.willis@docker.com>
|
||||
Jonathan Boulle <jonathanboulle@gmail.com>
|
||||
Jonathan Lee <jonjohn1232009@gmail.com>
|
||||
Jonathan Lomas <jonathan@floatinglomas.ca>
|
||||
Jonathan McCrohan <jmccrohan@gmail.com>
|
||||
Jonh Wendell <jonh.wendell@redhat.com>
|
||||
Jordan Jennings <jjn2009@gmail.com>
|
||||
Joseph Kern <jkern@semafour.net>
|
||||
Josh Bodah <jb3689@yahoo.com>
|
||||
Josh Chorlton <jchorlton@gmail.com>
|
||||
Josh Hawn <josh.hawn@docker.com>
|
||||
Josh Horwitz <horwitz@addthis.com>
|
||||
Josh Soref <jsoref@gmail.com>
|
||||
Julien Barbier <write0@gmail.com>
|
||||
Julien Kassar <github@kassisol.com>
|
||||
Julien Maitrehenry <julien.maitrehenry@me.com>
|
||||
Justas Brazauskas <brazauskasjustas@gmail.com>
|
||||
Justin Cormack <justin.cormack@docker.com>
|
||||
Justin Simonelis <justin.p.simonelis@gmail.com>
|
||||
Justyn Temme <justyntemme@gmail.com>
|
||||
Jyrki Puttonen <jyrkiput@gmail.com>
|
||||
Jérémie Drouet <jeremie.drouet@gmail.com>
|
||||
Jérôme Petazzoni <jerome.petazzoni@docker.com>
|
||||
Jörg Thalheim <joerg@higgsboson.tk>
|
||||
Kai Blin <kai@samba.org>
|
||||
Kai Qiang Wu (Kennan) <wkq5325@gmail.com>
|
||||
Kara Alexandra <kalexandra@us.ibm.com>
|
||||
Kareem Khazem <karkhaz@karkhaz.com>
|
||||
Karthik Nayak <Karthik.188@gmail.com>
|
||||
Kat Samperi <kat.samperi@gmail.com>
|
||||
Katie McLaughlin <katie@glasnt.com>
|
||||
Ke Xu <leonhartx.k@gmail.com>
|
||||
Kei Ohmura <ohmura.kei@gmail.com>
|
||||
Keith Hudgins <greenman@greenman.org>
|
||||
Ken Cochrane <kencochrane@gmail.com>
|
||||
Ken ICHIKAWA <ichikawa.ken@jp.fujitsu.com>
|
||||
Kenfe-Mickaël Laventure <mickael.laventure@gmail.com>
|
||||
Kevin Burke <kev@inburke.com>
|
||||
Kevin Feyrer <kevin.feyrer@btinternet.com>
|
||||
Kevin Kern <kaiwentan@harmonycloud.cn>
|
||||
Kevin Kirsche <Kev.Kirsche+GitHub@gmail.com>
|
||||
Kevin Meredith <kevin.m.meredith@gmail.com>
|
||||
Kevin Richardson <kevin@kevinrichardson.co>
|
||||
khaled souf <khaled.souf@gmail.com>
|
||||
Kim Eik <kim@heldig.org>
|
||||
Kir Kolyshkin <kolyshkin@gmail.com>
|
||||
Kotaro Yoshimatsu <kotaro.yoshimatsu@gmail.com>
|
||||
Krasi Georgiev <krasi@vip-consult.solutions>
|
||||
Kris-Mikael Krister <krismikael@protonmail.com>
|
||||
Kun Zhang <zkazure@gmail.com>
|
||||
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
|
||||
Kyle Spiers <kyle@spiers.me>
|
||||
Lachlan Cooper <lachlancooper@gmail.com>
|
||||
Lai Jiangshan <jiangshanlai@gmail.com>
|
||||
Lars Kellogg-Stedman <lars@redhat.com>
|
||||
Laura Frank <ljfrank@gmail.com>
|
||||
Laurent Erignoux <lerignoux@gmail.com>
|
||||
Lee Gaines <eightlimbed@gmail.com>
|
||||
Lei Jitang <leijitang@huawei.com>
|
||||
Lennie <github@consolejunkie.net>
|
||||
Leo Gallucci <elgalu3@gmail.com>
|
||||
Lewis Daly <lewisdaly@me.com>
|
||||
Li Yi <denverdino@gmail.com>
|
||||
Li Yi <weiyuan.yl@alibaba-inc.com>
|
||||
Liang-Chi Hsieh <viirya@gmail.com>
|
||||
Lifubang <lifubang@acmcoder.com>
|
||||
Lihua Tang <lhtang@alauda.io>
|
||||
Lily Guo <lily.guo@docker.com>
|
||||
Lin Lu <doraalin@163.com>
|
||||
Linus Heckemann <lheckemann@twig-world.com>
|
||||
Liping Xue <lipingxue@gmail.com>
|
||||
Liron Levin <liron@twistlock.com>
|
||||
liwenqi <vikilwq@zju.edu.cn>
|
||||
lixiaobing10051267 <li.xiaobing1@zte.com.cn>
|
||||
Lloyd Dewolf <foolswisdom@gmail.com>
|
||||
Lorenzo Fontana <lo@linux.com>
|
||||
Louis Opter <kalessin@kalessin.fr>
|
||||
Luca Favatella <luca.favatella@erlang-solutions.com>
|
||||
Luca Marturana <lucamarturana@gmail.com>
|
||||
Lucas Chan <lucas-github@lucaschan.com>
|
||||
Luka Hartwig <mail@lukahartwig.de>
|
||||
Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com>
|
||||
Lydell Manganti <LydellManganti@users.noreply.github.com>
|
||||
Lénaïc Huard <lhuard@amadeus.com>
|
||||
Ma Shimiao <mashimiao.fnst@cn.fujitsu.com>
|
||||
Mabin <bin.ma@huawei.com>
|
||||
Madhav Puri <madhav.puri@gmail.com>
|
||||
Madhu Venugopal <madhu@socketplane.io>
|
||||
Malte Janduda <mail@janduda.net>
|
||||
Manjunath A Kumatagi <mkumatag@in.ibm.com>
|
||||
Mansi Nahar <mmn4185@rit.edu>
|
||||
mapk0y <mapk0y@gmail.com>
|
||||
Marc Bihlmaier <marc.bihlmaier@reddoxx.com>
|
||||
Marco Mariani <marco.mariani@alterway.fr>
|
||||
Marco Vedovati <mvedovati@suse.com>
|
||||
Marcus Martins <marcus@docker.com>
|
||||
Marianna Tessel <mtesselh@gmail.com>
|
||||
Marius Sturm <marius@graylog.com>
|
||||
Mark Oates <fl0yd@me.com>
|
||||
Marsh Macy <marsma@microsoft.com>
|
||||
Martin Mosegaard Amdisen <martin.amdisen@praqma.com>
|
||||
Mary Anthony <mary.anthony@docker.com>
|
||||
Mason Fish <mason.fish@docker.com>
|
||||
Mason Malone <mason.malone@gmail.com>
|
||||
Mateusz Major <apkd@users.noreply.github.com>
|
||||
Mathieu Champlon <mathieu.champlon@docker.com>
|
||||
Matt Gucci <matt9ucci@gmail.com>
|
||||
Matt Robenolt <matt@ydekproductions.com>
|
||||
Matteo Orefice <matteo.orefice@bites4bits.software>
|
||||
Matthew Heon <mheon@redhat.com>
|
||||
Matthieu Hauglustaine <matt.hauglustaine@gmail.com>
|
||||
Mauro Porras P <mauroporrasp@gmail.com>
|
||||
Max Shytikov <mshytikov@gmail.com>
|
||||
Maxime Petazzoni <max@signalfuse.com>
|
||||
Mei ChunTao <mei.chuntao@zte.com.cn>
|
||||
Micah Zoltu <micah@newrelic.com>
|
||||
Michael A. Smith <michael@smith-li.com>
|
||||
Michael Bridgen <mikeb@squaremobius.net>
|
||||
Michael Crosby <michael@docker.com>
|
||||
Michael Friis <friism@gmail.com>
|
||||
Michael Irwin <mikesir87@gmail.com>
|
||||
Michael Käufl <docker@c.michael-kaeufl.de>
|
||||
Michael Prokop <github@michael-prokop.at>
|
||||
Michael Scharf <github@scharf.gr>
|
||||
Michael Spetsiotis <michael_spets@hotmail.com>
|
||||
Michael Steinert <mike.steinert@gmail.com>
|
||||
Michael West <mwest@mdsol.com>
|
||||
Michal Minář <miminar@redhat.com>
|
||||
Michał Czeraszkiewicz <czerasz@gmail.com>
|
||||
Miguel Angel Alvarez Cabrerizo <doncicuto@gmail.com>
|
||||
Mihai Borobocea <MihaiBorob@gmail.com>
|
||||
Mihuleacc Sergiu <mihuleac.sergiu@gmail.com>
|
||||
Mike Brown <brownwm@us.ibm.com>
|
||||
Mike Casas <mkcsas0@gmail.com>
|
||||
Mike Danese <mikedanese@google.com>
|
||||
Mike Dillon <mike@embody.org>
|
||||
Mike Goelzer <mike.goelzer@docker.com>
|
||||
Mike MacCana <mike.maccana@gmail.com>
|
||||
mikelinjie <294893458@qq.com>
|
||||
Mikhail Vasin <vasin@cloud-tv.ru>
|
||||
Milind Chawre <milindchawre@gmail.com>
|
||||
Mindaugas Rukas <momomg@gmail.com>
|
||||
Misty Stanley-Jones <misty@docker.com>
|
||||
Mohammad Banikazemi <mb@us.ibm.com>
|
||||
Mohammed Aaqib Ansari <maaquib@gmail.com>
|
||||
Mohini Anne Dsouza <mohini3917@gmail.com>
|
||||
Moorthy RS <rsmoorthy@gmail.com>
|
||||
Morgan Bauer <mbauer@us.ibm.com>
|
||||
Moysés Borges <moysesb@gmail.com>
|
||||
Mrunal Patel <mrunalp@gmail.com>
|
||||
muicoder <muicoder@gmail.com>
|
||||
Muthukumar R <muthur@gmail.com>
|
||||
Máximo Cuadros <mcuadros@gmail.com>
|
||||
Nace Oroz <orkica@gmail.com>
|
||||
Nahum Shalman <nshalman@omniti.com>
|
||||
Nalin Dahyabhai <nalin@redhat.com>
|
||||
Nassim 'Nass' Eddequiouaq <eddequiouaq.nassim@gmail.com>
|
||||
Natalie Parker <nparker@omnifone.com>
|
||||
Nate Brennand <nate.brennand@clever.com>
|
||||
Nathan Hsieh <hsieh.nathan@gmail.com>
|
||||
Nathan LeClaire <nathan.leclaire@docker.com>
|
||||
Nathan McCauley <nathan.mccauley@docker.com>
|
||||
Neil Peterson <neilpeterson@outlook.com>
|
||||
Nick Adcock <nick.adcock@docker.com>
|
||||
Nico Stapelbroek <nstapelbroek@gmail.com>
|
||||
Nicola Kabar <nicolaka@gmail.com>
|
||||
Nicolas Borboën <ponsfrilus@gmail.com>
|
||||
Nicolas De Loof <nicolas.deloof@gmail.com>
|
||||
Nikhil Chawla <chawlanikhil24@gmail.com>
|
||||
Nikolas Garofil <nikolas.garofil@uantwerpen.be>
|
||||
Nikolay Milovanov <nmil@itransformers.net>
|
||||
Nir Soffer <nsoffer@redhat.com>
|
||||
Nishant Totla <nishanttotla@gmail.com>
|
||||
NIWA Hideyuki <niwa.niwa@nifty.ne.jp>
|
||||
Noah Treuhaft <noah.treuhaft@docker.com>
|
||||
O.S. Tezer <ostezer@gmail.com>
|
||||
ohmystack <jun.jiang02@ele.me>
|
||||
Olle Jonsson <olle.jonsson@gmail.com>
|
||||
Olli Janatuinen <olli.janatuinen@gmail.com>
|
||||
Otto Kekäläinen <otto@seravo.fi>
|
||||
Ovidio Mallo <ovidio.mallo@gmail.com>
|
||||
Pascal Borreli <pascal@borreli.com>
|
||||
Patrick Böänziger <patrick.baenziger@bsi-software.com>
|
||||
Patrick Hemmer <patrick.hemmer@gmail.com>
|
||||
Patrick Lang <plang@microsoft.com>
|
||||
Paul <paul9869@gmail.com>
|
||||
Paul Kehrer <paul.l.kehrer@gmail.com>
|
||||
Paul Lietar <paul@lietar.net>
|
||||
Paul Weaver <pauweave@cisco.com>
|
||||
Pavel Pospisil <pospispa@gmail.com>
|
||||
Paweł Szczekutowicz <pszczekutowicz@gmail.com>
|
||||
Peeyush Gupta <gpeeyush@linux.vnet.ibm.com>
|
||||
Per Lundberg <per.lundberg@ecraft.com>
|
||||
Peter Edge <peter.edge@gmail.com>
|
||||
Peter Hsu <shhsu@microsoft.com>
|
||||
Peter Jaffe <pjaffe@nevo.com>
|
||||
Peter Kehl <peter.kehl@gmail.com>
|
||||
Peter Nagy <xificurC@gmail.com>
|
||||
Peter Salvatore <peter@psftw.com>
|
||||
Peter Waller <p@pwaller.net>
|
||||
Phil Estes <estesp@linux.vnet.ibm.com>
|
||||
Philip Alexander Etling <paetling@gmail.com>
|
||||
Philipp Gillé <philipp.gille@gmail.com>
|
||||
Philipp Schmied <pschmied@schutzwerk.com>
|
||||
pidster <pid@pidster.com>
|
||||
pixelistik <pixelistik@users.noreply.github.com>
|
||||
Pratik Karki <prertik@outlook.com>
|
||||
Prayag Verma <prayag.verma@gmail.com>
|
||||
Preston Cowley <preston.cowley@sony.com>
|
||||
Pure White <daniel48@126.com>
|
||||
Qiang Huang <h.huangqiang@huawei.com>
|
||||
Qinglan Peng <qinglanpeng@zju.edu.cn>
|
||||
qudongfang <qudongfang@gmail.com>
|
||||
Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
|
||||
Ray Tsang <rayt@google.com>
|
||||
Reficul <xuzhenglun@gmail.com>
|
||||
Remy Suen <remy.suen@gmail.com>
|
||||
Renaud Gaubert <rgaubert@nvidia.com>
|
||||
Ricardo N Feliciano <FelicianoTech@gmail.com>
|
||||
Rich Moyse <rich@moyse.us>
|
||||
Richard Mathie <richard.mathie@amey.co.uk>
|
||||
Richard Scothern <richard.scothern@gmail.com>
|
||||
Rick Wieman <git@rickw.nl>
|
||||
Ritesh H Shukla <sritesh@vmware.com>
|
||||
Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
|
||||
Robert Wallis <smilingrob@gmail.com>
|
||||
Robin Naundorf <r.naundorf@fh-muenster.de>
|
||||
Robin Speekenbrink <robin@kingsquare.nl>
|
||||
Rodolfo Ortiz <rodolfo.ortiz@definityfirst.com>
|
||||
Rogelio Canedo <rcanedo@mappy.priv>
|
||||
Roland Kammerer <roland.kammerer@linbit.com>
|
||||
Roman Dudin <katrmr@gmail.com>
|
||||
Rory Hunter <roryhunter2@gmail.com>
|
||||
Ross Boucher <rboucher@gmail.com>
|
||||
Rubens Figueiredo <r.figueiredo.52@gmail.com>
|
||||
Rui Cao <ruicao@alauda.io>
|
||||
Ryan Belgrave <rmb1993@gmail.com>
|
||||
Ryan Detzel <ryan.detzel@gmail.com>
|
||||
Ryan Stelly <ryan.stelly@live.com>
|
||||
Ryan Wilson-Perkin <ryanwilsonperkin@gmail.com>
|
||||
Ryan Zhang <ryan.zhang@docker.com>
|
||||
Sainath Grandhi <sainath.grandhi@intel.com>
|
||||
Sakeven Jiang <jc5930@sina.cn>
|
||||
Sally O'Malley <somalley@redhat.com>
|
||||
Sam Neirinck <sam@samneirinck.com>
|
||||
Sambuddha Basu <sambuddhabasu1@gmail.com>
|
||||
Sami Tabet <salph.tabet@gmail.com>
|
||||
Samuel Karp <skarp@amazon.com>
|
||||
Santhosh Manohar <santhosh@docker.com>
|
||||
Scott Brenner <scott@scottbrenner.me>
|
||||
Scott Collier <emailscottcollier@gmail.com>
|
||||
Sean Christopherson <sean.j.christopherson@intel.com>
|
||||
Sean Rodman <srodman7689@gmail.com>
|
||||
Sebastiaan van Stijn <github@gone.nl>
|
||||
Sergey Tryuber <Sergeant007@users.noreply.github.com>
|
||||
Serhat Gülçiçek <serhat25@gmail.com>
|
||||
Sevki Hasirci <s@sevki.org>
|
||||
Shaun Kaasten <shaunk@gmail.com>
|
||||
Sheng Yang <sheng@yasker.org>
|
||||
Shijiang Wei <mountkin@gmail.com>
|
||||
Shishir Mahajan <shishir.mahajan@redhat.com>
|
||||
Shoubhik Bose <sbose78@gmail.com>
|
||||
Shukui Yang <yangshukui@huawei.com>
|
||||
Sian Lerk Lau <kiawin@gmail.com>
|
||||
Sidhartha Mani <sidharthamn@gmail.com>
|
||||
sidharthamani <sid@rancher.com>
|
||||
Silvin Lubecki <silvin.lubecki@docker.com>
|
||||
Simei He <hesimei@zju.edu.cn>
|
||||
Simon Ferquel <simon.ferquel@docker.com>
|
||||
Sindhu S <sindhus@live.in>
|
||||
Slava Semushin <semushin@redhat.com>
|
||||
Solomon Hykes <solomon@docker.com>
|
||||
Song Gao <song@gao.io>
|
||||
Spencer Brown <spencer@spencerbrown.org>
|
||||
squeegels <1674195+squeegels@users.noreply.github.com>
|
||||
Srini Brahmaroutu <srbrahma@us.ibm.com>
|
||||
Stefan S. <tronicum@user.github.com>
|
||||
Stefan Scherer <scherer_stefan@icloud.com>
|
||||
Stefan Weil <sw@weilnetz.de>
|
||||
Stephane Jeandeaux <stephane.jeandeaux@gmail.com>
|
||||
Stephen Day <stevvooe@gmail.com>
|
||||
Stephen Rust <srust@blockbridge.com>
|
||||
Steve Durrheimer <s.durrheimer@gmail.com>
|
||||
Steve Richards <steve.richards@docker.com>
|
||||
Steven Burgess <steven.a.burgess@hotmail.com>
|
||||
Subhajit Ghosh <isubuz.g@gmail.com>
|
||||
Sun Jianbo <wonderflow.sun@gmail.com>
|
||||
Sungwon Han <sungwon.han@navercorp.com>
|
||||
Sven Dowideit <SvenDowideit@home.org.au>
|
||||
Sylvain Baubeau <sbaubeau@redhat.com>
|
||||
Sébastien HOUZÉ <cto@verylastroom.com>
|
||||
T K Sourabh <sourabhtk37@gmail.com>
|
||||
TAGOMORI Satoshi <tagomoris@gmail.com>
|
||||
taiji-tech <csuhqg@foxmail.com>
|
||||
Taylor Jones <monitorjbl@gmail.com>
|
||||
Tejaswini Duggaraju <naduggar@microsoft.com>
|
||||
Thatcher Peskens <thatcher@docker.com>
|
||||
Thomas Gazagnaire <thomas@gazagnaire.org>
|
||||
Thomas Krzero <thomas.kovatchitch@gmail.com>
|
||||
Thomas Leonard <thomas.leonard@docker.com>
|
||||
Thomas Léveil <thomasleveil@gmail.com>
|
||||
Thomas Riccardi <thomas@deepomatic.com>
|
||||
Thomas Swift <tgs242@gmail.com>
|
||||
Tianon Gravi <admwiggin@gmail.com>
|
||||
Tianyi Wang <capkurmagati@gmail.com>
|
||||
Tibor Vass <teabee89@gmail.com>
|
||||
Tim Dettrick <t.dettrick@uq.edu.au>
|
||||
Tim Hockin <thockin@google.com>
|
||||
Tim Smith <timbot@google.com>
|
||||
Tim Waugh <twaugh@redhat.com>
|
||||
Tim Wraight <tim.wraight@tangentlabs.co.uk>
|
||||
timfeirg <kkcocogogo@gmail.com>
|
||||
Timothy Hobbs <timothyhobbs@seznam.cz>
|
||||
Tobias Bradtke <webwurst@gmail.com>
|
||||
Tobias Gesellchen <tobias@gesellix.de>
|
||||
Todd Whiteman <todd.whiteman@joyent.com>
|
||||
Tom Denham <tom@tomdee.co.uk>
|
||||
Tom Fotherby <tom+github@peopleperhour.com>
|
||||
Tom Klingenberg <tklingenberg@lastflood.net>
|
||||
Tom Milligan <code@tommilligan.net>
|
||||
Tom X. Tobin <tomxtobin@tomxtobin.com>
|
||||
Tomas Tomecek <ttomecek@redhat.com>
|
||||
Tomasz Kopczynski <tomek@kopczynski.net.pl>
|
||||
Tomáš Hrčka <thrcka@redhat.com>
|
||||
Tony Abboud <tdabboud@hotmail.com>
|
||||
Tõnis Tiigi <tonistiigi@gmail.com>
|
||||
Trapier Marshall <trapier.marshall@docker.com>
|
||||
Travis Cline <travis.cline@gmail.com>
|
||||
Tristan Carel <tristan@cogniteev.com>
|
||||
Tycho Andersen <tycho@docker.com>
|
||||
Tycho Andersen <tycho@tycho.ws>
|
||||
uhayate <uhayate.gong@daocloud.io>
|
||||
Ulysses Souza <ulysses.souza@docker.com>
|
||||
Umesh Yadav <umesh4257@gmail.com>
|
||||
Valentin Lorentz <progval+git@progval.net>
|
||||
Veres Lajos <vlajos@gmail.com>
|
||||
Victor Vieux <victor.vieux@docker.com>
|
||||
Victoria Bialas <victoria.bialas@docker.com>
|
||||
Viktor Stanchev <me@viktorstanchev.com>
|
||||
Vimal Raghubir <vraghubir0418@gmail.com>
|
||||
Vincent Batts <vbatts@redhat.com>
|
||||
Vincent Bernat <Vincent.Bernat@exoscale.ch>
|
||||
Vincent Demeester <vincent.demeester@docker.com>
|
||||
Vincent Woo <me@vincentwoo.com>
|
||||
Vishnu Kannan <vishnuk@google.com>
|
||||
Vivek Goyal <vgoyal@redhat.com>
|
||||
Wang Jie <wangjie5@chinaskycloud.com>
|
||||
Wang Lei <wanglei@tenxcloud.com>
|
||||
Wang Long <long.wanglong@huawei.com>
|
||||
Wang Ping <present.wp@icloud.com>
|
||||
Wang Xing <hzwangxing@corp.netease.com>
|
||||
Wang Yuexiao <wang.yuexiao@zte.com.cn>
|
||||
Wataru Ishida <ishida.wataru@lab.ntt.co.jp>
|
||||
Wayne Song <wsong@docker.com>
|
||||
Wen Cheng Ma <wenchma@cn.ibm.com>
|
||||
Wenzhi Liang <wenzhi.liang@gmail.com>
|
||||
Wes Morgan <cap10morgan@gmail.com>
|
||||
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
|
||||
William Henry <whenry@redhat.com>
|
||||
Xianglin Gao <xlgao@zju.edu.cn>
|
||||
Xiaodong Zhang <a4012017@sina.com>
|
||||
Xiaoxi He <xxhe@alauda.io>
|
||||
Xinbo Weng <xihuanbo_0521@zju.edu.cn>
|
||||
Xuecong Liao <satorulogic@gmail.com>
|
||||
Yan Feng <yanfeng2@huawei.com>
|
||||
Yanqiang Miao <miao.yanqiang@zte.com.cn>
|
||||
Yassine Tijani <yasstij11@gmail.com>
|
||||
Yi EungJun <eungjun.yi@navercorp.com>
|
||||
Ying Li <ying.li@docker.com>
|
||||
Yong Tang <yong.tang.github@outlook.com>
|
||||
Yosef Fertel <yfertel@gmail.com>
|
||||
Yu Peng <yu.peng36@zte.com.cn>
|
||||
Yuan Sun <sunyuan3@huawei.com>
|
||||
Yue Zhang <zy675793960@yeah.net>
|
||||
Yunxiang Huang <hyxqshk@vip.qq.com>
|
||||
Zachary Romero <zacromero3@gmail.com>
|
||||
zebrilee <zebrilee@gmail.com>
|
||||
Zhang Kun <zkazure@gmail.com>
|
||||
Zhang Wei <zhangwei555@huawei.com>
|
||||
Zhang Wentao <zhangwentao234@huawei.com>
|
||||
ZhangHang <stevezhang2014@gmail.com>
|
||||
zhenghenghuo <zhenghenghuo@zju.edu.cn>
|
||||
Zhou Hao <zhouhao@cn.fujitsu.com>
|
||||
Zhoulin Xie <zhoulin.xie@daocloud.io>
|
||||
Zhu Guihua <zhugh.fnst@cn.fujitsu.com>
|
||||
Álex González <agonzalezro@gmail.com>
|
||||
Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
|
||||
Átila Camurça Alves <camurca.home@gmail.com>
|
||||
徐俊杰 <paco.xu@daocloud.io>
|
vendor/github.com/docker/cli/LICENSE (new file, generated, vendored)
@@ -0,0 +1,191 @@

                                 Apache License
                           Version 2.0, January 2004
                        https://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   Copyright 2013-2017 Docker, Inc.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       https://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
vendor/github.com/docker/cli/NOTICE (new file, generated, vendored)
@@ -0,0 +1,19 @@
Docker
Copyright 2012-2017 Docker, Inc.

This product includes software developed at Docker, Inc. (https://www.docker.com).

This product contains software (https://github.com/kr/pty) developed
by Keith Rarick, licensed under the MIT License.

The following is courtesy of our legal counsel:


Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see https://www.bis.doc.gov

See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
vendor/github.com/docker/cli/cli-plugins/manager/candidate.go (new file, generated, vendored)
@@ -0,0 +1,23 @@
package manager

import (
    "os/exec"
)

// Candidate represents a possible plugin candidate, for mocking purposes
type Candidate interface {
    Path() string
    Metadata() ([]byte, error)
}

type candidate struct {
    path string
}

func (c *candidate) Path() string {
    return c.path
}

func (c *candidate) Metadata() ([]byte, error) {
    return exec.Command(c.path, MetadataSubcommandName).Output()
}
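The Candidate interface exists precisely so the metadata lookup can be substituted in tests. A minimal sketch (hypothetical, not part of the vendored code) of a fake candidate that satisfies the interface by returning canned metadata instead of executing a plugin binary:

package manager

import "encoding/json"

// fakeCandidate is a hypothetical test double for Candidate; it never
// runs a plugin binary and instead returns pre-built metadata.
type fakeCandidate struct {
    path string
    meta Metadata
}

func (f *fakeCandidate) Path() string { return f.path }

func (f *fakeCandidate) Metadata() ([]byte, error) {
    // Encode the canned Metadata the same way a real plugin would print it
    // when invoked with the MetadataSubcommandName subcommand.
    return json.Marshal(f.meta)
}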
vendor/github.com/docker/cli/cli-plugins/manager/cobra.go (new file, generated, vendored)
@@ -0,0 +1,60 @@
package manager

import (
    "github.com/docker/cli/cli/command"
    "github.com/spf13/cobra"
)

const (
    // CommandAnnotationPlugin is added to every stub command added by
    // AddPluginCommandStubs with the value "true" and so can be
    // used to distinguish plugin stubs from regular commands.
    CommandAnnotationPlugin = "com.docker.cli.plugin"

    // CommandAnnotationPluginVendor is added to every stub command
    // added by AddPluginCommandStubs and contains the vendor of
    // that plugin.
    CommandAnnotationPluginVendor = "com.docker.cli.plugin.vendor"

    // CommandAnnotationPluginVersion is added to every stub command
    // added by AddPluginCommandStubs and contains the version of
    // that plugin.
    CommandAnnotationPluginVersion = "com.docker.cli.plugin.version"

    // CommandAnnotationPluginInvalid is added to any stub command
    // added by AddPluginCommandStubs for an invalid command (that
    // is, one which failed its candidate test) and contains the
    // reason for the failure.
    CommandAnnotationPluginInvalid = "com.docker.cli.plugin-invalid"
)

// AddPluginCommandStubs adds stub cobra.Commands for each valid and invalid
// plugin. The command stubs will have several annotations added, see
// `CommandAnnotationPlugin*`.
func AddPluginCommandStubs(dockerCli command.Cli, cmd *cobra.Command) error {
    plugins, err := ListPlugins(dockerCli, cmd)
    if err != nil {
        return err
    }
    for _, p := range plugins {
        vendor := p.Vendor
        if vendor == "" {
            vendor = "unknown"
        }
        annotations := map[string]string{
            CommandAnnotationPlugin:        "true",
            CommandAnnotationPluginVendor:  vendor,
            CommandAnnotationPluginVersion: p.Version,
        }
        if p.Err != nil {
            annotations[CommandAnnotationPluginInvalid] = p.Err.Error()
        }
        cmd.AddCommand(&cobra.Command{
            Use:         p.Name,
            Short:       p.ShortDescription,
            Run:         func(_ *cobra.Command, _ []string) {},
            Annotations: annotations,
        })
    }
    return nil
}
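The annotations written by AddPluginCommandStubs are how other parts of a CLI can tell plugin stubs apart from builtin commands. As a sketch (a hypothetical helper, not in the vendored code), walking a populated command tree and skipping stubs that were marked invalid looks like this:

package manager

import (
    "fmt"

    "github.com/spf13/cobra"
)

// printPluginStubs is a hypothetical helper that lists plugin stub commands
// previously added by AddPluginCommandStubs, using only their annotations.
func printPluginStubs(root *cobra.Command) {
    for _, c := range root.Commands() {
        if c.Annotations[CommandAnnotationPlugin] != "true" {
            continue // a builtin command, not a plugin stub
        }
        if reason, invalid := c.Annotations[CommandAnnotationPluginInvalid]; invalid {
            fmt.Printf("%s: skipped (%s)\n", c.Name(), reason)
            continue
        }
        fmt.Printf("%s (%s, %s): %s\n", c.Name(),
            c.Annotations[CommandAnnotationPluginVendor],
            c.Annotations[CommandAnnotationPluginVersion],
            c.Short)
    }
}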
vendor/github.com/docker/cli/cli-plugins/manager/error.go (new file, generated, vendored)
@@ -0,0 +1,43 @@
package manager

import (
    "github.com/pkg/errors"
)

// pluginError is set as Plugin.Err by NewPlugin if the plugin
// candidate fails one of the candidate tests. This exists primarily
// to implement encoding.TextMarshaller such that rendering a plugin as JSON
// (e.g. for `docker info -f '{{json .CLIPlugins}}'`) renders the Err
// field as a useful string and not just `{}`. See
// https://github.com/golang/go/issues/10748 for some discussion
// around why the builtin error type doesn't implement this.
type pluginError struct {
    cause error
}

// Error satisfies the core error interface for pluginError.
func (e *pluginError) Error() string {
    return e.cause.Error()
}

// Cause satisfies the errors.causer interface for pluginError.
func (e *pluginError) Cause() error {
    return e.cause
}

// MarshalText marshalls the pluginError into a textual form.
func (e *pluginError) MarshalText() (text []byte, err error) {
    return []byte(e.cause.Error()), nil
}

// wrapAsPluginError wraps an error in a pluginError with an
// additional message, analogous to errors.Wrapf.
func wrapAsPluginError(err error, msg string) error {
    return &pluginError{cause: errors.Wrap(err, msg)}
}

// NewPluginError creates a new pluginError, analogous to
// errors.Errorf.
func NewPluginError(msg string, args ...interface{}) error {
    return &pluginError{cause: errors.Errorf(msg, args...)}
}
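To illustrate why pluginError implements MarshalText: when a Plugin whose Err field holds a pluginError is marshalled to JSON, encoding/json uses the text marshaller of the dynamic value, so the message survives instead of collapsing to an empty object. A hypothetical snippet (the plugin name, path, and message here are invented):

package manager

import (
    "encoding/json"
    "fmt"
)

// examplePluginErrJSON is a hypothetical illustration, not vendored code.
func examplePluginErrJSON() {
    p := Plugin{
        Name: "badplugin", // hypothetical plugin name
        Err:  NewPluginError("plugin candidate %q: does not have %q prefix", "/tmp/badplugin", NamePrefix),
    }
    out, _ := json.Marshal(p)
    // Err is rendered via MarshalText, so the message is preserved, e.g.:
    // {"Name":"badplugin","Err":"plugin candidate \"/tmp/badplugin\": does not have \"docker-\" prefix"}
    fmt.Println(string(out))
}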
vendor/github.com/docker/cli/cli-plugins/manager/manager.go (new file, generated, vendored)
@@ -0,0 +1,185 @@
package manager

import (
    "io/ioutil"
    "os"
    "os/exec"
    "path/filepath"
    "strings"

    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/config"
    "github.com/spf13/cobra"
)

// ReexecEnvvar is the name of an envvar which is set to the command
// used to originally invoke the docker CLI when executing a
// plugin. Assuming $PATH and $CWD remain unchanged this should allow
// the plugin to re-execute the original CLI.
const ReexecEnvvar = "DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND"

// errPluginNotFound is the error returned when a plugin could not be found.
type errPluginNotFound string

func (e errPluginNotFound) NotFound() {}

func (e errPluginNotFound) Error() string {
    return "Error: No such CLI plugin: " + string(e)
}

type notFound interface{ NotFound() }

// IsNotFound is true if the given error is due to a plugin not being found.
func IsNotFound(err error) bool {
    _, ok := err.(notFound)
    return ok
}

func getPluginDirs(dockerCli command.Cli) ([]string, error) {
    var pluginDirs []string

    if cfg := dockerCli.ConfigFile(); cfg != nil {
        pluginDirs = append(pluginDirs, cfg.CLIPluginsExtraDirs...)
    }
    pluginDir, err := config.Path("cli-plugins")
    if err != nil {
        return nil, err
    }

    pluginDirs = append(pluginDirs, pluginDir)
    pluginDirs = append(pluginDirs, defaultSystemPluginDirs...)
    return pluginDirs, nil
}

func addPluginCandidatesFromDir(res map[string][]string, d string) error {
    dentries, err := ioutil.ReadDir(d)
    if err != nil {
        return err
    }
    for _, dentry := range dentries {
        switch dentry.Mode() & os.ModeType {
        case 0, os.ModeSymlink:
            // Regular file or symlink, keep going
        default:
            // Something else, ignore.
            continue
        }
        name := dentry.Name()
        if !strings.HasPrefix(name, NamePrefix) {
            continue
        }
        name = strings.TrimPrefix(name, NamePrefix)
        var err error
        if name, err = trimExeSuffix(name); err != nil {
            continue
        }
        res[name] = append(res[name], filepath.Join(d, dentry.Name()))
    }
    return nil
}

// listPluginCandidates returns a map from plugin name to the list of (unvalidated) Candidates. The list is in descending order of priority.
func listPluginCandidates(dirs []string) (map[string][]string, error) {
    result := make(map[string][]string)
    for _, d := range dirs {
        // Silently ignore any directories which we cannot
        // Stat (e.g. due to permissions or anything else) or
        // which are not directories.
        if fi, err := os.Stat(d); err != nil || !fi.IsDir() {
            continue
        }
        if err := addPluginCandidatesFromDir(result, d); err != nil {
            // Silently ignore paths which don't exist.
            if os.IsNotExist(err) {
                continue
            }
            return nil, err // Or return partial result?
        }
    }
    return result, nil
}

// ListPlugins produces a list of the plugins available on the system
func ListPlugins(dockerCli command.Cli, rootcmd *cobra.Command) ([]Plugin, error) {
    pluginDirs, err := getPluginDirs(dockerCli)
    if err != nil {
        return nil, err
    }

    candidates, err := listPluginCandidates(pluginDirs)
    if err != nil {
        return nil, err
    }

    var plugins []Plugin
    for _, paths := range candidates {
        if len(paths) == 0 {
            continue
        }
        c := &candidate{paths[0]}
        p, err := newPlugin(c, rootcmd)
        if err != nil {
            return nil, err
        }
        p.ShadowedPaths = paths[1:]
        plugins = append(plugins, p)
    }

    return plugins, nil
}

// PluginRunCommand returns an "os/exec".Cmd which when .Run() will execute the named plugin.
// The rootcmd argument is referenced to determine the set of builtin commands in order to detect conflicts.
// The error returned satisfies the IsNotFound() predicate if no plugin was found or if the first candidate plugin was invalid somehow.
func PluginRunCommand(dockerCli command.Cli, name string, rootcmd *cobra.Command) (*exec.Cmd, error) {
    // This uses the full original args, not the args which may
    // have been provided by cobra to our caller. This is because
    // they lack e.g. global options which we must propagate here.
    args := os.Args[1:]
    if !pluginNameRe.MatchString(name) {
        // We treat this as "not found" so that callers will
        // fallback to their "invalid" command path.
        return nil, errPluginNotFound(name)
    }
    exename := addExeSuffix(NamePrefix + name)
    pluginDirs, err := getPluginDirs(dockerCli)
    if err != nil {
        return nil, err
    }

    for _, d := range pluginDirs {
        path := filepath.Join(d, exename)

        // We stat here rather than letting the exec tell us
        // ENOENT because the latter does not distinguish a
        // file not existing from its dynamic loader or one of
        // its libraries not existing.
        if _, err := os.Stat(path); os.IsNotExist(err) {
            continue
        }

        c := &candidate{path: path}
        plugin, err := newPlugin(c, rootcmd)
        if err != nil {
            return nil, err
        }
        if plugin.Err != nil {
            return nil, errPluginNotFound(name)
        }
        cmd := exec.Command(plugin.Path, args...)
        // Using dockerCli.{In,Out,Err}() here results in a hang until something is input.
        // See: - https://github.com/golang/go/issues/10338
        //      - https://github.com/golang/go/commit/d000e8742a173aa0659584aa01b7ba2834ba28ab
        // os.Stdin is a *os.File which avoids this behaviour. We don't need the functionality
        // of the wrappers here anyway.
        cmd.Stdin = os.Stdin
        cmd.Stdout = os.Stdout
        cmd.Stderr = os.Stderr

        cmd.Env = os.Environ()
        cmd.Env = append(cmd.Env, ReexecEnvvar+"="+os.Args[0])

        return cmd, nil
    }
    return nil, errPluginNotFound(name)
}
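PluginRunCommand is meant to be called when the root command does not recognize a subcommand. A hypothetical dispatch sketch, assuming the surrounding CLI setup provides a command.Cli and the root *cobra.Command (this wrapper is not part of the vendored code):

package manager

import (
    "fmt"

    "github.com/docker/cli/cli/command"
    "github.com/spf13/cobra"
)

// tryRunPlugin is a hypothetical wrapper showing the intended flow:
// resolve the plugin, fall back cleanly when none matches, then run it.
func tryRunPlugin(dockerCli command.Cli, rootCmd *cobra.Command, name string) error {
    cmd, err := PluginRunCommand(dockerCli, name, rootCmd)
    if IsNotFound(err) {
        // No such plugin: let the normal "unknown command" handling take over.
        return fmt.Errorf("%s is not a docker command or plugin", name)
    }
    if err != nil {
        return err
    }
    // The returned exec.Cmd already has stdio and the re-exec envvar wired up.
    return cmd.Run()
}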
vendor/github.com/docker/cli/cli-plugins/manager/manager_unix.go (new file, generated, vendored)
@@ -0,0 +1,8 @@
// +build !windows

package manager

var defaultSystemPluginDirs = []string{
    "/usr/local/lib/docker/cli-plugins", "/usr/local/libexec/docker/cli-plugins",
    "/usr/lib/docker/cli-plugins", "/usr/libexec/docker/cli-plugins",
}
vendor/github.com/docker/cli/cli-plugins/manager/manager_windows.go (new file, generated, vendored)
@@ -0,0 +1,11 @@
package manager

import (
    "os"
    "path/filepath"
)

var defaultSystemPluginDirs = []string{
    filepath.Join(os.Getenv("ProgramData"), "Docker", "cli-plugins"),
    filepath.Join(os.Getenv("ProgramFiles"), "Docker", "cli-plugins"),
}
25
vendor/github.com/docker/cli/cli-plugins/manager/metadata.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
package manager

const (
    // NamePrefix is the prefix required on all plugin binary names
    NamePrefix = "docker-"

    // MetadataSubcommandName is the name of the plugin subcommand
    // which must be supported by every plugin and returns the
    // plugin metadata.
    MetadataSubcommandName = "docker-cli-plugin-metadata"
)

// Metadata provided by the plugin. See docs/extend/cli_plugins.md for canonical information.
type Metadata struct {
    // SchemaVersion describes the version of this struct. Mandatory, must be "0.1.0"
    SchemaVersion string `json:",omitempty"`
    // Vendor is the name of the plugin vendor. Mandatory
    Vendor string `json:",omitempty"`
    // Version is the optional version of this plugin.
    Version string `json:",omitempty"`
    // ShortDescription should be suitable for a single line help message.
    ShortDescription string `json:",omitempty"`
    // URL is a pointer to the plugin's homepage.
    URL string `json:",omitempty"`
}
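For orientation (not part of the vendored file): a hypothetical plugin's metadata expressed with the struct above. The vendor, version, description and URL are invented; only the SchemaVersion value is fixed, since newPlugin in plugin.go below rejects anything other than "0.1.0".

package example

import "github.com/docker/cli/cli-plugins/manager"

// exampleMeta is a hypothetical plugin's metadata; the values are illustrative only.
var exampleMeta = manager.Metadata{
    SchemaVersion:    "0.1.0", // the only schema version accepted by newPlugin (see plugin.go below)
    Vendor:           "Example Corp", // mandatory
    Version:          "0.0.1",
    ShortDescription: "An example Docker CLI plugin",
    URL:              "https://example.com/docker-example",
}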
107
vendor/github.com/docker/cli/cli-plugins/manager/plugin.go
generated
vendored
Normal file
@ -0,0 +1,107 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
pluginNameRe = regexp.MustCompile("^[a-z][a-z0-9]*$")
|
||||
)
|
||||
|
||||
// Plugin represents a potential plugin with all its metadata.
|
||||
type Plugin struct {
|
||||
Metadata
|
||||
|
||||
Name string `json:",omitempty"`
|
||||
Path string `json:",omitempty"`
|
||||
|
||||
// Err is non-nil if the plugin failed one of the candidate tests.
|
||||
Err error `json:",omitempty"`
|
||||
|
||||
// ShadowedPaths contains the paths of any other plugins which this plugin takes precedence over.
|
||||
ShadowedPaths []string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// newPlugin determines if the given candidate is valid and returns a
|
||||
// Plugin. If the candidate fails one of the tests then `Plugin.Err`
|
||||
// is set, and is always a `pluginError`, but the `Plugin` is still
|
||||
// returned with no error. An error is only returned due to a
|
||||
// non-recoverable error.
|
||||
func newPlugin(c Candidate, rootcmd *cobra.Command) (Plugin, error) {
|
||||
path := c.Path()
|
||||
if path == "" {
|
||||
return Plugin{}, errors.New("plugin candidate path cannot be empty")
|
||||
}
|
||||
|
||||
// The candidate listing process should have skipped anything
|
||||
// which would fail here, so these are all real errors.
|
||||
fullname := filepath.Base(path)
|
||||
if fullname == "." {
|
||||
return Plugin{}, errors.Errorf("unable to determine basename of plugin candidate %q", path)
|
||||
}
|
||||
var err error
|
||||
if fullname, err = trimExeSuffix(fullname); err != nil {
|
||||
return Plugin{}, errors.Wrapf(err, "plugin candidate %q", path)
|
||||
}
|
||||
if !strings.HasPrefix(fullname, NamePrefix) {
|
||||
return Plugin{}, errors.Errorf("plugin candidate %q: does not have %q prefix", path, NamePrefix)
|
||||
}
|
||||
|
||||
p := Plugin{
|
||||
Name: strings.TrimPrefix(fullname, NamePrefix),
|
||||
Path: path,
|
||||
}
|
||||
|
||||
// Now apply the candidate tests, so these update p.Err.
|
||||
if !pluginNameRe.MatchString(p.Name) {
|
||||
p.Err = NewPluginError("plugin candidate %q did not match %q", p.Name, pluginNameRe.String())
|
||||
return p, nil
|
||||
}
|
||||
|
||||
if rootcmd != nil {
|
||||
for _, cmd := range rootcmd.Commands() {
|
||||
// Ignore conflicts with commands which are
|
||||
// just plugin stubs (i.e. from a previous
|
||||
// call to AddPluginCommandStubs).
|
||||
if p := cmd.Annotations[CommandAnnotationPlugin]; p == "true" {
|
||||
continue
|
||||
}
|
||||
if cmd.Name() == p.Name {
|
||||
p.Err = NewPluginError("plugin %q duplicates builtin command", p.Name)
|
||||
return p, nil
|
||||
}
|
||||
if cmd.HasAlias(p.Name) {
|
||||
p.Err = NewPluginError("plugin %q duplicates an alias of builtin command %q", p.Name, cmd.Name())
|
||||
return p, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We are supposed to check for relevant execute permissions here. Instead we rely on an attempt to execute.
|
||||
meta, err := c.Metadata()
|
||||
if err != nil {
|
||||
p.Err = wrapAsPluginError(err, "failed to fetch metadata")
|
||||
return p, nil
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(meta, &p.Metadata); err != nil {
|
||||
p.Err = wrapAsPluginError(err, "invalid metadata")
|
||||
return p, nil
|
||||
}
|
||||
|
||||
if p.Metadata.SchemaVersion != "0.1.0" {
|
||||
p.Err = NewPluginError("plugin SchemaVersion %q is not valid, must be 0.1.0", p.Metadata.SchemaVersion)
|
||||
return p, nil
|
||||
}
|
||||
if p.Metadata.Vendor == "" {
|
||||
p.Err = NewPluginError("plugin metadata does not define a vendor")
|
||||
return p, nil
|
||||
}
|
||||
return p, nil
|
||||
}
|
10
vendor/github.com/docker/cli/cli-plugins/manager/suffix_unix.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
// +build !windows

package manager

func trimExeSuffix(s string) (string, error) {
    return s, nil
}
func addExeSuffix(s string) string {
    return s
}
26
vendor/github.com/docker/cli/cli-plugins/manager/suffix_windows.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
package manager

import (
    "path/filepath"
    "strings"

    "github.com/pkg/errors"
)

// This is made slightly more complex due to needing to be case insensitive.
func trimExeSuffix(s string) (string, error) {
    ext := filepath.Ext(s)
    if ext == "" {
        return "", errors.Errorf("path %q lacks required file extension", s)
    }

    exe := ".exe"
    if !strings.EqualFold(ext, exe) {
        return "", errors.Errorf("path %q lacks required %q suffix", s, exe)
    }
    return strings.TrimSuffix(s, ext), nil
}

func addExeSuffix(s string) string {
    return s + ".exe"
}
155
vendor/github.com/docker/cli/cli-plugins/plugin/plugin.go
generated
vendored
Normal file
@ -0,0 +1,155 @@
|
||||
package plugin
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli-plugins/manager"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/connhelper"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// PersistentPreRunE must be called by any plugin command (or
|
||||
// subcommand) which uses the cobra `PersistentPreRun*` hook. Plugins
|
||||
// which do not make use of `PersistentPreRun*` do not need to call
|
||||
// this (although it remains safe to do so). Plugins are recommended
|
||||
// to use `PersistentPreRunE` to enable the error to be
|
||||
// returned. Should not be called outside of a command's
|
||||
// PersistentPreRunE hook and must not be run unless Run has been
|
||||
// called.
|
||||
var PersistentPreRunE func(*cobra.Command, []string) error
|
||||
|
||||
func runPlugin(dockerCli *command.DockerCli, plugin *cobra.Command, meta manager.Metadata) error {
|
||||
tcmd := newPluginCommand(dockerCli, plugin, meta)
|
||||
|
||||
var persistentPreRunOnce sync.Once
|
||||
PersistentPreRunE = func(_ *cobra.Command, _ []string) error {
|
||||
var err error
|
||||
persistentPreRunOnce.Do(func() {
|
||||
var opts []command.InitializeOpt
|
||||
if os.Getenv("DOCKER_CLI_PLUGIN_USE_DIAL_STDIO") != "" {
|
||||
opts = append(opts, withPluginClientConn(plugin.Name()))
|
||||
}
|
||||
err = tcmd.Initialize(opts...)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
cmd, _, err := tcmd.HandleGlobalFlags()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmd.Execute()
|
||||
}
|
||||
|
||||
// Run is the top-level entry point to the CLI plugin framework. It should be called from your plugin's `main()` function.
|
||||
func Run(makeCmd func(command.Cli) *cobra.Command, meta manager.Metadata) {
|
||||
dockerCli, err := command.NewDockerCli()
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
plugin := makeCmd(dockerCli)
|
||||
|
||||
if err := runPlugin(dockerCli, plugin, meta); err != nil {
|
||||
if sterr, ok := err.(cli.StatusError); ok {
|
||||
if sterr.Status != "" {
|
||||
fmt.Fprintln(dockerCli.Err(), sterr.Status)
|
||||
}
|
||||
// StatusError should only be used for errors, and all errors should
|
||||
// have a non-zero exit status, so never exit with 0
|
||||
if sterr.StatusCode == 0 {
|
||||
os.Exit(1)
|
||||
}
|
||||
os.Exit(sterr.StatusCode)
|
||||
}
|
||||
fmt.Fprintln(dockerCli.Err(), err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func withPluginClientConn(name string) command.InitializeOpt {
|
||||
return command.WithInitializeClient(func(dockerCli *command.DockerCli) (client.APIClient, error) {
|
||||
cmd := "docker"
|
||||
if x := os.Getenv(manager.ReexecEnvvar); x != "" {
|
||||
cmd = x
|
||||
}
|
||||
var flags []string
|
||||
|
||||
// Accumulate all the global arguments, that is those
|
||||
// up to (but not including) the plugin's name. This
|
||||
// ensures that `docker system dial-stdio` is
|
||||
// evaluating the same set of `--config`, `--tls*` etc
|
||||
// global options as the plugin was called with, which
|
||||
// in turn is the same as what the original docker
|
||||
// invocation was passed.
|
||||
for _, a := range os.Args[1:] {
|
||||
if a == name {
|
||||
break
|
||||
}
|
||||
flags = append(flags, a)
|
||||
}
|
||||
flags = append(flags, "system", "dial-stdio")
|
||||
|
||||
helper, err := connhelper.GetCommandConnectionHelper(cmd, flags...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return client.NewClientWithOpts(client.WithDialContext(helper.Dialer))
|
||||
})
|
||||
}
|
||||
|
||||
func newPluginCommand(dockerCli *command.DockerCli, plugin *cobra.Command, meta manager.Metadata) *cli.TopLevelCommand {
|
||||
name := plugin.Name()
|
||||
fullname := manager.NamePrefix + name
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: fmt.Sprintf("docker [OPTIONS] %s [ARG...]", name),
|
||||
Short: fullname + " is a Docker CLI plugin",
|
||||
SilenceUsage: true,
|
||||
SilenceErrors: true,
|
||||
PersistentPreRunE: PersistentPreRunE,
|
||||
TraverseChildren: true,
|
||||
DisableFlagsInUseLine: true,
|
||||
}
|
||||
opts, flags := cli.SetupPluginRootCommand(cmd)
|
||||
|
||||
cmd.SetOutput(dockerCli.Out())
|
||||
|
||||
cmd.AddCommand(
|
||||
plugin,
|
||||
newMetadataSubcommand(plugin, meta),
|
||||
)
|
||||
|
||||
cli.DisableFlagsInUseLine(cmd)
|
||||
|
||||
return cli.NewTopLevelCommand(cmd, dockerCli, opts, flags)
|
||||
}
|
||||
|
||||
func newMetadataSubcommand(plugin *cobra.Command, meta manager.Metadata) *cobra.Command {
|
||||
if meta.ShortDescription == "" {
|
||||
meta.ShortDescription = plugin.Short
|
||||
}
|
||||
cmd := &cobra.Command{
|
||||
Use: manager.MetadataSubcommandName,
|
||||
Hidden: true,
|
||||
// Suppress the global/parent PersistentPreRunE, which
|
||||
// needlessly initializes the client and tries to
|
||||
// connect to the daemon.
|
||||
PersistentPreRun: func(cmd *cobra.Command, args []string) {},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
enc := json.NewEncoder(os.Stdout)
|
||||
enc.SetEscapeHTML(false)
|
||||
enc.SetIndent("", " ")
|
||||
return enc.Encode(meta)
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
}
|
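Not part of the vendored file: a minimal sketch of a plugin main package built on Run above, as described in its doc comment. The command name, vendor and description are invented for illustration.

package main

import (
    "fmt"

    "github.com/docker/cli/cli-plugins/manager"
    "github.com/docker/cli/cli-plugins/plugin"
    "github.com/docker/cli/cli/command"
    "github.com/spf13/cobra"
)

func main() {
    // Run wires up the plugin root command, global flags and the hidden metadata subcommand.
    plugin.Run(func(dockerCli command.Cli) *cobra.Command {
        return &cobra.Command{
            Use:   "example",
            Short: "A hypothetical docker-example plugin",
            RunE: func(cmd *cobra.Command, args []string) error {
                _, err := fmt.Fprintln(dockerCli.Out(), "hello from docker-example")
                return err
            },
        }
    }, manager.Metadata{
        SchemaVersion:    "0.1.0",
        Vendor:           "Example Corp",
        ShortDescription: "A hypothetical example plugin",
    })
}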
347
vendor/github.com/docker/cli/cli/cobra.go
generated
vendored
Normal file
@ -0,0 +1,347 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
pluginmanager "github.com/docker/cli/cli-plugins/manager"
|
||||
"github.com/docker/cli/cli/command"
|
||||
cliconfig "github.com/docker/cli/cli/config"
|
||||
cliflags "github.com/docker/cli/cli/flags"
|
||||
"github.com/docker/docker/pkg/term"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
// setupCommonRootCommand contains the setup common to
|
||||
// SetupRootCommand and SetupPluginRootCommand.
|
||||
func setupCommonRootCommand(rootCmd *cobra.Command) (*cliflags.ClientOptions, *pflag.FlagSet, *cobra.Command) {
|
||||
opts := cliflags.NewClientOptions()
|
||||
flags := rootCmd.Flags()
|
||||
|
||||
flags.StringVar(&opts.ConfigDir, "config", cliconfig.Dir(), "Location of client config files")
|
||||
opts.Common.InstallFlags(flags)
|
||||
|
||||
cobra.AddTemplateFunc("add", func(a, b int) int { return a + b })
|
||||
cobra.AddTemplateFunc("hasSubCommands", hasSubCommands)
|
||||
cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands)
|
||||
cobra.AddTemplateFunc("hasInvalidPlugins", hasInvalidPlugins)
|
||||
cobra.AddTemplateFunc("operationSubCommands", operationSubCommands)
|
||||
cobra.AddTemplateFunc("managementSubCommands", managementSubCommands)
|
||||
cobra.AddTemplateFunc("invalidPlugins", invalidPlugins)
|
||||
cobra.AddTemplateFunc("wrappedFlagUsages", wrappedFlagUsages)
|
||||
cobra.AddTemplateFunc("vendorAndVersion", vendorAndVersion)
|
||||
cobra.AddTemplateFunc("invalidPluginReason", invalidPluginReason)
|
||||
cobra.AddTemplateFunc("isPlugin", isPlugin)
|
||||
cobra.AddTemplateFunc("decoratedName", decoratedName)
|
||||
|
||||
rootCmd.SetUsageTemplate(usageTemplate)
|
||||
rootCmd.SetHelpTemplate(helpTemplate)
|
||||
rootCmd.SetFlagErrorFunc(FlagErrorFunc)
|
||||
rootCmd.SetHelpCommand(helpCommand)
|
||||
|
||||
return opts, flags, helpCommand
|
||||
}
|
||||
|
||||
// SetupRootCommand sets default usage, help, and error handling for the
|
||||
// root command.
|
||||
func SetupRootCommand(rootCmd *cobra.Command) (*cliflags.ClientOptions, *pflag.FlagSet, *cobra.Command) {
|
||||
opts, flags, helpCmd := setupCommonRootCommand(rootCmd)
|
||||
|
||||
rootCmd.SetVersionTemplate("Docker version {{.Version}}\n")
|
||||
|
||||
rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage")
|
||||
rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help")
|
||||
rootCmd.PersistentFlags().Lookup("help").Hidden = true
|
||||
|
||||
return opts, flags, helpCmd
|
||||
}
|
||||
|
||||
// SetupPluginRootCommand sets default usage, help and error handling for a plugin root command.
|
||||
func SetupPluginRootCommand(rootCmd *cobra.Command) (*cliflags.ClientOptions, *pflag.FlagSet) {
|
||||
opts, flags, _ := setupCommonRootCommand(rootCmd)
|
||||
|
||||
rootCmd.PersistentFlags().BoolP("help", "", false, "Print usage")
|
||||
rootCmd.PersistentFlags().Lookup("help").Hidden = true
|
||||
|
||||
return opts, flags
|
||||
}
|
||||
|
||||
// FlagErrorFunc prints an error message which matches the format of the
|
||||
// docker/cli/cli error messages
|
||||
func FlagErrorFunc(cmd *cobra.Command, err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
usage := ""
|
||||
if cmd.HasSubCommands() {
|
||||
usage = "\n\n" + cmd.UsageString()
|
||||
}
|
||||
return StatusError{
|
||||
Status: fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage),
|
||||
StatusCode: 125,
|
||||
}
|
||||
}
|
||||
|
||||
// TopLevelCommand encapsulates a top-level cobra command (either
|
||||
// docker CLI or a plugin) and global flag handling logic necessary
|
||||
// for plugins.
|
||||
type TopLevelCommand struct {
|
||||
cmd *cobra.Command
|
||||
dockerCli *command.DockerCli
|
||||
opts *cliflags.ClientOptions
|
||||
flags *pflag.FlagSet
|
||||
args []string
|
||||
}
|
||||
|
||||
// NewTopLevelCommand returns a new TopLevelCommand object
|
||||
func NewTopLevelCommand(cmd *cobra.Command, dockerCli *command.DockerCli, opts *cliflags.ClientOptions, flags *pflag.FlagSet) *TopLevelCommand {
|
||||
return &TopLevelCommand{cmd, dockerCli, opts, flags, os.Args[1:]}
|
||||
}
|
||||
|
||||
// SetArgs sets the args (default os.Args[1:]) used to invoke the command.
|
||||
func (tcmd *TopLevelCommand) SetArgs(args []string) {
|
||||
tcmd.args = args
|
||||
tcmd.cmd.SetArgs(args)
|
||||
}
|
||||
|
||||
// SetFlag sets a flag in the local flag set of the top-level command
|
||||
func (tcmd *TopLevelCommand) SetFlag(name, value string) {
|
||||
tcmd.cmd.Flags().Set(name, value)
|
||||
}
|
||||
|
||||
// HandleGlobalFlags takes care of parsing global flags defined on the
|
||||
// command, it returns the underlying cobra command and the args it
|
||||
// will be called with (or an error).
|
||||
//
|
||||
// On success the caller is responsible for calling Initialize()
|
||||
// before calling `Execute` on the returned command.
|
||||
func (tcmd *TopLevelCommand) HandleGlobalFlags() (*cobra.Command, []string, error) {
|
||||
cmd := tcmd.cmd
|
||||
|
||||
// We manually parse the global arguments and find the
|
||||
// subcommand in order to properly deal with plugins. We rely
|
||||
// on the root command never having any non-flag arguments. We
|
||||
// create our own FlagSet so that we can configure it
|
||||
// (e.g. `SetInterspersed` below) in an idempotent way.
|
||||
flags := pflag.NewFlagSet(cmd.Name(), pflag.ContinueOnError)
|
||||
|
||||
// We need !interspersed to ensure we stop at the first
|
||||
// potential command instead of accumulating it into
|
||||
// flags.Args() and then continuing on and finding other
|
||||
// arguments which we try and treat as globals (when they are
|
||||
// actually arguments to the subcommand).
|
||||
flags.SetInterspersed(false)
|
||||
|
||||
// We need the single parse to see both sets of flags.
|
||||
flags.AddFlagSet(cmd.Flags())
|
||||
flags.AddFlagSet(cmd.PersistentFlags())
|
||||
// Now parse the global flags, up to (but not including) the
|
||||
// first command. The result will be that all the remaining
|
||||
// arguments are in `flags.Args()`.
|
||||
if err := flags.Parse(tcmd.args); err != nil {
|
||||
// Our FlagErrorFunc uses the cli, so make sure it is initialized
|
||||
if err := tcmd.Initialize(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return nil, nil, cmd.FlagErrorFunc()(cmd, err)
|
||||
}
|
||||
|
||||
return cmd, flags.Args(), nil
|
||||
}
|
||||
|
||||
// Initialize finalises global option parsing and initializes the docker client.
|
||||
func (tcmd *TopLevelCommand) Initialize(ops ...command.InitializeOpt) error {
|
||||
tcmd.opts.Common.SetDefaultOptions(tcmd.flags)
|
||||
return tcmd.dockerCli.Initialize(tcmd.opts, ops...)
|
||||
}
|
||||
|
||||
// VisitAll will traverse all commands from the root.
|
||||
// This is different from the VisitAll of cobra.Command where only parents
|
||||
// are checked.
|
||||
func VisitAll(root *cobra.Command, fn func(*cobra.Command)) {
|
||||
for _, cmd := range root.Commands() {
|
||||
VisitAll(cmd, fn)
|
||||
}
|
||||
fn(root)
|
||||
}
|
||||
|
||||
// DisableFlagsInUseLine sets the DisableFlagsInUseLine flag on all
|
||||
// commands within the tree rooted at cmd.
|
||||
func DisableFlagsInUseLine(cmd *cobra.Command) {
|
||||
VisitAll(cmd, func(ccmd *cobra.Command) {
|
||||
// do not add a `[flags]` to the end of the usage line.
|
||||
ccmd.DisableFlagsInUseLine = true
|
||||
})
|
||||
}
|
||||
|
||||
var helpCommand = &cobra.Command{
|
||||
Use: "help [command]",
|
||||
Short: "Help about the command",
|
||||
PersistentPreRun: func(cmd *cobra.Command, args []string) {},
|
||||
PersistentPostRun: func(cmd *cobra.Command, args []string) {},
|
||||
RunE: func(c *cobra.Command, args []string) error {
|
||||
cmd, args, e := c.Root().Find(args)
|
||||
if cmd == nil || e != nil || len(args) > 0 {
|
||||
return errors.Errorf("unknown help topic: %v", strings.Join(args, " "))
|
||||
}
|
||||
|
||||
helpFunc := cmd.HelpFunc()
|
||||
helpFunc(cmd, args)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func isPlugin(cmd *cobra.Command) bool {
|
||||
return cmd.Annotations[pluginmanager.CommandAnnotationPlugin] == "true"
|
||||
}
|
||||
|
||||
func hasSubCommands(cmd *cobra.Command) bool {
|
||||
return len(operationSubCommands(cmd)) > 0
|
||||
}
|
||||
|
||||
func hasManagementSubCommands(cmd *cobra.Command) bool {
|
||||
return len(managementSubCommands(cmd)) > 0
|
||||
}
|
||||
|
||||
func hasInvalidPlugins(cmd *cobra.Command) bool {
|
||||
return len(invalidPlugins(cmd)) > 0
|
||||
}
|
||||
|
||||
func operationSubCommands(cmd *cobra.Command) []*cobra.Command {
|
||||
cmds := []*cobra.Command{}
|
||||
for _, sub := range cmd.Commands() {
|
||||
if isPlugin(sub) {
|
||||
continue
|
||||
}
|
||||
if sub.IsAvailableCommand() && !sub.HasSubCommands() {
|
||||
cmds = append(cmds, sub)
|
||||
}
|
||||
}
|
||||
return cmds
|
||||
}
|
||||
|
||||
func wrappedFlagUsages(cmd *cobra.Command) string {
|
||||
width := 80
|
||||
if ws, err := term.GetWinsize(0); err == nil {
|
||||
width = int(ws.Width)
|
||||
}
|
||||
return cmd.Flags().FlagUsagesWrapped(width - 1)
|
||||
}
|
||||
|
||||
func decoratedName(cmd *cobra.Command) string {
|
||||
decoration := " "
|
||||
if isPlugin(cmd) {
|
||||
decoration = "*"
|
||||
}
|
||||
return cmd.Name() + decoration
|
||||
}
|
||||
|
||||
func vendorAndVersion(cmd *cobra.Command) string {
|
||||
if vendor, ok := cmd.Annotations[pluginmanager.CommandAnnotationPluginVendor]; ok && isPlugin(cmd) {
|
||||
version := ""
|
||||
if v, ok := cmd.Annotations[pluginmanager.CommandAnnotationPluginVersion]; ok && v != "" {
|
||||
version = ", " + v
|
||||
}
|
||||
return fmt.Sprintf("(%s%s)", vendor, version)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func managementSubCommands(cmd *cobra.Command) []*cobra.Command {
|
||||
cmds := []*cobra.Command{}
|
||||
for _, sub := range cmd.Commands() {
|
||||
if isPlugin(sub) {
|
||||
if invalidPluginReason(sub) == "" {
|
||||
cmds = append(cmds, sub)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if sub.IsAvailableCommand() && sub.HasSubCommands() {
|
||||
cmds = append(cmds, sub)
|
||||
}
|
||||
}
|
||||
return cmds
|
||||
}
|
||||
|
||||
func invalidPlugins(cmd *cobra.Command) []*cobra.Command {
|
||||
cmds := []*cobra.Command{}
|
||||
for _, sub := range cmd.Commands() {
|
||||
if !isPlugin(sub) {
|
||||
continue
|
||||
}
|
||||
if invalidPluginReason(sub) != "" {
|
||||
cmds = append(cmds, sub)
|
||||
}
|
||||
}
|
||||
return cmds
|
||||
}
|
||||
|
||||
func invalidPluginReason(cmd *cobra.Command) string {
|
||||
return cmd.Annotations[pluginmanager.CommandAnnotationPluginInvalid]
|
||||
}
|
||||
|
||||
var usageTemplate = `Usage:
|
||||
|
||||
{{- if not .HasSubCommands}} {{.UseLine}}{{end}}
|
||||
{{- if .HasSubCommands}} {{ .CommandPath}}{{- if .HasAvailableFlags}} [OPTIONS]{{end}} COMMAND{{end}}
|
||||
|
||||
{{if ne .Long ""}}{{ .Long | trim }}{{ else }}{{ .Short | trim }}{{end}}
|
||||
|
||||
{{- if gt .Aliases 0}}
|
||||
|
||||
Aliases:
|
||||
{{.NameAndAliases}}
|
||||
|
||||
{{- end}}
|
||||
{{- if .HasExample}}
|
||||
|
||||
Examples:
|
||||
{{ .Example }}
|
||||
|
||||
{{- end}}
|
||||
{{- if .HasAvailableFlags}}
|
||||
|
||||
Options:
|
||||
{{ wrappedFlagUsages . | trimRightSpace}}
|
||||
|
||||
{{- end}}
|
||||
{{- if hasManagementSubCommands . }}
|
||||
|
||||
Management Commands:
|
||||
|
||||
{{- range managementSubCommands . }}
|
||||
{{rpad (decoratedName .) (add .NamePadding 1)}}{{.Short}}{{ if isPlugin .}} {{vendorAndVersion .}}{{ end}}
|
||||
{{- end}}
|
||||
|
||||
{{- end}}
|
||||
{{- if hasSubCommands .}}
|
||||
|
||||
Commands:
|
||||
|
||||
{{- range operationSubCommands . }}
|
||||
{{rpad .Name .NamePadding }} {{.Short}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
|
||||
{{- if hasInvalidPlugins . }}
|
||||
|
||||
Invalid Plugins:
|
||||
|
||||
{{- range invalidPlugins . }}
|
||||
{{rpad .Name .NamePadding }} {{invalidPluginReason .}}
|
||||
{{- end}}
|
||||
|
||||
{{- end}}
|
||||
|
||||
{{- if .HasSubCommands }}
|
||||
|
||||
Run '{{.CommandPath}} COMMAND --help' for more information on a command.
|
||||
{{- end}}
|
||||
`
|
||||
|
||||
var helpTemplate = `
|
||||
{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
|
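Not part of the vendored file: a rough sketch of how SetupRootCommand, NewTopLevelCommand and HandleGlobalFlags above fit together for a top-level CLI. The function name and root command contents are invented; per HandleGlobalFlags' doc comment, Initialize must run before Execute.

package example

import (
    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    "github.com/spf13/cobra"
)

func runRoot(dockerCli *command.DockerCli) error {
    rootCmd := &cobra.Command{Use: "docker [OPTIONS] COMMAND [ARG...]"}
    opts, flags, _ := cli.SetupRootCommand(rootCmd)

    tcmd := cli.NewTopLevelCommand(rootCmd, dockerCli, opts, flags)

    // Parse the global flags first, stopping at the first subcommand.
    cmd, args, err := tcmd.HandleGlobalFlags()
    if err != nil {
        return err
    }
    // Finalise global options and initialize the docker client before executing.
    if err := tcmd.Initialize(); err != nil {
        return err
    }
    cmd.SetArgs(args)
    return cmd.Execute()
}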
535
vendor/github.com/docker/cli/cli/command/cli.go
generated
vendored
Normal file
@ -0,0 +1,535 @@
|
||||
package command
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
|
||||
"github.com/docker/cli/cli/config"
|
||||
cliconfig "github.com/docker/cli/cli/config"
|
||||
"github.com/docker/cli/cli/config/configfile"
|
||||
dcontext "github.com/docker/cli/cli/context"
|
||||
"github.com/docker/cli/cli/context/docker"
|
||||
kubcontext "github.com/docker/cli/cli/context/kubernetes"
|
||||
"github.com/docker/cli/cli/context/store"
|
||||
"github.com/docker/cli/cli/debug"
|
||||
cliflags "github.com/docker/cli/cli/flags"
|
||||
manifeststore "github.com/docker/cli/cli/manifest/store"
|
||||
registryclient "github.com/docker/cli/cli/registry/client"
|
||||
"github.com/docker/cli/cli/streams"
|
||||
"github.com/docker/cli/cli/trust"
|
||||
"github.com/docker/cli/cli/version"
|
||||
"github.com/docker/cli/internal/containerizedengine"
|
||||
dopts "github.com/docker/cli/opts"
|
||||
clitypes "github.com/docker/cli/types"
|
||||
"github.com/docker/docker/api/types"
|
||||
registrytypes "github.com/docker/docker/api/types/registry"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/pkg/term"
|
||||
"github.com/docker/go-connections/tlsconfig"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/theupdateframework/notary"
|
||||
notaryclient "github.com/theupdateframework/notary/client"
|
||||
"github.com/theupdateframework/notary/passphrase"
|
||||
)
|
||||
|
||||
// Streams is an interface which exposes the standard input and output streams
|
||||
type Streams interface {
|
||||
In() *streams.In
|
||||
Out() *streams.Out
|
||||
Err() io.Writer
|
||||
}
|
||||
|
||||
// Cli represents the docker command line client.
|
||||
type Cli interface {
|
||||
Client() client.APIClient
|
||||
Out() *streams.Out
|
||||
Err() io.Writer
|
||||
In() *streams.In
|
||||
SetIn(in *streams.In)
|
||||
Apply(ops ...DockerCliOption) error
|
||||
ConfigFile() *configfile.ConfigFile
|
||||
ServerInfo() ServerInfo
|
||||
ClientInfo() ClientInfo
|
||||
NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error)
|
||||
DefaultVersion() string
|
||||
ManifestStore() manifeststore.Store
|
||||
RegistryClient(bool) registryclient.RegistryClient
|
||||
ContentTrustEnabled() bool
|
||||
NewContainerizedEngineClient(sockPath string) (clitypes.ContainerizedClient, error)
|
||||
ContextStore() store.Store
|
||||
CurrentContext() string
|
||||
StackOrchestrator(flagValue string) (Orchestrator, error)
|
||||
DockerEndpoint() docker.Endpoint
|
||||
}
|
||||
|
||||
// DockerCli is an instance of the docker command line client.
|
||||
// Instances of the client can be returned from NewDockerCli.
|
||||
type DockerCli struct {
|
||||
configFile *configfile.ConfigFile
|
||||
in *streams.In
|
||||
out *streams.Out
|
||||
err io.Writer
|
||||
client client.APIClient
|
||||
serverInfo ServerInfo
|
||||
clientInfo ClientInfo
|
||||
contentTrust bool
|
||||
newContainerizeClient func(string) (clitypes.ContainerizedClient, error)
|
||||
contextStore store.Store
|
||||
currentContext string
|
||||
dockerEndpoint docker.Endpoint
|
||||
contextStoreConfig store.Config
|
||||
}
|
||||
|
||||
// DefaultVersion returns api.defaultVersion or DOCKER_API_VERSION if specified.
|
||||
func (cli *DockerCli) DefaultVersion() string {
|
||||
return cli.clientInfo.DefaultVersion
|
||||
}
|
||||
|
||||
// Client returns the APIClient
|
||||
func (cli *DockerCli) Client() client.APIClient {
|
||||
return cli.client
|
||||
}
|
||||
|
||||
// Out returns the writer used for stdout
|
||||
func (cli *DockerCli) Out() *streams.Out {
|
||||
return cli.out
|
||||
}
|
||||
|
||||
// Err returns the writer used for stderr
|
||||
func (cli *DockerCli) Err() io.Writer {
|
||||
return cli.err
|
||||
}
|
||||
|
||||
// SetIn sets the reader used for stdin
|
||||
func (cli *DockerCli) SetIn(in *streams.In) {
|
||||
cli.in = in
|
||||
}
|
||||
|
||||
// In returns the reader used for stdin
|
||||
func (cli *DockerCli) In() *streams.In {
|
||||
return cli.in
|
||||
}
|
||||
|
||||
// ShowHelp shows the command help.
|
||||
func ShowHelp(err io.Writer) func(*cobra.Command, []string) error {
|
||||
return func(cmd *cobra.Command, args []string) error {
|
||||
cmd.SetOutput(err)
|
||||
cmd.HelpFunc()(cmd, args)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ConfigFile returns the ConfigFile
|
||||
func (cli *DockerCli) ConfigFile() *configfile.ConfigFile {
|
||||
return cli.configFile
|
||||
}
|
||||
|
||||
// ServerInfo returns the server version details for the host this client is
|
||||
// connected to
|
||||
func (cli *DockerCli) ServerInfo() ServerInfo {
|
||||
return cli.serverInfo
|
||||
}
|
||||
|
||||
// ClientInfo returns the client details for the cli
|
||||
func (cli *DockerCli) ClientInfo() ClientInfo {
|
||||
return cli.clientInfo
|
||||
}
|
||||
|
||||
// ContentTrustEnabled returns whether content trust has been enabled by an
|
||||
// environment variable.
|
||||
func (cli *DockerCli) ContentTrustEnabled() bool {
|
||||
return cli.contentTrust
|
||||
}
|
||||
|
||||
// BuildKitEnabled returns whether buildkit is enabled either through a daemon setting
|
||||
// or through the client-side DOCKER_BUILDKIT environment variable
|
||||
func BuildKitEnabled(si ServerInfo) (bool, error) {
|
||||
buildkitEnabled := si.BuildkitVersion == types.BuilderBuildKit
|
||||
if buildkitEnv := os.Getenv("DOCKER_BUILDKIT"); buildkitEnv != "" {
|
||||
var err error
|
||||
buildkitEnabled, err = strconv.ParseBool(buildkitEnv)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "DOCKER_BUILDKIT environment variable expects boolean value")
|
||||
}
|
||||
}
|
||||
return buildkitEnabled, nil
|
||||
}
|
||||
|
||||
// ManifestStore returns a store for local manifests
|
||||
func (cli *DockerCli) ManifestStore() manifeststore.Store {
|
||||
// TODO: support override default location from config file
|
||||
return manifeststore.NewStore(filepath.Join(config.Dir(), "manifests"))
|
||||
}
|
||||
|
||||
// RegistryClient returns a client for communicating with a Docker distribution
|
||||
// registry
|
||||
func (cli *DockerCli) RegistryClient(allowInsecure bool) registryclient.RegistryClient {
|
||||
resolver := func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig {
|
||||
return ResolveAuthConfig(ctx, cli, index)
|
||||
}
|
||||
return registryclient.NewRegistryClient(resolver, UserAgent(), allowInsecure)
|
||||
}
|
||||
|
||||
// InitializeOpt is the type of the functional options passed to DockerCli.Initialize
|
||||
type InitializeOpt func(dockerCli *DockerCli) error
|
||||
|
||||
// WithInitializeClient is passed to DockerCli.Initialize by callers who wish to set a particular API Client for use by the CLI.
|
||||
func WithInitializeClient(makeClient func(dockerCli *DockerCli) (client.APIClient, error)) InitializeOpt {
|
||||
return func(dockerCli *DockerCli) error {
|
||||
var err error
|
||||
dockerCli.client, err = makeClient(dockerCli)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize the dockerCli runs initialization that must happen after command
|
||||
// line flags are parsed.
|
||||
func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions, ops ...InitializeOpt) error {
|
||||
var err error
|
||||
|
||||
for _, o := range ops {
|
||||
if err := o(cli); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
cliflags.SetLogLevel(opts.Common.LogLevel)
|
||||
|
||||
if opts.ConfigDir != "" {
|
||||
cliconfig.SetDir(opts.ConfigDir)
|
||||
}
|
||||
|
||||
if opts.Common.Debug {
|
||||
debug.Enable()
|
||||
}
|
||||
|
||||
cli.configFile = cliconfig.LoadDefaultConfigFile(cli.err)
|
||||
|
||||
baseContextStore := store.New(cliconfig.ContextStoreDir(), cli.contextStoreConfig)
|
||||
cli.contextStore = &ContextStoreWithDefault{
|
||||
Store: baseContextStore,
|
||||
Resolver: func() (*DefaultContext, error) {
|
||||
return resolveDefaultContext(opts.Common, cli.ConfigFile(), cli.Err())
|
||||
},
|
||||
}
|
||||
cli.currentContext, err = resolveContextName(opts.Common, cli.configFile, cli.contextStore)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cli.dockerEndpoint, err = resolveDockerEndpoint(cli.contextStore, cli.currentContext)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to resolve docker endpoint")
|
||||
}
|
||||
|
||||
if cli.client == nil {
|
||||
cli.client, err = newAPIClientFromEndpoint(cli.dockerEndpoint, cli.configFile)
|
||||
if tlsconfig.IsErrEncryptedKey(err) {
|
||||
passRetriever := passphrase.PromptRetrieverWithInOut(cli.In(), cli.Out(), nil)
|
||||
newClient := func(password string) (client.APIClient, error) {
|
||||
cli.dockerEndpoint.TLSPassword = password
|
||||
return newAPIClientFromEndpoint(cli.dockerEndpoint, cli.configFile)
|
||||
}
|
||||
cli.client, err = getClientWithPassword(passRetriever, newClient)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
var experimentalValue string
|
||||
// Environment variable always overrides configuration
|
||||
if experimentalValue = os.Getenv("DOCKER_CLI_EXPERIMENTAL"); experimentalValue == "" {
|
||||
experimentalValue = cli.configFile.Experimental
|
||||
}
|
||||
hasExperimental, err := isEnabled(experimentalValue)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Experimental field")
|
||||
}
|
||||
cli.clientInfo = ClientInfo{
|
||||
DefaultVersion: cli.client.ClientVersion(),
|
||||
HasExperimental: hasExperimental,
|
||||
}
|
||||
cli.initializeFromClient()
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewAPIClientFromFlags creates a new APIClient from command line flags
|
||||
func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) {
|
||||
store := &ContextStoreWithDefault{
|
||||
Store: store.New(cliconfig.ContextStoreDir(), defaultContextStoreConfig()),
|
||||
Resolver: func() (*DefaultContext, error) {
|
||||
return resolveDefaultContext(opts, configFile, ioutil.Discard)
|
||||
},
|
||||
}
|
||||
contextName, err := resolveContextName(opts, configFile, store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
endpoint, err := resolveDockerEndpoint(store, contextName)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to resolve docker endpoint")
|
||||
}
|
||||
return newAPIClientFromEndpoint(endpoint, configFile)
|
||||
}
|
||||
|
||||
func newAPIClientFromEndpoint(ep docker.Endpoint, configFile *configfile.ConfigFile) (client.APIClient, error) {
|
||||
clientOpts, err := ep.ClientOpts()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
customHeaders := configFile.HTTPHeaders
|
||||
if customHeaders == nil {
|
||||
customHeaders = map[string]string{}
|
||||
}
|
||||
customHeaders["User-Agent"] = UserAgent()
|
||||
clientOpts = append(clientOpts, client.WithHTTPHeaders(customHeaders))
|
||||
return client.NewClientWithOpts(clientOpts...)
|
||||
}
|
||||
|
||||
func resolveDockerEndpoint(s store.Store, contextName string) (docker.Endpoint, error) {
|
||||
ctxMeta, err := s.GetContextMetadata(contextName)
|
||||
if err != nil {
|
||||
return docker.Endpoint{}, err
|
||||
}
|
||||
epMeta, err := docker.EndpointFromContext(ctxMeta)
|
||||
if err != nil {
|
||||
return docker.Endpoint{}, err
|
||||
}
|
||||
return docker.WithTLSData(s, contextName, epMeta)
|
||||
}
|
||||
|
||||
// Resolve the Docker endpoint for the default context (based on config, env vars and CLI flags)
|
||||
func resolveDefaultDockerEndpoint(opts *cliflags.CommonOptions) (docker.Endpoint, error) {
|
||||
host, err := getServerHost(opts.Hosts, opts.TLSOptions)
|
||||
if err != nil {
|
||||
return docker.Endpoint{}, err
|
||||
}
|
||||
|
||||
var (
|
||||
skipTLSVerify bool
|
||||
tlsData *dcontext.TLSData
|
||||
)
|
||||
|
||||
if opts.TLSOptions != nil {
|
||||
skipTLSVerify = opts.TLSOptions.InsecureSkipVerify
|
||||
tlsData, err = dcontext.TLSDataFromFiles(opts.TLSOptions.CAFile, opts.TLSOptions.CertFile, opts.TLSOptions.KeyFile)
|
||||
if err != nil {
|
||||
return docker.Endpoint{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return docker.Endpoint{
|
||||
EndpointMeta: docker.EndpointMeta{
|
||||
Host: host,
|
||||
SkipTLSVerify: skipTLSVerify,
|
||||
},
|
||||
TLSData: tlsData,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func isEnabled(value string) (bool, error) {
|
||||
switch value {
|
||||
case "enabled":
|
||||
return true, nil
|
||||
case "", "disabled":
|
||||
return false, nil
|
||||
default:
|
||||
return false, errors.Errorf("%q is not valid, should be either enabled or disabled", value)
|
||||
}
|
||||
}
|
||||
|
||||
func (cli *DockerCli) initializeFromClient() {
|
||||
ping, err := cli.client.Ping(context.Background())
|
||||
if err != nil {
|
||||
// Default to true if we fail to connect to daemon
|
||||
cli.serverInfo = ServerInfo{HasExperimental: true}
|
||||
|
||||
if ping.APIVersion != "" {
|
||||
cli.client.NegotiateAPIVersionPing(ping)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
cli.serverInfo = ServerInfo{
|
||||
HasExperimental: ping.Experimental,
|
||||
OSType: ping.OSType,
|
||||
BuildkitVersion: ping.BuilderVersion,
|
||||
}
|
||||
cli.client.NegotiateAPIVersionPing(ping)
|
||||
}
|
||||
|
||||
func getClientWithPassword(passRetriever notary.PassRetriever, newClient func(password string) (client.APIClient, error)) (client.APIClient, error) {
|
||||
for attempts := 0; ; attempts++ {
|
||||
passwd, giveup, err := passRetriever("private", "encrypted TLS private", false, attempts)
|
||||
if giveup || err != nil {
|
||||
return nil, errors.Wrap(err, "private key is encrypted, but could not get passphrase")
|
||||
}
|
||||
|
||||
apiclient, err := newClient(passwd)
|
||||
if !tlsconfig.IsErrEncryptedKey(err) {
|
||||
return apiclient, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NotaryClient provides a Notary Repository to interact with signed metadata for an image
|
||||
func (cli *DockerCli) NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error) {
|
||||
return trust.GetNotaryRepository(cli.In(), cli.Out(), UserAgent(), imgRefAndAuth.RepoInfo(), imgRefAndAuth.AuthConfig(), actions...)
|
||||
}
|
||||
|
||||
// NewContainerizedEngineClient returns a containerized engine client
|
||||
func (cli *DockerCli) NewContainerizedEngineClient(sockPath string) (clitypes.ContainerizedClient, error) {
|
||||
return cli.newContainerizeClient(sockPath)
|
||||
}
|
||||
|
||||
// ContextStore returns the ContextStore
|
||||
func (cli *DockerCli) ContextStore() store.Store {
|
||||
return cli.contextStore
|
||||
}
|
||||
|
||||
// CurrentContext returns the current context name
|
||||
func (cli *DockerCli) CurrentContext() string {
|
||||
return cli.currentContext
|
||||
}
|
||||
|
||||
// StackOrchestrator resolves which stack orchestrator is in use
|
||||
func (cli *DockerCli) StackOrchestrator(flagValue string) (Orchestrator, error) {
|
||||
currentContext := cli.CurrentContext()
|
||||
ctxRaw, err := cli.ContextStore().GetContextMetadata(currentContext)
|
||||
if store.IsErrContextDoesNotExist(err) {
|
||||
// case where the currentContext has been removed (CLI behavior is to fallback to using DOCKER_HOST based resolution)
|
||||
return GetStackOrchestrator(flagValue, "", cli.ConfigFile().StackOrchestrator, cli.Err())
|
||||
}
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
ctxMeta, err := GetDockerContext(ctxRaw)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
ctxOrchestrator := string(ctxMeta.StackOrchestrator)
|
||||
return GetStackOrchestrator(flagValue, ctxOrchestrator, cli.ConfigFile().StackOrchestrator, cli.Err())
|
||||
}
|
||||
|
||||
// DockerEndpoint returns the current docker endpoint
|
||||
func (cli *DockerCli) DockerEndpoint() docker.Endpoint {
|
||||
return cli.dockerEndpoint
|
||||
}
|
||||
|
||||
// Apply all the operations on the cli
|
||||
func (cli *DockerCli) Apply(ops ...DockerCliOption) error {
|
||||
for _, op := range ops {
|
||||
if err := op(cli); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ServerInfo stores details about the supported features and platform of the
|
||||
// server
|
||||
type ServerInfo struct {
|
||||
HasExperimental bool
|
||||
OSType string
|
||||
BuildkitVersion types.BuilderVersion
|
||||
}
|
||||
|
||||
// ClientInfo stores details about the supported features of the client
|
||||
type ClientInfo struct {
|
||||
HasExperimental bool
|
||||
DefaultVersion string
|
||||
}
|
||||
|
||||
// NewDockerCli returns a DockerCli instance with all operators applied on it.
|
||||
// It applies by default the standard streams, the content trust from
|
||||
// environment and the default containerized client constructor operations.
|
||||
func NewDockerCli(ops ...DockerCliOption) (*DockerCli, error) {
|
||||
cli := &DockerCli{}
|
||||
defaultOps := []DockerCliOption{
|
||||
WithContentTrustFromEnv(),
|
||||
WithContainerizedClient(containerizedengine.NewClient),
|
||||
}
|
||||
cli.contextStoreConfig = defaultContextStoreConfig()
|
||||
ops = append(defaultOps, ops...)
|
||||
if err := cli.Apply(ops...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if cli.out == nil || cli.in == nil || cli.err == nil {
|
||||
stdin, stdout, stderr := term.StdStreams()
|
||||
if cli.in == nil {
|
||||
cli.in = streams.NewIn(stdin)
|
||||
}
|
||||
if cli.out == nil {
|
||||
cli.out = streams.NewOut(stdout)
|
||||
}
|
||||
if cli.err == nil {
|
||||
cli.err = stderr
|
||||
}
|
||||
}
|
||||
return cli, nil
|
||||
}
|
||||
|
||||
func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (string, error) {
|
||||
var host string
|
||||
switch len(hosts) {
|
||||
case 0:
|
||||
host = os.Getenv("DOCKER_HOST")
|
||||
case 1:
|
||||
host = hosts[0]
|
||||
default:
|
||||
return "", errors.New("Please specify only one -H")
|
||||
}
|
||||
|
||||
return dopts.ParseHost(tlsOptions != nil, host)
|
||||
}
|
||||
|
||||
// UserAgent returns the user agent string used for making API requests
|
||||
func UserAgent() string {
|
||||
return "Docker-Client/" + version.Version + " (" + runtime.GOOS + ")"
|
||||
}
|
||||
|
||||
// resolveContextName resolves the current context name with the following rules:
|
||||
// - setting both --context and --host flags is ambiguous
|
||||
// - if --context is set, use this value
|
||||
// - if the --host flag or DOCKER_HOST is set, falls back to the same logic as before the context store was added
|
||||
// for backward compatibility with existing scripts
|
||||
// - if DOCKER_CONTEXT is set, use this value
|
||||
// - if Config file has a globally set "CurrentContext", use this value
|
||||
// - falls back to the default HOST, using TLS config from flags/env vars
|
||||
func resolveContextName(opts *cliflags.CommonOptions, config *configfile.ConfigFile, contextstore store.Store) (string, error) {
|
||||
if opts.Context != "" && len(opts.Hosts) > 0 {
|
||||
return "", errors.New("Conflicting options: either specify --host or --context, not both")
|
||||
}
|
||||
if opts.Context != "" {
|
||||
return opts.Context, nil
|
||||
}
|
||||
if len(opts.Hosts) > 0 {
|
||||
return DefaultContextName, nil
|
||||
}
|
||||
if _, present := os.LookupEnv("DOCKER_HOST"); present {
|
||||
return DefaultContextName, nil
|
||||
}
|
||||
if ctxName, ok := os.LookupEnv("DOCKER_CONTEXT"); ok {
|
||||
return ctxName, nil
|
||||
}
|
||||
if config != nil && config.CurrentContext != "" {
|
||||
_, err := contextstore.GetContextMetadata(config.CurrentContext)
|
||||
if store.IsErrContextDoesNotExist(err) {
|
||||
return "", errors.Errorf("Current context %q is not found on the file system, please check your config file at %s", config.CurrentContext, config.Filename)
|
||||
}
|
||||
return config.CurrentContext, err
|
||||
}
|
||||
return DefaultContextName, nil
|
||||
}
|
||||
|
||||
func defaultContextStoreConfig() store.Config {
|
||||
return store.NewConfig(
|
||||
func() interface{} { return &DockerContext{} },
|
||||
store.EndpointTypeGetter(docker.DockerEndpoint, func() interface{} { return &docker.EndpointMeta{} }),
|
||||
store.EndpointTypeGetter(kubcontext.KubernetesEndpoint, func() interface{} { return &kubcontext.EndpointMeta{} }),
|
||||
)
|
||||
}
|
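Not part of the vendored file: a small sketch of building a bare API client with NewAPIClientFromFlags above, outside of a full DockerCli. It assumes cliflags.NewCommonOptions is the usual constructor for CommonOptions (not shown in this diff); the helper name is invented.

package example

import (
    "context"
    "os"

    "github.com/docker/cli/cli/command"
    cliconfig "github.com/docker/cli/cli/config"
    cliflags "github.com/docker/cli/cli/flags"
)

// pingDaemon resolves the endpoint from the current context, env vars and config file,
// then pings the daemon once.
func pingDaemon() error {
    opts := cliflags.NewCommonOptions() // assumption: constructor exists in cli/flags
    configFile := cliconfig.LoadDefaultConfigFile(os.Stderr)

    apiClient, err := command.NewAPIClientFromFlags(opts, configFile)
    if err != nil {
        return err
    }
    _, err = apiClient.Ping(context.Background())
    return err
}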
106
vendor/github.com/docker/cli/cli/command/cli_options.go
generated
vendored
Normal file
@ -0,0 +1,106 @@
|
||||
package command
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/docker/cli/cli/context/docker"
|
||||
"github.com/docker/cli/cli/context/kubernetes"
|
||||
"github.com/docker/cli/cli/context/store"
|
||||
"github.com/docker/cli/cli/streams"
|
||||
clitypes "github.com/docker/cli/types"
|
||||
"github.com/docker/docker/pkg/term"
|
||||
)
|
||||
|
||||
// DockerCliOption applies a modification on a DockerCli.
|
||||
type DockerCliOption func(cli *DockerCli) error
|
||||
|
||||
// WithStandardStreams sets a cli in, out and err streams with the standard streams.
|
||||
func WithStandardStreams() DockerCliOption {
|
||||
return func(cli *DockerCli) error {
|
||||
// Set terminal emulation based on platform as required.
|
||||
stdin, stdout, stderr := term.StdStreams()
|
||||
cli.in = streams.NewIn(stdin)
|
||||
cli.out = streams.NewOut(stdout)
|
||||
cli.err = stderr
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithCombinedStreams uses the same stream for the output and error streams.
|
||||
func WithCombinedStreams(combined io.Writer) DockerCliOption {
|
||||
return func(cli *DockerCli) error {
|
||||
cli.out = streams.NewOut(combined)
|
||||
cli.err = combined
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithInputStream sets a cli input stream.
|
||||
func WithInputStream(in io.ReadCloser) DockerCliOption {
|
||||
return func(cli *DockerCli) error {
|
||||
cli.in = streams.NewIn(in)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithOutputStream sets a cli output stream.
|
||||
func WithOutputStream(out io.Writer) DockerCliOption {
|
||||
return func(cli *DockerCli) error {
|
||||
cli.out = streams.NewOut(out)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithErrorStream sets a cli error stream.
|
||||
func WithErrorStream(err io.Writer) DockerCliOption {
|
||||
return func(cli *DockerCli) error {
|
||||
cli.err = err
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithContentTrustFromEnv enables content trust on a cli based on the value of the DOCKER_CONTENT_TRUST environment variable.
|
||||
func WithContentTrustFromEnv() DockerCliOption {
|
||||
return func(cli *DockerCli) error {
|
||||
cli.contentTrust = false
|
||||
if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" {
|
||||
if t, err := strconv.ParseBool(e); t || err != nil {
|
||||
// treat any other value as true
|
||||
cli.contentTrust = true
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithContentTrust enables content trust on a cli.
|
||||
func WithContentTrust(enabled bool) DockerCliOption {
|
||||
return func(cli *DockerCli) error {
|
||||
cli.contentTrust = enabled
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithContainerizedClient sets the containerized client constructor on a cli.
|
||||
func WithContainerizedClient(containerizedFn func(string) (clitypes.ContainerizedClient, error)) DockerCliOption {
|
||||
return func(cli *DockerCli) error {
|
||||
cli.newContainerizeClient = containerizedFn
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithContextEndpointType adds support for an additional typed endpoint in the context store
|
||||
// Plugins should use this to store additional endpoints configuration in the context store
|
||||
func WithContextEndpointType(endpointName string, endpointType store.TypeGetter) DockerCliOption {
|
||||
return func(cli *DockerCli) error {
|
||||
switch endpointName {
|
||||
case docker.DockerEndpoint, kubernetes.KubernetesEndpoint:
|
||||
return fmt.Errorf("cannot change %q endpoint type", endpointName)
|
||||
}
|
||||
cli.contextStoreConfig.SetEndpoint(endpointName, endpointType)
|
||||
return nil
|
||||
}
|
||||
}
|
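Not part of the vendored file: the options above compose with NewDockerCli from cli.go. A minimal sketch (helper name invented) that captures output in a buffer and forces content trust on regardless of the environment.

package example

import (
    "bytes"

    "github.com/docker/cli/cli/command"
)

func newBufferedCli() (*command.DockerCli, *bytes.Buffer, error) {
    var out bytes.Buffer
    dockerCli, err := command.NewDockerCli(
        command.WithOutputStream(&out), // capture stdout-style output in the buffer
        command.WithContentTrust(true), // force content trust regardless of DOCKER_CONTENT_TRUST
    )
    return dockerCli, &out, err
}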
27
vendor/github.com/docker/cli/cli/command/context.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
package command
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/docker/cli/cli/context/store"
|
||||
)
|
||||
|
||||
// DockerContext is a typed representation of what we put in Context metadata
|
||||
type DockerContext struct {
|
||||
Description string `json:",omitempty"`
|
||||
StackOrchestrator Orchestrator `json:",omitempty"`
|
||||
}
|
||||
|
||||
// GetDockerContext extracts metadata from stored context metadata
|
||||
func GetDockerContext(storeMetadata store.ContextMetadata) (DockerContext, error) {
|
||||
if storeMetadata.Metadata == nil {
|
||||
// can happen if we save endpoints before assigning a context metadata
|
||||
// it is totally valid, and we should return a default initialized value
|
||||
return DockerContext{}, nil
|
||||
}
|
||||
res, ok := storeMetadata.Metadata.(DockerContext)
|
||||
if !ok {
|
||||
return DockerContext{}, errors.New("context metadata is not a valid DockerContext")
|
||||
}
|
||||
return res, nil
|
||||
}
|
198
vendor/github.com/docker/cli/cli/command/defaultcontextstore.go
generated
vendored
Normal file
@ -0,0 +1,198 @@
|
||||
package command
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/cli/cli/config/configfile"
|
||||
"github.com/docker/cli/cli/context/docker"
|
||||
"github.com/docker/cli/cli/context/kubernetes"
|
||||
"github.com/docker/cli/cli/context/store"
|
||||
cliflags "github.com/docker/cli/cli/flags"
|
||||
"github.com/docker/docker/pkg/homedir"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultContextName is the name reserved for the default context (config & env based)
|
||||
DefaultContextName = "default"
|
||||
)
|
||||
|
||||
// DefaultContext contains the default context data for all endpoints
|
||||
type DefaultContext struct {
|
||||
Meta store.ContextMetadata
|
||||
TLS store.ContextTLSData
|
||||
}
|
||||
|
||||
// DefaultContextResolver is a function which resolves the default context based on the configuration and the env variables
|
||||
type DefaultContextResolver func() (*DefaultContext, error)
|
||||
|
||||
// ContextStoreWithDefault implements the store.Store interface with a support for the default context
|
||||
type ContextStoreWithDefault struct {
|
||||
store.Store
|
||||
Resolver DefaultContextResolver
|
||||
}
|
||||
|
||||
// resolveDefaultContext creates a ContextMetadata for the current CLI invocation parameters
|
||||
func resolveDefaultContext(opts *cliflags.CommonOptions, config *configfile.ConfigFile, stderr io.Writer) (*DefaultContext, error) {
|
||||
stackOrchestrator, err := GetStackOrchestrator("", "", config.StackOrchestrator, stderr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
contextTLSData := store.ContextTLSData{
|
||||
Endpoints: make(map[string]store.EndpointTLSData),
|
||||
}
|
||||
contextMetadata := store.ContextMetadata{
|
||||
Endpoints: make(map[string]interface{}),
|
||||
Metadata: DockerContext{
|
||||
Description: "",
|
||||
StackOrchestrator: stackOrchestrator,
|
||||
},
|
||||
Name: DefaultContextName,
|
||||
}
|
||||
|
||||
dockerEP, err := resolveDefaultDockerEndpoint(opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
contextMetadata.Endpoints[docker.DockerEndpoint] = dockerEP.EndpointMeta
|
||||
if dockerEP.TLSData != nil {
|
||||
contextTLSData.Endpoints[docker.DockerEndpoint] = *dockerEP.TLSData.ToStoreTLSData()
|
||||
}
|
||||
|
||||
// Default context uses env-based kubeconfig for Kubernetes endpoint configuration
|
||||
kubeconfig := os.Getenv("KUBECONFIG")
|
||||
if kubeconfig == "" {
|
||||
kubeconfig = filepath.Join(homedir.Get(), ".kube/config")
|
||||
}
|
||||
kubeEP, err := kubernetes.FromKubeConfig(kubeconfig, "", "")
|
||||
	if (stackOrchestrator == OrchestratorKubernetes || stackOrchestrator == OrchestratorAll) && err != nil {
		return nil, errors.Wrapf(err, "default orchestrator is %s but kubernetes endpoint could not be found", stackOrchestrator)
	}
	if err == nil {
		contextMetadata.Endpoints[kubernetes.KubernetesEndpoint] = kubeEP.EndpointMeta
		if kubeEP.TLSData != nil {
			contextTLSData.Endpoints[kubernetes.KubernetesEndpoint] = *kubeEP.TLSData.ToStoreTLSData()
		}
	}

	return &DefaultContext{Meta: contextMetadata, TLS: contextTLSData}, nil
}

// ListContexts implements store.Store's ListContexts
func (s *ContextStoreWithDefault) ListContexts() ([]store.ContextMetadata, error) {
	contextList, err := s.Store.ListContexts()
	if err != nil {
		return nil, err
	}
	defaultContext, err := s.Resolver()
	if err != nil {
		return nil, err
	}
	return append(contextList, defaultContext.Meta), nil
}

// CreateOrUpdateContext is not allowed for the default context and fails
func (s *ContextStoreWithDefault) CreateOrUpdateContext(meta store.ContextMetadata) error {
	if meta.Name == DefaultContextName {
		return errors.New("default context cannot be created nor updated")
	}
	return s.Store.CreateOrUpdateContext(meta)
}

// RemoveContext is not allowed for the default context and fails
func (s *ContextStoreWithDefault) RemoveContext(name string) error {
	if name == DefaultContextName {
		return errors.New("default context cannot be removed")
	}
	return s.Store.RemoveContext(name)
}

// GetContextMetadata implements store.Store's GetContextMetadata
func (s *ContextStoreWithDefault) GetContextMetadata(name string) (store.ContextMetadata, error) {
	if name == DefaultContextName {
		defaultContext, err := s.Resolver()
		if err != nil {
			return store.ContextMetadata{}, err
		}
		return defaultContext.Meta, nil
	}
	return s.Store.GetContextMetadata(name)
}

// ResetContextTLSMaterial is not implemented for default context and fails
func (s *ContextStoreWithDefault) ResetContextTLSMaterial(name string, data *store.ContextTLSData) error {
	if name == DefaultContextName {
		return errors.New("The default context store does not support ResetContextTLSMaterial")
	}
	return s.Store.ResetContextTLSMaterial(name, data)
}

// ResetContextEndpointTLSMaterial is not implemented for default context and fails
func (s *ContextStoreWithDefault) ResetContextEndpointTLSMaterial(contextName string, endpointName string, data *store.EndpointTLSData) error {
	if contextName == DefaultContextName {
		return errors.New("The default context store does not support ResetContextEndpointTLSMaterial")
	}
	return s.Store.ResetContextEndpointTLSMaterial(contextName, endpointName, data)
}

// ListContextTLSFiles implements store.Store's ListContextTLSFiles
func (s *ContextStoreWithDefault) ListContextTLSFiles(name string) (map[string]store.EndpointFiles, error) {
	if name == DefaultContextName {
		defaultContext, err := s.Resolver()
		if err != nil {
			return nil, err
		}
		tlsfiles := make(map[string]store.EndpointFiles)
		for epName, epTLSData := range defaultContext.TLS.Endpoints {
			var files store.EndpointFiles
			for filename := range epTLSData.Files {
				files = append(files, filename)
			}
			tlsfiles[epName] = files
		}
		return tlsfiles, nil
	}
	return s.Store.ListContextTLSFiles(name)
}

// GetContextTLSData implements store.Store's GetContextTLSData
func (s *ContextStoreWithDefault) GetContextTLSData(contextName, endpointName, fileName string) ([]byte, error) {
	if contextName == DefaultContextName {
		defaultContext, err := s.Resolver()
		if err != nil {
			return nil, err
		}
		if defaultContext.TLS.Endpoints[endpointName].Files[fileName] == nil {
			return nil, &noDefaultTLSDataError{endpointName: endpointName, fileName: fileName}
		}
		return defaultContext.TLS.Endpoints[endpointName].Files[fileName], nil
	}
	return s.Store.GetContextTLSData(contextName, endpointName, fileName)
}

type noDefaultTLSDataError struct {
	endpointName string
	fileName     string
}

func (e *noDefaultTLSDataError) Error() string {
	return fmt.Sprintf("tls data for %s/%s/%s does not exist", DefaultContextName, e.endpointName, e.fileName)
}

// NotFound satisfies interface github.com/docker/docker/errdefs.ErrNotFound
func (e *noDefaultTLSDataError) NotFound() {}

// IsTLSDataDoesNotExist satisfies github.com/docker/cli/cli/context/store.tlsDataDoesNotExist
func (e *noDefaultTLSDataError) IsTLSDataDoesNotExist() {}

// GetContextStorageInfo implements store.Store's GetContextStorageInfo
func (s *ContextStoreWithDefault) GetContextStorageInfo(contextName string) store.ContextStorageInfo {
	if contextName == DefaultContextName {
		return store.ContextStorageInfo{MetadataPath: "<IN MEMORY>", TLSPath: "<IN MEMORY>"}
	}
	return s.Store.GetContextStorageInfo(contextName)
}
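ContextStoreWithDefault is a thin decorator: it delegates everything to the wrapped store and only intercepts calls that name the reserved default context, which is synthesized in memory by the Resolver. A minimal, self-contained sketch of the same pattern, using hypothetical simplified types rather than the real docker/cli interfaces:

```go
package main

import (
	"errors"
	"fmt"
)

// Store is a hypothetical, pared-down version of a context store interface.
type Store interface {
	List() []string
	Remove(name string) error
}

type fileStore struct{ names []string }

func (s *fileStore) List() []string           { return s.names }
func (s *fileStore) Remove(name string) error { return nil }

// withDefault decorates a Store: the synthesized "default" entry is read-only
// and is appended to listings, mirroring ContextStoreWithDefault above.
type withDefault struct{ Store }

func (s withDefault) List() []string { return append(s.Store.List(), "default") }
func (s withDefault) Remove(name string) error {
	if name == "default" {
		return errors.New("default context cannot be removed")
	}
	return s.Store.Remove(name)
}

func main() {
	s := withDefault{&fileStore{names: []string{"remote"}}}
	fmt.Println(s.List())            // [remote default]
	fmt.Println(s.Remove("default")) // default context cannot be removed
}
```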
47
vendor/github.com/docker/cli/cli/command/events_utils.go
generated
vendored
Normal file
@ -0,0 +1,47 @@
package command

import (
	"sync"

	eventtypes "github.com/docker/docker/api/types/events"
	"github.com/sirupsen/logrus"
)

// EventHandler is an abstract interface that lets callers register
// their own handler functions for each type of event
type EventHandler interface {
	Handle(action string, h func(eventtypes.Message))
	Watch(c <-chan eventtypes.Message)
}

// InitEventHandler initializes and returns an EventHandler
func InitEventHandler() EventHandler {
	return &eventHandler{handlers: make(map[string]func(eventtypes.Message))}
}

type eventHandler struct {
	handlers map[string]func(eventtypes.Message)
	mu       sync.Mutex
}

func (w *eventHandler) Handle(action string, h func(eventtypes.Message)) {
	w.mu.Lock()
	w.handlers[action] = h
	w.mu.Unlock()
}

// Watch ranges over the passed in event chan and processes the events based on the
// handlers created for a given action.
// To stop watching, close the event chan.
func (w *eventHandler) Watch(c <-chan eventtypes.Message) {
	for e := range c {
		w.mu.Lock()
		h, exists := w.handlers[e.Action]
		w.mu.Unlock()
		if !exists {
			continue
		}
		logrus.Debugf("event handler: received event: %v", e)
		go h(e)
	}
}
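The handler map plus the `go h(e)` dispatch above means each registered callback runs on its own goroutine per event. A short usage sketch, assuming the vendored docker/cli and docker/docker packages are importable from the consuming module:

```go
package main

import (
	"fmt"
	"time"

	"github.com/docker/cli/cli/command"
	eventtypes "github.com/docker/docker/api/types/events"
)

func main() {
	h := command.InitEventHandler()
	// Register a handler for "start" events; actions without a handler are skipped.
	h.Handle("start", func(e eventtypes.Message) {
		fmt.Println("started:", e.Actor.ID)
	})

	ch := make(chan eventtypes.Message)
	go h.Watch(ch) // Watch returns once the channel is closed.

	ch <- eventtypes.Message{Action: "start", Actor: eventtypes.Actor{ID: "abc123"}}
	ch <- eventtypes.Message{Action: "stop"} // no handler registered: ignored
	close(ch)

	time.Sleep(100 * time.Millisecond) // handlers run in goroutines; give them a beat
}
```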
84
vendor/github.com/docker/cli/cli/command/orchestrator.go
generated
vendored
Normal file
@ -0,0 +1,84 @@
package command

import (
	"fmt"
	"io"
	"os"
)

// Orchestrator type acts as an enum describing supported orchestrators.
type Orchestrator string

const (
	// OrchestratorKubernetes orchestrator
	OrchestratorKubernetes = Orchestrator("kubernetes")
	// OrchestratorSwarm orchestrator
	OrchestratorSwarm = Orchestrator("swarm")
	// OrchestratorAll orchestrator
	OrchestratorAll   = Orchestrator("all")
	orchestratorUnset = Orchestrator("")

	defaultOrchestrator           = OrchestratorSwarm
	envVarDockerStackOrchestrator = "DOCKER_STACK_ORCHESTRATOR"
	envVarDockerOrchestrator      = "DOCKER_ORCHESTRATOR"
)

// HasKubernetes returns true if the defined orchestrator has Kubernetes capabilities.
func (o Orchestrator) HasKubernetes() bool {
	return o == OrchestratorKubernetes || o == OrchestratorAll
}

// HasSwarm returns true if the defined orchestrator has Swarm capabilities.
func (o Orchestrator) HasSwarm() bool {
	return o == OrchestratorSwarm || o == OrchestratorAll
}

// HasAll returns true if the defined orchestrator has both Swarm and Kubernetes capabilities.
func (o Orchestrator) HasAll() bool {
	return o == OrchestratorAll
}

func normalize(value string) (Orchestrator, error) {
	switch value {
	case "kubernetes":
		return OrchestratorKubernetes, nil
	case "swarm":
		return OrchestratorSwarm, nil
	case "", "unset": // unset is the old value for orchestratorUnset. Keep accepting this for backward compat
		return orchestratorUnset, nil
	case "all":
		return OrchestratorAll, nil
	default:
		return defaultOrchestrator, fmt.Errorf("specified orchestrator %q is invalid, please use either kubernetes, swarm or all", value)
	}
}

// NormalizeOrchestrator parses an orchestrator value and checks if it is valid
func NormalizeOrchestrator(value string) (Orchestrator, error) {
	return normalize(value)
}

// GetStackOrchestrator checks the DOCKER_STACK_ORCHESTRATOR environment variable and the
// configuration file orchestrator value, and returns the user-defined Orchestrator.
func GetStackOrchestrator(flagValue, contextValue, globalDefault string, stderr io.Writer) (Orchestrator, error) {
	// Check flag
	if o, err := normalize(flagValue); o != orchestratorUnset {
		return o, err
	}
	// Check environment variable
	env := os.Getenv(envVarDockerStackOrchestrator)
	if env == "" && os.Getenv(envVarDockerOrchestrator) != "" {
		fmt.Fprintf(stderr, "WARNING: experimental environment variable %s is set. Please use %s instead\n", envVarDockerOrchestrator, envVarDockerStackOrchestrator)
	}
	if o, err := normalize(env); o != orchestratorUnset {
		return o, err
	}
	if o, err := normalize(contextValue); o != orchestratorUnset {
		return o, err
	}
	if o, err := normalize(globalDefault); o != orchestratorUnset {
		return o, err
	}
	// Nothing set, use default orchestrator
	return defaultOrchestrator, nil
}
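GetStackOrchestrator resolves the orchestrator in strict precedence order: flag, then DOCKER_STACK_ORCHESTRATOR, then the context value, then the config-file default, and finally the built-in swarm default. A small sketch exercising that order (values are illustrative):

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/cli/cli/command"
)

func main() {
	// No flag set: the environment variable wins over context/config defaults.
	os.Setenv("DOCKER_STACK_ORCHESTRATOR", "kubernetes")
	o, err := command.GetStackOrchestrator("", "", "", os.Stderr)
	if err != nil {
		panic(err)
	}
	fmt.Println(o, o.HasKubernetes(), o.HasSwarm()) // kubernetes true false

	// An explicit flag value wins over the environment variable.
	o, _ = command.GetStackOrchestrator("all", "", "", os.Stderr)
	fmt.Println(o.HasAll()) // true
}
```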
204
vendor/github.com/docker/cli/cli/command/registry.go
generated
vendored
Normal file
@ -0,0 +1,204 @@
|
||||
package command
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
configtypes "github.com/docker/cli/cli/config/types"
|
||||
"github.com/docker/cli/cli/debug"
|
||||
"github.com/docker/cli/cli/streams"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api/types"
|
||||
registrytypes "github.com/docker/docker/api/types/registry"
|
||||
"github.com/docker/docker/pkg/term"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ElectAuthServer returns the default registry to use (by asking the daemon)
|
||||
func ElectAuthServer(ctx context.Context, cli Cli) string {
|
||||
// The daemon `/info` endpoint informs us of the default registry being
|
||||
// used. This is essential in cross-platform environments, where for
|
||||
// example a Linux client might be interacting with a Windows daemon, hence
|
||||
// the default registry URL might be Windows specific.
|
||||
serverAddress := registry.IndexServer
|
||||
if info, err := cli.Client().Info(ctx); err != nil && debug.IsEnabled() {
|
||||
// Only report the warning if we're in debug mode to prevent nagging during engine initialization workflows
|
||||
fmt.Fprintf(cli.Err(), "Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s\n", err, serverAddress)
|
||||
} else if info.IndexServerAddress == "" && debug.IsEnabled() {
|
||||
fmt.Fprintf(cli.Err(), "Warning: Empty registry endpoint from daemon. Using system default: %s\n", serverAddress)
|
||||
} else {
|
||||
serverAddress = info.IndexServerAddress
|
||||
}
|
||||
return serverAddress
|
||||
}
|
||||
|
||||
// EncodeAuthToBase64 serializes the auth configuration as JSON base64 payload
|
||||
func EncodeAuthToBase64(authConfig types.AuthConfig) (string, error) {
|
||||
buf, err := json.Marshal(authConfig)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return base64.URLEncoding.EncodeToString(buf), nil
|
||||
}
|
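The encoded string is a URL-safe base64 wrapping of the JSON-serialized AuthConfig, which is what the CLI passes to the daemon as the registry auth payload. A round-trip sketch (the registry address and credentials are illustrative):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"

	"github.com/docker/cli/cli/command"
	"github.com/docker/docker/api/types"
)

func main() {
	auth := types.AuthConfig{Username: "jane", Password: "s3cret", ServerAddress: "registry.example.com"}

	// Encode the auth config the way the CLI does before sending it to the daemon.
	encoded, err := command.EncodeAuthToBase64(auth)
	if err != nil {
		panic(err)
	}

	// Decoding is the mirror image: URL-safe base64 wrapping a JSON document.
	raw, _ := base64.URLEncoding.DecodeString(encoded)
	var decoded types.AuthConfig
	_ = json.Unmarshal(raw, &decoded)
	fmt.Println(decoded.Username) // jane
}
```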
||||
|
||||
// RegistryAuthenticationPrivilegedFunc returns a RequestPrivilegeFunc from the specified registry index info
|
||||
// for the given command.
|
||||
func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInfo, cmdName string) types.RequestPrivilegeFunc {
|
||||
return func() (string, error) {
|
||||
fmt.Fprintf(cli.Out(), "\nPlease login prior to %s:\n", cmdName)
|
||||
indexServer := registry.GetAuthConfigKey(index)
|
||||
isDefaultRegistry := indexServer == ElectAuthServer(context.Background(), cli)
|
||||
authConfig, err := GetDefaultAuthConfig(cli, true, indexServer, isDefaultRegistry)
|
||||
if err != nil {
|
||||
fmt.Fprintf(cli.Err(), "Unable to retrieve stored credentials for %s, error: %s.\n", indexServer, err)
|
||||
}
|
||||
err = ConfigureAuth(cli, "", "", authConfig, isDefaultRegistry)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return EncodeAuthToBase64(*authConfig)
|
||||
}
|
||||
}
|
||||
|
||||
// ResolveAuthConfig is like registry.ResolveAuthConfig, but if using the
|
||||
// default index, it uses the default index name for the daemon's platform,
|
||||
// not the client's platform.
|
||||
func ResolveAuthConfig(ctx context.Context, cli Cli, index *registrytypes.IndexInfo) types.AuthConfig {
|
||||
configKey := index.Name
|
||||
if index.Official {
|
||||
configKey = ElectAuthServer(ctx, cli)
|
||||
}
|
||||
|
||||
a, _ := cli.ConfigFile().GetAuthConfig(configKey)
|
||||
return types.AuthConfig(a)
|
||||
}
|
||||
|
||||
// GetDefaultAuthConfig gets the default auth config given a serverAddress
|
||||
// If credentials for given serverAddress exists in the credential store, the configuration will be populated with values in it
|
||||
func GetDefaultAuthConfig(cli Cli, checkCredStore bool, serverAddress string, isDefaultRegistry bool) (*types.AuthConfig, error) {
|
||||
if !isDefaultRegistry {
|
||||
serverAddress = registry.ConvertToHostname(serverAddress)
|
||||
}
|
||||
var authconfig configtypes.AuthConfig
|
||||
var err error
|
||||
if checkCredStore {
|
||||
authconfig, err = cli.ConfigFile().GetAuthConfig(serverAddress)
|
||||
} else {
|
||||
authconfig = configtypes.AuthConfig{}
|
||||
}
|
||||
authconfig.ServerAddress = serverAddress
|
||||
authconfig.IdentityToken = ""
|
||||
res := types.AuthConfig(authconfig)
|
||||
return &res, err
|
||||
}
|
||||
|
||||
// ConfigureAuth handles prompting of user's username and password if needed
|
||||
func ConfigureAuth(cli Cli, flUser, flPassword string, authconfig *types.AuthConfig, isDefaultRegistry bool) error {
|
||||
// On Windows, force the use of the regular OS stdin stream. Fixes #14336/#14210
|
||||
if runtime.GOOS == "windows" {
|
||||
cli.SetIn(streams.NewIn(os.Stdin))
|
||||
}
|
||||
|
||||
// Some links documenting this:
|
||||
// - https://code.google.com/archive/p/mintty/issues/56
|
||||
// - https://github.com/docker/docker/issues/15272
|
||||
// - https://mintty.github.io/ (compatibility)
|
||||
// Linux will hit this if you attempt `cat | docker login`, and Windows
|
||||
// will hit this if you attempt docker login from mintty where stdin
|
||||
// is a pipe, not a character based console.
|
||||
if flPassword == "" && !cli.In().IsTerminal() {
|
||||
return errors.Errorf("Error: Cannot perform an interactive login from a non TTY device")
|
||||
}
|
||||
|
||||
authconfig.Username = strings.TrimSpace(authconfig.Username)
|
||||
|
||||
if flUser = strings.TrimSpace(flUser); flUser == "" {
|
||||
if isDefaultRegistry {
|
||||
// if this is a default registry (docker hub), then display the following message.
|
||||
fmt.Fprintln(cli.Out(), "Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one.")
|
||||
}
|
||||
promptWithDefault(cli.Out(), "Username", authconfig.Username)
|
||||
flUser = readInput(cli.In(), cli.Out())
|
||||
flUser = strings.TrimSpace(flUser)
|
||||
if flUser == "" {
|
||||
flUser = authconfig.Username
|
||||
}
|
||||
}
|
||||
if flUser == "" {
|
||||
return errors.Errorf("Error: Non-null Username Required")
|
||||
}
|
||||
if flPassword == "" {
|
||||
oldState, err := term.SaveState(cli.In().FD())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintf(cli.Out(), "Password: ")
|
||||
term.DisableEcho(cli.In().FD(), oldState)
|
||||
|
||||
flPassword = readInput(cli.In(), cli.Out())
|
||||
fmt.Fprint(cli.Out(), "\n")
|
||||
|
||||
term.RestoreTerminal(cli.In().FD(), oldState)
|
||||
if flPassword == "" {
|
||||
return errors.Errorf("Error: Password Required")
|
||||
}
|
||||
}
|
||||
|
||||
authconfig.Username = flUser
|
||||
authconfig.Password = flPassword
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func readInput(in io.Reader, out io.Writer) string {
|
||||
reader := bufio.NewReader(in)
|
||||
line, _, err := reader.ReadLine()
|
||||
if err != nil {
|
||||
fmt.Fprintln(out, err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
return string(line)
|
||||
}
|
||||
|
||||
func promptWithDefault(out io.Writer, prompt string, configDefault string) {
|
||||
if configDefault == "" {
|
||||
fmt.Fprintf(out, "%s: ", prompt)
|
||||
} else {
|
||||
fmt.Fprintf(out, "%s (%s): ", prompt, configDefault)
|
||||
}
|
||||
}
|
||||
|
||||
// RetrieveAuthTokenFromImage retrieves an encoded auth token given a complete image
|
||||
func RetrieveAuthTokenFromImage(ctx context.Context, cli Cli, image string) (string, error) {
|
||||
// Retrieve encoded auth token from the image reference
|
||||
authConfig, err := resolveAuthConfigFromImage(ctx, cli, image)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
encodedAuth, err := EncodeAuthToBase64(authConfig)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return encodedAuth, nil
|
||||
}
|
||||
|
||||
// resolveAuthConfigFromImage retrieves that AuthConfig using the image string
|
||||
func resolveAuthConfigFromImage(ctx context.Context, cli Cli, image string) (types.AuthConfig, error) {
|
||||
registryRef, err := reference.ParseNormalizedNamed(image)
|
||||
if err != nil {
|
||||
return types.AuthConfig{}, err
|
||||
}
|
||||
repoInfo, err := registry.ParseRepositoryInfo(registryRef)
|
||||
if err != nil {
|
||||
return types.AuthConfig{}, err
|
||||
}
|
||||
return ResolveAuthConfig(ctx, cli, repoInfo.Index), nil
|
||||
}
|
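resolveAuthConfigFromImage leans on reference normalization to decide which credential key to look up: official images map to the Docker Hub index, anything else keeps its own registry host. A standalone sketch of that resolution step, without a full Cli:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
	"github.com/docker/docker/registry"
)

func main() {
	for _, image := range []string{"alpine:3.10", "ghcr.io/owner/app:v1"} {
		ref, err := reference.ParseNormalizedNamed(image)
		if err != nil {
			panic(err)
		}
		repoInfo, err := registry.ParseRepositoryInfo(ref)
		if err != nil {
			panic(err)
		}
		// Official images resolve to the Docker Hub index; others keep their own host.
		fmt.Println(image, "->", repoInfo.Index.Name, "official:", repoInfo.Index.Official)
	}
}
```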
23
vendor/github.com/docker/cli/cli/command/streams.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
package command

import (
	"github.com/docker/cli/cli/streams"
)

// InStream is an input stream used by the DockerCli to read user input
// Deprecated: Use github.com/docker/cli/cli/streams.In instead
type InStream = streams.In

// OutStream is an output stream used by the DockerCli to write normal program
// output.
// Deprecated: Use github.com/docker/cli/cli/streams.Out instead
type OutStream = streams.Out

var (
	// NewInStream returns a new InStream object from a ReadCloser
	// Deprecated: Use github.com/docker/cli/cli/streams.NewIn instead
	NewInStream = streams.NewIn
	// NewOutStream returns a new OutStream object from a Writer
	// Deprecated: Use github.com/docker/cli/cli/streams.NewOut instead
	NewOutStream = streams.NewOut
)
15
vendor/github.com/docker/cli/cli/command/trust.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
package command

import (
	"github.com/spf13/pflag"
)

// AddTrustVerificationFlags adds content trust flags to the provided flagset
func AddTrustVerificationFlags(fs *pflag.FlagSet, v *bool, trusted bool) {
	fs.BoolVar(v, "disable-content-trust", !trusted, "Skip image verification")
}

// AddTrustSigningFlags adds "signing" flags to the provided flagset
func AddTrustSigningFlags(fs *pflag.FlagSet, v *bool, trusted bool) {
	fs.BoolVar(v, "disable-content-trust", !trusted, "Skip image signing")
}
163
vendor/github.com/docker/cli/cli/command/utils.go
generated
vendored
Normal file
@ -0,0 +1,163 @@
|
||||
package command
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/cli/cli/streams"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
// CopyToFile writes the content of the reader to the specified file
|
||||
func CopyToFile(outfile string, r io.Reader) error {
|
||||
// We use sequential file access here to avoid depleting the standby list
|
||||
// on Windows. On Linux, this is a call directly to ioutil.TempFile
|
||||
tmpFile, err := system.TempFileSequential(filepath.Dir(outfile), ".docker_temp_")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tmpPath := tmpFile.Name()
|
||||
|
||||
_, err = io.Copy(tmpFile, r)
|
||||
tmpFile.Close()
|
||||
|
||||
if err != nil {
|
||||
os.Remove(tmpPath)
|
||||
return err
|
||||
}
|
||||
|
||||
if err = os.Rename(tmpPath, outfile); err != nil {
|
||||
os.Remove(tmpPath)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// capitalizeFirst capitalizes the first character of string
|
||||
func capitalizeFirst(s string) string {
|
||||
switch l := len(s); l {
|
||||
case 0:
|
||||
return s
|
||||
case 1:
|
||||
return strings.ToLower(s)
|
||||
default:
|
||||
return strings.ToUpper(string(s[0])) + strings.ToLower(s[1:])
|
||||
}
|
||||
}
|
||||
|
||||
// PrettyPrint outputs arbitrary data for human formatted output by uppercasing the first letter.
|
||||
func PrettyPrint(i interface{}) string {
|
||||
switch t := i.(type) {
|
||||
case nil:
|
||||
return "None"
|
||||
case string:
|
||||
return capitalizeFirst(t)
|
||||
default:
|
||||
return capitalizeFirst(fmt.Sprintf("%s", t))
|
||||
}
|
||||
}
|
||||
|
||||
// PromptForConfirmation requests and checks confirmation from user.
|
||||
// This will display the provided message followed by ' [y/N] '. If
|
||||
// the user inputs 'y' or 'Y' it returns true, otherwise false. If no
|
||||
// message is provided "Are you sure you want to proceed? [y/N] "
|
||||
// will be used instead.
|
||||
func PromptForConfirmation(ins io.Reader, outs io.Writer, message string) bool {
|
||||
if message == "" {
|
||||
message = "Are you sure you want to proceed?"
|
||||
}
|
||||
message += " [y/N] "
|
||||
|
||||
fmt.Fprintf(outs, message)
|
||||
|
||||
// On Windows, force the use of the regular OS stdin stream.
|
||||
if runtime.GOOS == "windows" {
|
||||
ins = streams.NewIn(os.Stdin)
|
||||
}
|
||||
|
||||
reader := bufio.NewReader(ins)
|
||||
answer, _, _ := reader.ReadLine()
|
||||
return strings.ToLower(string(answer)) == "y"
|
||||
}
|
||||
|
||||
// PruneFilters returns consolidated prune filters obtained from config.json and cli
|
||||
func PruneFilters(dockerCli Cli, pruneFilters filters.Args) filters.Args {
|
||||
if dockerCli.ConfigFile() == nil {
|
||||
return pruneFilters
|
||||
}
|
||||
for _, f := range dockerCli.ConfigFile().PruneFilters {
|
||||
parts := strings.SplitN(f, "=", 2)
|
||||
if len(parts) != 2 {
|
||||
continue
|
||||
}
|
||||
if parts[0] == "label" {
|
||||
// CLI label filters supersede config.json.
// If a config.json label filter conflicts with a CLI label!
// filter for the same value, skip adding it.
|
||||
if pruneFilters.Contains("label!") && pruneFilters.ExactMatch("label!", parts[1]) {
|
||||
continue
|
||||
}
|
||||
} else if parts[0] == "label!" {
|
||||
// CLI label! filters supersede config.json.
// If a config.json label! filter conflicts with a CLI label
// filter for the same value, skip adding it.
|
||||
if pruneFilters.Contains("label") && pruneFilters.ExactMatch("label", parts[1]) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
pruneFilters.Add(parts[0], parts[1])
|
||||
}
|
||||
|
||||
return pruneFilters
|
||||
}
|
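PruneFilters needs a full Cli to read config.json, but the merge rule itself is small: config entries are appended unless the CLI already pinned the opposite label/label! filter for the same value. A self-contained re-derivation of that rule (mergePruneFilters is a hypothetical helper, not part of docker/cli):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/docker/docker/api/types/filters"
)

// mergePruneFilters mirrors the rule in PruneFilters above: config.json entries
// are added unless the CLI already set the opposite label/label! filter for the
// same value.
func mergePruneFilters(cli filters.Args, fromConfig []string) filters.Args {
	for _, f := range fromConfig {
		parts := strings.SplitN(f, "=", 2)
		if len(parts) != 2 {
			continue
		}
		if parts[0] == "label" && cli.Contains("label!") && cli.ExactMatch("label!", parts[1]) {
			continue
		}
		if parts[0] == "label!" && cli.Contains("label") && cli.ExactMatch("label", parts[1]) {
			continue
		}
		cli.Add(parts[0], parts[1])
	}
	return cli
}

func main() {
	cli := filters.NewArgs()
	cli.Add("label!", "keep=true")

	merged := mergePruneFilters(cli, []string{"label=keep=true", "until=24h"})
	fmt.Println(merged.ExactMatch("until", "24h")) // true: non-conflicting entry added
	fmt.Println(merged.Contains("label"))          // false: conflicting label entry skipped
}
```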
||||
|
||||
// AddPlatformFlag adds `platform` to a set of flags for API version 1.32 and later.
|
||||
func AddPlatformFlag(flags *pflag.FlagSet, target *string) {
|
||||
flags.StringVar(target, "platform", os.Getenv("DOCKER_DEFAULT_PLATFORM"), "Set platform if server is multi-platform capable")
|
||||
flags.SetAnnotation("platform", "version", []string{"1.32"})
|
||||
flags.SetAnnotation("platform", "experimental", nil)
|
||||
}
|
||||
|
||||
// ValidateOutputPath validates the output paths of the `export` and `save` commands.
|
||||
func ValidateOutputPath(path string) error {
|
||||
dir := filepath.Dir(path)
|
||||
if dir != "" && dir != "." {
|
||||
if _, err := os.Stat(dir); os.IsNotExist(err) {
|
||||
return errors.Errorf("invalid output path: directory %q does not exist", dir)
|
||||
}
|
||||
}
|
||||
// check whether `path` points to a regular file
|
||||
// (if the path exists and doesn't point to a directory)
|
||||
if fileInfo, err := os.Stat(path); !os.IsNotExist(err) {
|
||||
if fileInfo.Mode().IsDir() || fileInfo.Mode().IsRegular() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := ValidateOutputPathFileMode(fileInfo.Mode()); err != nil {
|
||||
return errors.Wrapf(err, fmt.Sprintf("invalid output path: %q must be a directory or a regular file", path))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateOutputPathFileMode validates the output paths of the `cp` command and serves as a
|
||||
// helper to `ValidateOutputPath`
|
||||
func ValidateOutputPathFileMode(fileMode os.FileMode) error {
|
||||
switch {
|
||||
case fileMode&os.ModeDevice != 0:
|
||||
return errors.New("got a device")
|
||||
case fileMode&os.ModeIrregular != 0:
|
||||
return errors.New("got an irregular file")
|
||||
}
|
||||
return nil
|
||||
}
|
136
vendor/github.com/docker/cli/cli/config/config.go
generated
vendored
Normal file
@ -0,0 +1,136 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/cli/cli/config/configfile"
|
||||
"github.com/docker/cli/cli/config/credentials"
|
||||
"github.com/docker/cli/cli/config/types"
|
||||
"github.com/docker/docker/pkg/homedir"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
// ConfigFileName is the name of config file
|
||||
ConfigFileName = "config.json"
|
||||
configFileDir = ".docker"
|
||||
oldConfigfile = ".dockercfg"
|
||||
contextsDir = "contexts"
|
||||
)
|
||||
|
||||
var (
|
||||
configDir = os.Getenv("DOCKER_CONFIG")
|
||||
)
|
||||
|
||||
func init() {
|
||||
if configDir == "" {
|
||||
configDir = filepath.Join(homedir.Get(), configFileDir)
|
||||
}
|
||||
}
|
||||
|
||||
// Dir returns the directory the configuration file is stored in
|
||||
func Dir() string {
|
||||
return configDir
|
||||
}
|
||||
|
||||
// ContextStoreDir returns the directory the docker contexts are stored in
|
||||
func ContextStoreDir() string {
|
||||
return filepath.Join(Dir(), contextsDir)
|
||||
}
|
||||
|
||||
// SetDir sets the directory the configuration file is stored in
|
||||
func SetDir(dir string) {
|
||||
configDir = filepath.Clean(dir)
|
||||
}
|
||||
|
||||
// Path returns the path to a file relative to the config dir
|
||||
func Path(p ...string) (string, error) {
|
||||
path := filepath.Join(append([]string{Dir()}, p...)...)
|
||||
if !strings.HasPrefix(path, Dir()+string(filepath.Separator)) {
|
||||
return "", errors.Errorf("path %q is outside of root config directory %q", path, Dir())
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
|
||||
// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from
|
||||
// a non-nested reader
|
||||
func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) {
|
||||
configFile := configfile.ConfigFile{
|
||||
AuthConfigs: make(map[string]types.AuthConfig),
|
||||
}
|
||||
err := configFile.LegacyLoadFromReader(configData)
|
||||
return &configFile, err
|
||||
}
|
||||
|
||||
// LoadFromReader is a convenience function that creates a ConfigFile object from
|
||||
// a reader
|
||||
func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) {
|
||||
configFile := configfile.ConfigFile{
|
||||
AuthConfigs: make(map[string]types.AuthConfig),
|
||||
}
|
||||
err := configFile.LoadFromReader(configData)
|
||||
return &configFile, err
|
||||
}
|
||||
|
||||
// Load reads the configuration files in the given directory, and sets up
|
||||
// the auth config information and returns values.
|
||||
// FIXME: use the internal golang config parser
|
||||
func Load(configDir string) (*configfile.ConfigFile, error) {
|
||||
if configDir == "" {
|
||||
configDir = Dir()
|
||||
}
|
||||
|
||||
filename := filepath.Join(configDir, ConfigFileName)
|
||||
configFile := configfile.New(filename)
|
||||
|
||||
// Try happy path first - latest config file
|
||||
if _, err := os.Stat(filename); err == nil {
|
||||
file, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return configFile, errors.Wrap(err, filename)
|
||||
}
|
||||
defer file.Close()
|
||||
err = configFile.LoadFromReader(file)
|
||||
if err != nil {
|
||||
err = errors.Wrap(err, filename)
|
||||
}
|
||||
return configFile, err
|
||||
} else if !os.IsNotExist(err) {
|
||||
// if file is there but we can't stat it for any reason other
|
||||
// than it doesn't exist then stop
|
||||
return configFile, errors.Wrap(err, filename)
|
||||
}
|
||||
|
||||
// Can't find latest config file so check for the old one
|
||||
confFile := filepath.Join(homedir.Get(), oldConfigfile)
|
||||
if _, err := os.Stat(confFile); err != nil {
|
||||
return configFile, nil //missing file is not an error
|
||||
}
|
||||
file, err := os.Open(confFile)
|
||||
if err != nil {
|
||||
return configFile, errors.Wrap(err, filename)
|
||||
}
|
||||
defer file.Close()
|
||||
err = configFile.LegacyLoadFromReader(file)
|
||||
if err != nil {
|
||||
return configFile, errors.Wrap(err, filename)
|
||||
}
|
||||
return configFile, nil
|
||||
}
|
||||
|
||||
// LoadDefaultConfigFile attempts to load the default config file and returns
|
||||
// an initialized ConfigFile struct if none is found.
|
||||
func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile {
|
||||
configFile, err := Load(Dir())
|
||||
if err != nil {
|
||||
fmt.Fprintf(stderr, "WARNING: Error loading config file: %v\n", err)
|
||||
}
|
||||
if !configFile.ContainsAuth() {
|
||||
configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore)
|
||||
}
|
||||
return configFile
|
||||
}
|
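Typical callers go through LoadDefaultConfigFile, which tolerates a missing config file and wires up a platform credential helper when no auth is stored. A minimal usage sketch:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/cli/cli/config"
)

func main() {
	// Honours DOCKER_CONFIG; falls back to ~/.docker. A missing file is not an
	// error: an empty ConfigFile is returned and, with no stored auth, a
	// platform credential helper is auto-detected.
	cf := config.LoadDefaultConfigFile(os.Stderr)

	fmt.Println("config file: ", cf.Filename)
	fmt.Println("creds store: ", cf.CredentialsStore)
	fmt.Println("contexts dir:", config.ContextStoreDir())
}
```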
383
vendor/github.com/docker/cli/cli/config/configfile/file.go
generated
vendored
Normal file
@ -0,0 +1,383 @@
|
||||
package configfile
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/cli/cli/config/credentials"
|
||||
"github.com/docker/cli/cli/config/types"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
// This constant is only used for really old config files when the
|
||||
// URL wasn't saved as part of the config file and it was just
|
||||
// assumed to be this value.
|
||||
defaultIndexServer = "https://index.docker.io/v1/"
|
||||
)
|
||||
|
||||
// ConfigFile ~/.docker/config.json file info
|
||||
type ConfigFile struct {
|
||||
AuthConfigs map[string]types.AuthConfig `json:"auths"`
|
||||
HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"`
|
||||
PsFormat string `json:"psFormat,omitempty"`
|
||||
ImagesFormat string `json:"imagesFormat,omitempty"`
|
||||
NetworksFormat string `json:"networksFormat,omitempty"`
|
||||
PluginsFormat string `json:"pluginsFormat,omitempty"`
|
||||
VolumesFormat string `json:"volumesFormat,omitempty"`
|
||||
StatsFormat string `json:"statsFormat,omitempty"`
|
||||
DetachKeys string `json:"detachKeys,omitempty"`
|
||||
CredentialsStore string `json:"credsStore,omitempty"`
|
||||
CredentialHelpers map[string]string `json:"credHelpers,omitempty"`
|
||||
Filename string `json:"-"` // Note: for internal use only
|
||||
ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"`
|
||||
ServicesFormat string `json:"servicesFormat,omitempty"`
|
||||
TasksFormat string `json:"tasksFormat,omitempty"`
|
||||
SecretFormat string `json:"secretFormat,omitempty"`
|
||||
ConfigFormat string `json:"configFormat,omitempty"`
|
||||
NodesFormat string `json:"nodesFormat,omitempty"`
|
||||
PruneFilters []string `json:"pruneFilters,omitempty"`
|
||||
Proxies map[string]ProxyConfig `json:"proxies,omitempty"`
|
||||
Experimental string `json:"experimental,omitempty"`
|
||||
StackOrchestrator string `json:"stackOrchestrator,omitempty"`
|
||||
Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"`
|
||||
CurrentContext string `json:"currentContext,omitempty"`
|
||||
CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"`
|
||||
Plugins map[string]map[string]string `json:"plugins,omitempty"`
|
||||
}
|
||||
|
||||
// ProxyConfig contains proxy configuration settings
|
||||
type ProxyConfig struct {
|
||||
HTTPProxy string `json:"httpProxy,omitempty"`
|
||||
HTTPSProxy string `json:"httpsProxy,omitempty"`
|
||||
NoProxy string `json:"noProxy,omitempty"`
|
||||
FTPProxy string `json:"ftpProxy,omitempty"`
|
||||
}
|
||||
|
||||
// KubernetesConfig contains Kubernetes orchestrator settings
|
||||
type KubernetesConfig struct {
|
||||
AllNamespaces string `json:"allNamespaces,omitempty"`
|
||||
}
|
||||
|
||||
// New initializes an empty configuration file for the given filename 'fn'
|
||||
func New(fn string) *ConfigFile {
|
||||
return &ConfigFile{
|
||||
AuthConfigs: make(map[string]types.AuthConfig),
|
||||
HTTPHeaders: make(map[string]string),
|
||||
Filename: fn,
|
||||
Plugins: make(map[string]map[string]string),
|
||||
}
|
||||
}
|
||||
|
||||
// LegacyLoadFromReader reads the non-nested configuration data given and sets up the
|
||||
// auth config information with given directory and populates the receiver object
|
||||
func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error {
|
||||
b, err := ioutil.ReadAll(configData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil {
|
||||
arr := strings.Split(string(b), "\n")
|
||||
if len(arr) < 2 {
|
||||
return errors.Errorf("The Auth config file is empty")
|
||||
}
|
||||
authConfig := types.AuthConfig{}
|
||||
origAuth := strings.Split(arr[0], " = ")
|
||||
if len(origAuth) != 2 {
|
||||
return errors.Errorf("Invalid Auth config file")
|
||||
}
|
||||
authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
authConfig.ServerAddress = defaultIndexServer
|
||||
configFile.AuthConfigs[defaultIndexServer] = authConfig
|
||||
} else {
|
||||
for k, authConfig := range configFile.AuthConfigs {
|
||||
authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
authConfig.Auth = ""
|
||||
authConfig.ServerAddress = k
|
||||
configFile.AuthConfigs[k] = authConfig
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadFromReader reads the configuration data given and sets up the auth config
|
||||
// information with given directory and populates the receiver object
|
||||
func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error {
|
||||
if err := json.NewDecoder(configData).Decode(&configFile); err != nil {
|
||||
return err
|
||||
}
|
||||
var err error
|
||||
for addr, ac := range configFile.AuthConfigs {
|
||||
ac.Username, ac.Password, err = decodeAuth(ac.Auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ac.Auth = ""
|
||||
ac.ServerAddress = addr
|
||||
configFile.AuthConfigs[addr] = ac
|
||||
}
|
||||
return checkKubernetesConfiguration(configFile.Kubernetes)
|
||||
}
|
||||
|
||||
// ContainsAuth returns whether there is authentication configured
|
||||
// in this file or not.
|
||||
func (configFile *ConfigFile) ContainsAuth() bool {
|
||||
return configFile.CredentialsStore != "" ||
|
||||
len(configFile.CredentialHelpers) > 0 ||
|
||||
len(configFile.AuthConfigs) > 0
|
||||
}
|
||||
|
||||
// GetAuthConfigs returns the mapping of repo to auth configuration
|
||||
func (configFile *ConfigFile) GetAuthConfigs() map[string]types.AuthConfig {
|
||||
return configFile.AuthConfigs
|
||||
}
|
||||
|
||||
// SaveToWriter encodes and writes out all the authorization information to
|
||||
// the given writer
|
||||
func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error {
|
||||
// Encode sensitive data into a new/temp struct
|
||||
tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs))
|
||||
for k, authConfig := range configFile.AuthConfigs {
|
||||
authCopy := authConfig
|
||||
// encode and save the authstring, while blanking out the original fields
|
||||
authCopy.Auth = encodeAuth(&authCopy)
|
||||
authCopy.Username = ""
|
||||
authCopy.Password = ""
|
||||
authCopy.ServerAddress = ""
|
||||
tmpAuthConfigs[k] = authCopy
|
||||
}
|
||||
|
||||
saveAuthConfigs := configFile.AuthConfigs
|
||||
configFile.AuthConfigs = tmpAuthConfigs
|
||||
defer func() { configFile.AuthConfigs = saveAuthConfigs }()
|
||||
|
||||
data, err := json.MarshalIndent(configFile, "", "\t")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = writer.Write(data)
|
||||
return err
|
||||
}
|
||||
|
||||
// Save encodes and writes out all the authorization information
|
||||
func (configFile *ConfigFile) Save() error {
|
||||
if configFile.Filename == "" {
|
||||
return errors.Errorf("Can't save config with empty filename")
|
||||
}
|
||||
|
||||
dir := filepath.Dir(configFile.Filename)
|
||||
if err := os.MkdirAll(dir, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
temp, err := ioutil.TempFile(dir, filepath.Base(configFile.Filename))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = configFile.SaveToWriter(temp)
|
||||
temp.Close()
|
||||
if err != nil {
|
||||
os.Remove(temp.Name())
|
||||
return err
|
||||
}
|
||||
return os.Rename(temp.Name(), configFile.Filename)
|
||||
}
|
||||
|
||||
// ParseProxyConfig computes proxy configuration by retrieving the config for the provided host and
|
||||
// then checking this against any environment variables provided to the container
|
||||
func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts map[string]*string) map[string]*string {
|
||||
var cfgKey string
|
||||
|
||||
if _, ok := configFile.Proxies[host]; !ok {
|
||||
cfgKey = "default"
|
||||
} else {
|
||||
cfgKey = host
|
||||
}
|
||||
|
||||
config := configFile.Proxies[cfgKey]
|
||||
permitted := map[string]*string{
|
||||
"HTTP_PROXY": &config.HTTPProxy,
|
||||
"HTTPS_PROXY": &config.HTTPSProxy,
|
||||
"NO_PROXY": &config.NoProxy,
|
||||
"FTP_PROXY": &config.FTPProxy,
|
||||
}
|
||||
m := runOpts
|
||||
if m == nil {
|
||||
m = make(map[string]*string)
|
||||
}
|
||||
for k := range permitted {
|
||||
if *permitted[k] == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := m[k]; !ok {
|
||||
m[k] = permitted[k]
|
||||
}
|
||||
if _, ok := m[strings.ToLower(k)]; !ok {
|
||||
m[strings.ToLower(k)] = permitted[k]
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
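ParseProxyConfig looks up the per-host proxy block (falling back to "default"), then fills in any HTTP(S)_PROXY, NO_PROXY, and FTP_PROXY variables that were not already supplied with --env, in both upper and lower case. A sketch with an illustrative proxy configuration:

```go
package main

import (
	"fmt"

	"github.com/docker/cli/cli/config/configfile"
)

func main() {
	cf := configfile.ConfigFile{
		Proxies: map[string]configfile.ProxyConfig{
			// Hypothetical proxy settings, as they would appear in config.json.
			"default": {HTTPProxy: "http://proxy.example.com:3128", NoProxy: "localhost"},
		},
	}

	// Values explicitly passed as run options win over the config file; the
	// remaining variables are populated from the selected proxy block.
	noProxyOverride := "internal.example.com"
	env := cf.ParseProxyConfig("unix:///var/run/docker.sock", map[string]*string{
		"NO_PROXY": &noProxyOverride,
	})
	for k, v := range env {
		fmt.Printf("%s=%s\n", k, *v)
	}
}
```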
||||
|
||||
// encodeAuth creates a base64 encoded string containing authorization information
|
||||
func encodeAuth(authConfig *types.AuthConfig) string {
|
||||
if authConfig.Username == "" && authConfig.Password == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
authStr := authConfig.Username + ":" + authConfig.Password
|
||||
msg := []byte(authStr)
|
||||
encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg)))
|
||||
base64.StdEncoding.Encode(encoded, msg)
|
||||
return string(encoded)
|
||||
}
|
||||
|
||||
// decodeAuth decodes a base64 encoded string and returns username and password
|
||||
func decodeAuth(authStr string) (string, string, error) {
|
||||
if authStr == "" {
|
||||
return "", "", nil
|
||||
}
|
||||
|
||||
decLen := base64.StdEncoding.DecodedLen(len(authStr))
|
||||
decoded := make([]byte, decLen)
|
||||
authByte := []byte(authStr)
|
||||
n, err := base64.StdEncoding.Decode(decoded, authByte)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
if n > decLen {
|
||||
return "", "", errors.Errorf("Something went wrong decoding auth config")
|
||||
}
|
||||
arr := strings.SplitN(string(decoded), ":", 2)
|
||||
if len(arr) != 2 {
|
||||
return "", "", errors.Errorf("Invalid auth configuration file")
|
||||
}
|
||||
password := strings.Trim(arr[1], "\x00")
|
||||
return arr[0], password, nil
|
||||
}
|
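encodeAuth and decodeAuth are unexported, but the format they implement is simply standard base64 of "username:password" stored under the "auth" key in config.json. A stdlib-only sketch of the same round trip:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// The "auth" field in config.json is plain standard base64 of "username:password",
	// as produced by encodeAuth above (no JSON, unlike the X-Registry-Auth payload).
	auth := base64.StdEncoding.EncodeToString([]byte("jane:s3cret"))
	fmt.Println(auth) // amFuZTpzM2NyZXQ=

	// decodeAuth reverses it, splitting on the first ':' only, so passwords may contain colons.
	decoded, _ := base64.StdEncoding.DecodeString(auth)
	parts := strings.SplitN(string(decoded), ":", 2)
	fmt.Println(parts[0], parts[1]) // jane s3cret
}
```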
||||
|
||||
// GetCredentialsStore returns a new credentials store from the settings in the
|
||||
// configuration file
|
||||
func (configFile *ConfigFile) GetCredentialsStore(registryHostname string) credentials.Store {
|
||||
if helper := getConfiguredCredentialStore(configFile, registryHostname); helper != "" {
|
||||
return newNativeStore(configFile, helper)
|
||||
}
|
||||
return credentials.NewFileStore(configFile)
|
||||
}
|
||||
|
||||
// var for unit testing.
|
||||
var newNativeStore = func(configFile *ConfigFile, helperSuffix string) credentials.Store {
|
||||
return credentials.NewNativeStore(configFile, helperSuffix)
|
||||
}
|
||||
|
||||
// GetAuthConfig for a repository from the credential store
|
||||
func (configFile *ConfigFile) GetAuthConfig(registryHostname string) (types.AuthConfig, error) {
|
||||
return configFile.GetCredentialsStore(registryHostname).Get(registryHostname)
|
||||
}
|
||||
|
||||
// getConfiguredCredentialStore returns the credential helper configured for the
|
||||
// given registry, the default credsStore, or the empty string if neither are
|
||||
// configured.
|
||||
func getConfiguredCredentialStore(c *ConfigFile, registryHostname string) string {
|
||||
if c.CredentialHelpers != nil && registryHostname != "" {
|
||||
if helper, exists := c.CredentialHelpers[registryHostname]; exists {
|
||||
return helper
|
||||
}
|
||||
}
|
||||
return c.CredentialsStore
|
||||
}
|
||||
|
||||
// GetAllCredentials returns all of the credentials stored in all of the
|
||||
// configured credential stores.
|
||||
func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig, error) {
|
||||
auths := make(map[string]types.AuthConfig)
|
||||
addAll := func(from map[string]types.AuthConfig) {
|
||||
for reg, ac := range from {
|
||||
auths[reg] = ac
|
||||
}
|
||||
}
|
||||
|
||||
defaultStore := configFile.GetCredentialsStore("")
|
||||
newAuths, err := defaultStore.GetAll()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
addAll(newAuths)
|
||||
|
||||
// Auth configs from a registry-specific helper should override those from the default store.
|
||||
for registryHostname := range configFile.CredentialHelpers {
|
||||
newAuth, err := configFile.GetAuthConfig(registryHostname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
auths[registryHostname] = newAuth
|
||||
}
|
||||
return auths, nil
|
||||
}
|
||||
|
||||
// GetFilename returns the file name that this config file is based on.
|
||||
func (configFile *ConfigFile) GetFilename() string {
|
||||
return configFile.Filename
|
||||
}
|
||||
|
||||
// PluginConfig retrieves the requested option for the given plugin.
|
||||
func (configFile *ConfigFile) PluginConfig(pluginname, option string) (string, bool) {
|
||||
if configFile.Plugins == nil {
|
||||
return "", false
|
||||
}
|
||||
pluginConfig, ok := configFile.Plugins[pluginname]
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
value, ok := pluginConfig[option]
|
||||
return value, ok
|
||||
}
|
||||
|
||||
// SetPluginConfig sets the option to the given value for the given
|
||||
// plugin. Passing a value of "" will remove the option. If removing
|
||||
// the final config item for a given plugin then also cleans up the
|
||||
// overall plugin entry.
|
||||
func (configFile *ConfigFile) SetPluginConfig(pluginname, option, value string) {
|
||||
if configFile.Plugins == nil {
|
||||
configFile.Plugins = make(map[string]map[string]string)
|
||||
}
|
||||
pluginConfig, ok := configFile.Plugins[pluginname]
|
||||
if !ok {
|
||||
pluginConfig = make(map[string]string)
|
||||
configFile.Plugins[pluginname] = pluginConfig
|
||||
}
|
||||
if value != "" {
|
||||
pluginConfig[option] = value
|
||||
} else {
|
||||
delete(pluginConfig, option)
|
||||
}
|
||||
if len(pluginConfig) == 0 {
|
||||
delete(configFile.Plugins, pluginname)
|
||||
}
|
||||
}
|
||||
|
||||
func checkKubernetesConfiguration(kubeConfig *KubernetesConfig) error {
|
||||
if kubeConfig == nil {
|
||||
return nil
|
||||
}
|
||||
switch kubeConfig.AllNamespaces {
|
||||
case "":
|
||||
case "enabled":
|
||||
case "disabled":
|
||||
default:
|
||||
return fmt.Errorf("invalid 'kubernetes.allNamespaces' value, should be 'enabled' or 'disabled': %s", kubeConfig.AllNamespaces)
|
||||
}
|
||||
return nil
|
||||
}
|
17
vendor/github.com/docker/cli/cli/config/credentials/credentials.go
generated
vendored
Normal file
@ -0,0 +1,17 @@
package credentials

import (
	"github.com/docker/cli/cli/config/types"
)

// Store is the interface that any credentials store must implement.
type Store interface {
	// Erase removes credentials from the store for a given server.
	Erase(serverAddress string) error
	// Get retrieves credentials from the store for a given server.
	Get(serverAddress string) (types.AuthConfig, error)
	// GetAll retrieves all the credentials from the store.
	GetAll() (map[string]types.AuthConfig, error)
	// Store saves credentials in the store.
	Store(authConfig types.AuthConfig) error
}
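Anything satisfying this four-method interface can back the CLI's credential handling. A throwaway in-memory implementation, useful in tests and not part of docker/cli:

```go
package main

import (
	"fmt"

	"github.com/docker/cli/cli/config/credentials"
	"github.com/docker/cli/cli/config/types"
)

// memoryStore is a hypothetical in-memory credentials.Store, keyed by server address.
type memoryStore map[string]types.AuthConfig

func (m memoryStore) Erase(server string) error                   { delete(m, server); return nil }
func (m memoryStore) Get(server string) (types.AuthConfig, error) { return m[server], nil }
func (m memoryStore) GetAll() (map[string]types.AuthConfig, error) { return m, nil }
func (m memoryStore) Store(ac types.AuthConfig) error {
	m[ac.ServerAddress] = ac
	return nil
}

func main() {
	var s credentials.Store = memoryStore{}
	_ = s.Store(types.AuthConfig{ServerAddress: "registry.example.com", Username: "jane"})

	ac, _ := s.Get("registry.example.com")
	fmt.Println(ac.Username) // jane
}
```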
21
vendor/github.com/docker/cli/cli/config/credentials/default_store.go
generated
vendored
Normal file
@ -0,0 +1,21 @@
package credentials

import (
	"os/exec"
)

// DetectDefaultStore returns the default credentials store for the platform if
// the store executable is available.
func DetectDefaultStore(store string) string {
	platformDefault := defaultCredentialsStore()

	// user defined or no default for platform
	if store != "" || platformDefault == "" {
		return store
	}

	if _, err := exec.LookPath(remoteCredentialsPrefix + platformDefault); err == nil {
		return platformDefault
	}
	return ""
}
5
vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go
generated
vendored
Normal file
@ -0,0 +1,5 @@
package credentials

func defaultCredentialsStore() string {
	return "osxkeychain"
}
13
vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
package credentials

import (
	"os/exec"
)

func defaultCredentialsStore() string {
	if _, err := exec.LookPath("pass"); err == nil {
		return "pass"
	}

	return "secretservice"
}
7
vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
// +build !windows,!darwin,!linux

package credentials

func defaultCredentialsStore() string {
	return ""
}
5
vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go
generated
vendored
Normal file
@ -0,0 +1,5 @@
package credentials

func defaultCredentialsStore() string {
	return "wincred"
}
81
vendor/github.com/docker/cli/cli/config/credentials/file_store.go
generated
vendored
Normal file
@ -0,0 +1,81 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/docker/cli/cli/config/types"
|
||||
)
|
||||
|
||||
type store interface {
|
||||
Save() error
|
||||
GetAuthConfigs() map[string]types.AuthConfig
|
||||
GetFilename() string
|
||||
}
|
||||
|
||||
// fileStore implements a credentials store using
|
||||
// the docker configuration file to keep the credentials in plain text.
|
||||
type fileStore struct {
|
||||
file store
|
||||
}
|
||||
|
||||
// NewFileStore creates a new file credentials store.
|
||||
func NewFileStore(file store) Store {
|
||||
return &fileStore{file: file}
|
||||
}
|
||||
|
||||
// Erase removes the given credentials from the file store.
|
||||
func (c *fileStore) Erase(serverAddress string) error {
|
||||
delete(c.file.GetAuthConfigs(), serverAddress)
|
||||
return c.file.Save()
|
||||
}
|
||||
|
||||
// Get retrieves credentials for a specific server from the file store.
|
||||
func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) {
|
||||
authConfig, ok := c.file.GetAuthConfigs()[serverAddress]
|
||||
if !ok {
|
||||
// Maybe they have a legacy config file, we will iterate the keys converting
|
||||
// them to the new format and testing
|
||||
for r, ac := range c.file.GetAuthConfigs() {
|
||||
if serverAddress == ConvertToHostname(r) {
|
||||
return ac, nil
|
||||
}
|
||||
}
|
||||
|
||||
authConfig = types.AuthConfig{}
|
||||
}
|
||||
return authConfig, nil
|
||||
}
|
||||
|
||||
func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) {
|
||||
return c.file.GetAuthConfigs(), nil
|
||||
}
|
||||
|
||||
// Store saves the given credentials in the file store.
|
||||
func (c *fileStore) Store(authConfig types.AuthConfig) error {
|
||||
c.file.GetAuthConfigs()[authConfig.ServerAddress] = authConfig
|
||||
return c.file.Save()
|
||||
}
|
||||
|
||||
func (c *fileStore) GetFilename() string {
|
||||
return c.file.GetFilename()
|
||||
}
|
||||
|
||||
func (c *fileStore) IsFileStore() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// ConvertToHostname converts a registry url which has http|https prepended
|
||||
// to just a hostname.
|
||||
// Copied from github.com/docker/docker/registry.ConvertToHostname to reduce dependencies.
|
||||
func ConvertToHostname(url string) string {
|
||||
stripped := url
|
||||
if strings.HasPrefix(url, "http://") {
|
||||
stripped = strings.TrimPrefix(url, "http://")
|
||||
} else if strings.HasPrefix(url, "https://") {
|
||||
stripped = strings.TrimPrefix(url, "https://")
|
||||
}
|
||||
|
||||
nameParts := strings.SplitN(stripped, "/", 2)
|
||||
|
||||
return nameParts[0]
|
||||
}
|
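ConvertToHostname is what makes lookups work against legacy config.json keys that still carry a scheme or path. For example:

```go
package main

import (
	"fmt"

	"github.com/docker/cli/cli/config/credentials"
)

func main() {
	// Legacy config.json keys may include a scheme and path; lookups normalize them.
	fmt.Println(credentials.ConvertToHostname("https://index.docker.io/v1/"))    // index.docker.io
	fmt.Println(credentials.ConvertToHostname("registry.example.com:5000/v2"))   // registry.example.com:5000
}
```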
143
vendor/github.com/docker/cli/cli/config/credentials/native_store.go
generated
vendored
Normal file
@ -0,0 +1,143 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"github.com/docker/cli/cli/config/types"
|
||||
"github.com/docker/docker-credential-helpers/client"
|
||||
"github.com/docker/docker-credential-helpers/credentials"
|
||||
)
|
||||
|
||||
const (
|
||||
remoteCredentialsPrefix = "docker-credential-"
|
||||
tokenUsername = "<token>"
|
||||
)
|
||||
|
||||
// nativeStore implements a credentials store
|
||||
// using native keychain to keep credentials secure.
|
||||
// It piggybacks into a file store to keep users' emails.
|
||||
type nativeStore struct {
|
||||
programFunc client.ProgramFunc
|
||||
fileStore Store
|
||||
}
|
||||
|
||||
// NewNativeStore creates a new native store that
|
||||
// uses a remote helper program to manage credentials.
|
||||
func NewNativeStore(file store, helperSuffix string) Store {
|
||||
name := remoteCredentialsPrefix + helperSuffix
|
||||
return &nativeStore{
|
||||
programFunc: client.NewShellProgramFunc(name),
|
||||
fileStore: NewFileStore(file),
|
||||
}
|
||||
}
|
||||
|
||||
// Erase removes the given credentials from the native store.
|
||||
func (c *nativeStore) Erase(serverAddress string) error {
|
||||
if err := client.Erase(c.programFunc, serverAddress); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Fallback to plain text store to remove email
|
||||
return c.fileStore.Erase(serverAddress)
|
||||
}
|
||||
|
||||
// Get retrieves credentials for a specific server from the native store.
|
||||
func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) {
|
||||
// load the user email if it exists, or an empty auth config.
|
||||
auth, _ := c.fileStore.Get(serverAddress)
|
||||
|
||||
creds, err := c.getCredentialsFromStore(serverAddress)
|
||||
if err != nil {
|
||||
return auth, err
|
||||
}
|
||||
auth.Username = creds.Username
|
||||
auth.IdentityToken = creds.IdentityToken
|
||||
auth.Password = creds.Password
|
||||
|
||||
return auth, nil
|
||||
}
|
||||
|
||||
// GetAll retrieves all the credentials from the native store.
|
||||
func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) {
|
||||
auths, err := c.listCredentialsInStore()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Emails are only stored in the file store.
|
||||
// This call can be safely eliminated when emails are removed.
|
||||
fileConfigs, _ := c.fileStore.GetAll()
|
||||
|
||||
authConfigs := make(map[string]types.AuthConfig)
|
||||
for registry := range auths {
|
||||
creds, err := c.getCredentialsFromStore(registry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ac := fileConfigs[registry] // might contain Email
|
||||
ac.Username = creds.Username
|
||||
ac.Password = creds.Password
|
||||
ac.IdentityToken = creds.IdentityToken
|
||||
authConfigs[registry] = ac
|
||||
}
|
||||
|
||||
return authConfigs, nil
|
||||
}
|
||||
|
||||
// Store saves the given credentials in the file store.
|
||||
func (c *nativeStore) Store(authConfig types.AuthConfig) error {
|
||||
if err := c.storeCredentialsInStore(authConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
authConfig.Username = ""
|
||||
authConfig.Password = ""
|
||||
authConfig.IdentityToken = ""
|
||||
|
||||
// Fallback to old credential in plain text to save only the email
|
||||
return c.fileStore.Store(authConfig)
|
||||
}
|
||||
|
||||
// storeCredentialsInStore executes the command to store the credentials in the native store.
|
||||
func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error {
|
||||
creds := &credentials.Credentials{
|
||||
ServerURL: config.ServerAddress,
|
||||
Username: config.Username,
|
||||
Secret: config.Password,
|
||||
}
|
||||
|
||||
if config.IdentityToken != "" {
|
||||
creds.Username = tokenUsername
|
||||
creds.Secret = config.IdentityToken
|
||||
}
|
||||
|
||||
return client.Store(c.programFunc, creds)
|
||||
}
|
||||
|
||||
// getCredentialsFromStore executes the command to get the credentials from the native store.
|
||||
func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) {
|
||||
var ret types.AuthConfig
|
||||
|
||||
creds, err := client.Get(c.programFunc, serverAddress)
|
||||
if err != nil {
|
||||
if credentials.IsErrCredentialsNotFound(err) {
|
||||
// do not return an error if the credentials are not
|
||||
// in the keychain. Let docker ask for new credentials.
|
||||
return ret, nil
|
||||
}
|
||||
return ret, err
|
||||
}
|
||||
|
||||
if creds.Username == tokenUsername {
|
||||
ret.IdentityToken = creds.Secret
|
||||
} else {
|
||||
ret.Password = creds.Secret
|
||||
ret.Username = creds.Username
|
||||
}
|
||||
|
||||
ret.ServerAddress = serverAddress
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// listCredentialsInStore returns a listing of stored credentials as a map of
|
||||
// URL -> username.
|
||||
func (c *nativeStore) listCredentialsInStore() (map[string]string, error) {
|
||||
return client.List(c.programFunc)
|
||||
}
|
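Under the hood the native store speaks the docker-credential-helpers protocol: it shells out to a docker-credential-&lt;suffix&gt; binary for store, get, erase, and list. A direct sketch against that client package (the "pass" helper is just an example and must exist on PATH):

```go
package main

import (
	"fmt"

	"github.com/docker/docker-credential-helpers/client"
	"github.com/docker/docker-credential-helpers/credentials"
)

func main() {
	// The native store runs an external "docker-credential-<helper>" binary.
	p := client.NewShellProgramFunc("docker-credential-pass")

	// Store a credential; registry address and secret are illustrative.
	_ = client.Store(p, &credentials.Credentials{
		ServerURL: "registry.example.com",
		Username:  "jane",
		Secret:    "s3cret",
	})

	creds, err := client.Get(p, "registry.example.com")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println(creds.Username, creds.Secret)
}
```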
22
vendor/github.com/docker/cli/cli/config/types/authconfig.go
generated
vendored
Normal file
@ -0,0 +1,22 @@
package types

// AuthConfig contains authorization information for connecting to a Registry
type AuthConfig struct {
	Username string `json:"username,omitempty"`
	Password string `json:"password,omitempty"`
	Auth     string `json:"auth,omitempty"`

	// Email is an optional value associated with the username.
	// This field is deprecated and will be removed in a later
	// version of docker.
	Email string `json:"email,omitempty"`

	ServerAddress string `json:"serveraddress,omitempty"`

	// IdentityToken is used to authenticate the user and get
	// an access token for the registry.
	IdentityToken string `json:"identitytoken,omitempty"`

	// RegistryToken is a bearer token to be sent to a registry
	RegistryToken string `json:"registrytoken,omitempty"`
}
281
vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go
generated
vendored
Normal file
@ -0,0 +1,281 @@
|
||||
// Package commandconn provides a net.Conn implementation that can be used for
|
||||
// proxying (or emulating) stream via a custom command.
|
||||
//
|
||||
// For example, to provide an http.Client that can connect to a Docker daemon
|
||||
// running in a Docker container ("DIND"):
|
||||
//
|
||||
// httpClient := &http.Client{
|
||||
// Transport: &http.Transport{
|
||||
// DialContext: func(ctx context.Context, _network, _addr string) (net.Conn, error) {
|
||||
// return commandconn.New(ctx, "docker", "exec", "-it", containerID, "docker", "system", "dial-stdio")
|
||||
// },
|
||||
// },
|
||||
// }
|
||||
package commandconn
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// New returns net.Conn
|
||||
func New(ctx context.Context, cmd string, args ...string) (net.Conn, error) {
|
||||
var (
|
||||
c commandConn
|
||||
err error
|
||||
)
|
||||
c.cmd = exec.CommandContext(ctx, cmd, args...)
|
||||
// we assume that args never contains sensitive information
|
||||
logrus.Debugf("commandconn: starting %s with %v", cmd, args)
|
||||
c.cmd.Env = os.Environ()
|
||||
setPdeathsig(c.cmd)
|
||||
c.stdin, err = c.cmd.StdinPipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.stdout, err = c.cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.cmd.Stderr = &stderrWriter{
|
||||
stderrMu: &c.stderrMu,
|
||||
stderr: &c.stderr,
|
||||
debugPrefix: fmt.Sprintf("commandconn (%s):", cmd),
|
||||
}
|
||||
c.localAddr = dummyAddr{network: "dummy", s: "dummy-0"}
|
||||
c.remoteAddr = dummyAddr{network: "dummy", s: "dummy-1"}
|
||||
return &c, c.cmd.Start()
|
||||
}
|
||||
|
||||
// commandConn implements net.Conn
|
||||
type commandConn struct {
|
||||
cmd *exec.Cmd
|
||||
cmdExited bool
|
||||
cmdWaitErr error
|
||||
cmdMutex sync.Mutex
|
||||
stdin io.WriteCloser
|
||||
stdout io.ReadCloser
|
||||
stderrMu sync.Mutex
|
||||
stderr bytes.Buffer
|
||||
stdioClosedMu sync.Mutex // for stdinClosed and stdoutClosed
|
||||
stdinClosed bool
|
||||
stdoutClosed bool
|
||||
localAddr net.Addr
|
||||
remoteAddr net.Addr
|
||||
}
|
||||
|
||||
// killIfStdioClosed kills the cmd if both stdin and stdout are closed.
|
||||
func (c *commandConn) killIfStdioClosed() error {
|
||||
c.stdioClosedMu.Lock()
|
||||
stdioClosed := c.stdoutClosed && c.stdinClosed
|
||||
c.stdioClosedMu.Unlock()
|
||||
if !stdioClosed {
|
||||
return nil
|
||||
}
|
||||
return c.kill()
|
||||
}
|
||||
|
||||
// killAndWait tries sending SIGTERM to the process before sending SIGKILL.
|
||||
func killAndWait(cmd *exec.Cmd) error {
|
||||
var werr error
|
||||
if runtime.GOOS != "windows" {
|
||||
werrCh := make(chan error)
|
||||
go func() { werrCh <- cmd.Wait() }()
|
||||
cmd.Process.Signal(syscall.SIGTERM)
|
||||
select {
|
||||
case werr = <-werrCh:
|
||||
case <-time.After(3 * time.Second):
|
||||
cmd.Process.Kill()
|
||||
werr = <-werrCh
|
||||
}
|
||||
} else {
|
||||
cmd.Process.Kill()
|
||||
werr = cmd.Wait()
|
||||
}
|
||||
return werr
|
||||
}
|
||||
|
||||
// kill returns nil if the command terminated, regardless of the exit status.
|
||||
func (c *commandConn) kill() error {
|
||||
var werr error
|
||||
c.cmdMutex.Lock()
|
||||
if c.cmdExited {
|
||||
werr = c.cmdWaitErr
|
||||
} else {
|
||||
werr = killAndWait(c.cmd)
|
||||
c.cmdWaitErr = werr
|
||||
c.cmdExited = true
|
||||
}
|
||||
c.cmdMutex.Unlock()
|
||||
if werr == nil {
|
||||
return nil
|
||||
}
|
||||
wExitErr, ok := werr.(*exec.ExitError)
|
||||
if ok {
|
||||
if wExitErr.ProcessState.Exited() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return errors.Wrapf(werr, "commandconn: failed to wait")
|
||||
}
|
||||
|
||||
func (c *commandConn) onEOF(eof error) error {
|
||||
// when we got EOF, the command is going to be terminated
|
||||
var werr error
|
||||
c.cmdMutex.Lock()
|
||||
if c.cmdExited {
|
||||
werr = c.cmdWaitErr
|
||||
} else {
|
||||
werrCh := make(chan error)
|
||||
go func() { werrCh <- c.cmd.Wait() }()
|
||||
select {
|
||||
case werr = <-werrCh:
|
||||
c.cmdWaitErr = werr
|
||||
c.cmdExited = true
|
||||
case <-time.After(10 * time.Second):
|
||||
c.cmdMutex.Unlock()
|
||||
c.stderrMu.Lock()
|
||||
stderr := c.stderr.String()
|
||||
c.stderrMu.Unlock()
|
||||
return errors.Errorf("command %v did not exit after %v: stderr=%q", c.cmd.Args, eof, stderr)
|
||||
}
|
||||
}
|
||||
c.cmdMutex.Unlock()
|
||||
if werr == nil {
|
||||
return eof
|
||||
}
|
||||
c.stderrMu.Lock()
|
||||
stderr := c.stderr.String()
|
||||
c.stderrMu.Unlock()
|
||||
return errors.Errorf("command %v has exited with %v, please make sure the URL is valid, and Docker 18.09 or later is installed on the remote host: stderr=%s", c.cmd.Args, werr, stderr)
|
||||
}
|
||||
|
||||
func ignorableCloseError(err error) bool {
|
||||
errS := err.Error()
|
||||
ss := []string{
|
||||
os.ErrClosed.Error(),
|
||||
}
|
||||
for _, s := range ss {
|
||||
if strings.Contains(errS, s) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *commandConn) CloseRead() error {
|
||||
// NOTE: maybe already closed here
|
||||
if err := c.stdout.Close(); err != nil && !ignorableCloseError(err) {
|
||||
logrus.Warnf("commandConn.CloseRead: %v", err)
|
||||
}
|
||||
c.stdioClosedMu.Lock()
|
||||
c.stdoutClosed = true
|
||||
c.stdioClosedMu.Unlock()
|
||||
if err := c.killIfStdioClosed(); err != nil {
|
||||
logrus.Warnf("commandConn.CloseRead: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *commandConn) Read(p []byte) (int, error) {
|
||||
n, err := c.stdout.Read(p)
|
||||
if err == io.EOF {
|
||||
err = c.onEOF(err)
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (c *commandConn) CloseWrite() error {
|
||||
// NOTE: maybe already closed here
|
||||
if err := c.stdin.Close(); err != nil && !ignorableCloseError(err) {
|
||||
logrus.Warnf("commandConn.CloseWrite: %v", err)
|
||||
}
|
||||
c.stdioClosedMu.Lock()
|
||||
c.stdinClosed = true
|
||||
c.stdioClosedMu.Unlock()
|
||||
if err := c.killIfStdioClosed(); err != nil {
|
||||
logrus.Warnf("commandConn.CloseWrite: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *commandConn) Write(p []byte) (int, error) {
|
||||
n, err := c.stdin.Write(p)
|
||||
if err == io.EOF {
|
||||
err = c.onEOF(err)
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (c *commandConn) Close() error {
|
||||
var err error
|
||||
if err = c.CloseRead(); err != nil {
|
||||
logrus.Warnf("commandConn.Close: CloseRead: %v", err)
|
||||
}
|
||||
if err = c.CloseWrite(); err != nil {
|
||||
logrus.Warnf("commandConn.Close: CloseWrite: %v", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *commandConn) LocalAddr() net.Addr {
|
||||
return c.localAddr
|
||||
}
|
||||
func (c *commandConn) RemoteAddr() net.Addr {
|
||||
return c.remoteAddr
|
||||
}
|
||||
func (c *commandConn) SetDeadline(t time.Time) error {
|
||||
logrus.Debugf("unimplemented call: SetDeadline(%v)", t)
|
||||
return nil
|
||||
}
|
||||
func (c *commandConn) SetReadDeadline(t time.Time) error {
|
||||
logrus.Debugf("unimplemented call: SetReadDeadline(%v)", t)
|
||||
return nil
|
||||
}
|
||||
func (c *commandConn) SetWriteDeadline(t time.Time) error {
|
||||
logrus.Debugf("unimplemented call: SetWriteDeadline(%v)", t)
|
||||
return nil
|
||||
}
|
||||
|
||||
type dummyAddr struct {
|
||||
network string
|
||||
s string
|
||||
}
|
||||
|
||||
func (d dummyAddr) Network() string {
|
||||
return d.network
|
||||
}
|
||||
|
||||
func (d dummyAddr) String() string {
|
||||
return d.s
|
||||
}
|
||||
|
||||
type stderrWriter struct {
|
||||
stderrMu *sync.Mutex
|
||||
stderr *bytes.Buffer
|
||||
debugPrefix string
|
||||
}
|
||||
|
||||
func (w *stderrWriter) Write(p []byte) (int, error) {
|
||||
logrus.Debugf("%s%s", w.debugPrefix, string(p))
|
||||
w.stderrMu.Lock()
|
||||
if w.stderr.Len() > 4096 {
|
||||
w.stderr.Reset()
|
||||
}
|
||||
n, err := w.stderr.Write(p)
|
||||
w.stderrMu.Unlock()
|
||||
return n, err
|
||||
}
|
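To make the net.Conn semantics above concrete, here is a hedged sketch (not from the vendored sources) that wraps a plain cat process as a connection: whatever is written to the conn comes back on Read, and Close exercises the kill/wait path shown above. It assumes a Unix-like host with cat on PATH.

package main

import (
    "context"
    "fmt"

    "github.com/docker/cli/cli/connhelper/commandconn"
)

func main() {
    // cat copies stdin to stdout, so the command behaves like a loopback pipe.
    conn, err := commandconn.New(context.Background(), "cat")
    if err != nil {
        panic(err)
    }
    defer conn.Close()

    if _, err := conn.Write([]byte("ping\n")); err != nil {
        panic(err)
    }
    buf := make([]byte, 5)
    n, err := conn.Read(buf)
    if err != nil {
        panic(err)
    }
    fmt.Printf("echoed: %q\n", buf[:n])
}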
12
vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn_linux.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
package commandconn

import (
    "os/exec"
    "syscall"
)

func setPdeathsig(cmd *exec.Cmd) {
    cmd.SysProcAttr = &syscall.SysProcAttr{
        Pdeathsig: syscall.SIGKILL,
    }
}
10
vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn_nolinux.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
// +build !linux

package commandconn

import (
    "os/exec"
)

func setPdeathsig(cmd *exec.Cmd) {
}
55
vendor/github.com/docker/cli/cli/connhelper/connhelper.go
generated
vendored
Normal file
@ -0,0 +1,55 @@
// Package connhelper provides helpers for connecting to a remote daemon host with custom logic.
package connhelper

import (
    "context"
    "net"
    "net/url"

    "github.com/docker/cli/cli/connhelper/commandconn"
    "github.com/docker/cli/cli/connhelper/ssh"
    "github.com/pkg/errors"
)

// ConnectionHelper allows connecting to a remote host through a custom stream-provider binary.
type ConnectionHelper struct {
    Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
    Host   string // dummy URL used for HTTP requests. e.g. "http://docker"
}

// GetConnectionHelper returns a Docker-specific connection helper for the given URL.
// It returns nil without an error when no helper is registered for the scheme.
//
// ssh://<user>@<host> URL requires Docker 18.09 or later on the remote host.
func GetConnectionHelper(daemonURL string) (*ConnectionHelper, error) {
    u, err := url.Parse(daemonURL)
    if err != nil {
        return nil, err
    }
    switch scheme := u.Scheme; scheme {
    case "ssh":
        sp, err := ssh.ParseURL(daemonURL)
        if err != nil {
            return nil, errors.Wrap(err, "ssh host connection is not valid")
        }
        return &ConnectionHelper{
            Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) {
                return commandconn.New(ctx, "ssh", append(sp.Args(), []string{"--", "docker", "system", "dial-stdio"}...)...)
            },
            Host: "http://docker",
        }, nil
    }
    // A future version may support plugins via ~/.docker/config.json. e.g. "dind"
    // See docker/cli#889 for the previous discussion.
    return nil, err
}

// GetCommandConnectionHelper returns a Docker-specific connection helper constructed from an arbitrary command.
func GetCommandConnectionHelper(cmd string, flags ...string) (*ConnectionHelper, error) {
    return &ConnectionHelper{
        Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) {
            return commandconn.New(ctx, cmd, flags...)
        },
        Host: "http://docker",
    }, nil
}
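As a usage sketch (assumptions: the remote runs Docker 18.09+ and client.NewClientWithOpts from github.com/docker/docker/client is available, as used elsewhere in this vendor drop), GetConnectionHelper plugs into the Docker API client the same way docker/load.go below wires it:

package example

import (
    "net/http"

    "github.com/docker/cli/cli/connhelper"
    "github.com/docker/docker/client"
)

// newSSHClient builds an API client that tunnels over "ssh <host> -- docker system dial-stdio".
func newSSHClient(daemonURL string) (*client.Client, error) {
    helper, err := connhelper.GetConnectionHelper(daemonURL) // e.g. "ssh://me@example.com"
    if err != nil {
        return nil, err
    }
    httpClient := &http.Client{
        Transport: &http.Transport{DialContext: helper.Dialer},
    }
    return client.NewClientWithOpts(
        client.WithHTTPClient(httpClient),
        client.WithHost(helper.Host),
        client.WithDialContext(helper.Dialer),
    )
}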
63
vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go
generated
vendored
Normal file
@ -0,0 +1,63 @@
|
||||
// Package ssh provides the connection helper for ssh:// URL.
|
||||
package ssh
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ParseURL parses an ssh:// daemon URL into a Spec
|
||||
func ParseURL(daemonURL string) (*Spec, error) {
|
||||
u, err := url.Parse(daemonURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if u.Scheme != "ssh" {
|
||||
return nil, errors.Errorf("expected scheme ssh, got %q", u.Scheme)
|
||||
}
|
||||
|
||||
var sp Spec
|
||||
|
||||
if u.User != nil {
|
||||
sp.User = u.User.Username()
|
||||
if _, ok := u.User.Password(); ok {
|
||||
return nil, errors.New("plain-text password is not supported")
|
||||
}
|
||||
}
|
||||
sp.Host = u.Hostname()
|
||||
if sp.Host == "" {
|
||||
return nil, errors.Errorf("no host specified")
|
||||
}
|
||||
sp.Port = u.Port()
|
||||
if u.Path != "" {
|
||||
return nil, errors.Errorf("extra path after the host: %q", u.Path)
|
||||
}
|
||||
if u.RawQuery != "" {
|
||||
return nil, errors.Errorf("extra query after the host: %q", u.RawQuery)
|
||||
}
|
||||
if u.Fragment != "" {
|
||||
return nil, errors.Errorf("extra fragment after the host: %q", u.Fragment)
|
||||
}
|
||||
return &sp, err
|
||||
}
|
||||
|
||||
// Spec of SSH URL
|
||||
type Spec struct {
|
||||
User string
|
||||
Host string
|
||||
Port string
|
||||
}
|
||||
|
||||
// Args returns args except "ssh" itself and "-- ..."
|
||||
func (sp *Spec) Args() []string {
|
||||
var args []string
|
||||
if sp.User != "" {
|
||||
args = append(args, "-l", sp.User)
|
||||
}
|
||||
if sp.Port != "" {
|
||||
args = append(args, "-p", sp.Port)
|
||||
}
|
||||
args = append(args, sp.Host)
|
||||
return args
|
||||
}
|
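A quick hedged illustration of ParseURL and Args with a made-up host: the Spec above is turned into the argument vector that connhelper prepends to "-- docker system dial-stdio".

package main

import (
    "fmt"

    "github.com/docker/cli/cli/connhelper/ssh"
)

func main() {
    sp, err := ssh.ParseURL("ssh://me@example.com:2222")
    if err != nil {
        panic(err)
    }
    // Prints: [-l me -p 2222 example.com]
    fmt.Println(sp.Args())
}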
6
vendor/github.com/docker/cli/cli/context/docker/constants.go
generated
vendored
Normal file
@ -0,0 +1,6 @@
package docker

const (
    // DockerEndpoint is the name of the docker endpoint in a stored context
    DockerEndpoint = "docker"
)
166
vendor/github.com/docker/cli/cli/context/docker/load.go
generated
vendored
Normal file
@ -0,0 +1,166 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/docker/cli/cli/connhelper"
|
||||
"github.com/docker/cli/cli/context"
|
||||
"github.com/docker/cli/cli/context/store"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/go-connections/tlsconfig"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// EndpointMeta is a typed wrapper around a context-store generic endpoint describing
|
||||
// a Docker Engine endpoint, without its tls config
|
||||
type EndpointMeta = context.EndpointMetaBase
|
||||
|
||||
// Endpoint is a typed wrapper around a context-store generic endpoint describing
|
||||
// a Docker Engine endpoint, with its tls data
|
||||
type Endpoint struct {
|
||||
EndpointMeta
|
||||
TLSData *context.TLSData
|
||||
TLSPassword string
|
||||
}
|
||||
|
||||
// WithTLSData loads TLS materials for the endpoint
|
||||
func WithTLSData(s store.Store, contextName string, m EndpointMeta) (Endpoint, error) {
|
||||
tlsData, err := context.LoadTLSData(s, contextName, DockerEndpoint)
|
||||
if err != nil {
|
||||
return Endpoint{}, err
|
||||
}
|
||||
return Endpoint{
|
||||
EndpointMeta: m,
|
||||
TLSData: tlsData,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// tlsConfig extracts a context docker endpoint TLS config
|
||||
func (c *Endpoint) tlsConfig() (*tls.Config, error) {
|
||||
if c.TLSData == nil && !c.SkipTLSVerify {
|
||||
// there is no specific tls config
|
||||
return nil, nil
|
||||
}
|
||||
var tlsOpts []func(*tls.Config)
|
||||
if c.TLSData != nil && c.TLSData.CA != nil {
|
||||
certPool := x509.NewCertPool()
|
||||
if !certPool.AppendCertsFromPEM(c.TLSData.CA) {
|
||||
return nil, errors.New("failed to retrieve context tls info: ca.pem seems invalid")
|
||||
}
|
||||
tlsOpts = append(tlsOpts, func(cfg *tls.Config) {
|
||||
cfg.RootCAs = certPool
|
||||
})
|
||||
}
|
||||
if c.TLSData != nil && c.TLSData.Key != nil && c.TLSData.Cert != nil {
|
||||
keyBytes := c.TLSData.Key
|
||||
pemBlock, _ := pem.Decode(keyBytes)
|
||||
if pemBlock == nil {
|
||||
return nil, fmt.Errorf("no valid private key found")
|
||||
}
|
||||
|
||||
var err error
|
||||
if x509.IsEncryptedPEMBlock(pemBlock) {
|
||||
keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(c.TLSPassword))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it")
|
||||
}
|
||||
keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes})
|
||||
}
|
||||
|
||||
x509cert, err := tls.X509KeyPair(c.TLSData.Cert, keyBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to retrieve context tls info")
|
||||
}
|
||||
tlsOpts = append(tlsOpts, func(cfg *tls.Config) {
|
||||
cfg.Certificates = []tls.Certificate{x509cert}
|
||||
})
|
||||
}
|
||||
if c.SkipTLSVerify {
|
||||
tlsOpts = append(tlsOpts, func(cfg *tls.Config) {
|
||||
cfg.InsecureSkipVerify = true
|
||||
})
|
||||
}
|
||||
return tlsconfig.ClientDefault(tlsOpts...), nil
|
||||
}
|
||||
|
||||
// ClientOpts returns a slice of Client options to configure an API client with this endpoint
|
||||
func (c *Endpoint) ClientOpts() ([]func(*client.Client) error, error) {
|
||||
var result []func(*client.Client) error
|
||||
if c.Host != "" {
|
||||
helper, err := connhelper.GetConnectionHelper(c.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if helper == nil {
|
||||
tlsConfig, err := c.tlsConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result = append(result,
|
||||
client.WithHost(c.Host),
|
||||
withHTTPClient(tlsConfig),
|
||||
)
|
||||
|
||||
} else {
|
||||
httpClient := &http.Client{
|
||||
// No tls
|
||||
// No proxy
|
||||
Transport: &http.Transport{
|
||||
DialContext: helper.Dialer,
|
||||
},
|
||||
}
|
||||
result = append(result,
|
||||
client.WithHTTPClient(httpClient),
|
||||
client.WithHost(helper.Host),
|
||||
client.WithDialContext(helper.Dialer),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
version := os.Getenv("DOCKER_API_VERSION")
|
||||
if version != "" {
|
||||
result = append(result, client.WithVersion(version))
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func withHTTPClient(tlsConfig *tls.Config) func(*client.Client) error {
|
||||
return func(c *client.Client) error {
|
||||
if tlsConfig == nil {
|
||||
// Use the default HTTPClient
|
||||
return nil
|
||||
}
|
||||
|
||||
httpClient := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: tlsConfig,
|
||||
DialContext: (&net.Dialer{
|
||||
KeepAlive: 30 * time.Second,
|
||||
Timeout: 30 * time.Second,
|
||||
}).DialContext,
|
||||
},
|
||||
CheckRedirect: client.CheckRedirect,
|
||||
}
|
||||
return client.WithHTTPClient(httpClient)(c)
|
||||
}
|
||||
}
|
||||
|
||||
// EndpointFromContext parses a context docker endpoint metadata into a typed EndpointMeta structure
|
||||
func EndpointFromContext(metadata store.ContextMetadata) (EndpointMeta, error) {
|
||||
ep, ok := metadata.Endpoints[DockerEndpoint]
|
||||
if !ok {
|
||||
return EndpointMeta{}, errors.New("cannot find docker endpoint in context")
|
||||
}
|
||||
typed, ok := ep.(EndpointMeta)
|
||||
if !ok {
|
||||
return EndpointMeta{}, errors.Errorf("endpoint %q is not of type EndpointMeta", DockerEndpoint)
|
||||
}
|
||||
return typed, nil
|
||||
}
|
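Putting the pieces of load.go together, a hedged sketch of resolving a stored context into an API client; the store and context name are assumed inputs and the helper name is illustrative only.

package example

import (
    "github.com/docker/cli/cli/context/docker"
    "github.com/docker/cli/cli/context/store"
    "github.com/docker/docker/client"
)

// clientForContext resolves the docker endpoint stored for a context and
// turns it into an API client, following the ClientOpts flow above.
func clientForContext(s store.Store, name string) (*client.Client, error) {
    meta, err := s.GetContextMetadata(name)
    if err != nil {
        return nil, err
    }
    epMeta, err := docker.EndpointFromContext(meta)
    if err != nil {
        return nil, err
    }
    ep, err := docker.WithTLSData(s, name, epMeta)
    if err != nil {
        return nil, err
    }
    opts, err := ep.ClientOpts()
    if err != nil {
        return nil, err
    }
    return client.NewClientWithOpts(opts...)
}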
7
vendor/github.com/docker/cli/cli/context/endpoint.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
package context

// EndpointMetaBase contains fields we expect to be common for most context endpoints
type EndpointMetaBase struct {
    Host          string `json:",omitempty"`
    SkipTLSVerify bool
}
6
vendor/github.com/docker/cli/cli/context/kubernetes/constants.go
generated
vendored
Normal file
@ -0,0 +1,6 @@
package kubernetes

const (
    // KubernetesEndpoint is the kubernetes endpoint name in a stored context
    KubernetesEndpoint = "kubernetes"
)
95
vendor/github.com/docker/cli/cli/context/kubernetes/load.go
generated
vendored
Normal file
@ -0,0 +1,95 @@
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
"github.com/docker/cli/cli/context"
|
||||
"github.com/docker/cli/cli/context/store"
|
||||
"github.com/docker/cli/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
)
|
||||
|
||||
// EndpointMeta is a typed wrapper around a context-store generic endpoint describing
|
||||
// a Kubernetes endpoint, without TLS data
|
||||
type EndpointMeta struct {
|
||||
context.EndpointMetaBase
|
||||
DefaultNamespace string `json:",omitempty"`
|
||||
AuthProvider *clientcmdapi.AuthProviderConfig `json:",omitempty"`
|
||||
Exec *clientcmdapi.ExecConfig `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Endpoint is a typed wrapper around a context-store generic endpoint describing
|
||||
// a Kubernetes endpoint, with TLS data
|
||||
type Endpoint struct {
|
||||
EndpointMeta
|
||||
TLSData *context.TLSData
|
||||
}
|
||||
|
||||
// WithTLSData loads TLS materials for the endpoint
|
||||
func (c *EndpointMeta) WithTLSData(s store.Store, contextName string) (Endpoint, error) {
|
||||
tlsData, err := context.LoadTLSData(s, contextName, KubernetesEndpoint)
|
||||
if err != nil {
|
||||
return Endpoint{}, err
|
||||
}
|
||||
return Endpoint{
|
||||
EndpointMeta: *c,
|
||||
TLSData: tlsData,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// KubernetesConfig creates the kubernetes client config from the endpoint
|
||||
func (c *Endpoint) KubernetesConfig() clientcmd.ClientConfig {
|
||||
cfg := clientcmdapi.NewConfig()
|
||||
cluster := clientcmdapi.NewCluster()
|
||||
cluster.Server = c.Host
|
||||
cluster.InsecureSkipTLSVerify = c.SkipTLSVerify
|
||||
authInfo := clientcmdapi.NewAuthInfo()
|
||||
if c.TLSData != nil {
|
||||
cluster.CertificateAuthorityData = c.TLSData.CA
|
||||
authInfo.ClientCertificateData = c.TLSData.Cert
|
||||
authInfo.ClientKeyData = c.TLSData.Key
|
||||
}
|
||||
authInfo.AuthProvider = c.AuthProvider
|
||||
authInfo.Exec = c.Exec
|
||||
cfg.Clusters["cluster"] = cluster
|
||||
cfg.AuthInfos["authInfo"] = authInfo
|
||||
ctx := clientcmdapi.NewContext()
|
||||
ctx.AuthInfo = "authInfo"
|
||||
ctx.Cluster = "cluster"
|
||||
ctx.Namespace = c.DefaultNamespace
|
||||
cfg.Contexts["context"] = ctx
|
||||
cfg.CurrentContext = "context"
|
||||
return clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{})
|
||||
}
|
||||
|
||||
// EndpointFromContext extracts kubernetes endpoint info from current context
|
||||
func EndpointFromContext(metadata store.ContextMetadata) *EndpointMeta {
|
||||
ep, ok := metadata.Endpoints[KubernetesEndpoint]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
typed, ok := ep.(EndpointMeta)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return &typed
|
||||
}
|
||||
|
||||
// ConfigFromContext resolves a kubernetes client config for the specified context.
|
||||
// If kubeconfigOverride is specified, use this config file instead of the context defaults.
// If command.ContextDockerHost is specified as the context name, it falls back to the default user's kubeconfig file.
|
||||
func ConfigFromContext(name string, s store.Store) (clientcmd.ClientConfig, error) {
|
||||
ctxMeta, err := s.GetContextMetadata(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
epMeta := EndpointFromContext(ctxMeta)
|
||||
if epMeta != nil {
|
||||
ep, err := epMeta.WithTLSData(s, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ep.KubernetesConfig(), nil
|
||||
}
|
||||
// context has no kubernetes endpoint
|
||||
return kubernetes.NewKubernetesConfig(""), nil
|
||||
}
|
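A hedged sketch of consuming ConfigFromContext: the clientcmd.ClientConfig it returns exposes ClientConfig() to obtain a *rest.Config for building Kubernetes clients. The context name and store are assumed inputs.

package example

import (
    "github.com/docker/cli/cli/context/kubernetes"
    "github.com/docker/cli/cli/context/store"
    "k8s.io/client-go/rest"
)

// restConfigForContext resolves the kubernetes endpoint of a context (or the
// user's default kubeconfig if the context has none) into a REST config.
func restConfigForContext(name string, s store.Store) (*rest.Config, error) {
    clientCfg, err := kubernetes.ConfigFromContext(name, s)
    if err != nil {
        return nil, err
    }
    return clientCfg.ClientConfig()
}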
61
vendor/github.com/docker/cli/cli/context/kubernetes/save.go
generated
vendored
Normal file
@ -0,0 +1,61 @@
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/docker/cli/cli/context"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
)
|
||||
|
||||
// FromKubeConfig creates a Kubernetes endpoint from a Kubeconfig file
|
||||
func FromKubeConfig(kubeconfig, kubeContext, namespaceOverride string) (Endpoint, error) {
|
||||
cfg := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
|
||||
&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig},
|
||||
&clientcmd.ConfigOverrides{CurrentContext: kubeContext, Context: clientcmdapi.Context{Namespace: namespaceOverride}})
|
||||
ns, _, err := cfg.Namespace()
|
||||
if err != nil {
|
||||
return Endpoint{}, err
|
||||
}
|
||||
clientcfg, err := cfg.ClientConfig()
|
||||
if err != nil {
|
||||
return Endpoint{}, err
|
||||
}
|
||||
var ca, key, cert []byte
|
||||
if ca, err = readFileOrDefault(clientcfg.CAFile, clientcfg.CAData); err != nil {
|
||||
return Endpoint{}, err
|
||||
}
|
||||
if key, err = readFileOrDefault(clientcfg.KeyFile, clientcfg.KeyData); err != nil {
|
||||
return Endpoint{}, err
|
||||
}
|
||||
if cert, err = readFileOrDefault(clientcfg.CertFile, clientcfg.CertData); err != nil {
|
||||
return Endpoint{}, err
|
||||
}
|
||||
var tlsData *context.TLSData
|
||||
if ca != nil || cert != nil || key != nil {
|
||||
tlsData = &context.TLSData{
|
||||
CA: ca,
|
||||
Cert: cert,
|
||||
Key: key,
|
||||
}
|
||||
}
|
||||
return Endpoint{
|
||||
EndpointMeta: EndpointMeta{
|
||||
EndpointMetaBase: context.EndpointMetaBase{
|
||||
Host: clientcfg.Host,
|
||||
SkipTLSVerify: clientcfg.Insecure,
|
||||
},
|
||||
DefaultNamespace: ns,
|
||||
AuthProvider: clientcfg.AuthProvider,
|
||||
Exec: clientcfg.ExecProvider,
|
||||
},
|
||||
TLSData: tlsData,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func readFileOrDefault(path string, defaultValue []byte) ([]byte, error) {
|
||||
if path != "" {
|
||||
return ioutil.ReadFile(path)
|
||||
}
|
||||
return defaultValue, nil
|
||||
}
|
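A hedged usage sketch for FromKubeConfig: the kubeconfig path is a placeholder, and passing empty strings for the context and namespace overrides keeps the kubeconfig's current values, per the loader above.

package example

import (
    "github.com/docker/cli/cli/context/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

// endpointFromKubeconfig captures the current kubeconfig context as a docker
// CLI kubernetes endpoint, then turns it back into a client config.
func endpointFromKubeconfig(path string) (clientcmd.ClientConfig, error) {
    ep, err := kubernetes.FromKubeConfig(path, "", "")
    if err != nil {
        return nil, err
    }
    return ep.KubernetesConfig(), nil
}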
22
vendor/github.com/docker/cli/cli/context/store/doc.go
generated
vendored
Normal file
@ -0,0 +1,22 @@
// Package store provides a generic way to store credentials to connect to virtually any kind of remote system.
// The term `context` comes from the similar feature in Kubernetes kubectl config files.
//
// Conceptually, a context is a set of metadata and TLS data that can be used to connect to various endpoints
// of a remote system. TLS data and metadata are stored separately, so that in the future, we will be able to store sensitive
// information in a more secure way, depending on the OS we are running on (e.g.: on Windows we could use the user Certificate Store, on Mac OS the user Keychain...).
//
// The current implementation is purely file based, with the following structure:
// ${CONTEXT_ROOT}
//   - meta/
//     - <context id>/meta.json: contains context metadata (key/value pairs) as well as a list of endpoints (themselves containing key/value pair metadata)
//   - tls/
//     - <context id>/endpoint1/: directory containing TLS data for the endpoint1 in the corresponding context
//
// The context store itself has absolutely no knowledge about what a docker or a kubernetes endpoint should contain in terms of metadata or TLS config.
// Client code is responsible for generating and parsing endpoint metadata and TLS files.
// The multi-endpoint approach of this package allows combining many different endpoints in the same "context" (e.g., for a single context the Docker CLI
// can define both a docker endpoint and a Kubernetes endpoint for the same cluster, and also specify which
// orchestrator to use by default when deploying a compose stack on this cluster).
//
// Context IDs are actually SHA256 hashes of the context name, and are there only to avoid dealing with special characters in context names.
package store
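To ground the layout described above, a hedged sketch of creating a store and writing one context with a typed docker endpoint; cliContext and dockerEP are hypothetical stand-ins for whatever metadata the client code defines, and the store API used here appears later in this diff.

package example

import (
    "github.com/docker/cli/cli/context/store"
)

// Hypothetical metadata shapes; real callers define their own.
type cliContext struct {
    Description string
}

type dockerEP struct {
    Host string
}

func writeExampleContext(dir string) error {
    cfg := store.NewConfig(
        func() interface{} { return &cliContext{} },
        store.EndpointTypeGetter("docker", func() interface{} { return &dockerEP{} }),
    )
    s := store.New(dir, cfg)
    // Ends up under ${dir}/meta/<sha256 of "my-remote">/meta.json.
    return s.CreateOrUpdateContext(store.ContextMetadata{
        Name:     "my-remote",
        Metadata: cliContext{Description: "example context"},
        Endpoints: map[string]interface{}{
            "docker": dockerEP{Host: "ssh://me@example.com"},
        },
    })
}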
153
vendor/github.com/docker/cli/cli/context/store/metadatastore.go
generated
vendored
Normal file
@ -0,0 +1,153 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"sort"
|
||||
|
||||
"vbom.ml/util/sortorder"
|
||||
)
|
||||
|
||||
const (
|
||||
metadataDir = "meta"
|
||||
metaFile = "meta.json"
|
||||
)
|
||||
|
||||
type metadataStore struct {
|
||||
root string
|
||||
config Config
|
||||
}
|
||||
|
||||
func (s *metadataStore) contextDir(id contextdir) string {
|
||||
return filepath.Join(s.root, string(id))
|
||||
}
|
||||
|
||||
func (s *metadataStore) createOrUpdate(meta ContextMetadata) error {
|
||||
contextDir := s.contextDir(contextdirOf(meta.Name))
|
||||
if err := os.MkdirAll(contextDir, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
bytes, err := json.Marshal(&meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return ioutil.WriteFile(filepath.Join(contextDir, metaFile), bytes, 0644)
|
||||
}
|
||||
|
||||
func parseTypedOrMap(payload []byte, getter TypeGetter) (interface{}, error) {
|
||||
if len(payload) == 0 || string(payload) == "null" {
|
||||
return nil, nil
|
||||
}
|
||||
if getter == nil {
|
||||
var res map[string]interface{}
|
||||
if err := json.Unmarshal(payload, &res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
typed := getter()
|
||||
if err := json.Unmarshal(payload, typed); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reflect.ValueOf(typed).Elem().Interface(), nil
|
||||
}
|
||||
|
||||
func (s *metadataStore) get(id contextdir) (ContextMetadata, error) {
|
||||
contextDir := s.contextDir(id)
|
||||
bytes, err := ioutil.ReadFile(filepath.Join(contextDir, metaFile))
|
||||
if err != nil {
|
||||
return ContextMetadata{}, convertContextDoesNotExist(err)
|
||||
}
|
||||
var untyped untypedContextMetadata
|
||||
r := ContextMetadata{
|
||||
Endpoints: make(map[string]interface{}),
|
||||
}
|
||||
if err := json.Unmarshal(bytes, &untyped); err != nil {
|
||||
return ContextMetadata{}, err
|
||||
}
|
||||
r.Name = untyped.Name
|
||||
if r.Metadata, err = parseTypedOrMap(untyped.Metadata, s.config.contextType); err != nil {
|
||||
return ContextMetadata{}, err
|
||||
}
|
||||
for k, v := range untyped.Endpoints {
|
||||
if r.Endpoints[k], err = parseTypedOrMap(v, s.config.endpointTypes[k]); err != nil {
|
||||
return ContextMetadata{}, err
|
||||
}
|
||||
}
|
||||
return r, err
|
||||
}
|
||||
|
||||
func (s *metadataStore) remove(id contextdir) error {
|
||||
contextDir := s.contextDir(id)
|
||||
return os.RemoveAll(contextDir)
|
||||
}
|
||||
|
||||
func (s *metadataStore) list() ([]ContextMetadata, error) {
|
||||
ctxDirs, err := listRecursivelyMetadataDirs(s.root)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
var res []ContextMetadata
|
||||
for _, dir := range ctxDirs {
|
||||
c, err := s.get(contextdir(dir))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res = append(res, c)
|
||||
}
|
||||
sort.Slice(res, func(i, j int) bool {
|
||||
return sortorder.NaturalLess(res[i].Name, res[j].Name)
|
||||
})
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func isContextDir(path string) bool {
|
||||
s, err := os.Stat(filepath.Join(path, metaFile))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return !s.IsDir()
|
||||
}
|
||||
|
||||
func listRecursivelyMetadataDirs(root string) ([]string, error) {
|
||||
fis, err := ioutil.ReadDir(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var result []string
|
||||
for _, fi := range fis {
|
||||
if fi.IsDir() {
|
||||
if isContextDir(filepath.Join(root, fi.Name())) {
|
||||
result = append(result, fi.Name())
|
||||
}
|
||||
subs, err := listRecursivelyMetadataDirs(filepath.Join(root, fi.Name()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, s := range subs {
|
||||
result = append(result, fmt.Sprintf("%s/%s", fi.Name(), s))
|
||||
}
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func convertContextDoesNotExist(err error) error {
|
||||
if os.IsNotExist(err) {
|
||||
return &contextDoesNotExistError{}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
type untypedContextMetadata struct {
|
||||
Metadata json.RawMessage `json:"metadata,omitempty"`
|
||||
Endpoints map[string]json.RawMessage `json:"endpoints,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
}
|
349
vendor/github.com/docker/cli/cli/context/store/store.go
generated
vendored
Normal file
@ -0,0 +1,349 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
_ "crypto/sha256" // ensure ids can be computed
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/errdefs"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// Store provides a context store for easily remembering endpoints configuration
|
||||
type Store interface {
|
||||
ListContexts() ([]ContextMetadata, error)
|
||||
CreateOrUpdateContext(meta ContextMetadata) error
|
||||
RemoveContext(name string) error
|
||||
GetContextMetadata(name string) (ContextMetadata, error)
|
||||
ResetContextTLSMaterial(name string, data *ContextTLSData) error
|
||||
ResetContextEndpointTLSMaterial(contextName string, endpointName string, data *EndpointTLSData) error
|
||||
ListContextTLSFiles(name string) (map[string]EndpointFiles, error)
|
||||
GetContextTLSData(contextName, endpointName, fileName string) ([]byte, error)
|
||||
GetContextStorageInfo(contextName string) ContextStorageInfo
|
||||
}
|
||||
|
||||
// ContextMetadata contains metadata about a context and its endpoints
|
||||
type ContextMetadata struct {
|
||||
Name string `json:",omitempty"`
|
||||
Metadata interface{} `json:",omitempty"`
|
||||
Endpoints map[string]interface{} `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ContextStorageInfo contains data about where a given context is stored
|
||||
type ContextStorageInfo struct {
|
||||
MetadataPath string
|
||||
TLSPath string
|
||||
}
|
||||
|
||||
// EndpointTLSData represents tls data for a given endpoint
|
||||
type EndpointTLSData struct {
|
||||
Files map[string][]byte
|
||||
}
|
||||
|
||||
// ContextTLSData represents tls data for a whole context
|
||||
type ContextTLSData struct {
|
||||
Endpoints map[string]EndpointTLSData
|
||||
}
|
||||
|
||||
// New creates a store from a given directory.
|
||||
// If the directory does not exist or is empty, initialize it
|
||||
func New(dir string, cfg Config) Store {
|
||||
metaRoot := filepath.Join(dir, metadataDir)
|
||||
tlsRoot := filepath.Join(dir, tlsDir)
|
||||
|
||||
return &store{
|
||||
meta: &metadataStore{
|
||||
root: metaRoot,
|
||||
config: cfg,
|
||||
},
|
||||
tls: &tlsStore{
|
||||
root: tlsRoot,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type store struct {
|
||||
meta *metadataStore
|
||||
tls *tlsStore
|
||||
}
|
||||
|
||||
func (s *store) ListContexts() ([]ContextMetadata, error) {
|
||||
return s.meta.list()
|
||||
}
|
||||
|
||||
func (s *store) CreateOrUpdateContext(meta ContextMetadata) error {
|
||||
return s.meta.createOrUpdate(meta)
|
||||
}
|
||||
|
||||
func (s *store) RemoveContext(name string) error {
|
||||
id := contextdirOf(name)
|
||||
if err := s.meta.remove(id); err != nil {
|
||||
return patchErrContextName(err, name)
|
||||
}
|
||||
return patchErrContextName(s.tls.removeAllContextData(id), name)
|
||||
}
|
||||
|
||||
func (s *store) GetContextMetadata(name string) (ContextMetadata, error) {
|
||||
res, err := s.meta.get(contextdirOf(name))
|
||||
patchErrContextName(err, name)
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (s *store) ResetContextTLSMaterial(name string, data *ContextTLSData) error {
|
||||
id := contextdirOf(name)
|
||||
if err := s.tls.removeAllContextData(id); err != nil {
|
||||
return patchErrContextName(err, name)
|
||||
}
|
||||
if data == nil {
|
||||
return nil
|
||||
}
|
||||
for ep, files := range data.Endpoints {
|
||||
for fileName, data := range files.Files {
|
||||
if err := s.tls.createOrUpdate(id, ep, fileName, data); err != nil {
|
||||
return patchErrContextName(err, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *store) ResetContextEndpointTLSMaterial(contextName string, endpointName string, data *EndpointTLSData) error {
|
||||
id := contextdirOf(contextName)
|
||||
if err := s.tls.removeAllEndpointData(id, endpointName); err != nil {
|
||||
return patchErrContextName(err, contextName)
|
||||
}
|
||||
if data == nil {
|
||||
return nil
|
||||
}
|
||||
for fileName, data := range data.Files {
|
||||
if err := s.tls.createOrUpdate(id, endpointName, fileName, data); err != nil {
|
||||
return patchErrContextName(err, contextName)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *store) ListContextTLSFiles(name string) (map[string]EndpointFiles, error) {
|
||||
res, err := s.tls.listContextData(contextdirOf(name))
|
||||
return res, patchErrContextName(err, name)
|
||||
}
|
||||
|
||||
func (s *store) GetContextTLSData(contextName, endpointName, fileName string) ([]byte, error) {
|
||||
res, err := s.tls.getData(contextdirOf(contextName), endpointName, fileName)
|
||||
return res, patchErrContextName(err, contextName)
|
||||
}
|
||||
|
||||
func (s *store) GetContextStorageInfo(contextName string) ContextStorageInfo {
|
||||
dir := contextdirOf(contextName)
|
||||
return ContextStorageInfo{
|
||||
MetadataPath: s.meta.contextDir(dir),
|
||||
TLSPath: s.tls.contextDir(dir),
|
||||
}
|
||||
}
|
||||
|
||||
// Export exports an existing context into an opaque data stream.
// This stream is actually a tarball containing context metadata and TLS materials, but it does
// not map 1:1 to the layout of the context store (don't try to restore it manually without calling store.Import).
|
||||
func Export(name string, s Store) io.ReadCloser {
|
||||
reader, writer := io.Pipe()
|
||||
go func() {
|
||||
tw := tar.NewWriter(writer)
|
||||
defer tw.Close()
|
||||
defer writer.Close()
|
||||
meta, err := s.GetContextMetadata(name)
|
||||
if err != nil {
|
||||
writer.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
metaBytes, err := json.Marshal(&meta)
|
||||
if err != nil {
|
||||
writer.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
if err = tw.WriteHeader(&tar.Header{
|
||||
Name: metaFile,
|
||||
Mode: 0644,
|
||||
Size: int64(len(metaBytes)),
|
||||
}); err != nil {
|
||||
writer.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
if _, err = tw.Write(metaBytes); err != nil {
|
||||
writer.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
tlsFiles, err := s.ListContextTLSFiles(name)
|
||||
if err != nil {
|
||||
writer.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
if err = tw.WriteHeader(&tar.Header{
|
||||
Name: "tls",
|
||||
Mode: 0700,
|
||||
Size: 0,
|
||||
Typeflag: tar.TypeDir,
|
||||
}); err != nil {
|
||||
writer.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
for endpointName, endpointFiles := range tlsFiles {
|
||||
if err = tw.WriteHeader(&tar.Header{
|
||||
Name: path.Join("tls", endpointName),
|
||||
Mode: 0700,
|
||||
Size: 0,
|
||||
Typeflag: tar.TypeDir,
|
||||
}); err != nil {
|
||||
writer.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
for _, fileName := range endpointFiles {
|
||||
data, err := s.GetContextTLSData(name, endpointName, fileName)
|
||||
if err != nil {
|
||||
writer.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
if err = tw.WriteHeader(&tar.Header{
|
||||
Name: path.Join("tls", endpointName, fileName),
|
||||
Mode: 0600,
|
||||
Size: int64(len(data)),
|
||||
}); err != nil {
|
||||
writer.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
if _, err = tw.Write(data); err != nil {
|
||||
writer.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
return reader
|
||||
}
|
||||
|
||||
// Import imports an exported context into a store
|
||||
func Import(name string, s Store, reader io.Reader) error {
|
||||
tr := tar.NewReader(reader)
|
||||
tlsData := ContextTLSData{
|
||||
Endpoints: map[string]EndpointTLSData{},
|
||||
}
|
||||
for {
|
||||
hdr, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if hdr.Typeflag == tar.TypeDir {
|
||||
// skip this entry, only taking files into account
|
||||
continue
|
||||
}
|
||||
if hdr.Name == metaFile {
|
||||
data, err := ioutil.ReadAll(tr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var meta ContextMetadata
|
||||
if err := json.Unmarshal(data, &meta); err != nil {
|
||||
return err
|
||||
}
|
||||
meta.Name = name
|
||||
if err := s.CreateOrUpdateContext(meta); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if strings.HasPrefix(hdr.Name, "tls/") {
|
||||
relative := strings.TrimPrefix(hdr.Name, "tls/")
|
||||
parts := strings.SplitN(relative, "/", 2)
|
||||
if len(parts) != 2 {
|
||||
return errors.New("archive format is invalid")
|
||||
}
|
||||
endpointName := parts[0]
|
||||
fileName := parts[1]
|
||||
data, err := ioutil.ReadAll(tr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, ok := tlsData.Endpoints[endpointName]; !ok {
|
||||
tlsData.Endpoints[endpointName] = EndpointTLSData{
|
||||
Files: map[string][]byte{},
|
||||
}
|
||||
}
|
||||
tlsData.Endpoints[endpointName].Files[fileName] = data
|
||||
}
|
||||
}
|
||||
return s.ResetContextTLSMaterial(name, &tlsData)
|
||||
}
|
||||
|
||||
type setContextName interface {
|
||||
setContext(name string)
|
||||
}
|
||||
|
||||
type contextDoesNotExistError struct {
|
||||
name string
|
||||
}
|
||||
|
||||
func (e *contextDoesNotExistError) Error() string {
|
||||
return fmt.Sprintf("context %q does not exist", e.name)
|
||||
}
|
||||
|
||||
func (e *contextDoesNotExistError) setContext(name string) {
|
||||
e.name = name
|
||||
}
|
||||
|
||||
// NotFound satisfies interface github.com/docker/docker/errdefs.ErrNotFound
|
||||
func (e *contextDoesNotExistError) NotFound() {}
|
||||
|
||||
type tlsDataDoesNotExist interface {
|
||||
errdefs.ErrNotFound
|
||||
IsTLSDataDoesNotExist()
|
||||
}
|
||||
|
||||
type tlsDataDoesNotExistError struct {
|
||||
context, endpoint, file string
|
||||
}
|
||||
|
||||
func (e *tlsDataDoesNotExistError) Error() string {
|
||||
return fmt.Sprintf("tls data for %s/%s/%s does not exist", e.context, e.endpoint, e.file)
|
||||
}
|
||||
|
||||
func (e *tlsDataDoesNotExistError) setContext(name string) {
|
||||
e.context = name
|
||||
}
|
||||
|
||||
// NotFound satisfies interface github.com/docker/docker/errdefs.ErrNotFound
|
||||
func (e *tlsDataDoesNotExistError) NotFound() {}
|
||||
|
||||
// IsTLSDataDoesNotExist satisfies tlsDataDoesNotExist
|
||||
func (e *tlsDataDoesNotExistError) IsTLSDataDoesNotExist() {}
|
||||
|
||||
// IsErrContextDoesNotExist checks if the given error is a "context does not exist" condition
|
||||
func IsErrContextDoesNotExist(err error) bool {
|
||||
_, ok := err.(*contextDoesNotExistError)
|
||||
return ok
|
||||
}
|
||||
|
||||
// IsErrTLSDataDoesNotExist checks if the given error is a "TLS data does not exist" condition
|
||||
func IsErrTLSDataDoesNotExist(err error) bool {
|
||||
_, ok := err.(tlsDataDoesNotExist)
|
||||
return ok
|
||||
}
|
||||
|
||||
type contextdir string
|
||||
|
||||
func contextdirOf(name string) contextdir {
|
||||
return contextdir(digest.FromString(name).Encoded())
|
||||
}
|
||||
|
||||
func patchErrContextName(err error, name string) error {
|
||||
if typed, ok := err.(setContextName); ok {
|
||||
typed.setContext(name)
|
||||
}
|
||||
return err
|
||||
}
|
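Since Export and Import are symmetric, a hedged sketch of moving a context between two stores (for example when backing up a ${CONTEXT_ROOT}); the function and variable names are illustrative.

package example

import (
    "github.com/docker/cli/cli/context/store"
)

// copyContext streams a context out of src and recreates it in dst under the
// same name, including its TLS material.
func copyContext(src, dst store.Store, name string) error {
    r := store.Export(name, src)
    defer r.Close()
    return store.Import(name, dst, r)
}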
43
vendor/github.com/docker/cli/cli/context/store/storeconfig.go
generated
vendored
Normal file
@ -0,0 +1,43 @@
package store

// TypeGetter is a func used to determine the concrete type of a context or
// endpoint metadata by returning a pointer to an instance of the object
// e.g.: for a context of type DockerContext, the corresponding TypeGetter should return new(DockerContext)
type TypeGetter func() interface{}

// NamedTypeGetter is a TypeGetter associated with a name
type NamedTypeGetter struct {
    name       string
    typeGetter TypeGetter
}

// EndpointTypeGetter returns a NamedTypeGetter with the specified name and getter
func EndpointTypeGetter(name string, getter TypeGetter) NamedTypeGetter {
    return NamedTypeGetter{
        name:       name,
        typeGetter: getter,
    }
}

// Config is used to configure the metadata marshaler of the context store
type Config struct {
    contextType   TypeGetter
    endpointTypes map[string]TypeGetter
}

// SetEndpoint sets the typing information for an endpoint
func (c Config) SetEndpoint(name string, getter TypeGetter) {
    c.endpointTypes[name] = getter
}

// NewConfig creates a config object
func NewConfig(contextType TypeGetter, endpoints ...NamedTypeGetter) Config {
    res := Config{
        contextType:   contextType,
        endpointTypes: make(map[string]TypeGetter),
    }
    for _, e := range endpoints {
        res.endpointTypes[e.name] = e.typeGetter
    }
    return res
}
99
vendor/github.com/docker/cli/cli/context/store/tlsstore.go
generated
vendored
Normal file
@ -0,0 +1,99 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
const tlsDir = "tls"
|
||||
|
||||
type tlsStore struct {
|
||||
root string
|
||||
}
|
||||
|
||||
func (s *tlsStore) contextDir(id contextdir) string {
|
||||
return filepath.Join(s.root, string(id))
|
||||
}
|
||||
|
||||
func (s *tlsStore) endpointDir(contextID contextdir, name string) string {
|
||||
return filepath.Join(s.root, string(contextID), name)
|
||||
}
|
||||
|
||||
func (s *tlsStore) filePath(contextID contextdir, endpointName, filename string) string {
|
||||
return filepath.Join(s.root, string(contextID), endpointName, filename)
|
||||
}
|
||||
|
||||
func (s *tlsStore) createOrUpdate(contextID contextdir, endpointName, filename string, data []byte) error {
|
||||
epdir := s.endpointDir(contextID, endpointName)
|
||||
parentOfRoot := filepath.Dir(s.root)
|
||||
if err := os.MkdirAll(parentOfRoot, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.MkdirAll(epdir, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
return ioutil.WriteFile(s.filePath(contextID, endpointName, filename), data, 0600)
|
||||
}
|
||||
|
||||
func (s *tlsStore) getData(contextID contextdir, endpointName, filename string) ([]byte, error) {
|
||||
data, err := ioutil.ReadFile(s.filePath(contextID, endpointName, filename))
|
||||
if err != nil {
|
||||
return nil, convertTLSDataDoesNotExist(endpointName, filename, err)
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func (s *tlsStore) remove(contextID contextdir, endpointName, filename string) error {
|
||||
err := os.Remove(s.filePath(contextID, endpointName, filename))
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *tlsStore) removeAllEndpointData(contextID contextdir, endpointName string) error {
|
||||
return os.RemoveAll(s.endpointDir(contextID, endpointName))
|
||||
}
|
||||
|
||||
func (s *tlsStore) removeAllContextData(contextID contextdir) error {
|
||||
return os.RemoveAll(s.contextDir(contextID))
|
||||
}
|
||||
|
||||
func (s *tlsStore) listContextData(contextID contextdir) (map[string]EndpointFiles, error) {
|
||||
epFSs, err := ioutil.ReadDir(s.contextDir(contextID))
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return map[string]EndpointFiles{}, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
r := make(map[string]EndpointFiles)
|
||||
for _, epFS := range epFSs {
|
||||
if epFS.IsDir() {
|
||||
epDir := s.endpointDir(contextID, epFS.Name())
|
||||
fss, err := ioutil.ReadDir(epDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var files EndpointFiles
|
||||
for _, fs := range fss {
|
||||
if !fs.IsDir() {
|
||||
files = append(files, fs.Name())
|
||||
}
|
||||
}
|
||||
r[epFS.Name()] = files
|
||||
}
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// EndpointFiles is a slice of strings representing file names
|
||||
type EndpointFiles []string
|
||||
|
||||
func convertTLSDataDoesNotExist(endpoint, file string, err error) error {
|
||||
if os.IsNotExist(err) {
|
||||
return &tlsDataDoesNotExistError{endpoint: endpoint, file: file}
|
||||
}
|
||||
return err
|
||||
}
|
98
vendor/github.com/docker/cli/cli/context/tlsdata.go
generated
vendored
Normal file
@ -0,0 +1,98 @@
|
||||
package context
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/docker/cli/cli/context/store"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
caKey = "ca.pem"
|
||||
certKey = "cert.pem"
|
||||
keyKey = "key.pem"
|
||||
)
|
||||
|
||||
// TLSData holds ca/cert/key raw data
|
||||
type TLSData struct {
|
||||
CA []byte
|
||||
Key []byte
|
||||
Cert []byte
|
||||
}
|
||||
|
||||
// ToStoreTLSData converts TLSData to the store representation
|
||||
func (data *TLSData) ToStoreTLSData() *store.EndpointTLSData {
|
||||
if data == nil {
|
||||
return nil
|
||||
}
|
||||
result := store.EndpointTLSData{
|
||||
Files: make(map[string][]byte),
|
||||
}
|
||||
if data.CA != nil {
|
||||
result.Files[caKey] = data.CA
|
||||
}
|
||||
if data.Cert != nil {
|
||||
result.Files[certKey] = data.Cert
|
||||
}
|
||||
if data.Key != nil {
|
||||
result.Files[keyKey] = data.Key
|
||||
}
|
||||
return &result
|
||||
}
|
||||
|
||||
// LoadTLSData loads TLS data from the store
|
||||
func LoadTLSData(s store.Store, contextName, endpointName string) (*TLSData, error) {
|
||||
tlsFiles, err := s.ListContextTLSFiles(contextName)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to retrieve context tls files for context %q", contextName)
|
||||
}
|
||||
if epTLSFiles, ok := tlsFiles[endpointName]; ok {
|
||||
var tlsData TLSData
|
||||
for _, f := range epTLSFiles {
|
||||
data, err := s.GetContextTLSData(contextName, endpointName, f)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to retrieve context tls data for file %q of context %q", f, contextName)
|
||||
}
|
||||
switch f {
|
||||
case caKey:
|
||||
tlsData.CA = data
|
||||
case certKey:
|
||||
tlsData.Cert = data
|
||||
case keyKey:
|
||||
tlsData.Key = data
|
||||
default:
|
||||
logrus.Warnf("unknown file %s in context %s tls bundle", f, contextName)
|
||||
}
|
||||
}
|
||||
return &tlsData, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// TLSDataFromFiles reads files into a TLSData struct (or returns nil if all paths are empty)
|
||||
func TLSDataFromFiles(caPath, certPath, keyPath string) (*TLSData, error) {
|
||||
var (
|
||||
ca, cert, key []byte
|
||||
err error
|
||||
)
|
||||
if caPath != "" {
|
||||
if ca, err = ioutil.ReadFile(caPath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if certPath != "" {
|
||||
if cert, err = ioutil.ReadFile(certPath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if keyPath != "" {
|
||||
if key, err = ioutil.ReadFile(keyPath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if ca == nil && cert == nil && key == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return &TLSData{CA: ca, Cert: cert, Key: key}, nil
|
||||
}
|
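A hedged sketch tying tlsdata.go to the store API above: read PEM files from disk (paths are caller-supplied placeholders) and attach them to a context's docker endpoint.

package example

import (
    "github.com/docker/cli/cli/context"
    "github.com/docker/cli/cli/context/docker"
    "github.com/docker/cli/cli/context/store"
)

// attachTLS loads ca/cert/key files and stores them for the context's docker
// endpoint; a nil TLSData (all paths empty) simply clears the endpoint's TLS data.
func attachTLS(s store.Store, contextName, caPath, certPath, keyPath string) error {
    tlsData, err := context.TLSDataFromFiles(caPath, certPath, keyPath)
    if err != nil {
        return err
    }
    return s.ResetContextEndpointTLSMaterial(contextName, docker.DockerEndpoint, tlsData.ToStoreTLSData())
}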
26
vendor/github.com/docker/cli/cli/debug/debug.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
package debug

import (
    "os"

    "github.com/sirupsen/logrus"
)

// Enable sets the DEBUG env var to true
// and sets the logger to log at debug level.
func Enable() {
    os.Setenv("DEBUG", "1")
    logrus.SetLevel(logrus.DebugLevel)
}

// Disable clears the DEBUG env var
// and sets the logger to log at info level.
func Disable() {
    os.Setenv("DEBUG", "")
    logrus.SetLevel(logrus.InfoLevel)
}

// IsEnabled checks whether the debug flag is set or not.
func IsEnabled() bool {
    return os.Getenv("DEBUG") != ""
}
33
vendor/github.com/docker/cli/cli/error.go
generated
vendored
Normal file
@ -0,0 +1,33 @@
package cli

import (
    "fmt"
    "strings"
)

// Errors is a list of errors.
// Useful in a loop if you don't want to return the error right away and instead want to display,
// after the loop, all the errors that happened during it.
type Errors []error

func (errList Errors) Error() string {
    if len(errList) < 1 {
        return ""
    }

    out := make([]string, len(errList))
    for i := range errList {
        out[i] = errList[i].Error()
    }
    return strings.Join(out, ", ")
}

// StatusError reports an unsuccessful exit by a command.
type StatusError struct {
    Status     string
    StatusCode int
}

func (e StatusError) Error() string {
    return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode)
}
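The comment on Errors suggests collecting failures across a loop; a hedged sketch (names are illustrative) using the context store's RemoveContext from earlier in this diff:

package example

import (
    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/context/store"
)

// removeContexts removes every named context and reports all failures at once
// instead of stopping at the first one.
func removeContexts(s store.Store, names []string) error {
    var errs cli.Errors
    for _, name := range names {
        if err := s.RemoveContext(name); err != nil {
            errs = append(errs, err)
        }
    }
    if len(errs) > 0 {
        return errs
    }
    return nil
}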
12
vendor/github.com/docker/cli/cli/flags/client.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
package flags

// ClientOptions are the options used to configure the client cli
type ClientOptions struct {
    Common    *CommonOptions
    ConfigDir string
}

// NewClientOptions returns a new ClientOptions
func NewClientOptions() *ClientOptions {
    return &ClientOptions{Common: NewCommonOptions()}
}
122
vendor/github.com/docker/cli/cli/flags/common.go
generated
vendored
Normal file
@ -0,0 +1,122 @@
|
||||
package flags
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
cliconfig "github.com/docker/cli/cli/config"
|
||||
"github.com/docker/cli/opts"
|
||||
"github.com/docker/go-connections/tlsconfig"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultCaFile is the default filename for the CA pem file
|
||||
DefaultCaFile = "ca.pem"
|
||||
// DefaultKeyFile is the default filename for the key pem file
|
||||
DefaultKeyFile = "key.pem"
|
||||
// DefaultCertFile is the default filename for the cert pem file
|
||||
DefaultCertFile = "cert.pem"
|
||||
// FlagTLSVerify is the flag name for the TLS verification option
|
||||
FlagTLSVerify = "tlsverify"
|
||||
)
|
||||
|
||||
var (
|
||||
dockerCertPath = os.Getenv("DOCKER_CERT_PATH")
|
||||
dockerTLSVerify = os.Getenv("DOCKER_TLS_VERIFY") != ""
|
||||
dockerTLS = os.Getenv("DOCKER_TLS") != ""
|
||||
)
|
||||
|
||||
// CommonOptions are options common to both the client and the daemon.
|
||||
type CommonOptions struct {
|
||||
Debug bool
|
||||
Hosts []string
|
||||
LogLevel string
|
||||
TLS bool
|
||||
TLSVerify bool
|
||||
TLSOptions *tlsconfig.Options
|
||||
Context string
|
||||
}
|
||||
|
||||
// NewCommonOptions returns a new CommonOptions
|
||||
func NewCommonOptions() *CommonOptions {
|
||||
return &CommonOptions{}
|
||||
}
|
||||
|
||||
// InstallFlags adds flags for the common options on the FlagSet
|
||||
func (commonOpts *CommonOptions) InstallFlags(flags *pflag.FlagSet) {
|
||||
if dockerCertPath == "" {
|
||||
dockerCertPath = cliconfig.Dir()
|
||||
}
|
||||
|
||||
flags.BoolVarP(&commonOpts.Debug, "debug", "D", false, "Enable debug mode")
|
||||
flags.StringVarP(&commonOpts.LogLevel, "log-level", "l", "info", `Set the logging level ("debug"|"info"|"warn"|"error"|"fatal")`)
|
||||
flags.BoolVar(&commonOpts.TLS, "tls", dockerTLS, "Use TLS; implied by --tlsverify")
|
||||
flags.BoolVar(&commonOpts.TLSVerify, FlagTLSVerify, dockerTLSVerify, "Use TLS and verify the remote")
|
||||
|
||||
// TODO use flag flags.String("identity"}, "i", "", "Path to libtrust key file")
|
||||
|
||||
commonOpts.TLSOptions = &tlsconfig.Options{
|
||||
CAFile: filepath.Join(dockerCertPath, DefaultCaFile),
|
||||
CertFile: filepath.Join(dockerCertPath, DefaultCertFile),
|
||||
KeyFile: filepath.Join(dockerCertPath, DefaultKeyFile),
|
||||
}
|
||||
tlsOptions := commonOpts.TLSOptions
|
||||
flags.Var(opts.NewQuotedString(&tlsOptions.CAFile), "tlscacert", "Trust certs signed only by this CA")
|
||||
flags.Var(opts.NewQuotedString(&tlsOptions.CertFile), "tlscert", "Path to TLS certificate file")
|
||||
flags.Var(opts.NewQuotedString(&tlsOptions.KeyFile), "tlskey", "Path to TLS key file")
|
||||
|
||||
// opts.ValidateHost is not used here, so as to allow connection helpers
|
||||
hostOpt := opts.NewNamedListOptsRef("hosts", &commonOpts.Hosts, nil)
|
||||
flags.VarP(hostOpt, "host", "H", "Daemon socket(s) to connect to")
|
||||
flags.StringVarP(&commonOpts.Context, "context", "c", "",
|
||||
`Name of the context to use to connect to the daemon (overrides DOCKER_HOST env var and default context set with "docker context use")`)
|
||||
}
|
||||
|
||||
// SetDefaultOptions sets default values for options after flag parsing is
|
||||
// complete
|
||||
func (commonOpts *CommonOptions) SetDefaultOptions(flags *pflag.FlagSet) {
|
||||
// Regardless of whether the user sets it to true or false, if they
|
||||
// specify --tlsverify at all then we need to turn on TLS
|
||||
// TLSVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, so we need
|
||||
// to check that here as well
|
||||
if flags.Changed(FlagTLSVerify) || commonOpts.TLSVerify {
|
||||
commonOpts.TLS = true
|
||||
}
|
||||
|
||||
if !commonOpts.TLS {
|
||||
commonOpts.TLSOptions = nil
|
||||
} else {
|
||||
tlsOptions := commonOpts.TLSOptions
|
||||
tlsOptions.InsecureSkipVerify = !commonOpts.TLSVerify
|
||||
|
||||
// Reset CertFile and KeyFile to empty string if the user did not specify
|
||||
// the respective flags and the respective default files were not found.
|
||||
if !flags.Changed("tlscert") {
|
||||
if _, err := os.Stat(tlsOptions.CertFile); os.IsNotExist(err) {
|
||||
tlsOptions.CertFile = ""
|
||||
}
|
||||
}
|
||||
if !flags.Changed("tlskey") {
|
||||
if _, err := os.Stat(tlsOptions.KeyFile); os.IsNotExist(err) {
|
||||
tlsOptions.KeyFile = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetLogLevel sets the logrus logging level
|
||||
func SetLogLevel(logLevel string) {
|
||||
if logLevel != "" {
|
||||
lvl, err := logrus.ParseLevel(logLevel)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", logLevel)
|
||||
os.Exit(1)
|
||||
}
|
||||
logrus.SetLevel(lvl)
|
||||
} else {
|
||||
logrus.SetLevel(logrus.InfoLevel)
|
||||
}
|
||||
}
|
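A minimal usage sketch, not part of the vendored files above, showing how a CLI entrypoint could wire these options together; the program name "docker-example" and the sample arguments are assumptions for illustration.

package main

import (
	"fmt"

	"github.com/docker/cli/cli/flags"
	"github.com/spf13/pflag"
)

func main() {
	// Register the common client flags on a fresh flag set.
	flagSet := pflag.NewFlagSet("docker-example", pflag.ContinueOnError)
	opts := flags.NewClientOptions()
	opts.Common.InstallFlags(flagSet)

	// Parse some illustrative arguments; --tlsverify implies --tls once
	// SetDefaultOptions has run.
	if err := flagSet.Parse([]string{"--tlsverify", "--log-level", "debug"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	opts.Common.SetDefaultOptions(flagSet)
	flags.SetLogLevel(opts.Common.LogLevel)

	fmt.Println("TLS enabled:", opts.Common.TLS)
}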
180
vendor/github.com/docker/cli/cli/manifest/store/store.go
generated
vendored
Normal file
@ -0,0 +1,180 @@
package store

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"

	"github.com/docker/cli/cli/manifest/types"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/docker/distribution/reference"
	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

// Store manages local storage of image distribution manifests
type Store interface {
	Remove(listRef reference.Reference) error
	Get(listRef reference.Reference, manifest reference.Reference) (types.ImageManifest, error)
	GetList(listRef reference.Reference) ([]types.ImageManifest, error)
	Save(listRef reference.Reference, manifest reference.Reference, image types.ImageManifest) error
}

// fsStore manages manifest files stored on the local filesystem
type fsStore struct {
	root string
}

// NewStore returns a new store for a local file path
func NewStore(root string) Store {
	return &fsStore{root: root}
}

// Remove a manifest list from local storage
func (s *fsStore) Remove(listRef reference.Reference) error {
	path := filepath.Join(s.root, makeFilesafeName(listRef.String()))
	return os.RemoveAll(path)
}

// Get returns the local manifest
func (s *fsStore) Get(listRef reference.Reference, manifest reference.Reference) (types.ImageManifest, error) {
	filename := manifestToFilename(s.root, listRef.String(), manifest.String())
	return s.getFromFilename(manifest, filename)
}

func (s *fsStore) getFromFilename(ref reference.Reference, filename string) (types.ImageManifest, error) {
	bytes, err := ioutil.ReadFile(filename)
	switch {
	case os.IsNotExist(err):
		return types.ImageManifest{}, newNotFoundError(ref.String())
	case err != nil:
		return types.ImageManifest{}, err
	}
	var manifestInfo struct {
		types.ImageManifest

		// Deprecated Fields, replaced by Descriptor
		Digest   digest.Digest
		Platform *manifestlist.PlatformSpec
	}

	if err := json.Unmarshal(bytes, &manifestInfo); err != nil {
		return types.ImageManifest{}, err
	}

	// Compatibility with image manifests created before
	// descriptor, newer versions omit Digest and Platform
	if manifestInfo.Digest != "" {
		mediaType, raw, err := manifestInfo.Payload()
		if err != nil {
			return types.ImageManifest{}, err
		}
		if dgst := digest.FromBytes(raw); dgst != manifestInfo.Digest {
			return types.ImageManifest{}, errors.Errorf("invalid manifest file %v: image manifest digest mismatch (%v != %v)", filename, manifestInfo.Digest, dgst)
		}
		manifestInfo.ImageManifest.Descriptor = ocispec.Descriptor{
			Digest:    manifestInfo.Digest,
			Size:      int64(len(raw)),
			MediaType: mediaType,
			Platform:  types.OCIPlatform(manifestInfo.Platform),
		}
	}

	return manifestInfo.ImageManifest, nil
}

// GetList returns all the local manifests for a transaction
func (s *fsStore) GetList(listRef reference.Reference) ([]types.ImageManifest, error) {
	filenames, err := s.listManifests(listRef.String())
	switch {
	case err != nil:
		return nil, err
	case filenames == nil:
		return nil, newNotFoundError(listRef.String())
	}

	manifests := []types.ImageManifest{}
	for _, filename := range filenames {
		filename = filepath.Join(s.root, makeFilesafeName(listRef.String()), filename)
		manifest, err := s.getFromFilename(listRef, filename)
		if err != nil {
			return nil, err
		}
		manifests = append(manifests, manifest)
	}
	return manifests, nil
}

// listManifests stored in a transaction
func (s *fsStore) listManifests(transaction string) ([]string, error) {
	transactionDir := filepath.Join(s.root, makeFilesafeName(transaction))
	fileInfos, err := ioutil.ReadDir(transactionDir)
	switch {
	case os.IsNotExist(err):
		return nil, nil
	case err != nil:
		return nil, err
	}

	filenames := []string{}
	for _, info := range fileInfos {
		filenames = append(filenames, info.Name())
	}
	return filenames, nil
}

// Save a manifest as part of a local manifest list
func (s *fsStore) Save(listRef reference.Reference, manifest reference.Reference, image types.ImageManifest) error {
	if err := s.createManifestListDirectory(listRef.String()); err != nil {
		return err
	}
	filename := manifestToFilename(s.root, listRef.String(), manifest.String())
	bytes, err := json.Marshal(image)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(filename, bytes, 0644)
}

func (s *fsStore) createManifestListDirectory(transaction string) error {
	path := filepath.Join(s.root, makeFilesafeName(transaction))
	return os.MkdirAll(path, 0755)
}

func manifestToFilename(root, manifestList, manifest string) string {
	return filepath.Join(root, makeFilesafeName(manifestList), makeFilesafeName(manifest))
}

func makeFilesafeName(ref string) string {
	fileName := strings.Replace(ref, ":", "-", -1)
	return strings.Replace(fileName, "/", "_", -1)
}

type notFoundError struct {
	object string
}

func newNotFoundError(ref string) *notFoundError {
	return &notFoundError{object: ref}
}

func (n *notFoundError) Error() string {
	return fmt.Sprintf("No such manifest: %s", n.object)
}

// NotFound interface
func (n *notFoundError) NotFound() {}

// IsNotFound returns true if the error is a not found error
func IsNotFound(err error) bool {
	_, ok := err.(notFound)
	return ok
}

type notFound interface {
	NotFound()
}
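A minimal sketch, not part of the vendored files above, of how the Store interface might be exercised; the directory path and image references are illustrative assumptions.

package main

import (
	"fmt"

	"github.com/docker/cli/cli/manifest/store"
	"github.com/docker/cli/cli/manifest/types"
	"github.com/docker/distribution/reference"
)

func main() {
	// Each manifest list becomes a directory under this root; ":" and "/" in
	// references are made file-safe by the store.
	s := store.NewStore("/tmp/manifest-store")

	listRef, err := reference.ParseNormalizedNamed("example.com/app:list")
	if err != nil {
		fmt.Println(err)
		return
	}
	manifestRef, err := reference.ParseNormalizedNamed("example.com/app:linux-amd64")
	if err != nil {
		fmt.Println(err)
		return
	}

	// Save an (empty, illustrative) manifest under the list, then read it back.
	if err := s.Save(listRef, manifestRef, types.ImageManifest{}); err != nil {
		fmt.Println("save failed:", err)
		return
	}
	manifests, err := s.GetList(listRef)
	if err != nil {
		fmt.Println("get failed:", err)
		return
	}
	fmt.Println("stored manifests:", len(manifests))
}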
114
vendor/github.com/docker/cli/cli/manifest/types/types.go
generated
vendored
Normal file
@ -0,0 +1,114 @@
package types

import (
	"encoding/json"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/reference"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

// ImageManifest contains info to output for a manifest object.
type ImageManifest struct {
	Ref        *SerializableNamed
	Descriptor ocispec.Descriptor

	// SchemaV2Manifest is used for inspection
	// TODO: Deprecate this and store manifest blobs
	SchemaV2Manifest *schema2.DeserializedManifest `json:",omitempty"`
}

// OCIPlatform creates an OCI platform from a manifest list platform spec
func OCIPlatform(ps *manifestlist.PlatformSpec) *ocispec.Platform {
	if ps == nil {
		return nil
	}
	return &ocispec.Platform{
		Architecture: ps.Architecture,
		OS:           ps.OS,
		OSVersion:    ps.OSVersion,
		OSFeatures:   ps.OSFeatures,
		Variant:      ps.Variant,
	}
}

// PlatformSpecFromOCI creates a platform spec from OCI platform
func PlatformSpecFromOCI(p *ocispec.Platform) *manifestlist.PlatformSpec {
	if p == nil {
		return nil
	}
	return &manifestlist.PlatformSpec{
		Architecture: p.Architecture,
		OS:           p.OS,
		OSVersion:    p.OSVersion,
		OSFeatures:   p.OSFeatures,
		Variant:      p.Variant,
	}
}

// Blobs returns the digests for all the blobs referenced by this manifest
func (i ImageManifest) Blobs() []digest.Digest {
	digests := []digest.Digest{}
	for _, descriptor := range i.SchemaV2Manifest.References() {
		digests = append(digests, descriptor.Digest)
	}
	return digests
}

// Payload returns the media type and bytes for the manifest
func (i ImageManifest) Payload() (string, []byte, error) {
	// TODO: If available, read content from a content store by digest
	switch {
	case i.SchemaV2Manifest != nil:
		return i.SchemaV2Manifest.Payload()
	default:
		return "", nil, errors.Errorf("%s has no payload", i.Ref)
	}
}

// References implements the distribution.Manifest interface. It delegates to
// the underlying manifest.
func (i ImageManifest) References() []distribution.Descriptor {
	switch {
	case i.SchemaV2Manifest != nil:
		return i.SchemaV2Manifest.References()
	default:
		return nil
	}
}

// NewImageManifest returns a new ImageManifest object. The values for Platform
// are initialized from those in the image
func NewImageManifest(ref reference.Named, desc ocispec.Descriptor, manifest *schema2.DeserializedManifest) ImageManifest {
	return ImageManifest{
		Ref:              &SerializableNamed{Named: ref},
		Descriptor:       desc,
		SchemaV2Manifest: manifest,
	}
}

// SerializableNamed is a reference.Named that can be serialized and deserialized
// from JSON
type SerializableNamed struct {
	reference.Named
}

// UnmarshalJSON loads the Named reference from JSON bytes
func (s *SerializableNamed) UnmarshalJSON(b []byte) error {
	var raw string
	if err := json.Unmarshal(b, &raw); err != nil {
		return errors.Wrapf(err, "invalid named reference bytes: %s", b)
	}
	var err error
	s.Named, err = reference.ParseNamed(raw)
	return err
}

// MarshalJSON returns the JSON bytes representation
func (s *SerializableNamed) MarshalJSON() ([]byte, error) {
	return json.Marshal(s.String())
}
211
vendor/github.com/docker/cli/cli/registry/client/client.go
generated
vendored
Normal file
@ -0,0 +1,211 @@
package client

import (
	"context"
	"fmt"
	"net/http"
	"strings"

	manifesttypes "github.com/docker/cli/cli/manifest/types"
	"github.com/docker/cli/cli/trust"
	"github.com/docker/distribution"
	"github.com/docker/distribution/reference"
	distributionclient "github.com/docker/distribution/registry/client"
	"github.com/docker/docker/api/types"
	registrytypes "github.com/docker/docker/api/types/registry"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// RegistryClient is a client used to communicate with a Docker distribution
// registry
type RegistryClient interface {
	GetManifest(ctx context.Context, ref reference.Named) (manifesttypes.ImageManifest, error)
	GetManifestList(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error)
	MountBlob(ctx context.Context, source reference.Canonical, target reference.Named) error
	PutManifest(ctx context.Context, ref reference.Named, manifest distribution.Manifest) (digest.Digest, error)
	GetTags(ctx context.Context, ref reference.Named) ([]string, error)
}

// NewRegistryClient returns a new RegistryClient with a resolver
func NewRegistryClient(resolver AuthConfigResolver, userAgent string, insecure bool) RegistryClient {
	return &client{
		authConfigResolver: resolver,
		insecureRegistry:   insecure,
		userAgent:          userAgent,
	}
}

// AuthConfigResolver returns Auth Configuration for an index
type AuthConfigResolver func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig

// PutManifestOptions is the data sent to push a manifest
type PutManifestOptions struct {
	MediaType string
	Payload   []byte
}

type client struct {
	authConfigResolver AuthConfigResolver
	insecureRegistry   bool
	userAgent          string
}

// ErrBlobCreated returned when a blob mount request was created
type ErrBlobCreated struct {
	From   reference.Named
	Target reference.Named
}

func (err ErrBlobCreated) Error() string {
	return fmt.Sprintf("blob mounted from: %v to: %v",
		err.From, err.Target)
}

// ErrHTTPProto returned if attempting to use TLS with a non-TLS registry
type ErrHTTPProto struct {
	OrigErr string
}

func (err ErrHTTPProto) Error() string {
	return err.OrigErr
}

var _ RegistryClient = &client{}

// MountBlob into the registry, so it can be referenced by a manifest
func (c *client) MountBlob(ctx context.Context, sourceRef reference.Canonical, targetRef reference.Named) error {
	repoEndpoint, err := newDefaultRepositoryEndpoint(targetRef, c.insecureRegistry)
	if err != nil {
		return err
	}
	repo, err := c.getRepositoryForReference(ctx, targetRef, repoEndpoint)
	if err != nil {
		return err
	}
	lu, err := repo.Blobs(ctx).Create(ctx, distributionclient.WithMountFrom(sourceRef))
	switch err.(type) {
	case distribution.ErrBlobMounted:
		logrus.Debugf("mount of blob %s succeeded", sourceRef)
		return nil
	case nil:
	default:
		return errors.Wrapf(err, "failed to mount blob %s to %s", sourceRef, targetRef)
	}
	lu.Cancel(ctx)
	logrus.Debugf("mount of blob %s created", sourceRef)
	return ErrBlobCreated{From: sourceRef, Target: targetRef}
}

// PutManifest sends the manifest to a registry and returns the new digest
func (c *client) PutManifest(ctx context.Context, ref reference.Named, manifest distribution.Manifest) (digest.Digest, error) {
	repoEndpoint, err := newDefaultRepositoryEndpoint(ref, c.insecureRegistry)
	if err != nil {
		return digest.Digest(""), err
	}

	repo, err := c.getRepositoryForReference(ctx, ref, repoEndpoint)
	if err != nil {
		return digest.Digest(""), err
	}

	manifestService, err := repo.Manifests(ctx)
	if err != nil {
		return digest.Digest(""), err
	}

	_, opts, err := getManifestOptionsFromReference(ref)
	if err != nil {
		return digest.Digest(""), err
	}

	dgst, err := manifestService.Put(ctx, manifest, opts...)
	return dgst, errors.Wrapf(err, "failed to put manifest %s", ref)
}

func (c *client) GetTags(ctx context.Context, ref reference.Named) ([]string, error) {
	repoEndpoint, err := newDefaultRepositoryEndpoint(ref, c.insecureRegistry)
	if err != nil {
		return nil, err
	}

	repo, err := c.getRepositoryForReference(ctx, ref, repoEndpoint)
	if err != nil {
		return nil, err
	}
	return repo.Tags(ctx).All(ctx)
}

func (c *client) getRepositoryForReference(ctx context.Context, ref reference.Named, repoEndpoint repositoryEndpoint) (distribution.Repository, error) {
	httpTransport, err := c.getHTTPTransportForRepoEndpoint(ctx, repoEndpoint)
	if err != nil {
		if strings.Contains(err.Error(), "server gave HTTP response to HTTPS client") {
			return nil, ErrHTTPProto{OrigErr: err.Error()}
		}
	}
	repoName, err := reference.WithName(repoEndpoint.Name())
	if err != nil {
		return nil, errors.Wrapf(err, "failed to parse repo name from %s", ref)
	}
	return distributionclient.NewRepository(repoName, repoEndpoint.BaseURL(), httpTransport)
}

func (c *client) getHTTPTransportForRepoEndpoint(ctx context.Context, repoEndpoint repositoryEndpoint) (http.RoundTripper, error) {
	httpTransport, err := getHTTPTransport(
		c.authConfigResolver(ctx, repoEndpoint.info.Index),
		repoEndpoint.endpoint,
		repoEndpoint.Name(),
		c.userAgent)
	return httpTransport, errors.Wrap(err, "failed to configure transport")
}

// GetManifest returns an ImageManifest for the reference
func (c *client) GetManifest(ctx context.Context, ref reference.Named) (manifesttypes.ImageManifest, error) {
	var result manifesttypes.ImageManifest
	fetch := func(ctx context.Context, repo distribution.Repository, ref reference.Named) (bool, error) {
		var err error
		result, err = fetchManifest(ctx, repo, ref)
		return result.Ref != nil, err
	}

	err := c.iterateEndpoints(ctx, ref, fetch)
	return result, err
}

// GetManifestList returns a list of ImageManifest for the reference
func (c *client) GetManifestList(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error) {
	result := []manifesttypes.ImageManifest{}
	fetch := func(ctx context.Context, repo distribution.Repository, ref reference.Named) (bool, error) {
		var err error
		result, err = fetchList(ctx, repo, ref)
		return len(result) > 0, err
	}

	err := c.iterateEndpoints(ctx, ref, fetch)
	return result, err
}

func getManifestOptionsFromReference(ref reference.Named) (digest.Digest, []distribution.ManifestServiceOption, error) {
	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		tag := tagged.Tag()
		return "", []distribution.ManifestServiceOption{distribution.WithTag(tag)}, nil
	}
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		return digested.Digest(), []distribution.ManifestServiceOption{}, nil
	}
	return "", nil, errors.Errorf("%s no tag or digest", ref)
}

// GetRegistryAuth returns the auth config given an input image
func GetRegistryAuth(ctx context.Context, resolver AuthConfigResolver, imageName string) (*types.AuthConfig, error) {
	distributionRef, err := reference.ParseNormalizedNamed(imageName)
	if err != nil {
		return nil, fmt.Errorf("Failed to parse image name: %s: %s", imageName, err)
	}
	imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, nil, resolver, distributionRef.String())
	if err != nil {
		return nil, fmt.Errorf("Failed to get imgRefAndAuth: %s", err)
	}
	return imgRefAndAuth.AuthConfig(), nil
}
133
vendor/github.com/docker/cli/cli/registry/client/endpoint.go
generated
vendored
Normal file
@ -0,0 +1,133 @@
package client

import (
	"fmt"
	"net"
	"net/http"
	"time"

	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client/auth"
	"github.com/docker/distribution/registry/client/transport"
	authtypes "github.com/docker/docker/api/types"
	"github.com/docker/docker/registry"
	"github.com/pkg/errors"
)

type repositoryEndpoint struct {
	info     *registry.RepositoryInfo
	endpoint registry.APIEndpoint
}

// Name returns the repository name
func (r repositoryEndpoint) Name() string {
	repoName := r.info.Name.Name()
	// If endpoint does not support CanonicalName, use the RemoteName instead
	if r.endpoint.TrimHostname {
		repoName = reference.Path(r.info.Name)
	}
	return repoName
}

// BaseURL returns the endpoint url
func (r repositoryEndpoint) BaseURL() string {
	return r.endpoint.URL.String()
}

func newDefaultRepositoryEndpoint(ref reference.Named, insecure bool) (repositoryEndpoint, error) {
	repoInfo, err := registry.ParseRepositoryInfo(ref)
	if err != nil {
		return repositoryEndpoint{}, err
	}
	endpoint, err := getDefaultEndpointFromRepoInfo(repoInfo)
	if err != nil {
		return repositoryEndpoint{}, err
	}
	if insecure {
		endpoint.TLSConfig.InsecureSkipVerify = true
	}
	return repositoryEndpoint{info: repoInfo, endpoint: endpoint}, nil
}

func getDefaultEndpointFromRepoInfo(repoInfo *registry.RepositoryInfo) (registry.APIEndpoint, error) {
	var err error

	options := registry.ServiceOptions{}
	registryService, err := registry.NewService(options)
	if err != nil {
		return registry.APIEndpoint{}, err
	}
	endpoints, err := registryService.LookupPushEndpoints(reference.Domain(repoInfo.Name))
	if err != nil {
		return registry.APIEndpoint{}, err
	}
	// Default to the highest priority endpoint to return
	endpoint := endpoints[0]
	if !repoInfo.Index.Secure {
		for _, ep := range endpoints {
			if ep.URL.Scheme == "http" {
				endpoint = ep
			}
		}
	}
	return endpoint, nil
}

// getHTTPTransport builds a transport for use in communicating with a registry
func getHTTPTransport(authConfig authtypes.AuthConfig, endpoint registry.APIEndpoint, repoName string, userAgent string) (http.RoundTripper, error) {
	// get the http transport, this will be used in a client to upload manifest
	base := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		Dial: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
			DualStack: true,
		}).Dial,
		TLSHandshakeTimeout: 10 * time.Second,
		TLSClientConfig:     endpoint.TLSConfig,
		DisableKeepAlives:   true,
	}

	modifiers := registry.Headers(userAgent, http.Header{})
	authTransport := transport.NewTransport(base, modifiers...)
	challengeManager, confirmedV2, err := registry.PingV2Registry(endpoint.URL, authTransport)
	if err != nil {
		return nil, errors.Wrap(err, "error pinging v2 registry")
	}
	if !confirmedV2 {
		return nil, fmt.Errorf("unsupported registry version")
	}
	if authConfig.RegistryToken != "" {
		passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken}
		modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler))
	} else {
		creds := registry.NewStaticCredentialStore(&authConfig)
		tokenHandler := auth.NewTokenHandler(authTransport, creds, repoName, "push", "pull")
		basicHandler := auth.NewBasicHandler(creds)
		modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
	}
	return transport.NewTransport(base, modifiers...), nil
}

// RepoNameForReference returns the repository name from a reference
func RepoNameForReference(ref reference.Named) (string, error) {
	// insecure is fine since this only returns the name
	repo, err := newDefaultRepositoryEndpoint(ref, false)
	if err != nil {
		return "", err
	}
	return repo.Name(), nil
}

type existingTokenHandler struct {
	token string
}

func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token))
	return nil
}

func (th *existingTokenHandler) Scheme() string {
	return "bearer"
}
308
vendor/github.com/docker/cli/cli/registry/client/fetcher.go
generated
vendored
Normal file
@ -0,0 +1,308 @@
package client

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/docker/cli/cli/manifest/types"
	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/distribution/registry/api/v2"
	distclient "github.com/docker/distribution/registry/client"
	"github.com/docker/docker/registry"
	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// fetchManifest pulls a manifest from a registry and returns it. An error
// is returned if no manifest is found matching namedRef.
func fetchManifest(ctx context.Context, repo distribution.Repository, ref reference.Named) (types.ImageManifest, error) {
	manifest, err := getManifest(ctx, repo, ref)
	if err != nil {
		return types.ImageManifest{}, err
	}

	switch v := manifest.(type) {
	// Removed Schema 1 support
	case *schema2.DeserializedManifest:
		imageManifest, err := pullManifestSchemaV2(ctx, ref, repo, *v)
		if err != nil {
			return types.ImageManifest{}, err
		}
		return imageManifest, nil
	case *manifestlist.DeserializedManifestList:
		return types.ImageManifest{}, errors.Errorf("%s is a manifest list", ref)
	}
	return types.ImageManifest{}, errors.Errorf("%s is not a manifest", ref)
}

func fetchList(ctx context.Context, repo distribution.Repository, ref reference.Named) ([]types.ImageManifest, error) {
	manifest, err := getManifest(ctx, repo, ref)
	if err != nil {
		return nil, err
	}

	switch v := manifest.(type) {
	case *manifestlist.DeserializedManifestList:
		imageManifests, err := pullManifestList(ctx, ref, repo, *v)
		if err != nil {
			return nil, err
		}
		return imageManifests, nil
	default:
		return nil, errors.Errorf("unsupported manifest format: %v", v)
	}
}

func getManifest(ctx context.Context, repo distribution.Repository, ref reference.Named) (distribution.Manifest, error) {
	manSvc, err := repo.Manifests(ctx)
	if err != nil {
		return nil, err
	}

	dgst, opts, err := getManifestOptionsFromReference(ref)
	if err != nil {
		return nil, errors.Errorf("image manifest for %q does not exist", ref)
	}
	return manSvc.Get(ctx, dgst, opts...)
}

func pullManifestSchemaV2(ctx context.Context, ref reference.Named, repo distribution.Repository, mfst schema2.DeserializedManifest) (types.ImageManifest, error) {
	manifestDesc, err := validateManifestDigest(ref, mfst)
	if err != nil {
		return types.ImageManifest{}, err
	}
	configJSON, err := pullManifestSchemaV2ImageConfig(ctx, mfst.Target().Digest, repo)
	if err != nil {
		return types.ImageManifest{}, err
	}

	if manifestDesc.Platform == nil {
		manifestDesc.Platform = &ocispec.Platform{}
	}

	// Fill in os and architecture fields from config JSON
	if err := json.Unmarshal(configJSON, manifestDesc.Platform); err != nil {
		return types.ImageManifest{}, err
	}

	return types.NewImageManifest(ref, manifestDesc, &mfst), nil
}

func pullManifestSchemaV2ImageConfig(ctx context.Context, dgst digest.Digest, repo distribution.Repository) ([]byte, error) {
	blobs := repo.Blobs(ctx)
	configJSON, err := blobs.Get(ctx, dgst)
	if err != nil {
		return nil, err
	}

	verifier := dgst.Verifier()
	if err != nil {
		return nil, err
	}
	if _, err := verifier.Write(configJSON); err != nil {
		return nil, err
	}
	if !verifier.Verified() {
		return nil, errors.Errorf("image config verification failed for digest %s", dgst)
	}
	return configJSON, nil
}

// validateManifestDigest computes the manifest digest, and, if pulling by
// digest, ensures that it matches the requested digest.
func validateManifestDigest(ref reference.Named, mfst distribution.Manifest) (ocispec.Descriptor, error) {
	mediaType, canonical, err := mfst.Payload()
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	desc := ocispec.Descriptor{
		Digest:    digest.FromBytes(canonical),
		Size:      int64(len(canonical)),
		MediaType: mediaType,
	}

	// If pull by digest, then verify the manifest digest.
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		if digested.Digest() != desc.Digest {
			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
			return ocispec.Descriptor{}, err
		}
	}

	return desc, nil
}

// pullManifestList handles "manifest lists" which point to various
// platform-specific manifests.
func pullManifestList(ctx context.Context, ref reference.Named, repo distribution.Repository, mfstList manifestlist.DeserializedManifestList) ([]types.ImageManifest, error) {
	infos := []types.ImageManifest{}

	if _, err := validateManifestDigest(ref, mfstList); err != nil {
		return nil, err
	}

	for _, manifestDescriptor := range mfstList.Manifests {
		manSvc, err := repo.Manifests(ctx)
		if err != nil {
			return nil, err
		}
		manifest, err := manSvc.Get(ctx, manifestDescriptor.Digest)
		if err != nil {
			return nil, err
		}
		v, ok := manifest.(*schema2.DeserializedManifest)
		if !ok {
			return nil, fmt.Errorf("unsupported manifest format: %v", v)
		}

		manifestRef, err := reference.WithDigest(ref, manifestDescriptor.Digest)
		if err != nil {
			return nil, err
		}
		imageManifest, err := pullManifestSchemaV2(ctx, manifestRef, repo, *v)
		if err != nil {
			return nil, err
		}

		// Replace platform from config
		imageManifest.Descriptor.Platform = types.OCIPlatform(&manifestDescriptor.Platform)

		infos = append(infos, imageManifest)
	}
	return infos, nil
}

func continueOnError(err error) bool {
	switch v := err.(type) {
	case errcode.Errors:
		if len(v) == 0 {
			return true
		}
		return continueOnError(v[0])
	case errcode.Error:
		e := err.(errcode.Error)
		switch e.Code {
		case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown:
			return true
		}
		return false
	case *distclient.UnexpectedHTTPResponseError:
		return true
	}
	return false
}

func (c *client) iterateEndpoints(ctx context.Context, namedRef reference.Named, each func(context.Context, distribution.Repository, reference.Named) (bool, error)) error {
	endpoints, err := allEndpoints(namedRef, c.insecureRegistry)
	if err != nil {
		return err
	}

	repoInfo, err := registry.ParseRepositoryInfo(namedRef)
	if err != nil {
		return err
	}

	confirmedTLSRegistries := make(map[string]bool)
	for _, endpoint := range endpoints {

		if endpoint.Version == registry.APIVersion1 {
			logrus.Debugf("skipping v1 endpoint %s", endpoint.URL)
			continue
		}

		if endpoint.URL.Scheme != "https" {
			if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS {
				logrus.Debugf("skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL)
				continue
			}
		}

		if c.insecureRegistry {
			endpoint.TLSConfig.InsecureSkipVerify = true
		}
		repoEndpoint := repositoryEndpoint{endpoint: endpoint, info: repoInfo}
		repo, err := c.getRepositoryForReference(ctx, namedRef, repoEndpoint)
		if err != nil {
			logrus.Debugf("error %s with repo endpoint %+v", err, repoEndpoint)
			if _, ok := err.(ErrHTTPProto); ok {
				continue
			}
			return err
		}

		if endpoint.URL.Scheme == "http" && !c.insecureRegistry {
			logrus.Debugf("skipping non-tls registry endpoint: %s", endpoint.URL)
			continue
		}
		done, err := each(ctx, repo, namedRef)
		if err != nil {
			if continueOnError(err) {
				if endpoint.URL.Scheme == "https" {
					confirmedTLSRegistries[endpoint.URL.Host] = true
				}
				logrus.Debugf("continuing on error (%T) %s", err, err)
				continue
			}
			logrus.Debugf("not continuing on error (%T) %s", err, err)
			return err
		}
		if done {
			return nil
		}
	}
	return newNotFoundError(namedRef.String())
}

// allEndpoints returns a list of endpoints ordered by priority (v2, https, v1).
func allEndpoints(namedRef reference.Named, insecure bool) ([]registry.APIEndpoint, error) {
	repoInfo, err := registry.ParseRepositoryInfo(namedRef)
	if err != nil {
		return nil, err
	}

	var serviceOpts registry.ServiceOptions
	if insecure {
		logrus.Debugf("allowing insecure registry for: %s", reference.Domain(namedRef))
		serviceOpts.InsecureRegistries = []string{reference.Domain(namedRef)}
	}
	registryService, err := registry.NewService(serviceOpts)
	if err != nil {
		return []registry.APIEndpoint{}, err
	}
	endpoints, err := registryService.LookupPullEndpoints(reference.Domain(repoInfo.Name))
	logrus.Debugf("endpoints for %s: %v", namedRef, endpoints)
	return endpoints, err
}

type notFoundError struct {
	object string
}

func newNotFoundError(ref string) *notFoundError {
	return &notFoundError{object: ref}
}

func (n *notFoundError) Error() string {
	return fmt.Sprintf("no such manifest: %s", n.object)
}

// NotFound interface
func (n *notFoundError) NotFound() {}

// IsNotFound returns true if the error is a not found error
func IsNotFound(err error) bool {
	_, ok := err.(notFound)
	return ok
}

type notFound interface {
	NotFound()
}
107
vendor/github.com/docker/cli/cli/required.go
generated
vendored
Normal file
@ -0,0 +1,107 @@
package cli

import (
	"strings"

	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

// NoArgs validates args and returns an error if there are any args
func NoArgs(cmd *cobra.Command, args []string) error {
	if len(args) == 0 {
		return nil
	}

	if cmd.HasSubCommands() {
		return errors.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n"))
	}

	return errors.Errorf(
		"%q accepts no arguments.\nSee '%s --help'.\n\nUsage: %s\n\n%s",
		cmd.CommandPath(),
		cmd.CommandPath(),
		cmd.UseLine(),
		cmd.Short,
	)
}

// RequiresMinArgs returns an error if there is not at least min args
func RequiresMinArgs(min int) cobra.PositionalArgs {
	return func(cmd *cobra.Command, args []string) error {
		if len(args) >= min {
			return nil
		}
		return errors.Errorf(
			"%q requires at least %d %s.\nSee '%s --help'.\n\nUsage: %s\n\n%s",
			cmd.CommandPath(),
			min,
			pluralize("argument", min),
			cmd.CommandPath(),
			cmd.UseLine(),
			cmd.Short,
		)
	}
}

// RequiresMaxArgs returns an error if there is not at most max args
func RequiresMaxArgs(max int) cobra.PositionalArgs {
	return func(cmd *cobra.Command, args []string) error {
		if len(args) <= max {
			return nil
		}
		return errors.Errorf(
			"%q requires at most %d %s.\nSee '%s --help'.\n\nUsage: %s\n\n%s",
			cmd.CommandPath(),
			max,
			pluralize("argument", max),
			cmd.CommandPath(),
			cmd.UseLine(),
			cmd.Short,
		)
	}
}

// RequiresRangeArgs returns an error if there is not at least min args and at most max args
func RequiresRangeArgs(min int, max int) cobra.PositionalArgs {
	return func(cmd *cobra.Command, args []string) error {
		if len(args) >= min && len(args) <= max {
			return nil
		}
		return errors.Errorf(
			"%q requires at least %d and at most %d %s.\nSee '%s --help'.\n\nUsage: %s\n\n%s",
			cmd.CommandPath(),
			min,
			max,
			pluralize("argument", max),
			cmd.CommandPath(),
			cmd.UseLine(),
			cmd.Short,
		)
	}
}

// ExactArgs returns an error if there is not the exact number of args
func ExactArgs(number int) cobra.PositionalArgs {
	return func(cmd *cobra.Command, args []string) error {
		if len(args) == number {
			return nil
		}
		return errors.Errorf(
			"%q requires exactly %d %s.\nSee '%s --help'.\n\nUsage: %s\n\n%s",
			cmd.CommandPath(),
			number,
			pluralize("argument", number),
			cmd.CommandPath(),
			cmd.UseLine(),
			cmd.Short,
		)
	}
}

func pluralize(word string, number int) string {
	if number == 1 {
		return word
	}
	return word + "s"
}
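A minimal sketch, not part of the vendored files above, showing how these validators plug into cobra's Args field; the "greet" command is an assumption for illustration.

package main

import (
	"fmt"

	"github.com/docker/cli/cli"
	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use:  "greet NAME",
		Args: cli.ExactArgs(1), // rejects zero or more than one positional argument
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("hello,", args[0])
			return nil
		},
	}
	cmd.SetArgs([]string{"world"})
	if err := cmd.Execute(); err != nil {
		fmt.Println(err)
	}
}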
56
vendor/github.com/docker/cli/cli/streams/in.go
generated
vendored
Normal file
@ -0,0 +1,56 @@
package streams

import (
	"errors"
	"io"
	"os"
	"runtime"

	"github.com/docker/docker/pkg/term"
)

// In is an input stream used by the DockerCli to read user input
type In struct {
	commonStream
	in io.ReadCloser
}

func (i *In) Read(p []byte) (int, error) {
	return i.in.Read(p)
}

// Close implements the Closer interface
func (i *In) Close() error {
	return i.in.Close()
}

// SetRawTerminal sets raw mode on the input terminal
func (i *In) SetRawTerminal() (err error) {
	if os.Getenv("NORAW") != "" || !i.commonStream.isTerminal {
		return nil
	}
	i.commonStream.state, err = term.SetRawTerminal(i.commonStream.fd)
	return err
}

// CheckTty checks if we are trying to attach to a container tty
// from a non-tty client input stream, and if so, returns an error.
func (i *In) CheckTty(attachStdin, ttyMode bool) error {
	// In order to attach to a container tty, input stream for the client must
	// be a tty itself: redirecting or piping the client standard input is
	// incompatible with `docker run -t`, `docker exec -t` or `docker attach`.
	if ttyMode && attachStdin && !i.isTerminal {
		eText := "the input device is not a TTY"
		if runtime.GOOS == "windows" {
			return errors.New(eText + ". If you are using mintty, try prefixing the command with 'winpty'")
		}
		return errors.New(eText)
	}
	return nil
}

// NewIn returns a new In object from a ReadCloser
func NewIn(in io.ReadCloser) *In {
	fd, isTerminal := term.GetFdInfo(in)
	return &In{commonStream: commonStream{fd: fd, isTerminal: isTerminal}, in: in}
}
50
vendor/github.com/docker/cli/cli/streams/out.go
generated
vendored
Normal file
@ -0,0 +1,50 @@
package streams

import (
	"io"
	"os"

	"github.com/docker/docker/pkg/term"
	"github.com/sirupsen/logrus"
)

// Out is an output stream used by the DockerCli to write normal program
// output.
type Out struct {
	commonStream
	out io.Writer
}

func (o *Out) Write(p []byte) (int, error) {
	return o.out.Write(p)
}

// SetRawTerminal sets raw mode on the input terminal
func (o *Out) SetRawTerminal() (err error) {
	if os.Getenv("NORAW") != "" || !o.commonStream.isTerminal {
		return nil
	}
	o.commonStream.state, err = term.SetRawTerminalOutput(o.commonStream.fd)
	return err
}

// GetTtySize returns the height and width in characters of the tty
func (o *Out) GetTtySize() (uint, uint) {
	if !o.isTerminal {
		return 0, 0
	}
	ws, err := term.GetWinsize(o.fd)
	if err != nil {
		logrus.Debugf("Error getting size: %s", err)
		if ws == nil {
			return 0, 0
		}
	}
	return uint(ws.Height), uint(ws.Width)
}

// NewOut returns a new Out object from a Writer
func NewOut(out io.Writer) *Out {
	fd, isTerminal := term.GetFdInfo(out)
	return &Out{commonStream: commonStream{fd: fd, isTerminal: isTerminal}, out: out}
}
34
vendor/github.com/docker/cli/cli/streams/stream.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
package streams

import (
	"github.com/docker/docker/pkg/term"
)

// commonStream is an input stream used by the DockerCli to read user input
type commonStream struct {
	fd         uintptr
	isTerminal bool
	state      *term.State
}

// FD returns the file descriptor number for this stream
func (s *commonStream) FD() uintptr {
	return s.fd
}

// IsTerminal returns true if this stream is connected to a terminal
func (s *commonStream) IsTerminal() bool {
	return s.isTerminal
}

// RestoreTerminal restores normal mode to the terminal
func (s *commonStream) RestoreTerminal() {
	if s.state != nil {
		term.RestoreTerminal(s.fd, s.state)
	}
}

// SetIsTerminal sets the boolean used for isTerminal
func (s *commonStream) SetIsTerminal(isTerminal bool) {
	s.isTerminal = isTerminal
}
388
vendor/github.com/docker/cli/cli/trust/trust.go
generated
vendored
Normal file
388
vendor/github.com/docker/cli/cli/trust/trust.go
generated
vendored
Normal file
@ -0,0 +1,388 @@
|
||||
package trust
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
cliconfig "github.com/docker/cli/cli/config"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/client/auth"
|
||||
"github.com/docker/distribution/registry/client/auth/challenge"
|
||||
"github.com/docker/distribution/registry/client/transport"
|
||||
"github.com/docker/docker/api/types"
|
||||
registrytypes "github.com/docker/docker/api/types/registry"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/docker/go-connections/tlsconfig"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/theupdateframework/notary"
|
||||
"github.com/theupdateframework/notary/client"
|
||||
"github.com/theupdateframework/notary/passphrase"
|
||||
"github.com/theupdateframework/notary/storage"
|
||||
"github.com/theupdateframework/notary/trustmanager"
|
||||
"github.com/theupdateframework/notary/trustpinning"
|
||||
"github.com/theupdateframework/notary/tuf/data"
|
||||
"github.com/theupdateframework/notary/tuf/signed"
|
||||
)
|
||||
|
||||
var (
|
||||
// ReleasesRole is the role named "releases"
|
||||
ReleasesRole = data.RoleName(path.Join(data.CanonicalTargetsRole.String(), "releases"))
|
||||
// ActionsPullOnly defines the actions for read-only interactions with a Notary Repository
|
||||
ActionsPullOnly = []string{"pull"}
|
||||
// ActionsPushAndPull defines the actions for read-write interactions with a Notary Repository
|
||||
ActionsPushAndPull = []string{"pull", "push"}
|
||||
// NotaryServer is the endpoint serving the Notary trust server
|
||||
NotaryServer = "https://notary.docker.io"
|
||||
)
|
||||
|
||||
// GetTrustDirectory returns the base trust directory name
|
||||
func GetTrustDirectory() string {
|
||||
return filepath.Join(cliconfig.Dir(), "trust")
|
||||
}
|
||||
|
||||
// certificateDirectory returns the directory containing
|
||||
// TLS certificates for the given server. An error is
|
||||
// returned if there was an error parsing the server string.
|
||||
func certificateDirectory(server string) (string, error) {
|
||||
u, err := url.Parse(server)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return filepath.Join(cliconfig.Dir(), "tls", u.Host), nil
|
||||
}
|
||||
|
||||
// Server returns the base URL for the trust server.
|
||||
func Server(index *registrytypes.IndexInfo) (string, error) {
|
||||
if s := os.Getenv("DOCKER_CONTENT_TRUST_SERVER"); s != "" {
|
||||
urlObj, err := url.Parse(s)
|
||||
if err != nil || urlObj.Scheme != "https" {
|
||||
return "", errors.Errorf("valid https URL required for trust server, got %s", s)
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
if index.Official {
|
||||
return NotaryServer, nil
|
||||
}
|
||||
return "https://" + index.Name, nil
|
||||
}
|
||||
|
||||
type simpleCredentialStore struct {
|
||||
auth types.AuthConfig
|
||||
}
|
||||
|
||||
func (scs simpleCredentialStore) Basic(u *url.URL) (string, string) {
|
||||
return scs.auth.Username, scs.auth.Password
|
||||
}
|
||||
|
||||
func (scs simpleCredentialStore) RefreshToken(u *url.URL, service string) string {
|
||||
return scs.auth.IdentityToken
|
||||
}
|
||||
|
||||
func (scs simpleCredentialStore) SetRefreshToken(*url.URL, string, string) {
|
||||
}
|
||||
|
||||
// GetNotaryRepository returns a NotaryRepository which stores all the
|
||||
// information needed to operate on a notary repository.
|
||||
// It creates an HTTP transport providing authentication support.
|
||||
func GetNotaryRepository(in io.Reader, out io.Writer, userAgent string, repoInfo *registry.RepositoryInfo, authConfig *types.AuthConfig, actions ...string) (client.Repository, error) {
|
||||
server, err := Server(repoInfo.Index)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var cfg = tlsconfig.ClientDefault()
|
||||
cfg.InsecureSkipVerify = !repoInfo.Index.Secure
|
||||
|
||||
// Get certificate base directory
|
||||
certDir, err := certificateDirectory(server)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
logrus.Debugf("reading certificate directory: %s", certDir)
|
||||
|
||||
if err := registry.ReadCertsDirectory(cfg, certDir); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
base := &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
DualStack: true,
|
||||
}).Dial,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
TLSClientConfig: cfg,
|
||||
DisableKeepAlives: true,
|
||||
}
|
||||
|
||||
// Skip configuration headers since request is not going to Docker daemon
|
||||
modifiers := registry.Headers(userAgent, http.Header{})
|
||||
authTransport := transport.NewTransport(base, modifiers...)
|
||||
pingClient := &http.Client{
|
||||
Transport: authTransport,
|
||||
Timeout: 5 * time.Second,
|
||||
}
|
||||
endpointStr := server + "/v2/"
|
||||
req, err := http.NewRequest("GET", endpointStr, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
challengeManager := challenge.NewSimpleManager()
|
||||
|
||||
resp, err := pingClient.Do(req)
|
||||
if err != nil {
|
||||
// Ignore error on ping to operate in offline mode
|
||||
logrus.Debugf("Error pinging notary server %q: %s", endpointStr, err)
|
||||
} else {
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Add response to the challenge manager to parse out
|
||||
// authentication header and register authentication method
|
||||
if err := challengeManager.AddResponse(resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
scope := auth.RepositoryScope{
|
||||
Repository: repoInfo.Name.Name(),
|
||||
Actions: actions,
|
||||
Class: repoInfo.Class,
|
||||
}
|
||||
creds := simpleCredentialStore{auth: *authConfig}
|
||||
tokenHandlerOptions := auth.TokenHandlerOptions{
|
||||
Transport: authTransport,
|
||||
Credentials: creds,
|
||||
Scopes: []auth.Scope{scope},
|
||||
ClientID: registry.AuthClientID,
|
||||
}
|
||||
tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions)
|
||||
basicHandler := auth.NewBasicHandler(creds)
|
||||
modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
|
||||
tr := transport.NewTransport(base, modifiers...)
|
||||
|
||||
return client.NewFileCachedRepository(
|
||||
GetTrustDirectory(),
|
||||
data.GUN(repoInfo.Name.Name()),
|
||||
server,
|
||||
tr,
|
||||
GetPassphraseRetriever(in, out),
|
||||
trustpinning.TrustPinConfig{})
|
||||
}
|
||||
|
||||
// GetPassphraseRetriever returns a passphrase retriever that utilizes Content Trust env vars
|
||||
func GetPassphraseRetriever(in io.Reader, out io.Writer) notary.PassRetriever {
|
||||
aliasMap := map[string]string{
|
||||
"root": "root",
|
||||
"snapshot": "repository",
|
||||
"targets": "repository",
|
||||
"default": "repository",
|
||||
}
|
||||
baseRetriever := passphrase.PromptRetrieverWithInOut(in, out, aliasMap)
|
||||
env := map[string]string{
|
||||
"root": os.Getenv("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE"),
|
||||
"snapshot": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"),
|
||||
"targets": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"),
|
||||
"default": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"),
|
||||
}
|
||||
|
||||
return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) {
|
||||
if v := env[alias]; v != "" {
|
||||
return v, numAttempts > 1, nil
|
||||
}
|
||||
// For non-root roles, we can also try the "default" alias if it is specified
|
||||
if v := env["default"]; v != "" && alias != data.CanonicalRootRole.String() {
|
||||
return v, numAttempts > 1, nil
|
||||
}
|
||||
return baseRetriever(keyName, alias, createNew, numAttempts)
|
||||
}
|
||||
}
|
||||
|
||||
// NotaryError formats an error message received from the notary service
|
||||
func NotaryError(repoName string, err error) error {
|
||||
switch err.(type) {
|
||||
case *json.SyntaxError:
|
||||
logrus.Debugf("Notary syntax error: %s", err)
|
||||
return errors.Errorf("Error: no trust data available for remote repository %s. Try running notary server and setting DOCKER_CONTENT_TRUST_SERVER to its HTTPS address?", repoName)
|
||||
case signed.ErrExpired:
|
||||
return errors.Errorf("Error: remote repository %s out-of-date: %v", repoName, err)
|
||||
case trustmanager.ErrKeyNotFound:
|
||||
return errors.Errorf("Error: signing keys for remote repository %s not found: %v", repoName, err)
|
||||
case storage.NetworkError:
|
||||
return errors.Errorf("Error: error contacting notary server: %v", err)
|
||||
case storage.ErrMetaNotFound:
|
||||
return errors.Errorf("Error: trust data missing for remote repository %s or remote repository not found: %v", repoName, err)
|
||||
case trustpinning.ErrRootRotationFail, trustpinning.ErrValidationFail, signed.ErrInvalidKeyType:
|
||||
return errors.Errorf("Warning: potential malicious behavior - trust data mismatch for remote repository %s: %v", repoName, err)
|
||||
case signed.ErrNoKeys:
|
||||
return errors.Errorf("Error: could not find signing keys for remote repository %s, or could not decrypt signing key: %v", repoName, err)
|
||||
case signed.ErrLowVersion:
|
||||
return errors.Errorf("Warning: potential malicious behavior - trust data version is lower than expected for remote repository %s: %v", repoName, err)
|
||||
case signed.ErrRoleThreshold:
|
||||
return errors.Errorf("Warning: potential malicious behavior - trust data has insufficient signatures for remote repository %s: %v", repoName, err)
|
||||
case client.ErrRepositoryNotExist:
|
||||
return errors.Errorf("Error: remote trust data does not exist for %s: %v", repoName, err)
|
||||
case signed.ErrInsufficientSignatures:
|
||||
return errors.Errorf("Error: could not produce valid signature for %s. If Yubikey was used, was touch input provided?: %v", repoName, err)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// GetSignableRoles returns a list of roles for which we have valid signing
|
||||
// keys, given a notary repository and a target
|
||||
func GetSignableRoles(repo client.Repository, target *client.Target) ([]data.RoleName, error) {
|
||||
var signableRoles []data.RoleName
|
||||
|
||||
// translate the full key names, which includes the GUN, into just the key IDs
|
||||
allCanonicalKeyIDs := make(map[string]struct{})
|
||||
for fullKeyID := range repo.GetCryptoService().ListAllKeys() {
|
||||
allCanonicalKeyIDs[path.Base(fullKeyID)] = struct{}{}
|
||||
}
|
||||
|
||||
allDelegationRoles, err := repo.GetDelegationRoles()
|
||||
if err != nil {
|
||||
return signableRoles, err
|
||||
}
|
||||
|
||||
// if there are no delegation roles, then just try to sign it into the targets role
|
||||
if len(allDelegationRoles) == 0 {
|
||||
signableRoles = append(signableRoles, data.CanonicalTargetsRole)
|
||||
return signableRoles, nil
|
||||
}
|
||||
|
||||
// there are delegation roles, find every delegation role we have a key for, and
|
||||
// attempt to sign into all those roles.
|
||||
for _, delegationRole := range allDelegationRoles {
|
||||
// We do not support signing any delegation role that isn't a direct child of the targets role.
|
||||
// Also don't bother checking the keys if we can't add the target
|
||||
// to this role due to path restrictions
|
||||
if path.Dir(delegationRole.Name.String()) != data.CanonicalTargetsRole.String() || !delegationRole.CheckPaths(target.Name) {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, canonicalKeyID := range delegationRole.KeyIDs {
|
||||
if _, ok := allCanonicalKeyIDs[canonicalKeyID]; ok {
|
||||
signableRoles = append(signableRoles, delegationRole.Name)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(signableRoles) == 0 {
|
||||
return signableRoles, errors.Errorf("no valid signing keys for delegation roles")
|
||||
}
|
||||
|
||||
return signableRoles, nil
|
||||
|
||||
}
|
||||
|
||||
// ImageRefAndAuth contains all reference information and the auth config for an image request
|
||||
type ImageRefAndAuth struct {
|
||||
original string
|
||||
authConfig *types.AuthConfig
|
||||
reference reference.Named
|
||||
repoInfo *registry.RepositoryInfo
|
||||
tag string
|
||||
digest digest.Digest
|
||||
}
|
||||
|
||||
// GetImageReferencesAndAuth retrieves the necessary reference and auth information for an image name
|
||||
// as an ImageRefAndAuth struct
|
||||
func GetImageReferencesAndAuth(ctx context.Context, rs registry.Service,
|
||||
authResolver func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig,
|
||||
imgName string,
|
||||
) (ImageRefAndAuth, error) {
|
||||
ref, err := reference.ParseNormalizedNamed(imgName)
|
||||
if err != nil {
|
||||
return ImageRefAndAuth{}, err
|
||||
}
|
||||
|
||||
// Resolve the Repository name from fqn to RepositoryInfo
|
||||
var repoInfo *registry.RepositoryInfo
|
||||
if rs != nil {
|
||||
repoInfo, err = rs.ResolveRepository(ref)
|
||||
} else {
|
||||
repoInfo, err = registry.ParseRepositoryInfo(ref)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return ImageRefAndAuth{}, err
|
||||
}
|
||||
|
||||
authConfig := authResolver(ctx, repoInfo.Index)
|
||||
return ImageRefAndAuth{
|
||||
original: imgName,
|
||||
authConfig: &authConfig,
|
||||
reference: ref,
|
||||
repoInfo: repoInfo,
|
||||
tag: getTag(ref),
|
||||
digest: getDigest(ref),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getTag(ref reference.Named) string {
|
||||
switch x := ref.(type) {
|
||||
case reference.Canonical, reference.Digested:
|
||||
return ""
|
||||
case reference.NamedTagged:
|
||||
return x.Tag()
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
func getDigest(ref reference.Named) digest.Digest {
|
||||
switch x := ref.(type) {
|
||||
case reference.Canonical:
|
||||
return x.Digest()
|
||||
case reference.Digested:
|
||||
return x.Digest()
|
||||
default:
|
||||
return digest.Digest("")
|
||||
}
|
||||
}
|
||||
|
||||
// AuthConfig returns the auth information (username, etc) for a given ImageRefAndAuth
|
||||
func (imgRefAuth *ImageRefAndAuth) AuthConfig() *types.AuthConfig {
|
||||
return imgRefAuth.authConfig
|
||||
}
|
||||
|
||||
// Reference returns the Image reference for a given ImageRefAndAuth
|
||||
func (imgRefAuth *ImageRefAndAuth) Reference() reference.Named {
|
||||
return imgRefAuth.reference
|
||||
}
|
||||
|
||||
// RepoInfo returns the repository information for a given ImageRefAndAuth
|
||||
func (imgRefAuth *ImageRefAndAuth) RepoInfo() *registry.RepositoryInfo {
|
||||
return imgRefAuth.repoInfo
|
||||
}
|
||||
|
||||
// Tag returns the Image tag for a given ImageRefAndAuth
|
||||
func (imgRefAuth *ImageRefAndAuth) Tag() string {
|
||||
return imgRefAuth.tag
|
||||
}
|
||||
|
||||
// Digest returns the Image digest for a given ImageRefAndAuth
|
||||
func (imgRefAuth *ImageRefAndAuth) Digest() digest.Digest {
|
||||
return imgRefAuth.digest
|
||||
}
|
||||
|
||||
// Name returns the image name used to initialize the ImageRefAndAuth
|
||||
func (imgRefAuth *ImageRefAndAuth) Name() string {
|
||||
return imgRefAuth.original
|
||||
|
||||
}
|
10
vendor/github.com/docker/cli/cli/version/version.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
package version
|
||||
|
||||
// Default build-time variables.
|
||||
// These values are overridden via ldflags
|
||||
var (
|
||||
PlatformName = ""
|
||||
Version = "unknown-version"
|
||||
GitCommit = "unknown-commit"
|
||||
BuildTime = "unknown-buildtime"
|
||||
)
|
78
vendor/github.com/docker/cli/internal/containerizedengine/containerd.go
generated
vendored
Normal file
@ -0,0 +1,78 @@
|
||||
package containerizedengine
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/remotes/docker"
|
||||
clitypes "github.com/docker/cli/types"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/jsonmessage"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// NewClient returns a new containerizedengine client
|
||||
// This client can be used to manage the lifecycle of
|
||||
// dockerd running as a container on containerd.
|
||||
func NewClient(sockPath string) (clitypes.ContainerizedClient, error) {
|
||||
if sockPath == "" {
|
||||
sockPath = containerdSockPath
|
||||
}
|
||||
cclient, err := containerd.New(sockPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &baseClient{
|
||||
cclient: cclient,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close will close the underlying clients
|
||||
func (c *baseClient) Close() error {
|
||||
return c.cclient.Close()
|
||||
}
|
||||
|
||||
func (c *baseClient) pullWithAuth(ctx context.Context, imageName string, out clitypes.OutStream,
|
||||
authConfig *types.AuthConfig) (containerd.Image, error) {
|
||||
|
||||
resolver := docker.NewResolver(docker.ResolverOptions{
|
||||
Credentials: func(string) (string, string, error) {
|
||||
return authConfig.Username, authConfig.Password, nil
|
||||
},
|
||||
})
|
||||
|
||||
ongoing := newJobs(imageName)
|
||||
pctx, stopProgress := context.WithCancel(ctx)
|
||||
progress := make(chan struct{})
|
||||
bufin, bufout := io.Pipe()
|
||||
|
||||
go func() {
|
||||
showProgress(pctx, ongoing, c.cclient.ContentStore(), bufout)
|
||||
}()
|
||||
|
||||
go func() {
|
||||
jsonmessage.DisplayJSONMessagesToStream(bufin, out, nil)
|
||||
close(progress)
|
||||
}()
|
||||
|
||||
h := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||
if desc.MediaType != images.MediaTypeDockerSchema1Manifest {
|
||||
ongoing.add(desc)
|
||||
}
|
||||
return nil, nil
|
||||
})
|
||||
|
||||
image, err := c.cclient.Pull(ctx, imageName,
|
||||
containerd.WithResolver(resolver),
|
||||
containerd.WithImageHandler(h),
|
||||
containerd.WithPullUnpack)
|
||||
stopProgress()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
<-progress
|
||||
return image, nil
|
||||
}
|
215
vendor/github.com/docker/cli/internal/containerizedengine/progress.go
generated
vendored
Normal file
@ -0,0 +1,215 @@
|
||||
package containerizedengine
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/remotes"
|
||||
"github.com/docker/docker/pkg/jsonmessage"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, out io.WriteCloser) {
|
||||
var (
|
||||
ticker = time.NewTicker(100 * time.Millisecond)
|
||||
start = time.Now()
|
||||
enc = json.NewEncoder(out)
|
||||
statuses = map[string]statusInfo{}
|
||||
done bool
|
||||
)
|
||||
defer ticker.Stop()
|
||||
|
||||
outer:
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
|
||||
resolved := "resolved"
|
||||
if !ongoing.isResolved() {
|
||||
resolved = "resolving"
|
||||
}
|
||||
statuses[ongoing.name] = statusInfo{
|
||||
Ref: ongoing.name,
|
||||
Status: resolved,
|
||||
}
|
||||
keys := []string{ongoing.name}
|
||||
|
||||
activeSeen := map[string]struct{}{}
|
||||
if !done {
|
||||
active, err := cs.ListStatuses(ctx, "")
|
||||
if err != nil {
|
||||
logrus.Debugf("active check failed: %s", err)
|
||||
continue
|
||||
}
|
||||
// update status of active entries!
|
||||
for _, active := range active {
|
||||
statuses[active.Ref] = statusInfo{
|
||||
Ref: active.Ref,
|
||||
Status: "downloading",
|
||||
Offset: active.Offset,
|
||||
Total: active.Total,
|
||||
StartedAt: active.StartedAt,
|
||||
UpdatedAt: active.UpdatedAt,
|
||||
}
|
||||
activeSeen[active.Ref] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
err := updateNonActive(ctx, ongoing, cs, statuses, &keys, activeSeen, &done, start)
|
||||
if err != nil {
|
||||
continue outer
|
||||
}
|
||||
|
||||
var ordered []statusInfo
|
||||
for _, key := range keys {
|
||||
ordered = append(ordered, statuses[key])
|
||||
}
|
||||
|
||||
for _, si := range ordered {
|
||||
jm := si.JSONMessage()
|
||||
err := enc.Encode(jm)
|
||||
if err != nil {
|
||||
logrus.Debugf("failed to encode progress message: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
if done {
|
||||
out.Close()
|
||||
return
|
||||
}
|
||||
case <-ctx.Done():
|
||||
done = true // allow ui to update once more
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func updateNonActive(ctx context.Context, ongoing *jobs, cs content.Store, statuses map[string]statusInfo, keys *[]string, activeSeen map[string]struct{}, done *bool, start time.Time) error {
|
||||
|
||||
for _, j := range ongoing.jobs() {
|
||||
key := remotes.MakeRefKey(ctx, j)
|
||||
*keys = append(*keys, key)
|
||||
if _, ok := activeSeen[key]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
status, ok := statuses[key]
|
||||
if !*done && (!ok || status.Status == "downloading") {
|
||||
info, err := cs.Info(ctx, j.Digest)
|
||||
if err != nil {
|
||||
if !errdefs.IsNotFound(err) {
|
||||
logrus.Debugf("failed to get content info: %s", err)
|
||||
return err
|
||||
}
|
||||
statuses[key] = statusInfo{
|
||||
Ref: key,
|
||||
Status: "waiting",
|
||||
}
|
||||
} else if info.CreatedAt.After(start) {
|
||||
statuses[key] = statusInfo{
|
||||
Ref: key,
|
||||
Status: "done",
|
||||
Offset: info.Size,
|
||||
Total: info.Size,
|
||||
UpdatedAt: info.CreatedAt,
|
||||
}
|
||||
} else {
|
||||
statuses[key] = statusInfo{
|
||||
Ref: key,
|
||||
Status: "exists",
|
||||
}
|
||||
}
|
||||
} else if *done {
|
||||
if ok {
|
||||
if status.Status != "done" && status.Status != "exists" {
|
||||
status.Status = "done"
|
||||
statuses[key] = status
|
||||
}
|
||||
} else {
|
||||
statuses[key] = statusInfo{
|
||||
Ref: key,
|
||||
Status: "done",
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type jobs struct {
|
||||
name string
|
||||
added map[digest.Digest]struct{}
|
||||
descs []ocispec.Descriptor
|
||||
mu sync.Mutex
|
||||
resolved bool
|
||||
}
|
||||
|
||||
func newJobs(name string) *jobs {
|
||||
return &jobs{
|
||||
name: name,
|
||||
added: map[digest.Digest]struct{}{},
|
||||
}
|
||||
}
|
||||
|
||||
func (j *jobs) add(desc ocispec.Descriptor) {
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
j.resolved = true
|
||||
|
||||
if _, ok := j.added[desc.Digest]; ok {
|
||||
return
|
||||
}
|
||||
j.descs = append(j.descs, desc)
|
||||
j.added[desc.Digest] = struct{}{}
|
||||
}
|
||||
|
||||
func (j *jobs) jobs() []ocispec.Descriptor {
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
|
||||
var descs []ocispec.Descriptor
|
||||
return append(descs, j.descs...)
|
||||
}
|
||||
|
||||
func (j *jobs) isResolved() bool {
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
return j.resolved
|
||||
}
|
||||
|
||||
// statusInfo holds the status info for an upload or download
|
||||
type statusInfo struct {
|
||||
Ref string
|
||||
Status string
|
||||
Offset int64
|
||||
Total int64
|
||||
StartedAt time.Time
|
||||
UpdatedAt time.Time
|
||||
}
|
||||
|
||||
func (s statusInfo) JSONMessage() jsonmessage.JSONMessage {
|
||||
// Shorten the ID to use up less width on the display
|
||||
id := s.Ref
|
||||
if strings.Contains(id, ":") {
|
||||
split := strings.SplitN(id, ":", 2)
|
||||
id = split[1]
|
||||
}
|
||||
id = fmt.Sprintf("%.12s", id)
|
||||
|
||||
return jsonmessage.JSONMessage{
|
||||
ID: id,
|
||||
Status: s.Status,
|
||||
Progress: &jsonmessage.JSONProgress{
|
||||
Current: s.Offset,
|
||||
Total: s.Total,
|
||||
},
|
||||
}
|
||||
}
|
49
vendor/github.com/docker/cli/internal/containerizedengine/types.go
generated
vendored
Normal file
@ -0,0 +1,49 @@
|
||||
package containerizedengine
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/containers"
|
||||
"github.com/containerd/containerd/content"
|
||||
)
|
||||
|
||||
const (
|
||||
containerdSockPath = "/run/containerd/containerd.sock"
|
||||
engineNamespace = "com.docker"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrEngineAlreadyPresent returned when engine already present and should not be
|
||||
ErrEngineAlreadyPresent = errors.New("engine already present, use the update command to change versions")
|
||||
|
||||
// ErrEngineNotPresent returned when the engine is not present and should be
|
||||
ErrEngineNotPresent = errors.New("engine not present")
|
||||
|
||||
// ErrMalformedConfigFileParam returned if the engine config file parameter is malformed
|
||||
ErrMalformedConfigFileParam = errors.New("malformed --config-file param on engine")
|
||||
|
||||
// ErrEngineConfigLookupFailure returned if unable to lookup existing engine configuration
|
||||
ErrEngineConfigLookupFailure = errors.New("unable to lookup existing engine configuration")
|
||||
|
||||
// ErrEngineShutdownTimeout returned if the engine failed to shutdown in time
|
||||
ErrEngineShutdownTimeout = errors.New("timeout waiting for engine to exit")
|
||||
)
|
||||
|
||||
type baseClient struct {
|
||||
cclient containerdClient
|
||||
}
|
||||
|
||||
// containerdClient abstracts the containerd client to aid in testability
|
||||
type containerdClient interface {
|
||||
Containers(ctx context.Context, filters ...string) ([]containerd.Container, error)
|
||||
NewContainer(ctx context.Context, id string, opts ...containerd.NewContainerOpts) (containerd.Container, error)
|
||||
Pull(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error)
|
||||
GetImage(ctx context.Context, ref string) (containerd.Image, error)
|
||||
Close() error
|
||||
ContentStore() content.Store
|
||||
ContainerService() containers.Store
|
||||
Install(context.Context, containerd.Image, ...containerd.InstallOpts) error
|
||||
Version(ctx context.Context) (containerd.Version, error)
|
||||
}
|
183
vendor/github.com/docker/cli/internal/containerizedengine/update.go
generated
vendored
Normal file
@ -0,0 +1,183 @@
|
||||
package containerizedengine
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
"github.com/docker/cli/internal/versions"
|
||||
clitypes "github.com/docker/cli/types"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api/types"
|
||||
ver "github.com/hashicorp/go-version"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ActivateEngine will switch the image from the CE to EE image
|
||||
func (c *baseClient) ActivateEngine(ctx context.Context, opts clitypes.EngineInitOptions, out clitypes.OutStream,
|
||||
authConfig *types.AuthConfig) error {
|
||||
|
||||
// If the user didn't specify an image, determine the correct enterprise image to use
|
||||
if opts.EngineImage == "" {
|
||||
localMetadata, err := versions.GetCurrentRuntimeMetadata(opts.RuntimeMetadataDir)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to determine the installed engine version. Specify which engine image to update with --engine-image")
|
||||
}
|
||||
|
||||
engineImage := localMetadata.EngineImage
|
||||
if engineImage == clitypes.EnterpriseEngineImage || engineImage == clitypes.CommunityEngineImage {
|
||||
opts.EngineImage = clitypes.EnterpriseEngineImage
|
||||
} else {
|
||||
// Chop off the standard prefix and retain any trailing OS specific image details
|
||||
// e.g., engine-community-dm -> engine-enterprise-dm
|
||||
engineImage = strings.TrimPrefix(engineImage, clitypes.EnterpriseEngineImage)
|
||||
engineImage = strings.TrimPrefix(engineImage, clitypes.CommunityEngineImage)
|
||||
opts.EngineImage = clitypes.EnterpriseEngineImage + engineImage
|
||||
}
|
||||
}
|
||||
|
||||
ctx = namespaces.WithNamespace(ctx, engineNamespace)
|
||||
return c.DoUpdate(ctx, opts, out, authConfig)
|
||||
}
|
||||
|
||||
// DoUpdate performs the underlying engine update
|
||||
func (c *baseClient) DoUpdate(ctx context.Context, opts clitypes.EngineInitOptions, out clitypes.OutStream,
|
||||
authConfig *types.AuthConfig) error {
|
||||
|
||||
ctx = namespaces.WithNamespace(ctx, engineNamespace)
|
||||
if opts.EngineVersion == "" {
|
||||
// TODO - Future enhancement: This could be improved to be
|
||||
// smart about figuring out the latest patch rev for the
|
||||
// current engine version and automatically apply it so users
|
||||
// could stay in sync by simply having a scheduled
|
||||
// `docker engine update`
|
||||
return fmt.Errorf("pick the version you want to update to with --version")
|
||||
}
|
||||
var localMetadata *clitypes.RuntimeMetadata
|
||||
if opts.EngineImage == "" {
|
||||
var err error
|
||||
localMetadata, err = versions.GetCurrentRuntimeMetadata(opts.RuntimeMetadataDir)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to determine the installed engine version. Specify which engine image to update with --engine-image set to 'engine-community' or 'engine-enterprise'")
|
||||
}
|
||||
opts.EngineImage = localMetadata.EngineImage
|
||||
}
|
||||
|
||||
imageName := fmt.Sprintf("%s/%s:%s", opts.RegistryPrefix, opts.EngineImage, opts.EngineVersion)
|
||||
|
||||
// Look for desired image
|
||||
image, err := c.cclient.GetImage(ctx, imageName)
|
||||
if err != nil {
|
||||
if errdefs.IsNotFound(err) {
|
||||
image, err = c.pullWithAuth(ctx, imageName, out, authConfig)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to pull image %s", imageName)
|
||||
}
|
||||
} else {
|
||||
return errors.Wrapf(err, "unable to check for image %s", imageName)
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure we're safe to proceed
|
||||
newMetadata, err := c.PreflightCheck(ctx, image)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if localMetadata != nil {
|
||||
if localMetadata.Platform != newMetadata.Platform {
|
||||
fmt.Fprintf(out, "\nNotice: you have switched to \"%s\". Refer to %s for update instructions.\n\n", newMetadata.Platform, getReleaseNotesURL(imageName))
|
||||
}
|
||||
}
|
||||
|
||||
if err := c.cclient.Install(ctx, image, containerd.WithInstallReplace, containerd.WithInstallPath("/usr")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return versions.WriteRuntimeMetadata(opts.RuntimeMetadataDir, newMetadata)
|
||||
}
|
||||
|
||||
// PreflightCheck verifies the specified image is compatible with the local system before proceeding to update/activate
|
||||
// If things look good, the RuntimeMetadata for the new image is returned and can be written out to the host
|
||||
func (c *baseClient) PreflightCheck(ctx context.Context, image containerd.Image) (*clitypes.RuntimeMetadata, error) {
|
||||
var metadata clitypes.RuntimeMetadata
|
||||
ic, err := image.Config(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var (
|
||||
ociimage v1.Image
|
||||
config v1.ImageConfig
|
||||
)
|
||||
switch ic.MediaType {
|
||||
case v1.MediaTypeImageConfig, images.MediaTypeDockerSchema2Config:
|
||||
p, err := content.ReadBlob(ctx, image.ContentStore(), ic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(p, &ociimage); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config = ociimage.Config
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown image %s config media type %s", image.Name(), ic.MediaType)
|
||||
}
|
||||
|
||||
metadataString, ok := config.Labels["com.docker."+clitypes.RuntimeMetadataName]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("image %s does not contain runtime metadata label %s", image.Name(), clitypes.RuntimeMetadataName)
|
||||
}
|
||||
err = json.Unmarshal([]byte(metadataString), &metadata)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "malformed runtime metadata file in %s", image.Name())
|
||||
}
|
||||
|
||||
// Current CLI only supports host install runtime
|
||||
if metadata.Runtime != "host_install" {
|
||||
return nil, fmt.Errorf("unsupported daemon image: %s\nConsult the release notes at %s for upgrade instructions", metadata.Runtime, getReleaseNotesURL(image.Name()))
|
||||
}
|
||||
|
||||
// Verify local containerd is new enough
|
||||
localVersion, err := c.cclient.Version(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if metadata.ContainerdMinVersion != "" {
|
||||
lv, err := ver.NewVersion(localVersion.Version)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mv, err := ver.NewVersion(metadata.ContainerdMinVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if lv.LessThan(mv) {
|
||||
return nil, fmt.Errorf("local containerd is too old: %s - this engine version requires %s or newer.\nConsult the release notes at %s for upgrade instructions",
|
||||
localVersion.Version, metadata.ContainerdMinVersion, getReleaseNotesURL(image.Name()))
|
||||
}
|
||||
} // If omitted on metadata, no hard dependency on containerd version beyond 18.09 baseline
|
||||
|
||||
// All checks look OK, proceed with update
|
||||
return &metadata, nil
|
||||
}
|
||||
|
||||
// getReleaseNotesURL returns a release notes url
|
||||
// If the image name does not contain a version tag, the base release notes URL is returned
|
||||
func getReleaseNotesURL(imageName string) string {
|
||||
versionTag := ""
|
||||
distributionRef, err := reference.ParseNormalizedNamed(imageName)
|
||||
if err == nil {
|
||||
taggedRef, ok := distributionRef.(reference.NamedTagged)
|
||||
if ok {
|
||||
versionTag = taggedRef.Tag()
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("%s/%s", clitypes.ReleaseNotePrefix, versionTag)
|
||||
}
|
127
vendor/github.com/docker/cli/internal/versions/versions.go
generated
vendored
Normal file
@ -0,0 +1,127 @@
|
||||
package versions
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
|
||||
registryclient "github.com/docker/cli/cli/registry/client"
|
||||
clitypes "github.com/docker/cli/types"
|
||||
"github.com/docker/distribution/reference"
|
||||
ver "github.com/hashicorp/go-version"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
// defaultRuntimeMetadataDir is the location where the metadata file is stored
|
||||
defaultRuntimeMetadataDir = "/var/lib/docker-engine"
|
||||
)
|
||||
|
||||
// GetEngineVersions reports the versions of the engine that are available
|
||||
func GetEngineVersions(ctx context.Context, registryClient registryclient.RegistryClient, registryPrefix, imageName, versionString string) (clitypes.AvailableVersions, error) {
|
||||
|
||||
if imageName == "" {
|
||||
var err error
|
||||
localMetadata, err := GetCurrentRuntimeMetadata("")
|
||||
if err != nil {
|
||||
return clitypes.AvailableVersions{}, err
|
||||
}
|
||||
imageName = localMetadata.EngineImage
|
||||
}
|
||||
imageRef, err := reference.ParseNormalizedNamed(path.Join(registryPrefix, imageName))
|
||||
if err != nil {
|
||||
return clitypes.AvailableVersions{}, err
|
||||
}
|
||||
|
||||
tags, err := registryClient.GetTags(ctx, imageRef)
|
||||
if err != nil {
|
||||
return clitypes.AvailableVersions{}, err
|
||||
}
|
||||
|
||||
return parseTags(tags, versionString)
|
||||
}
|
||||
|
||||
func parseTags(tags []string, currentVersion string) (clitypes.AvailableVersions, error) {
|
||||
var ret clitypes.AvailableVersions
|
||||
currentVer, err := ver.NewVersion(currentVersion)
|
||||
if err != nil {
|
||||
return ret, errors.Wrapf(err, "failed to parse existing version %s", currentVersion)
|
||||
}
|
||||
downgrades := []clitypes.DockerVersion{}
|
||||
patches := []clitypes.DockerVersion{}
|
||||
upgrades := []clitypes.DockerVersion{}
|
||||
currentSegments := currentVer.Segments()
|
||||
for _, tag := range tags {
|
||||
tmp, err := ver.NewVersion(tag)
|
||||
if err != nil {
|
||||
logrus.Debugf("Unable to parse %s: %s", tag, err)
|
||||
continue
|
||||
}
|
||||
testVersion := clitypes.DockerVersion{Version: *tmp, Tag: tag}
|
||||
if testVersion.LessThan(currentVer) {
|
||||
downgrades = append(downgrades, testVersion)
|
||||
continue
|
||||
}
|
||||
testSegments := testVersion.Segments()
|
||||
// lib always provides min 3 segments
|
||||
if testSegments[0] == currentSegments[0] &&
|
||||
testSegments[1] == currentSegments[1] {
|
||||
patches = append(patches, testVersion)
|
||||
} else {
|
||||
upgrades = append(upgrades, testVersion)
|
||||
}
|
||||
}
|
||||
sort.Slice(downgrades, func(i, j int) bool {
|
||||
return downgrades[i].Version.LessThan(&downgrades[j].Version)
|
||||
})
|
||||
sort.Slice(patches, func(i, j int) bool {
|
||||
return patches[i].Version.LessThan(&patches[j].Version)
|
||||
})
|
||||
sort.Slice(upgrades, func(i, j int) bool {
|
||||
return upgrades[i].Version.LessThan(&upgrades[j].Version)
|
||||
})
|
||||
ret.Downgrades = downgrades
|
||||
ret.Patches = patches
|
||||
ret.Upgrades = upgrades
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// GetCurrentRuntimeMetadata loads the current daemon runtime metadata information from the local host
|
||||
func GetCurrentRuntimeMetadata(metadataDir string) (*clitypes.RuntimeMetadata, error) {
|
||||
if metadataDir == "" {
|
||||
metadataDir = defaultRuntimeMetadataDir
|
||||
}
|
||||
filename := filepath.Join(metadataDir, clitypes.RuntimeMetadataName+".json")
|
||||
|
||||
data, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var res clitypes.RuntimeMetadata
|
||||
err = json.Unmarshal(data, &res)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "malformed runtime metadata file %s", filename)
|
||||
}
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
// WriteRuntimeMetadata stores the metadata on the local system
|
||||
func WriteRuntimeMetadata(metadataDir string, metadata *clitypes.RuntimeMetadata) error {
|
||||
if metadataDir == "" {
|
||||
metadataDir = defaultRuntimeMetadataDir
|
||||
}
|
||||
filename := filepath.Join(metadataDir, clitypes.RuntimeMetadataName+".json")
|
||||
|
||||
data, err := json.Marshal(metadata)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
os.Remove(filename)
|
||||
return ioutil.WriteFile(filename, data, 0644)
|
||||
}
|
4
vendor/github.com/docker/cli/kubernetes/README.md
generated
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
# Kubernetes client libraries
|
||||
|
||||
This package (and sub-packages) holds the client libraries for the kubernetes integration in
|
||||
the docker platform. Most of the code is currently generated.
|
60
vendor/github.com/docker/cli/kubernetes/check.go
generated
vendored
Normal file
@ -0,0 +1,60 @@
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
apiv1alpha3 "github.com/docker/compose-on-kubernetes/api/compose/v1alpha3"
|
||||
apiv1beta1 "github.com/docker/compose-on-kubernetes/api/compose/v1beta1"
|
||||
apiv1beta2 "github.com/docker/compose-on-kubernetes/api/compose/v1beta2"
|
||||
"github.com/pkg/errors"
|
||||
apimachinerymetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/discovery"
|
||||
)
|
||||
|
||||
// StackVersion represents the detected Compose Component on Kubernetes side.
|
||||
type StackVersion string
|
||||
|
||||
const (
|
||||
// StackAPIV1Beta1 is returned if it's the most recent version available.
|
||||
StackAPIV1Beta1 = StackVersion("v1beta1")
|
||||
// StackAPIV1Beta2 is returned if it's the most recent version available.
|
||||
StackAPIV1Beta2 = StackVersion("v1beta2")
|
||||
// StackAPIV1Alpha3 is returned if it's the most recent version available, and the experimental flag is on.
|
||||
StackAPIV1Alpha3 = StackVersion("v1alpha3")
|
||||
)
|
||||
|
||||
// GetStackAPIVersion returns the most appropriate stack API version installed.
|
||||
func GetStackAPIVersion(serverGroups discovery.ServerGroupsInterface, experimental bool) (StackVersion, error) {
|
||||
groups, err := serverGroups.ServerGroups()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return getAPIVersion(groups, experimental)
|
||||
}
|
||||
|
||||
func getAPIVersion(groups *metav1.APIGroupList, experimental bool) (StackVersion, error) {
|
||||
switch {
|
||||
case experimental && findVersion(apiv1alpha3.SchemeGroupVersion, groups.Groups):
|
||||
return StackAPIV1Alpha3, nil
|
||||
case findVersion(apiv1beta2.SchemeGroupVersion, groups.Groups):
|
||||
return StackAPIV1Beta2, nil
|
||||
case findVersion(apiv1beta1.SchemeGroupVersion, groups.Groups):
|
||||
return StackAPIV1Beta1, nil
|
||||
default:
|
||||
return "", errors.New("failed to find a Stack API version")
|
||||
}
|
||||
}
|
||||
|
||||
func findVersion(stackAPI schema.GroupVersion, groups []apimachinerymetav1.APIGroup) bool {
|
||||
for _, group := range groups {
|
||||
if group.Name == stackAPI.Group {
|
||||
for _, version := range group.Versions {
|
||||
if version.Version == stackAPI.Version {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
8
vendor/github.com/docker/cli/kubernetes/config.go
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
package kubernetes
|
||||
|
||||
import api "github.com/docker/compose-on-kubernetes/api"
|
||||
|
||||
// NewKubernetesConfig resolves the path to the desired Kubernetes configuration file based on
|
||||
// the KUBECONFIG environment variable and command line flags.
|
||||
// Deprecated: Use github.com/docker/compose-on-kubernetes/api.NewKubernetesConfig instead
|
||||
var NewKubernetesConfig = api.NewKubernetesConfig
|
4
vendor/github.com/docker/cli/kubernetes/doc.go
generated
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
//
|
||||
// +domain=docker.com
|
||||
|
||||
package kubernetes
|
98
vendor/github.com/docker/cli/opts/config.go
generated
vendored
Normal file
@ -0,0 +1,98 @@
|
||||
package opts
|
||||
|
||||
import (
|
||||
"encoding/csv"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
swarmtypes "github.com/docker/docker/api/types/swarm"
|
||||
)
|
||||
|
||||
// ConfigOpt is a Value type for parsing configs
|
||||
type ConfigOpt struct {
|
||||
values []*swarmtypes.ConfigReference
|
||||
}
|
||||
|
||||
// Set a new config value
|
||||
func (o *ConfigOpt) Set(value string) error {
|
||||
csvReader := csv.NewReader(strings.NewReader(value))
|
||||
fields, err := csvReader.Read()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
options := &swarmtypes.ConfigReference{
|
||||
File: &swarmtypes.ConfigReferenceFileTarget{
|
||||
UID: "0",
|
||||
GID: "0",
|
||||
Mode: 0444,
|
||||
},
|
||||
}
|
||||
|
||||
// support a simple syntax of --config foo
|
||||
if len(fields) == 1 {
|
||||
options.File.Name = fields[0]
|
||||
options.ConfigName = fields[0]
|
||||
o.values = append(o.values, options)
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, field := range fields {
|
||||
parts := strings.SplitN(field, "=", 2)
|
||||
key := strings.ToLower(parts[0])
|
||||
|
||||
if len(parts) != 2 {
|
||||
return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
|
||||
}
|
||||
|
||||
value := parts[1]
|
||||
switch key {
|
||||
case "source", "src":
|
||||
options.ConfigName = value
|
||||
case "target":
|
||||
options.File.Name = value
|
||||
case "uid":
|
||||
options.File.UID = value
|
||||
case "gid":
|
||||
options.File.GID = value
|
||||
case "mode":
|
||||
m, err := strconv.ParseUint(value, 0, 32)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid mode specified: %v", err)
|
||||
}
|
||||
|
||||
options.File.Mode = os.FileMode(m)
|
||||
default:
|
||||
return fmt.Errorf("invalid field in config request: %s", key)
|
||||
}
|
||||
}
|
||||
|
||||
if options.ConfigName == "" {
|
||||
return fmt.Errorf("source is required")
|
||||
}
|
||||
|
||||
o.values = append(o.values, options)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Type returns the type of this option
|
||||
func (o *ConfigOpt) Type() string {
|
||||
return "config"
|
||||
}
|
||||
|
||||
// String returns a string repr of this option
|
||||
func (o *ConfigOpt) String() string {
|
||||
configs := []string{}
|
||||
for _, config := range o.values {
|
||||
repr := fmt.Sprintf("%s -> %s", config.ConfigName, config.File.Name)
|
||||
configs = append(configs, repr)
|
||||
}
|
||||
return strings.Join(configs, ", ")
|
||||
}
|
||||
|
||||
// Value returns the config requests
|
||||
func (o *ConfigOpt) Value() []*swarmtypes.ConfigReference {
|
||||
return o.values
|
||||
}
|
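The CSV syntax accepted by ConfigOpt.Set may be easier to see with a small usage sketch; the config name, target path, and mode below are made-up illustration values, not anything defined by this package.

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	var cfg opts.ConfigOpt

	// Long form: comma-separated key=value fields.
	if err := cfg.Set("source=app-config,target=/etc/app/config.json,mode=0440"); err != nil {
		panic(err)
	}

	// Short form: a bare name sets both the source and the target file name.
	_ = cfg.Set("other-config")

	for _, ref := range cfg.Value() {
		fmt.Println(ref.ConfigName, "->", ref.File.Name, ref.File.Mode)
	}
}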
64
vendor/github.com/docker/cli/opts/duration.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
|
||||
package opts
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// PositiveDurationOpt is an option type for time.Duration that uses a pointer.
|
||||
// It behaves similarly to DurationOpt but only allows positive duration values.
|
||||
type PositiveDurationOpt struct {
|
||||
DurationOpt
|
||||
}
|
||||
|
||||
// Set a new value on the option. Setting a negative duration value will cause
|
||||
// an error to be returned.
|
||||
func (d *PositiveDurationOpt) Set(s string) error {
|
||||
err := d.DurationOpt.Set(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if *d.DurationOpt.value < 0 {
|
||||
return errors.Errorf("duration cannot be negative")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DurationOpt is an option type for time.Duration that uses a pointer. This
|
||||
// allows us to get nil values outside, instead of defaulting to 0
|
||||
type DurationOpt struct {
|
||||
value *time.Duration
|
||||
}
|
||||
|
||||
// NewDurationOpt creates a DurationOpt with the specified duration
|
||||
func NewDurationOpt(value *time.Duration) *DurationOpt {
|
||||
return &DurationOpt{
|
||||
value: value,
|
||||
}
|
||||
}
|
||||
|
||||
// Set a new value on the option
|
||||
func (d *DurationOpt) Set(s string) error {
|
||||
v, err := time.ParseDuration(s)
|
||||
d.value = &v
|
||||
return err
|
||||
}
|
||||
|
||||
// Type returns the type of this option, which will be displayed in `--help` output
|
||||
func (d *DurationOpt) Type() string {
|
||||
return "duration"
|
||||
}
|
||||
|
||||
// String returns a string repr of this option
|
||||
func (d *DurationOpt) String() string {
|
||||
if d.value != nil {
|
||||
return d.value.String()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Value returns the time.Duration
|
||||
func (d *DurationOpt) Value() *time.Duration {
|
||||
return d.value
|
||||
}
|
46
vendor/github.com/docker/cli/opts/env.go
generated
vendored
Normal file
@ -0,0 +1,46 @@
|
||||
package opts
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ValidateEnv validates an environment variable and returns it.
|
||||
// If no value is specified, it returns the current value using os.Getenv.
|
||||
//
|
||||
// As with ParseEnvFile and related to #16585, environment variable names
|
||||
// are not validated whatsoever; it's up to the application inside docker
|
||||
// to validate them or not.
|
||||
//
|
||||
// The only validation here is to check if name is empty, per #25099
|
||||
func ValidateEnv(val string) (string, error) {
|
||||
arr := strings.Split(val, "=")
|
||||
if arr[0] == "" {
|
||||
return "", fmt.Errorf("invalid environment variable: %s", val)
|
||||
}
|
||||
if len(arr) > 1 {
|
||||
return val, nil
|
||||
}
|
||||
if !doesEnvExist(val) {
|
||||
return val, nil
|
||||
}
|
||||
return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
|
||||
}
|
||||
|
||||
func doesEnvExist(name string) bool {
|
||||
for _, entry := range os.Environ() {
|
||||
parts := strings.SplitN(entry, "=", 2)
|
||||
if runtime.GOOS == "windows" {
|
||||
// Environment variables are case-insensitive on Windows. PaTh, path and PATH are equivalent.
|
||||
if strings.EqualFold(parts[0], name) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
if parts[0] == name {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
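A short sketch of ValidateEnv's pass-through behaviour; the variable names are arbitrary examples.

package main

import (
	"fmt"
	"os"

	"github.com/docker/cli/opts"
)

func main() {
	os.Setenv("FOO", "bar")

	// A bare name that exists in the environment is expanded.
	v, _ := opts.ValidateEnv("FOO")
	fmt.Println(v) // FOO=bar

	// Explicit key=value pairs are returned unchanged.
	v, _ = opts.ValidateEnv("TERM=xterm")
	fmt.Println(v) // TERM=xterm

	// An empty name is rejected.
	_, err := opts.ValidateEnv("=oops")
	fmt.Println(err) // invalid environment variable: =oops
}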
22
vendor/github.com/docker/cli/opts/envfile.go
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
package opts
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// ParseEnvFile reads a file with environment variables enumerated by lines
|
||||
//
|
||||
// ``Environment variable names used by the utilities in the Shell and
|
||||
// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase
|
||||
// letters, digits, and the '_' (underscore) from the characters defined in
|
||||
// Portable Character Set and do not begin with a digit. *But*, other
|
||||
// characters may be permitted by an implementation; applications shall
|
||||
// tolerate the presence of such names.''
|
||||
// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html
|
||||
//
|
||||
// As of #16585, it's up to the application inside docker to decide whether to validate
|
||||
// environment variables; that's why we just strip leading whitespace and
|
||||
// nothing more.
|
||||
func ParseEnvFile(filename string) ([]string, error) {
|
||||
return parseKeyValueFile(filename, os.LookupEnv)
|
||||
}
|
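An illustrative sketch of ParseEnvFile, assuming a throwaway file written just for the example; bare names such as HOME are resolved from the current environment via os.LookupEnv.

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/docker/cli/opts"
)

func main() {
	// Hypothetical example file; comment lines and blank lines are skipped.
	content := []byte("# service settings\nFOO=bar\nHOME\n")
	if err := ioutil.WriteFile("example.env", content, 0644); err != nil {
		panic(err)
	}

	vars, err := opts.ParseEnvFile("example.env")
	if err != nil {
		panic(err)
	}
	fmt.Println(vars) // e.g. [FOO=bar HOME=/home/user] when HOME is set
}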
77
vendor/github.com/docker/cli/opts/file.go
generated
vendored
Normal file
@ -0,0 +1,77 @@
|
||||
package opts
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
const whiteSpaces = " \t"
|
||||
|
||||
// ErrBadKey typed error for bad environment variable
|
||||
type ErrBadKey struct {
|
||||
msg string
|
||||
}
|
||||
|
||||
func (e ErrBadKey) Error() string {
|
||||
return fmt.Sprintf("poorly formatted environment: %s", e.msg)
|
||||
}
|
||||
|
||||
func parseKeyValueFile(filename string, emptyFn func(string) (string, bool)) ([]string, error) {
|
||||
fh, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
defer fh.Close()
|
||||
|
||||
lines := []string{}
|
||||
scanner := bufio.NewScanner(fh)
|
||||
currentLine := 0
|
||||
utf8bom := []byte{0xEF, 0xBB, 0xBF}
|
||||
for scanner.Scan() {
|
||||
scannedBytes := scanner.Bytes()
|
||||
if !utf8.Valid(scannedBytes) {
|
||||
return []string{}, fmt.Errorf("env file %s contains invalid utf8 bytes at line %d: %v", filename, currentLine+1, scannedBytes)
|
||||
}
|
||||
// We trim UTF8 BOM
|
||||
if currentLine == 0 {
|
||||
scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom)
|
||||
}
|
||||
// trim the line from all leading whitespace first
|
||||
line := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace)
|
||||
currentLine++
|
||||
// line is not empty, and not starting with '#'
|
||||
if len(line) > 0 && !strings.HasPrefix(line, "#") {
|
||||
data := strings.SplitN(line, "=", 2)
|
||||
|
||||
// trim the front of a variable, but nothing else
|
||||
variable := strings.TrimLeft(data[0], whiteSpaces)
|
||||
if strings.ContainsAny(variable, whiteSpaces) {
|
||||
return []string{}, ErrBadKey{fmt.Sprintf("variable '%s' contains whitespaces", variable)}
|
||||
}
|
||||
if len(variable) == 0 {
|
||||
return []string{}, ErrBadKey{fmt.Sprintf("no variable name on line '%s'", line)}
|
||||
}
|
||||
|
||||
if len(data) > 1 {
|
||||
// pass the value through, no trimming
|
||||
lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1]))
|
||||
} else {
|
||||
var value string
|
||||
var present bool
|
||||
if emptyFn != nil {
|
||||
value, present = emptyFn(line)
|
||||
}
|
||||
if present {
|
||||
// if only a pass-through variable is given, clean it up.
|
||||
lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), value))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return lines, scanner.Err()
|
||||
}
|
112
vendor/github.com/docker/cli/opts/gpus.go
generated
vendored
Normal file
@ -0,0 +1,112 @@
|
||||
package opts
|
||||
|
||||
import (
|
||||
"encoding/csv"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// GpuOpts is a Value type for parsing GPU device requests
|
||||
type GpuOpts struct {
|
||||
values []container.DeviceRequest
|
||||
}
|
||||
|
||||
func parseCount(s string) (int, error) {
|
||||
if s == "all" {
|
||||
return -1, nil
|
||||
}
|
||||
i, err := strconv.Atoi(s)
|
||||
return i, errors.Wrap(err, "count must be an integer")
|
||||
}
|
||||
|
||||
// Set a new GPU request value
|
||||
// nolint: gocyclo
|
||||
func (o *GpuOpts) Set(value string) error {
|
||||
csvReader := csv.NewReader(strings.NewReader(value))
|
||||
fields, err := csvReader.Read()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req := container.DeviceRequest{}
|
||||
|
||||
seen := map[string]struct{}{}
|
||||
// Parse each comma-separated key[=value] field of the request
|
||||
for _, field := range fields {
|
||||
parts := strings.SplitN(field, "=", 2)
|
||||
key := parts[0]
|
||||
if _, ok := seen[key]; ok {
|
||||
return fmt.Errorf("gpu request key '%s' can be specified only once", key)
|
||||
}
|
||||
seen[key] = struct{}{}
|
||||
|
||||
if len(parts) == 1 {
|
||||
seen["count"] = struct{}{}
|
||||
req.Count, err = parseCount(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
value := parts[1]
|
||||
switch key {
|
||||
case "driver":
|
||||
req.Driver = value
|
||||
case "count":
|
||||
req.Count, err = parseCount(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case "device":
|
||||
req.DeviceIDs = strings.Split(value, ",")
|
||||
case "capabilities":
|
||||
req.Capabilities = [][]string{append(strings.Split(value, ","), "gpu")}
|
||||
case "options":
|
||||
r := csv.NewReader(strings.NewReader(value))
|
||||
optFields, err := r.Read()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to read gpu options")
|
||||
}
|
||||
req.Options = ConvertKVStringsToMap(optFields)
|
||||
default:
|
||||
return fmt.Errorf("unexpected key '%s' in '%s'", key, field)
|
||||
}
|
||||
}
|
||||
|
||||
if _, ok := seen["count"]; !ok && req.DeviceIDs == nil {
|
||||
req.Count = 1
|
||||
}
|
||||
if req.Options == nil {
|
||||
req.Options = make(map[string]string)
|
||||
}
|
||||
if req.Capabilities == nil {
|
||||
req.Capabilities = [][]string{{"gpu"}}
|
||||
}
|
||||
|
||||
o.values = append(o.values, req)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Type returns the type of this option
|
||||
func (o *GpuOpts) Type() string {
|
||||
return "gpu-request"
|
||||
}
|
||||
|
||||
// String returns a string repr of this option
|
||||
func (o *GpuOpts) String() string {
|
||||
gpus := []string{}
|
||||
for _, gpu := range o.values {
|
||||
gpus = append(gpus, fmt.Sprintf("%v", gpu))
|
||||
}
|
||||
return strings.Join(gpus, ", ")
|
||||
}
|
||||
|
||||
// Value returns the GPU device requests
|
||||
func (o *GpuOpts) Value() []container.DeviceRequest {
|
||||
return o.values
|
||||
}
|
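A minimal sketch of the two forms GpuOpts.Set accepts; the driver name and device ID are illustrative values of the kind a --gpus flag would pass through.

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	var gpus opts.GpuOpts

	// Bare value: treated as a count ("all" maps to -1, meaning every GPU).
	if err := gpus.Set("all"); err != nil {
		panic(err)
	}

	// key=value form: request specific devices for a given driver.
	if err := gpus.Set("driver=nvidia,device=GPU-0"); err != nil {
		panic(err)
	}

	for _, req := range gpus.Value() {
		fmt.Println(req.Count, req.Driver, req.DeviceIDs, req.Capabilities)
	}
}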
167
vendor/github.com/docker/cli/opts/hosts.go
generated
vendored
Normal file
@ -0,0 +1,167 @@
|
||||
package opts
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
// DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. dockerd -H tcp://
|
||||
// These are the IANA registered port numbers for use with Docker
|
||||
// see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
|
||||
DefaultHTTPPort = 2375 // Default HTTP Port
|
||||
// DefaultTLSHTTPPort Default HTTP Port used when TLS enabled
|
||||
DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port
|
||||
// DefaultUnixSocket Path for the unix socket.
|
||||
// Docker daemon by default always listens on the default unix socket
|
||||
DefaultUnixSocket = "/var/run/docker.sock"
|
||||
// DefaultTCPHost constant defines the default host string used by docker on Windows
|
||||
DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
|
||||
// DefaultTLSHost constant defines the default host string used by docker for TLS sockets
|
||||
DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort)
|
||||
// DefaultNamedPipe defines the default named pipe used by docker on Windows
|
||||
DefaultNamedPipe = `//./pipe/docker_engine`
|
||||
)
|
||||
|
||||
// ValidateHost validates that the specified string is a valid host and returns it.
|
||||
func ValidateHost(val string) (string, error) {
|
||||
host := strings.TrimSpace(val)
|
||||
// The empty string means default and is not handled by parseDockerDaemonHost
|
||||
if host != "" {
|
||||
_, err := parseDockerDaemonHost(host)
|
||||
if err != nil {
|
||||
return val, err
|
||||
}
|
||||
}
|
||||
// Note: unlike most flag validators, we don't return the mutated value here
|
||||
// we need to know what the user entered later (using ParseHost) to adjust for TLS
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// ParseHost and set defaults for a Daemon host string
|
||||
func ParseHost(defaultToTLS bool, val string) (string, error) {
|
||||
host := strings.TrimSpace(val)
|
||||
if host == "" {
|
||||
if defaultToTLS {
|
||||
host = DefaultTLSHost
|
||||
} else {
|
||||
host = DefaultHost
|
||||
}
|
||||
} else {
|
||||
var err error
|
||||
host, err = parseDockerDaemonHost(host)
|
||||
if err != nil {
|
||||
return val, err
|
||||
}
|
||||
}
|
||||
return host, nil
|
||||
}
|
||||
|
||||
// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host.
|
||||
// Depending of the address specified, this may return one of the global Default* strings defined in hosts.go.
|
||||
func parseDockerDaemonHost(addr string) (string, error) {
|
||||
addrParts := strings.SplitN(addr, "://", 2)
|
||||
if len(addrParts) == 1 && addrParts[0] != "" {
|
||||
addrParts = []string{"tcp", addrParts[0]}
|
||||
}
|
||||
|
||||
switch addrParts[0] {
|
||||
case "tcp":
|
||||
return ParseTCPAddr(addrParts[1], DefaultTCPHost)
|
||||
case "unix":
|
||||
return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket)
|
||||
case "npipe":
|
||||
return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe)
|
||||
case "fd":
|
||||
return addr, nil
|
||||
case "ssh":
|
||||
return addr, nil
|
||||
default:
|
||||
return "", fmt.Errorf("Invalid bind address format: %s", addr)
|
||||
}
|
||||
}
|
||||
|
||||
// parseSimpleProtoAddr parses and validates that the specified address is a valid
|
||||
// socket address for simple protocols like unix and npipe. It returns a formatted
|
||||
// socket address, either using the address parsed from addr, or the contents of
|
||||
// defaultAddr if addr is a blank string.
|
||||
func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) {
|
||||
addr = strings.TrimPrefix(addr, proto+"://")
|
||||
if strings.Contains(addr, "://") {
|
||||
return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr)
|
||||
}
|
||||
if addr == "" {
|
||||
addr = defaultAddr
|
||||
}
|
||||
return fmt.Sprintf("%s://%s", proto, addr), nil
|
||||
}
|
||||
|
||||
// ParseTCPAddr parses and validates that the specified address is a valid TCP
|
||||
// address. It returns a formatted TCP address, either using the address parsed
|
||||
// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string.
|
||||
// tryAddr is expected to have already been Trim()'d
|
||||
// defaultAddr must be in the full `tcp://host:port` form
|
||||
func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) {
|
||||
if tryAddr == "" || tryAddr == "tcp://" {
|
||||
return defaultAddr, nil
|
||||
}
|
||||
addr := strings.TrimPrefix(tryAddr, "tcp://")
|
||||
if strings.Contains(addr, "://") || addr == "" {
|
||||
return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr)
|
||||
}
|
||||
|
||||
defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://")
|
||||
defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
// url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but
|
||||
// not 1.4. See https://github.com/golang/go/issues/12200 and
|
||||
// https://github.com/golang/go/issues/6530.
|
||||
if strings.HasSuffix(addr, "]:") {
|
||||
addr += defaultPort
|
||||
}
|
||||
|
||||
u, err := url.Parse("tcp://" + addr)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
host, port, err := net.SplitHostPort(u.Host)
|
||||
if err != nil {
|
||||
// try port addition once
|
||||
host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort))
|
||||
}
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
|
||||
}
|
||||
|
||||
if host == "" {
|
||||
host = defaultHost
|
||||
}
|
||||
if port == "" {
|
||||
port = defaultPort
|
||||
}
|
||||
p, err := strconv.Atoi(port)
|
||||
if err != nil && p == 0 {
|
||||
return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil
|
||||
}
|
||||
|
||||
// ValidateExtraHost validates that the specified string is a valid extrahost and returns it.
|
||||
// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6).
|
||||
func ValidateExtraHost(val string) (string, error) {
|
||||
// allow for IPv6 addresses in extra hosts by only splitting on first ":"
|
||||
arr := strings.SplitN(val, ":", 2)
|
||||
if len(arr) != 2 || len(arr[0]) == 0 {
|
||||
return "", fmt.Errorf("bad format for add-host: %q", val)
|
||||
}
|
||||
if _, err := ValidateIPAddress(arr[1]); err != nil {
|
||||
return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1])
|
||||
}
|
||||
return val, nil
|
||||
}
|
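A short sketch of how ParseHost normalizes daemon host strings; the addresses are illustrative.

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	// Missing port: filled in from the tcp default.
	host, err := opts.ParseHost(false, "tcp://127.0.0.1")
	fmt.Println(host, err) // tcp://127.0.0.1:2375 <nil>

	// Empty value: falls back to the platform default (or the TLS default when true is passed).
	host, _ = opts.ParseHost(false, "")
	fmt.Println(host) // e.g. unix:///var/run/docker.sock on Linux

	// Unix socket addresses are passed through with the unix:// scheme.
	host, _ = opts.ParseHost(false, "unix:///tmp/docker.sock")
	fmt.Println(host) // unix:///tmp/docker.sock
}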
8
vendor/github.com/docker/cli/opts/hosts_unix.go
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
// +build !windows
|
||||
|
||||
package opts
|
||||
|
||||
import "fmt"
|
||||
|
||||
// DefaultHost constant defines the default host string used by docker on other hosts than Windows
|
||||
var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket)
|
6
vendor/github.com/docker/cli/opts/hosts_windows.go
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
// +build windows
|
||||
|
||||
package opts
|
||||
|
||||
// DefaultHost constant defines the default host string used by docker on Windows
|
||||
var DefaultHost = "npipe://" + DefaultNamedPipe
|
47
vendor/github.com/docker/cli/opts/ip.go
generated
vendored
Normal file
@ -0,0 +1,47 @@
|
||||
package opts
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
)
|
||||
|
||||
// IPOpt holds an IP. It is used to store values from CLI flags.
|
||||
type IPOpt struct {
|
||||
*net.IP
|
||||
}
|
||||
|
||||
// NewIPOpt creates a new IPOpt from a reference net.IP and a
|
||||
// string representation of an IP. If the string is not a valid
|
||||
// IP it will fallback to the specified reference.
|
||||
func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt {
|
||||
o := &IPOpt{
|
||||
IP: ref,
|
||||
}
|
||||
o.Set(defaultVal)
|
||||
return o
|
||||
}
|
||||
|
||||
// Set sets an IPv4 or IPv6 address from a given string. If the given
|
||||
// string is not parseable as an IP address it returns an error.
|
||||
func (o *IPOpt) Set(val string) error {
|
||||
ip := net.ParseIP(val)
|
||||
if ip == nil {
|
||||
return fmt.Errorf("%s is not an ip address", val)
|
||||
}
|
||||
*o.IP = ip
|
||||
return nil
|
||||
}
|
||||
|
||||
// String returns the IP address stored in the IPOpt. If stored IP is a
|
||||
// nil pointer, it returns an empty string.
|
||||
func (o *IPOpt) String() string {
|
||||
if *o.IP == nil {
|
||||
return ""
|
||||
}
|
||||
return o.IP.String()
|
||||
}
|
||||
|
||||
// Type returns the type of the option
|
||||
func (o *IPOpt) Type() string {
|
||||
return "ip"
|
||||
}
|
182
vendor/github.com/docker/cli/opts/mount.go
generated
vendored
Normal file
@ -0,0 +1,182 @@
package opts

import (
	"encoding/csv"
	"fmt"
	"os"
	"strconv"
	"strings"

	mounttypes "github.com/docker/docker/api/types/mount"
	"github.com/docker/go-units"
)

// MountOpt is a Value type for parsing mounts
type MountOpt struct {
	values []mounttypes.Mount
}

// Set a new mount value
// nolint: gocyclo
func (m *MountOpt) Set(value string) error {
	csvReader := csv.NewReader(strings.NewReader(value))
	fields, err := csvReader.Read()
	if err != nil {
		return err
	}

	mount := mounttypes.Mount{}

	volumeOptions := func() *mounttypes.VolumeOptions {
		if mount.VolumeOptions == nil {
			mount.VolumeOptions = &mounttypes.VolumeOptions{
				Labels: make(map[string]string),
			}
		}
		if mount.VolumeOptions.DriverConfig == nil {
			mount.VolumeOptions.DriverConfig = &mounttypes.Driver{}
		}
		return mount.VolumeOptions
	}

	bindOptions := func() *mounttypes.BindOptions {
		if mount.BindOptions == nil {
			mount.BindOptions = new(mounttypes.BindOptions)
		}
		return mount.BindOptions
	}

	tmpfsOptions := func() *mounttypes.TmpfsOptions {
		if mount.TmpfsOptions == nil {
			mount.TmpfsOptions = new(mounttypes.TmpfsOptions)
		}
		return mount.TmpfsOptions
	}

	setValueOnMap := func(target map[string]string, value string) {
		parts := strings.SplitN(value, "=", 2)
		if len(parts) == 1 {
			target[value] = ""
		} else {
			target[parts[0]] = parts[1]
		}
	}

	mount.Type = mounttypes.TypeVolume // default to volume mounts
	// Set writable as the default
	for _, field := range fields {
		parts := strings.SplitN(field, "=", 2)
		key := strings.ToLower(parts[0])

		if len(parts) == 1 {
			switch key {
			case "readonly", "ro":
				mount.ReadOnly = true
				continue
			case "volume-nocopy":
				volumeOptions().NoCopy = true
				continue
			case "bind-nonrecursive":
				bindOptions().NonRecursive = true
				continue
			}
		}

		if len(parts) != 2 {
			return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
		}

		value := parts[1]
		switch key {
		case "type":
			mount.Type = mounttypes.Type(strings.ToLower(value))
		case "source", "src":
			mount.Source = value
		case "target", "dst", "destination":
			mount.Target = value
		case "readonly", "ro":
			mount.ReadOnly, err = strconv.ParseBool(value)
			if err != nil {
				return fmt.Errorf("invalid value for %s: %s", key, value)
			}
		case "consistency":
			mount.Consistency = mounttypes.Consistency(strings.ToLower(value))
		case "bind-propagation":
			bindOptions().Propagation = mounttypes.Propagation(strings.ToLower(value))
		case "bind-nonrecursive":
			bindOptions().NonRecursive, err = strconv.ParseBool(value)
			if err != nil {
				return fmt.Errorf("invalid value for %s: %s", key, value)
			}
		case "volume-nocopy":
			volumeOptions().NoCopy, err = strconv.ParseBool(value)
			if err != nil {
				return fmt.Errorf("invalid value for volume-nocopy: %s", value)
			}
		case "volume-label":
			setValueOnMap(volumeOptions().Labels, value)
		case "volume-driver":
			volumeOptions().DriverConfig.Name = value
		case "volume-opt":
			if volumeOptions().DriverConfig.Options == nil {
				volumeOptions().DriverConfig.Options = make(map[string]string)
			}
			setValueOnMap(volumeOptions().DriverConfig.Options, value)
		case "tmpfs-size":
			sizeBytes, err := units.RAMInBytes(value)
			if err != nil {
				return fmt.Errorf("invalid value for %s: %s", key, value)
			}
			tmpfsOptions().SizeBytes = sizeBytes
		case "tmpfs-mode":
			ui64, err := strconv.ParseUint(value, 8, 32)
			if err != nil {
				return fmt.Errorf("invalid value for %s: %s", key, value)
			}
			tmpfsOptions().Mode = os.FileMode(ui64)
		default:
			return fmt.Errorf("unexpected key '%s' in '%s'", key, field)
		}
	}

	if mount.Type == "" {
		return fmt.Errorf("type is required")
	}

	if mount.Target == "" {
		return fmt.Errorf("target is required")
	}

	if mount.VolumeOptions != nil && mount.Type != mounttypes.TypeVolume {
		return fmt.Errorf("cannot mix 'volume-*' options with mount type '%s'", mount.Type)
	}
	if mount.BindOptions != nil && mount.Type != mounttypes.TypeBind {
		return fmt.Errorf("cannot mix 'bind-*' options with mount type '%s'", mount.Type)
	}
	if mount.TmpfsOptions != nil && mount.Type != mounttypes.TypeTmpfs {
		return fmt.Errorf("cannot mix 'tmpfs-*' options with mount type '%s'", mount.Type)
	}

	m.values = append(m.values, mount)
	return nil
}

// Type returns the type of this option
func (m *MountOpt) Type() string {
	return "mount"
}

// String returns a string repr of this option
func (m *MountOpt) String() string {
	mounts := []string{}
	for _, mount := range m.values {
		repr := fmt.Sprintf("%s %s %s", mount.Type, mount.Source, mount.Target)
		mounts = append(mounts, repr)
	}
	return strings.Join(mounts, ", ")
}

// Value returns the mounts
func (m *MountOpt) Value() []mounttypes.Mount {
	return m.values
}
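Note (not part of the vendored file): a small sketch of feeding one CSV-encoded --mount value through MountOpt.Set, assuming the github.com/docker/cli/opts import path from the vendor tree above.

package main

import (
	"fmt"

	"github.com/docker/cli/opts" // assumed import path for the vendored package
)

func main() {
	var m opts.MountOpt
	// Each Set call parses exactly one CSV-encoded --mount value.
	spec := "type=volume,source=data,target=/srv,volume-label=tier=db,readonly"
	if err := m.Set(spec); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	for _, mnt := range m.Value() {
		// VolumeOptions is non-nil here because a volume-* key was used.
		fmt.Printf("%s %s -> %s readonly=%v labels=%v\n",
			mnt.Type, mnt.Source, mnt.Target, mnt.ReadOnly, mnt.VolumeOptions.Labels)
	}
}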
106
vendor/github.com/docker/cli/opts/network.go
generated
vendored
Normal file
@ -0,0 +1,106 @@
package opts

import (
	"encoding/csv"
	"fmt"
	"regexp"
	"strings"
)

const (
	networkOptName  = "name"
	networkOptAlias = "alias"
	driverOpt       = "driver-opt"
)

// NetworkAttachmentOpts represents the network options for endpoint creation
type NetworkAttachmentOpts struct {
	Target     string
	Aliases    []string
	DriverOpts map[string]string
}

// NetworkOpt represents a network config in swarm mode.
type NetworkOpt struct {
	options []NetworkAttachmentOpts
}

// Set networkopts value
func (n *NetworkOpt) Set(value string) error {
	longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value)
	if err != nil {
		return err
	}

	var netOpt NetworkAttachmentOpts
	if longSyntax {
		csvReader := csv.NewReader(strings.NewReader(value))
		fields, err := csvReader.Read()
		if err != nil {
			return err
		}

		netOpt.Aliases = []string{}
		for _, field := range fields {
			parts := strings.SplitN(field, "=", 2)

			if len(parts) < 2 {
				return fmt.Errorf("invalid field %s", field)
			}

			key := strings.TrimSpace(strings.ToLower(parts[0]))
			value := strings.TrimSpace(strings.ToLower(parts[1]))

			switch key {
			case networkOptName:
				netOpt.Target = value
			case networkOptAlias:
				netOpt.Aliases = append(netOpt.Aliases, value)
			case driverOpt:
				key, value, err = parseDriverOpt(value)
				if err == nil {
					if netOpt.DriverOpts == nil {
						netOpt.DriverOpts = make(map[string]string)
					}
					netOpt.DriverOpts[key] = value
				} else {
					return err
				}
			default:
				return fmt.Errorf("invalid field key %s", key)
			}
		}
		if len(netOpt.Target) == 0 {
			return fmt.Errorf("network name/id is not specified")
		}
	} else {
		netOpt.Target = value
	}
	n.options = append(n.options, netOpt)
	return nil
}

// Type returns the type of this option
func (n *NetworkOpt) Type() string {
	return "network"
}

// Value returns the networkopts
func (n *NetworkOpt) Value() []NetworkAttachmentOpts {
	return n.options
}

// String returns the network opts as a string
func (n *NetworkOpt) String() string {
	return ""
}

func parseDriverOpt(driverOpt string) (string, string, error) {
	parts := strings.SplitN(driverOpt, "=", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("invalid key value pair format in driver options")
	}
	key := strings.TrimSpace(strings.ToLower(parts[0]))
	value := strings.TrimSpace(strings.ToLower(parts[1]))
	return key, value, nil
}
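Note (not part of the vendored file): a sketch of the two accepted syntaxes for NetworkOpt.Set, again assuming the github.com/docker/cli/opts import path.

package main

import (
	"fmt"

	"github.com/docker/cli/opts" // assumed import path for the vendored package
)

func main() {
	var n opts.NetworkOpt

	// Short syntax: the whole value is taken as the network name/ID.
	_ = n.Set("frontend")

	// Long syntax: CSV key=value pairs, as used by `docker service create --network`.
	if err := n.Set("name=backend,alias=db,driver-opt=com.example.foo=bar"); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	for _, att := range n.Value() {
		fmt.Println(att.Target, att.Aliases, att.DriverOpts)
	}
}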
523
vendor/github.com/docker/cli/opts/opts.go
generated
vendored
Normal file
@ -0,0 +1,523 @@
package opts

import (
	"fmt"
	"math/big"
	"net"
	"path"
	"regexp"
	"strings"

	"github.com/docker/docker/api/types/filters"
	units "github.com/docker/go-units"
	"github.com/pkg/errors"
)

var (
	alphaRegexp  = regexp.MustCompile(`[a-zA-Z]`)
	domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
)

// ListOpts holds a list of values and a validation function.
type ListOpts struct {
	values    *[]string
	validator ValidatorFctType
}

// NewListOpts creates a new ListOpts with the specified validator.
func NewListOpts(validator ValidatorFctType) ListOpts {
	var values []string
	return *NewListOptsRef(&values, validator)
}

// NewListOptsRef creates a new ListOpts with the specified values and validator.
func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
	return &ListOpts{
		values:    values,
		validator: validator,
	}
}

func (opts *ListOpts) String() string {
	if len(*opts.values) == 0 {
		return ""
	}
	return fmt.Sprintf("%v", *opts.values)
}

// Set validates if needed the input value and adds it to the
// internal slice.
func (opts *ListOpts) Set(value string) error {
	if opts.validator != nil {
		v, err := opts.validator(value)
		if err != nil {
			return err
		}
		value = v
	}
	(*opts.values) = append((*opts.values), value)
	return nil
}

// Delete removes the specified element from the slice.
func (opts *ListOpts) Delete(key string) {
	for i, k := range *opts.values {
		if k == key {
			(*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)
			return
		}
	}
}

// GetMap returns the content of values in a map in order to avoid
// duplicates.
func (opts *ListOpts) GetMap() map[string]struct{} {
	ret := make(map[string]struct{})
	for _, k := range *opts.values {
		ret[k] = struct{}{}
	}
	return ret
}

// GetAll returns the values of slice.
func (opts *ListOpts) GetAll() []string {
	return (*opts.values)
}

// GetAllOrEmpty returns the values of the slice
// or an empty slice when there are no values.
func (opts *ListOpts) GetAllOrEmpty() []string {
	v := *opts.values
	if v == nil {
		return make([]string, 0)
	}
	return v
}

// Get checks the existence of the specified key.
func (opts *ListOpts) Get(key string) bool {
	for _, k := range *opts.values {
		if k == key {
			return true
		}
	}
	return false
}

// Len returns the amount of element in the slice.
func (opts *ListOpts) Len() int {
	return len((*opts.values))
}

// Type returns a string name for this Option type
func (opts *ListOpts) Type() string {
	return "list"
}

// WithValidator returns the ListOpts with validator set.
func (opts *ListOpts) WithValidator(validator ValidatorFctType) *ListOpts {
	opts.validator = validator
	return opts
}

// NamedOption is an interface that list and map options
// with names implement.
type NamedOption interface {
	Name() string
}

// NamedListOpts is a ListOpts with a configuration name.
// This struct is useful to keep reference to the assigned
// field name in the internal configuration struct.
type NamedListOpts struct {
	name string
	ListOpts
}

var _ NamedOption = &NamedListOpts{}

// NewNamedListOptsRef creates a reference to a new NamedListOpts struct.
func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts {
	return &NamedListOpts{
		name:     name,
		ListOpts: *NewListOptsRef(values, validator),
	}
}

// Name returns the name of the NamedListOpts in the configuration.
func (o *NamedListOpts) Name() string {
	return o.name
}

// MapOpts holds a map of values and a validation function.
type MapOpts struct {
	values    map[string]string
	validator ValidatorFctType
}

// Set validates if needed the input value and add it to the
// internal map, by splitting on '='.
func (opts *MapOpts) Set(value string) error {
	if opts.validator != nil {
		v, err := opts.validator(value)
		if err != nil {
			return err
		}
		value = v
	}
	vals := strings.SplitN(value, "=", 2)
	if len(vals) == 1 {
		(opts.values)[vals[0]] = ""
	} else {
		(opts.values)[vals[0]] = vals[1]
	}
	return nil
}

// GetAll returns the values of MapOpts as a map.
func (opts *MapOpts) GetAll() map[string]string {
	return opts.values
}

func (opts *MapOpts) String() string {
	return fmt.Sprintf("%v", opts.values)
}

// Type returns a string name for this Option type
func (opts *MapOpts) Type() string {
	return "map"
}

// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
	if values == nil {
		values = make(map[string]string)
	}
	return &MapOpts{
		values:    values,
		validator: validator,
	}
}

// NamedMapOpts is a MapOpts struct with a configuration name.
// This struct is useful to keep reference to the assigned
// field name in the internal configuration struct.
type NamedMapOpts struct {
	name string
	MapOpts
}

var _ NamedOption = &NamedMapOpts{}

// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts {
	return &NamedMapOpts{
		name:    name,
		MapOpts: *NewMapOpts(values, validator),
	}
}

// Name returns the name of the NamedMapOpts in the configuration.
func (o *NamedMapOpts) Name() string {
	return o.name
}

// ValidatorFctType defines a validator function that returns a validated string and/or an error.
type ValidatorFctType func(val string) (string, error)

// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error
type ValidatorFctListType func(val string) ([]string, error)

// ValidateIPAddress validates an Ip address.
func ValidateIPAddress(val string) (string, error) {
	var ip = net.ParseIP(strings.TrimSpace(val))
	if ip != nil {
		return ip.String(), nil
	}
	return "", fmt.Errorf("%s is not an ip address", val)
}

// ValidateMACAddress validates a MAC address.
func ValidateMACAddress(val string) (string, error) {
	_, err := net.ParseMAC(strings.TrimSpace(val))
	if err != nil {
		return "", err
	}
	return val, nil
}

// ValidateDNSSearch validates domain for resolvconf search configuration.
// A zero length domain is represented by a dot (.).
func ValidateDNSSearch(val string) (string, error) {
	if val = strings.Trim(val, " "); val == "." {
		return val, nil
	}
	return validateDomain(val)
}

func validateDomain(val string) (string, error) {
	if alphaRegexp.FindString(val) == "" {
		return "", fmt.Errorf("%s is not a valid domain", val)
	}
	ns := domainRegexp.FindSubmatch([]byte(val))
	if len(ns) > 0 && len(ns[1]) < 255 {
		return string(ns[1]), nil
	}
	return "", fmt.Errorf("%s is not a valid domain", val)
}

// ValidateLabel validates that the specified string is a valid label, and returns it.
//
// Labels are in the form of key=value; key must be a non-empty string, and not
// contain whitespaces. A value is optional (defaults to an empty string if omitted).
//
// Leading whitespace is removed during validation but values are kept as-is
// otherwise, so any string value is accepted for both, which includes whitespace
// (for values) and quotes (surrounding, or embedded in key or value).
//
// TODO discuss if quotes (and other special characters) should be valid or invalid for keys
// TODO discuss if leading/trailing whitespace in keys should be preserved (and valid)
func ValidateLabel(val string) (string, error) {
	arr := strings.SplitN(val, "=", 2)
	key := strings.TrimLeft(arr[0], whiteSpaces)
	if key == "" {
		return "", fmt.Errorf("invalid label '%s': empty name", val)
	}
	if strings.ContainsAny(key, whiteSpaces) {
		return "", fmt.Errorf("label '%s' contains whitespaces", key)
	}
	return val, nil
}

// ValidateSysctl validates a sysctl and returns it.
func ValidateSysctl(val string) (string, error) {
	validSysctlMap := map[string]bool{
		"kernel.msgmax":          true,
		"kernel.msgmnb":          true,
		"kernel.msgmni":          true,
		"kernel.sem":             true,
		"kernel.shmall":          true,
		"kernel.shmmax":          true,
		"kernel.shmmni":          true,
		"kernel.shm_rmid_forced": true,
	}
	validSysctlPrefixes := []string{
		"net.",
		"fs.mqueue.",
	}
	arr := strings.Split(val, "=")
	if len(arr) < 2 {
		return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
	}
	if validSysctlMap[arr[0]] {
		return val, nil
	}

	for _, vp := range validSysctlPrefixes {
		if strings.HasPrefix(arr[0], vp) {
			return val, nil
		}
	}
	return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
}

// ValidateProgressOutput errors out if an invalid value is passed to --progress
func ValidateProgressOutput(val string) error {
	valid := []string{"auto", "plain", "tty"}
	for _, s := range valid {
		if s == val {
			return nil
		}
	}
	return fmt.Errorf("invalid value %q passed to --progress, valid values are: %s", val, strings.Join(valid, ", "))
}

// FilterOpt is a flag type for validating filters
type FilterOpt struct {
	filter filters.Args
}

// NewFilterOpt returns a new FilterOpt
func NewFilterOpt() FilterOpt {
	return FilterOpt{filter: filters.NewArgs()}
}

func (o *FilterOpt) String() string {
	repr, err := filters.ToJSON(o.filter)
	if err != nil {
		return "invalid filters"
	}
	return repr
}

// Set sets the value of the opt by parsing the command line value
func (o *FilterOpt) Set(value string) error {
	if value == "" {
		return nil
	}
	if !strings.Contains(value, "=") {
		return errors.New("bad format of filter (expected name=value)")
	}
	f := strings.SplitN(value, "=", 2)
	name := strings.ToLower(strings.TrimSpace(f[0]))
	value = strings.TrimSpace(f[1])

	o.filter.Add(name, value)
	return nil
}

// Type returns the option type
func (o *FilterOpt) Type() string {
	return "filter"
}

// Value returns the value of this option
func (o *FilterOpt) Value() filters.Args {
	return o.filter
}

// NanoCPUs is a type for fixed point fractional number.
type NanoCPUs int64

// String returns the string format of the number
func (c *NanoCPUs) String() string {
	if *c == 0 {
		return ""
	}
	return big.NewRat(c.Value(), 1e9).FloatString(3)
}

// Set sets the value of the NanoCPU by passing a string
func (c *NanoCPUs) Set(value string) error {
	cpus, err := ParseCPUs(value)
	*c = NanoCPUs(cpus)
	return err
}

// Type returns the type
func (c *NanoCPUs) Type() string {
	return "decimal"
}

// Value returns the value in int64
func (c *NanoCPUs) Value() int64 {
	return int64(*c)
}

// ParseCPUs takes a string ratio and returns an integer value of nano cpus
func ParseCPUs(value string) (int64, error) {
	cpu, ok := new(big.Rat).SetString(value)
	if !ok {
		return 0, fmt.Errorf("failed to parse %v as a rational number", value)
	}
	nano := cpu.Mul(cpu, big.NewRat(1e9, 1))
	if !nano.IsInt() {
		return 0, fmt.Errorf("value is too precise")
	}
	return nano.Num().Int64(), nil
}

// ParseLink parses and validates the specified string as a link format (name:alias)
func ParseLink(val string) (string, string, error) {
	if val == "" {
		return "", "", fmt.Errorf("empty string specified for links")
	}
	arr := strings.Split(val, ":")
	if len(arr) > 2 {
		return "", "", fmt.Errorf("bad format for links: %s", val)
	}
	if len(arr) == 1 {
		return val, val, nil
	}
	// This is kept because we can actually get a HostConfig with links
	// from an already created container and the format is not `foo:bar`
	// but `/foo:/c1/bar`
	if strings.HasPrefix(arr[0], "/") {
		_, alias := path.Split(arr[1])
		return arr[0][1:], alias, nil
	}
	return arr[0], arr[1], nil
}

// ValidateLink validates that the specified string has a valid link format (containerName:alias).
func ValidateLink(val string) (string, error) {
	_, _, err := ParseLink(val)
	return val, err
}

// MemBytes is a type for human readable memory bytes (like 128M, 2g, etc)
type MemBytes int64

// String returns the string format of the human readable memory bytes
func (m *MemBytes) String() string {
	// NOTE: In spf13/pflag/flag.go, "0" is considered as "zero value" while "0 B" is not.
	// We return "0" in case value is 0 here so that the default value is hidden.
	// (Sometimes "default 0 B" is actually misleading)
	if m.Value() != 0 {
		return units.BytesSize(float64(m.Value()))
	}
	return "0"
}

// Set sets the value of the MemBytes by passing a string
func (m *MemBytes) Set(value string) error {
	val, err := units.RAMInBytes(value)
	*m = MemBytes(val)
	return err
}

// Type returns the type
func (m *MemBytes) Type() string {
	return "bytes"
}

// Value returns the value in int64
func (m *MemBytes) Value() int64 {
	return int64(*m)
}

// UnmarshalJSON is the customized unmarshaler for MemBytes
func (m *MemBytes) UnmarshalJSON(s []byte) error {
	if len(s) <= 2 || s[0] != '"' || s[len(s)-1] != '"' {
		return fmt.Errorf("invalid size: %q", s)
	}
	val, err := units.RAMInBytes(string(s[1 : len(s)-1]))
	*m = MemBytes(val)
	return err
}

// MemSwapBytes is a type for human readable memory bytes (like 128M, 2g, etc).
// It differs from MemBytes in that -1 is valid and the default.
type MemSwapBytes int64

// Set sets the value of the MemSwapBytes by passing a string
func (m *MemSwapBytes) Set(value string) error {
	if value == "-1" {
		*m = MemSwapBytes(-1)
		return nil
	}
	val, err := units.RAMInBytes(value)
	*m = MemSwapBytes(val)
	return err
}

// Type returns the type
func (m *MemSwapBytes) Type() string {
	return "bytes"
}

// Value returns the value in int64
func (m *MemSwapBytes) Value() int64 {
	return int64(*m)
}

func (m *MemSwapBytes) String() string {
	b := MemBytes(*m)
	return b.String()
}

// UnmarshalJSON is the customized unmarshaler for MemSwapBytes
func (m *MemSwapBytes) UnmarshalJSON(s []byte) error {
	b := MemBytes(*m)
	return b.UnmarshalJSON(s)
}
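Note (not part of the vendored file): a minimal sketch exercising a few of the option types above (ListOpts with a validator, MemBytes, NanoCPUs), assuming the github.com/docker/cli/opts import path.

package main

import (
	"fmt"

	"github.com/docker/cli/opts" // assumed import path for the vendored package
)

func main() {
	// ListOpts with a validator: invalid values are rejected at Set time.
	labels := opts.NewListOpts(opts.ValidateLabel)
	_ = labels.Set("env=prod")
	if err := labels.Set("bad key=oops"); err != nil {
		fmt.Println("rejected:", err) // key contains whitespace
	}
	fmt.Println(labels.GetAll()) // [env=prod]

	// MemBytes understands human-readable sizes via go-units.
	var mem opts.MemBytes
	_ = mem.Set("512m")
	fmt.Println(mem.Value()) // 536870912

	// NanoCPUs stores a fixed-point CPU count; "0.5" becomes 500000000 nano-CPUs.
	var cpus opts.NanoCPUs
	_ = cpus.Set("0.5")
	fmt.Println(cpus.Value())
}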
6
vendor/github.com/docker/cli/opts/opts_unix.go
generated
vendored
Normal file
@ -0,0 +1,6 @@
// +build !windows

package opts

// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080
const DefaultHTTPHost = "localhost"
56
vendor/github.com/docker/cli/opts/opts_windows.go
generated
vendored
Normal file
@ -0,0 +1,56 @@
package opts

// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5.
// @jhowardmsft, @swernli.
//
// On Windows, this mitigates a problem with the default options of running
// a docker client against a local docker daemon on TP5.
//
// What was found that if the default host is "localhost", even if the client
// (and daemon as this is local) is not physically on a network, and the DNS
// cache is flushed (ipconfig /flushdns), then the client will pause for
// exactly one second when connecting to the daemon for calls. For example
// using docker run windowsservercore cmd, the CLI will send a create followed
// by an attach. You see the delay between the attach finishing and the attach
// being seen by the daemon.
//
// Here's some daemon debug logs with additional debug spew put in. The
// AfterWriteJSON log is the very last thing the daemon does as part of the
// create call. The POST /attach is the second CLI call. Notice the second
// time gap.
//
// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs"
// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig"
// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...."
// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking....
// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...."
// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...."
// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func"
// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create"
// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2"
// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate"
// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON"
// ... 1 second gap here....
// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach"
// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1"
//
// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change
// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory,
// the Windows networking stack is supposed to resolve "localhost" internally,
// without hitting DNS, or even reading the hosts file (which is why localhost
// is commented out in the hosts file on Windows).
//
// We have validated that working around this using the actual IPv4 localhost
// address does not cause the delay.
//
// This does not occur with the docker client built with 1.4.3 on the same
// Windows build, regardless of whether the daemon is built using 1.5.1
// or 1.4.3. It does not occur on Linux. We also verified we see the same thing
// on a cross-compiled Windows binary (from Linux).
//
// Final note: This is a mitigation, not a 'real' fix. It is still susceptible
// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...'
// explicitly.

// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080
const DefaultHTTPHost = "127.0.0.1"
99
vendor/github.com/docker/cli/opts/parse.go
generated
vendored
Normal file
@ -0,0 +1,99 @@
package opts

import (
	"fmt"
	"os"
	"strconv"
	"strings"

	"github.com/docker/docker/api/types/container"
)

// ReadKVStrings reads a file of line terminated key=value pairs, and overrides any keys
// present in the file with additional pairs specified in the override parameter
func ReadKVStrings(files []string, override []string) ([]string, error) {
	return readKVStrings(files, override, nil)
}

// ReadKVEnvStrings reads a file of line terminated key=value pairs, and overrides any keys
// present in the file with additional pairs specified in the override parameter.
// If a key has no value, it will get the value from the environment.
func ReadKVEnvStrings(files []string, override []string) ([]string, error) {
	return readKVStrings(files, override, os.LookupEnv)
}

func readKVStrings(files []string, override []string, emptyFn func(string) (string, bool)) ([]string, error) {
	variables := []string{}
	for _, ef := range files {
		parsedVars, err := parseKeyValueFile(ef, emptyFn)
		if err != nil {
			return nil, err
		}
		variables = append(variables, parsedVars...)
	}
	// parse the '-e' and '--env' after, to allow override
	variables = append(variables, override...)

	return variables, nil
}

// ConvertKVStringsToMap converts ["key=value"] to {"key":"value"}
func ConvertKVStringsToMap(values []string) map[string]string {
	result := make(map[string]string, len(values))
	for _, value := range values {
		kv := strings.SplitN(value, "=", 2)
		if len(kv) == 1 {
			result[kv[0]] = ""
		} else {
			result[kv[0]] = kv[1]
		}
	}

	return result
}

// ConvertKVStringsToMapWithNil converts ["key=value"] to {"key":"value"}
// but set unset keys to nil - meaning the ones with no "=" in them.
// We use this in cases where we need to distinguish between
// FOO= and FOO
// where the latter case just means FOO was mentioned but not given a value
func ConvertKVStringsToMapWithNil(values []string) map[string]*string {
	result := make(map[string]*string, len(values))
	for _, value := range values {
		kv := strings.SplitN(value, "=", 2)
		if len(kv) == 1 {
			result[kv[0]] = nil
		} else {
			result[kv[0]] = &kv[1]
		}
	}

	return result
}

// ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect
func ParseRestartPolicy(policy string) (container.RestartPolicy, error) {
	p := container.RestartPolicy{}

	if policy == "" {
		return p, nil
	}

	parts := strings.Split(policy, ":")

	if len(parts) > 2 {
		return p, fmt.Errorf("invalid restart policy format")
	}
	if len(parts) == 2 {
		count, err := strconv.Atoi(parts[1])
		if err != nil {
			return p, fmt.Errorf("maximum retry count must be an integer")
		}

		p.MaximumRetryCount = count
	}

	p.Name = parts[0]

	return p, nil
}
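Note (not part of the vendored file): a short sketch of the key=value helpers and restart-policy parsing above, assuming the github.com/docker/cli/opts import path.

package main

import (
	"fmt"

	"github.com/docker/cli/opts" // assumed import path for the vendored package
)

func main() {
	// "on-failure:5" -> Name "on-failure", MaximumRetryCount 5.
	p, err := opts.ParseRestartPolicy("on-failure:5")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(p.Name, p.MaximumRetryCount)

	// ConvertKVStringsToMapWithNil keeps "UNSET" (no '=') distinguishable from "EMPTY=".
	env := opts.ConvertKVStringsToMapWithNil([]string{"FOO=bar", "EMPTY=", "UNSET"})
	for k, v := range env {
		if v == nil {
			fmt.Println(k, "-> nil (value would come from the environment)")
		} else {
			fmt.Println(k, "->", *v)
		}
	}
}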
172
vendor/github.com/docker/cli/opts/port.go
generated
vendored
Normal file
@ -0,0 +1,172 @@
package opts

import (
	"encoding/csv"
	"fmt"
	"regexp"
	"strconv"
	"strings"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/go-connections/nat"
	"github.com/sirupsen/logrus"
)

const (
	portOptTargetPort    = "target"
	portOptPublishedPort = "published"
	portOptProtocol      = "protocol"
	portOptMode          = "mode"
)

// PortOpt represents a port config in swarm mode.
type PortOpt struct {
	ports []swarm.PortConfig
}

// Set a new port value
// nolint: gocyclo
func (p *PortOpt) Set(value string) error {
	longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value)
	if err != nil {
		return err
	}
	if longSyntax {
		csvReader := csv.NewReader(strings.NewReader(value))
		fields, err := csvReader.Read()
		if err != nil {
			return err
		}

		pConfig := swarm.PortConfig{}
		for _, field := range fields {
			parts := strings.SplitN(field, "=", 2)
			if len(parts) != 2 {
				return fmt.Errorf("invalid field %s", field)
			}

			key := strings.ToLower(parts[0])
			value := strings.ToLower(parts[1])

			switch key {
			case portOptProtocol:
				if value != string(swarm.PortConfigProtocolTCP) && value != string(swarm.PortConfigProtocolUDP) && value != string(swarm.PortConfigProtocolSCTP) {
					return fmt.Errorf("invalid protocol value %s", value)
				}

				pConfig.Protocol = swarm.PortConfigProtocol(value)
			case portOptMode:
				if value != string(swarm.PortConfigPublishModeIngress) && value != string(swarm.PortConfigPublishModeHost) {
					return fmt.Errorf("invalid publish mode value %s", value)
				}

				pConfig.PublishMode = swarm.PortConfigPublishMode(value)
			case portOptTargetPort:
				tPort, err := strconv.ParseUint(value, 10, 16)
				if err != nil {
					return err
				}

				pConfig.TargetPort = uint32(tPort)
			case portOptPublishedPort:
				pPort, err := strconv.ParseUint(value, 10, 16)
				if err != nil {
					return err
				}

				pConfig.PublishedPort = uint32(pPort)
			default:
				return fmt.Errorf("invalid field key %s", key)
			}
		}

		if pConfig.TargetPort == 0 {
			return fmt.Errorf("missing mandatory field %q", portOptTargetPort)
		}

		if pConfig.PublishMode == "" {
			pConfig.PublishMode = swarm.PortConfigPublishModeIngress
		}

		if pConfig.Protocol == "" {
			pConfig.Protocol = swarm.PortConfigProtocolTCP
		}

		p.ports = append(p.ports, pConfig)
	} else {
		// short syntax
		portConfigs := []swarm.PortConfig{}
		ports, portBindingMap, err := nat.ParsePortSpecs([]string{value})
		if err != nil {
			return err
		}
		for _, portBindings := range portBindingMap {
			for _, portBinding := range portBindings {
				if portBinding.HostIP != "" {
					return fmt.Errorf("hostip is not supported")
				}
			}
		}

		for port := range ports {
			portConfig, err := ConvertPortToPortConfig(port, portBindingMap)
			if err != nil {
				return err
			}
			portConfigs = append(portConfigs, portConfig...)
		}
		p.ports = append(p.ports, portConfigs...)
	}
	return nil
}

// Type returns the type of this option
func (p *PortOpt) Type() string {
	return "port"
}

// String returns a string repr of this option
func (p *PortOpt) String() string {
	ports := []string{}
	for _, port := range p.ports {
		repr := fmt.Sprintf("%v:%v/%s/%s", port.PublishedPort, port.TargetPort, port.Protocol, port.PublishMode)
		ports = append(ports, repr)
	}
	return strings.Join(ports, ", ")
}

// Value returns the ports
func (p *PortOpt) Value() []swarm.PortConfig {
	return p.ports
}

// ConvertPortToPortConfig converts ports to the swarm type
func ConvertPortToPortConfig(
	port nat.Port,
	portBindings map[nat.Port][]nat.PortBinding,
) ([]swarm.PortConfig, error) {
	ports := []swarm.PortConfig{}

	for _, binding := range portBindings[port] {
		if binding.HostIP != "" && binding.HostIP != "0.0.0.0" {
			logrus.Warnf("ignoring IP-address (%s:%s:%s) service will listen on '0.0.0.0'", binding.HostIP, binding.HostPort, port)
		}

		startHostPort, endHostPort, err := nat.ParsePortRange(binding.HostPort)

		if err != nil && binding.HostPort != "" {
			return nil, fmt.Errorf("invalid hostport binding (%s) for port (%s)", binding.HostPort, port.Port())
		}

		for i := startHostPort; i <= endHostPort; i++ {
			ports = append(ports, swarm.PortConfig{
				//TODO Name: ?
				Protocol:      swarm.PortConfigProtocol(strings.ToLower(port.Proto())),
				TargetPort:    uint32(port.Int()),
				PublishedPort: uint32(i),
				PublishMode:   swarm.PortConfigPublishModeIngress,
			})
		}
	}
	return ports, nil
}
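Note (not part of the vendored file): a sketch of the long and short publish syntaxes accepted by PortOpt.Set, assuming the github.com/docker/cli/opts import path.

package main

import (
	"fmt"

	"github.com/docker/cli/opts" // assumed import path for the vendored package
)

func main() {
	var p opts.PortOpt

	// Long syntax: CSV key=value pairs.
	if err := p.Set("target=80,published=8080,protocol=tcp,mode=ingress"); err != nil {
		fmt.Println("parse error:", err)
		return
	}

	// Short syntax: anything nat.ParsePortSpecs accepts, e.g. "published:target/proto".
	if err := p.Set("9090:90/udp"); err != nil {
		fmt.Println("parse error:", err)
		return
	}

	for _, pc := range p.Value() {
		fmt.Printf("%d -> %d %s %s\n", pc.PublishedPort, pc.TargetPort, pc.Protocol, pc.PublishMode)
	}
}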
37
vendor/github.com/docker/cli/opts/quotedstring.go
generated
vendored
Normal file
@ -0,0 +1,37 @@
package opts

// QuotedString is a string that may have extra quotes around the value. The
// quotes are stripped from the value.
type QuotedString struct {
	value *string
}

// Set sets a new value
func (s *QuotedString) Set(val string) error {
	*s.value = trimQuotes(val)
	return nil
}

// Type returns the type of the value
func (s *QuotedString) Type() string {
	return "string"
}

func (s *QuotedString) String() string {
	return *s.value
}

func trimQuotes(value string) string {
	lastIndex := len(value) - 1
	for _, char := range []byte{'\'', '"'} {
		if value[0] == char && value[lastIndex] == char {
			return value[1:lastIndex]
		}
	}
	return value
}

// NewQuotedString returns a new quoted string option
func NewQuotedString(value *string) *QuotedString {
	return &QuotedString{value: value}
}
79
vendor/github.com/docker/cli/opts/runtime.go
generated
vendored
Normal file
@ -0,0 +1,79 @@
package opts

import (
	"fmt"
	"strings"

	"github.com/docker/docker/api/types"
)

// RuntimeOpt defines a map of Runtimes
type RuntimeOpt struct {
	name             string
	stockRuntimeName string
	values           *map[string]types.Runtime
}

// NewNamedRuntimeOpt creates a new RuntimeOpt
func NewNamedRuntimeOpt(name string, ref *map[string]types.Runtime, stockRuntime string) *RuntimeOpt {
	if ref == nil {
		ref = &map[string]types.Runtime{}
	}
	return &RuntimeOpt{name: name, values: ref, stockRuntimeName: stockRuntime}
}

// Name returns the name of the NamedListOpts in the configuration.
func (o *RuntimeOpt) Name() string {
	return o.name
}

// Set validates and updates the list of Runtimes
func (o *RuntimeOpt) Set(val string) error {
	parts := strings.SplitN(val, "=", 2)
	if len(parts) != 2 {
		return fmt.Errorf("invalid runtime argument: %s", val)
	}

	parts[0] = strings.TrimSpace(parts[0])
	parts[1] = strings.TrimSpace(parts[1])
	if parts[0] == "" || parts[1] == "" {
		return fmt.Errorf("invalid runtime argument: %s", val)
	}

	parts[0] = strings.ToLower(parts[0])
	if parts[0] == o.stockRuntimeName {
		return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName)
	}

	if _, ok := (*o.values)[parts[0]]; ok {
		return fmt.Errorf("runtime '%s' was already defined", parts[0])
	}

	(*o.values)[parts[0]] = types.Runtime{Path: parts[1]}

	return nil
}

// String returns Runtime values as a string.
func (o *RuntimeOpt) String() string {
	var out []string
	for k := range *o.values {
		out = append(out, k)
	}

	return fmt.Sprintf("%v", out)
}

// GetMap returns a map of Runtimes (name: path)
func (o *RuntimeOpt) GetMap() map[string]types.Runtime {
	if o.values != nil {
		return *o.values
	}

	return map[string]types.Runtime{}
}

// Type returns the type of the option
func (o *RuntimeOpt) Type() string {
	return "runtime"
}
98
vendor/github.com/docker/cli/opts/secret.go
generated
vendored
Normal file
@ -0,0 +1,98 @@
package opts

import (
	"encoding/csv"
	"fmt"
	"os"
	"strconv"
	"strings"

	swarmtypes "github.com/docker/docker/api/types/swarm"
)

// SecretOpt is a Value type for parsing secrets
type SecretOpt struct {
	values []*swarmtypes.SecretReference
}

// Set a new secret value
func (o *SecretOpt) Set(value string) error {
	csvReader := csv.NewReader(strings.NewReader(value))
	fields, err := csvReader.Read()
	if err != nil {
		return err
	}

	options := &swarmtypes.SecretReference{
		File: &swarmtypes.SecretReferenceFileTarget{
			UID:  "0",
			GID:  "0",
			Mode: 0444,
		},
	}

	// support a simple syntax of --secret foo
	if len(fields) == 1 {
		options.File.Name = fields[0]
		options.SecretName = fields[0]
		o.values = append(o.values, options)
		return nil
	}

	for _, field := range fields {
		parts := strings.SplitN(field, "=", 2)
		key := strings.ToLower(parts[0])

		if len(parts) != 2 {
			return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
		}

		value := parts[1]
		switch key {
		case "source", "src":
			options.SecretName = value
		case "target":
			options.File.Name = value
		case "uid":
			options.File.UID = value
		case "gid":
			options.File.GID = value
		case "mode":
			m, err := strconv.ParseUint(value, 0, 32)
			if err != nil {
				return fmt.Errorf("invalid mode specified: %v", err)
			}

			options.File.Mode = os.FileMode(m)
		default:
			return fmt.Errorf("invalid field in secret request: %s", key)
		}
	}

	if options.SecretName == "" {
		return fmt.Errorf("source is required")
	}

	o.values = append(o.values, options)
	return nil
}

// Type returns the type of this option
func (o *SecretOpt) Type() string {
	return "secret"
}

// String returns a string repr of this option
func (o *SecretOpt) String() string {
	secrets := []string{}
	for _, secret := range o.values {
		repr := fmt.Sprintf("%s -> %s", secret.SecretName, secret.File.Name)
		secrets = append(secrets, repr)
	}
	return strings.Join(secrets, ", ")
}

// Value returns the secret requests
func (o *SecretOpt) Value() []*swarmtypes.SecretReference {
	return o.values
}
108
vendor/github.com/docker/cli/opts/throttledevice.go
generated
vendored
Normal file
@ -0,0 +1,108 @@
package opts

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/docker/docker/api/types/blkiodev"
	"github.com/docker/go-units"
)

// ValidatorThrottleFctType defines a validator function that returns a validated struct and/or an error.
type ValidatorThrottleFctType func(val string) (*blkiodev.ThrottleDevice, error)

// ValidateThrottleBpsDevice validates that the specified string has a valid device-rate format.
func ValidateThrottleBpsDevice(val string) (*blkiodev.ThrottleDevice, error) {
	split := strings.SplitN(val, ":", 2)
	if len(split) != 2 {
		return nil, fmt.Errorf("bad format: %s", val)
	}
	if !strings.HasPrefix(split[0], "/dev/") {
		return nil, fmt.Errorf("bad format for device path: %s", val)
	}
	rate, err := units.RAMInBytes(split[1])
	if err != nil {
		return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>[<unit>]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val)
	}
	if rate < 0 {
		return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>[<unit>]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val)
	}

	return &blkiodev.ThrottleDevice{
		Path: split[0],
		Rate: uint64(rate),
	}, nil
}

// ValidateThrottleIOpsDevice validates that the specified string has a valid device-rate format.
func ValidateThrottleIOpsDevice(val string) (*blkiodev.ThrottleDevice, error) {
	split := strings.SplitN(val, ":", 2)
	if len(split) != 2 {
		return nil, fmt.Errorf("bad format: %s", val)
	}
	if !strings.HasPrefix(split[0], "/dev/") {
		return nil, fmt.Errorf("bad format for device path: %s", val)
	}
	rate, err := strconv.ParseUint(split[1], 10, 64)
	if err != nil {
		return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>. Number must be a positive integer", val)
	}
	if rate < 0 {
		return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>. Number must be a positive integer", val)
	}

	return &blkiodev.ThrottleDevice{Path: split[0], Rate: rate}, nil
}

// ThrottledeviceOpt defines a map of ThrottleDevices
type ThrottledeviceOpt struct {
	values    []*blkiodev.ThrottleDevice
	validator ValidatorThrottleFctType
}

// NewThrottledeviceOpt creates a new ThrottledeviceOpt
func NewThrottledeviceOpt(validator ValidatorThrottleFctType) ThrottledeviceOpt {
	values := []*blkiodev.ThrottleDevice{}
	return ThrottledeviceOpt{
		values:    values,
		validator: validator,
	}
}

// Set validates a ThrottleDevice and sets its name as a key in ThrottledeviceOpt
func (opt *ThrottledeviceOpt) Set(val string) error {
	var value *blkiodev.ThrottleDevice
	if opt.validator != nil {
		v, err := opt.validator(val)
		if err != nil {
			return err
		}
		value = v
	}
	(opt.values) = append((opt.values), value)
	return nil
}

// String returns ThrottledeviceOpt values as a string.
func (opt *ThrottledeviceOpt) String() string {
	var out []string
	for _, v := range opt.values {
		out = append(out, v.String())
	}

	return fmt.Sprintf("%v", out)
}

// GetList returns a slice of pointers to ThrottleDevices.
func (opt *ThrottledeviceOpt) GetList() []*blkiodev.ThrottleDevice {
	var throttledevice []*blkiodev.ThrottleDevice
	throttledevice = append(throttledevice, opt.values...)

	return throttledevice
}

// Type returns the option type
func (opt *ThrottledeviceOpt) Type() string {
	return "list"
}
57
vendor/github.com/docker/cli/opts/ulimit.go
generated
vendored
Normal file
@ -0,0 +1,57 @@
package opts

import (
	"fmt"

	"github.com/docker/go-units"
)

// UlimitOpt defines a map of Ulimits
type UlimitOpt struct {
	values *map[string]*units.Ulimit
}

// NewUlimitOpt creates a new UlimitOpt
func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt {
	if ref == nil {
		ref = &map[string]*units.Ulimit{}
	}
	return &UlimitOpt{ref}
}

// Set validates a Ulimit and sets its name as a key in UlimitOpt
func (o *UlimitOpt) Set(val string) error {
	l, err := units.ParseUlimit(val)
	if err != nil {
		return err
	}

	(*o.values)[l.Name] = l

	return nil
}

// String returns Ulimit values as a string.
func (o *UlimitOpt) String() string {
	var out []string
	for _, v := range *o.values {
		out = append(out, v.String())
	}

	return fmt.Sprintf("%v", out)
}

// GetList returns a slice of pointers to Ulimits.
func (o *UlimitOpt) GetList() []*units.Ulimit {
	var ulimits []*units.Ulimit
	for _, v := range *o.values {
		ulimits = append(ulimits, v)
	}

	return ulimits
}

// Type returns the option type
func (o *UlimitOpt) Type() string {
	return "ulimit"
}
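Note (not part of the vendored file): a minimal sketch of UlimitOpt, assuming the github.com/docker/cli/opts import path; go-units parses values of the form "name=soft[:hard]".

package main

import (
	"fmt"

	"github.com/docker/cli/opts" // assumed import path for the vendored package
)

func main() {
	// Passing nil lets the option allocate its own backing map.
	u := opts.NewUlimitOpt(nil)

	// Setting the same limit name again overwrites the earlier entry (keyed by name).
	if err := u.Set("nofile=1024:2048"); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	_ = u.Set("nproc=512")

	for _, l := range u.GetList() {
		fmt.Println(l.Name, l.Soft, l.Hard)
	}
}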
84
vendor/github.com/docker/cli/opts/weightdevice.go
generated
vendored
Normal file
@ -0,0 +1,84 @@
package opts

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/docker/docker/api/types/blkiodev"
)

// ValidatorWeightFctType defines a validator function that returns a validated struct and/or an error.
type ValidatorWeightFctType func(val string) (*blkiodev.WeightDevice, error)

// ValidateWeightDevice validates that the specified string has a valid device-weight format.
func ValidateWeightDevice(val string) (*blkiodev.WeightDevice, error) {
	split := strings.SplitN(val, ":", 2)
	if len(split) != 2 {
		return nil, fmt.Errorf("bad format: %s", val)
	}
	if !strings.HasPrefix(split[0], "/dev/") {
		return nil, fmt.Errorf("bad format for device path: %s", val)
	}
	weight, err := strconv.ParseUint(split[1], 10, 0)
	if err != nil {
		return nil, fmt.Errorf("invalid weight for device: %s", val)
	}
	if weight > 0 && (weight < 10 || weight > 1000) {
		return nil, fmt.Errorf("invalid weight for device: %s", val)
	}

	return &blkiodev.WeightDevice{
		Path:   split[0],
		Weight: uint16(weight),
	}, nil
}

// WeightdeviceOpt defines a map of WeightDevices
type WeightdeviceOpt struct {
	values    []*blkiodev.WeightDevice
	validator ValidatorWeightFctType
}

// NewWeightdeviceOpt creates a new WeightdeviceOpt
func NewWeightdeviceOpt(validator ValidatorWeightFctType) WeightdeviceOpt {
	values := []*blkiodev.WeightDevice{}
	return WeightdeviceOpt{
		values:    values,
		validator: validator,
	}
}

// Set validates a WeightDevice and sets its name as a key in WeightdeviceOpt
func (opt *WeightdeviceOpt) Set(val string) error {
	var value *blkiodev.WeightDevice
	if opt.validator != nil {
		v, err := opt.validator(val)
		if err != nil {
			return err
		}
		value = v
	}
	(opt.values) = append((opt.values), value)
	return nil
}

// String returns WeightdeviceOpt values as a string.
func (opt *WeightdeviceOpt) String() string {
	var out []string
	for _, v := range opt.values {
		out = append(out, v.String())
	}

	return fmt.Sprintf("%v", out)
}

// GetList returns a slice of pointers to WeightDevices.
func (opt *WeightdeviceOpt) GetList() []*blkiodev.WeightDevice {
	return opt.values
}

// Type returns the option type
func (opt *WeightdeviceOpt) Type() string {
	return "list"
}
Some files were not shown because too many files have changed in this diff.