A complete blog overhaul!
|
@ -5,7 +5,7 @@ name: Deploy
|
|||
|
||||
steps:
|
||||
- name: build
|
||||
image: nikola:20200217
|
||||
image: nikola:20200831
|
||||
volumes:
|
||||
- name: blog-path
|
||||
path: /drone/src/output/
|
||||
|
@ -21,5 +21,4 @@ steps:
|
|||
volumes:
|
||||
- name: blog-path
|
||||
host:
|
||||
path: /mnt/blog/
|
||||
|
||||
path: /mnt/new_blog/
|
||||
|
|
1
.gitignore
vendored
|
@ -3,6 +3,7 @@ __pycache__/
|
|||
output/
|
||||
cache/
|
||||
.doit.db
|
||||
.doit.db.*
|
||||
.venv/
|
||||
.DS_Store
|
||||
.venv
|
||||
|
|
65
conf.py
|
@ -25,7 +25,8 @@ SOURCE_CODE_URL = "https://gitea.project42.io/Elia/blog.lazkani.io"
|
|||
# This is the URL where Nikola's output will be deployed.
|
||||
# If not set, defaults to SITE_URL
|
||||
# BASE_URL = "https://blog.lazkani.io/"
|
||||
BLOG_EMAIL = None
|
||||
#BLOG_EMAIL = None
|
||||
BLOG_EMAIL = ""
|
||||
BLOG_DESCRIPTION = "A blog where technology, passion and experience combine" # (translatable)
|
||||
|
||||
# Nikola is multilingual!
|
||||
|
@ -139,16 +140,12 @@ TRANSLATIONS_PATTERN = '{path}.{lang}.{ext}'
|
|||
|
||||
NAVIGATION_LINKS = {
|
||||
DEFAULT_LANG: (
|
||||
(
|
||||
(
|
||||
("/blog/", "Blog"),
|
||||
("/archive.html", "Archives"),
|
||||
("/categories/", "Tags"),
|
||||
("/rss.xml", "RSS feed")
|
||||
),
|
||||
"Blog Content"
|
||||
),
|
||||
("/about_me/", "About me"),
|
||||
("/", "Home"),
|
||||
("/blog/", "Blog Posts"),
|
||||
("/archive.html", "Archives"),
|
||||
("/categories/", "Tags"),
|
||||
("/rss.xml", "RSS feed"),
|
||||
("/about/", "About"),
|
||||
),
|
||||
}
|
||||
|
||||
|
@ -161,7 +158,8 @@ NAVIGATION_ALT_LINKS = {
|
|||
|
||||
# Name of the theme to use.
|
||||
#THEME = "bootblog4"
|
||||
THEME = "custom"
|
||||
#THEME = "custom"
|
||||
THEME = "willy-theme"
|
||||
|
||||
# Primary color of your theme. This will be used to customize your theme.
|
||||
# Must be a HEX value.
|
||||
|
@ -229,12 +227,14 @@ POSTS = (
|
|||
("posts/*.md", "posts", "post.tmpl"),
|
||||
("posts/*.txt", "posts", "post.tmpl"),
|
||||
("posts/*.html", "posts", "post.tmpl"),
|
||||
("posts/*.org", "posts", "post.tmpl"),
|
||||
)
|
||||
PAGES = (
|
||||
("pages/*.rst", "", "page.tmpl"),
|
||||
("pages/*.md", "", "page.tmpl"),
|
||||
("pages/*.txt", "", "page.tmpl"),
|
||||
("pages/*.html", "", "page.tmpl"),
|
||||
("pages/*.org", "", "page.tmpl"),
|
||||
)
|
||||
|
||||
|
||||
|
@ -313,6 +313,7 @@ COMPILERS = {
|
|||
# but is disabled by default as it would conflict
|
||||
# with many of the others.
|
||||
# "pandoc": ('.rst', '.md', '.txt'),
|
||||
"orgmode": ('.org',),
|
||||
}
|
||||
|
||||
# Create by default posts in one file format?
|
||||
|
@ -652,7 +653,7 @@ GITHUB_REMOTE_NAME = 'origin'
|
|||
|
||||
# Whether or not github_deploy should commit to the source branch automatically
|
||||
# before deploying.
|
||||
GITHUB_COMMIT_SOURCE = True
|
||||
GITHUB_COMMIT_SOURCE = False
|
||||
|
||||
# Where the output site should be located
|
||||
# If you don't use an absolute path, it will be considered as relative
|
||||
|
@ -941,6 +942,7 @@ LICENSE = """
|
|||
|
||||
# A small copyright notice for the page footer (in HTML).
|
||||
# (translatable)
|
||||
|
||||
CONTENT_FOOTER = '''
|
||||
<center>
|
||||
Stay in touch
|
||||
|
@ -952,8 +954,11 @@ Stay in touch
|
|||
<a href="https://github.com/elazkani" title="My GitHub Profile" class="fab fa-github" target="_blank"></a>
|
||||
<br />
|
||||
<br />
|
||||
Contents © {date} <a href="{source_code_url}">{author}</a> - Powered by <a href="https://getnikola.com" rel="nofollow">Nikola</a> under {license}
|
||||
Contents © {date} <a href="{source_code_url}">{author}</a>
|
||||
<br />
|
||||
Powered by <a href="https://getnikola.com" rel="nofollow">Nikola</a>
|
||||
<br />
|
||||
{license}
|
||||
</center>
|
||||
'''
|
||||
|
||||
|
@ -1163,7 +1168,7 @@ GENERATE_ATOM = True
|
|||
# FEED_TEASERS = True
|
||||
|
||||
# Strip HTML from Atom and RSS feed summaries and content. Defaults to False.
|
||||
# FEED_PLAIN = False
|
||||
FEED_PLAIN = True
|
||||
|
||||
# Number of posts in Atom and RSS feeds.
|
||||
# FEED_LENGTH = 10
|
||||
|
@ -1183,20 +1188,20 @@ GENERATE_ATOM = True
|
|||
# This search form works for any site and looks good in the "site" theme where
|
||||
# it appears on the navigation bar:
|
||||
|
||||
SEARCH_FORM = """
|
||||
<!-- DuckDuckGo custom search -->
|
||||
<form method="get" id="search" action="https://duckduckgo.com/"
|
||||
class="navbar-form pull-left">
|
||||
<input type="hidden" name="sites" value="%s">
|
||||
<input type="hidden" name="k8" value="#444444">
|
||||
<input type="hidden" name="k9" value="#D51920">
|
||||
<input type="hidden" name="kt" value="h">
|
||||
<input type="text" name="q" maxlength="255"
|
||||
placeholder="Search…" class="span2" style="margin-top: 4px;">
|
||||
<input type="submit" value="DuckDuckGo Search" style="visibility: hidden;">
|
||||
</form>
|
||||
<!-- End of custom search -->
|
||||
""" % SITE_URL
|
||||
#SEARCH_FORM = """
|
||||
#<!-- DuckDuckGo custom search -->
|
||||
#<form method="get" id="search" action="https://duckduckgo.com/"
|
||||
# class="navbar-form pull-left">
|
||||
#<input type="hidden" name="sites" value="%s">
|
||||
#<input type="hidden" name="k8" value="#444444">
|
||||
#<input type="hidden" name="k9" value="#D51920">
|
||||
#<input type="hidden" name="kt" value="h">
|
||||
#<input type="text" name="q" maxlength="255"
|
||||
# placeholder="Search…" class="span2" style="margin-top: 4px;">
|
||||
#<input type="submit" value="DuckDuckGo Search" style="visibility: hidden;">
|
||||
#</form>
|
||||
#<!-- End of custom search -->
|
||||
#""" % SITE_URL
|
||||
|
||||
# If you prefer a Google search form, here's an example that should just work:
|
||||
# SEARCH_FORM = """
|
||||
|
@ -1231,10 +1236,12 @@ SEARCH_FORM = """
|
|||
# before </head>
|
||||
# (translatable)
|
||||
# EXTRA_HEAD_DATA = ""
|
||||
EXTRA_HEAD_DATA = """<link rel="stylesheet" href="/assets/css/index.css">"""
|
||||
# Google Analytics or whatever else you use. Added to the bottom of <body>
|
||||
# in the default template (base.tmpl).
|
||||
# (translatable)
|
||||
# BODY_END = ""
|
||||
BODY_END = """<script src="/assets/js/index.js"></script>"""
|
||||
|
||||
# The possibility to extract metadata from the filename by using a
|
||||
# regular expression.
|
||||
|
|
|
@ -1,30 +0,0 @@
|
|||
.literal {
|
||||
border: 1px solid #ccc;
|
||||
color: #999;
|
||||
background-color: #272822;
|
||||
border-radius: 3px;
|
||||
font-family: Monaco, Menlo, Consolas, "Courier New", monospace;
|
||||
white-space: nowrap;
|
||||
font-size: 12px;
|
||||
padding: 2px 4px;
|
||||
}
|
||||
|
||||
div.note {
|
||||
word-wrap: break-word;
|
||||
background-color: rgb(34,34,34);
|
||||
border: 1px solid #007053;
|
||||
}
|
||||
|
||||
div.admonition, div.hint, div.important, div.note, div.tip, div.sidebar, div.attention, div.caution, div.danger, div.error, div.warning, div.system-message {
|
||||
background-color: rgb(34,34,34);
|
||||
}
|
||||
|
||||
div.note p.admonition-title {
|
||||
color: #ffffff;
|
||||
background-color: #007053 !important;
|
||||
border-bottom: 1px solid #007053;
|
||||
}
|
||||
|
||||
div.admonition p, div.hint p, div.important p, div.note p, div.tip p, div.sidebar p, div.attention p, div.caution p, div.danger p, div.error p, div.warning p, div.system-message p {
|
||||
color: #ffffff;
|
||||
}
|
Before Width: | Height: | Size: 96 KiB After Width: | Height: | Size: 96 KiB |
Before Width: | Height: | Size: 70 KiB After Width: | Height: | Size: 70 KiB |
Before Width: | Height: | Size: 54 KiB After Width: | Height: | Size: 54 KiB |
Before Width: | Height: | Size: 46 KiB After Width: | Height: | Size: 46 KiB |
Before Width: | Height: | Size: 65 KiB After Width: | Height: | Size: 65 KiB |
Before Width: | Height: | Size: 63 KiB After Width: | Height: | Size: 63 KiB |
Before Width: | Height: | Size: 91 KiB After Width: | Height: | Size: 91 KiB |
Before Width: | Height: | Size: 123 KiB After Width: | Height: | Size: 123 KiB |
Before Width: | Height: | Size: 126 KiB After Width: | Height: | Size: 126 KiB |
Before Width: | Height: | Size: 42 KiB After Width: | Height: | Size: 42 KiB |
Before Width: | Height: | Size: 130 KiB After Width: | Height: | Size: 130 KiB |
Before Width: | Height: | Size: 72 KiB After Width: | Height: | Size: 72 KiB |
Before Width: | Height: | Size: 17 KiB After Width: | Height: | Size: 17 KiB |
23
pages/about.org
Normal file
|
@ -0,0 +1,23 @@
|
|||
#+BEGIN_COMMENT
|
||||
.. title: About
|
||||
.. date: 2019-06-21T00:00:00+02:00
|
||||
.. author: Elia el Lazkani
|
||||
.. url: about
|
||||
.. status: published
|
||||
#+END_COMMENT
|
||||
|
||||
* Who am I ?
|
||||
I am a DevOps engineer with a passion for technology, automation, Linux and OpenSource. I love learning new tricks and challenging myself with new tools being released on a monthly bases around /kubernetes/ and/or /configuration management/. On my free time, I like to write automation tools and packages which can be found on PyPI. Or, I might as well tinker with new things around /kubernetes/. I blog about all that /here/. I think if I can write a blog about it, I understand it enough to have an opinion about it. It all comes in handy when the business need arises. I play around with technologies all day long by deploying, configuring, managing and maintaining all parts of the infrastructure below the application layer. I dabbled with "architecting" parts of different infrastructures, from end to end and I can say I have a knack for it and I like it when possible.
|
||||
|
||||
* Experience
|
||||
Here's a quick and dirty list of some of the technologies I've had my hands dirty with.
|
||||
|
||||
- *Neworking*: Configuring routers and switches (Brocade, CISCO, Dell).
|
||||
- *Infrastructure*: Finding, automating, deploying and managing infrastructure key services. Too many to mention.
|
||||
- *Virtualization*: Building infrastructures for virtualization (HyperV, libvirt, proxmox, RHEV, VMWare).
|
||||
- *Configuration Management*: Ansible, Chef, Puppet, SaltStack.
|
||||
- *CI/CD*: Gitlab-CI, Jenkins.
|
||||
- *Cloud*: AWS, Linode & Hetzner.
|
||||
- *Development*: Python and Golang packages for plugging in different technologies together for automation.
|
||||
- *Containers*: Docker, K8s and k3s deployment, management and supporting team deployments.
|
||||
- *Misc*: Service meshes, Hashistack and misc other tools
|
|
@ -1,20 +0,0 @@
|
|||
.. title: About me
|
||||
.. date: 2019-06-21
|
||||
.. status: published
|
||||
.. authors: Elia el Lazkani
|
||||
|
||||
I am a DevOps engineer with a passion for technology, automation, Linux and OpenSource. I love learning new tricks and challenging myself with new tools being released on a monthly bases around *kubernetes* and/or *configuration management*. On my free time, I like to write automation tools and packages which can be found on PyPI. Or, I might as well tinker with new things around *kubernetes*. I blog about all that *here*. I think if I can write a blog about it, I understand it enough to have an opinion about it. It all comes in handy when the business need arises. I play around with technologies all day long by deploying, configuring, managing and maintaining all parts of the infrastructure below the application layer. I dabbled with "architecting" parts of different infrastructures, from end to end and I can say I have a knack for it and I like it when possible.
|
||||
|
||||
Experience
|
||||
==========
|
||||
|
||||
Here's a quick and dirty list of some of the technologies I've had my hands dirty with.
|
||||
|
||||
- **Neworking**: Configuring routers and switches (Brocade, CISCO, Dell).
|
||||
- **Infrastructure**: Finding, automating, deploying and managing infrastructure key services. Too many to mention.
|
||||
- **Virtualization**: Building infrastructures for virtualization (HyperV, libvirt, proxmox, RHEV, VMWare).
|
||||
- **Configuration Management**: Ansible, Chef, Puppet.
|
||||
- **CI/CD**: Gitlab-CI, Jenkins.
|
||||
- **Cloud**: AWS.
|
||||
- **Development**: Python packages for plugging in different technologies together for automation.
|
||||
- **Containers**: Docker and Kubernetes deployment, management and supporting team deployments.
|
41
pages/index.org
Normal file
|
@ -0,0 +1,41 @@
|
|||
#+BEGIN_COMMENT
|
||||
.. title: Welcome to the DevOps Blog
|
||||
.. date: 2019-06-23T00:00:00+02:00
|
||||
.. status: published
|
||||
.. slug: index
|
||||
.. type: text
|
||||
#+END_COMMENT
|
||||
|
||||
* What is this ?
|
||||
This is my humble blog where I post things related to DevOps in hope that I or someone else might benefit from it.
|
||||
|
||||
* Wait what ? What is DevOps ?
|
||||
[[https://duckduckgo.com/?q=what+is+devops+%3F&t=ffab&ia=web&iax=about][Duckduckgo]] defines DevOps as:
|
||||
|
||||
#+BEGIN_QUOTE
|
||||
DevOps is a software engineering culture and practice that aims at unifying
|
||||
software development and software operation. The main characteristic of the
|
||||
DevOps movement is to strongly advocate automation and monitoring at all
|
||||
steps of software construction, from integration, testing, releasing to
|
||||
deployment and infrastructure management. DevOps aims at shorter development
|
||||
cycles, increased deployment frequency, and more dependable releases,
|
||||
in close alignment with business objectives.
|
||||
#+END_QUOTE
|
||||
|
||||
In short, we build an infrastructure that is easily deployable, maintainable and, in all forms, makes the lives of the developers a breeze.
|
||||
|
||||
* What do you blog about ?
|
||||
Anything and everything related to DevOps. The field is very big and complex with a lot of different tools and technologies implemented.
|
||||
|
||||
I try to blog about interesting and new things as much as possible, when time permits.
|
||||
|
||||
Here's a short list of the latest posts.
|
||||
|
||||
{{% post-list start=0 stop=3 %}}
|
||||
{{% /post-list %}}
|
||||
|
||||
* Projects
|
||||
- [[https://gitea.project42.io/Elia/blog.lazkani.io][blog.lazkani.io]]: The DevOps [[https://blog.lazkani.io][blog]]
|
||||
- [[https://gitlab.com/elazkani/weenotify][weenotify]]: an official [[https://weechat.org][weechat]] notification plugin.
|
||||
- [[https://gitlab.com/elazkani/rundeck-resources][rundeck-resources]]: python tool to query resources from different sources and export them into a data structure that [[https://www.rundeck.com/open-source][Rundeck]] can consume. This tool can be found on [[https://pypi.org/project/rundeck-resources/][PyPI]].
|
||||
- [[https://gitlab.com/elazkani/get-k8s-resources][get\under{}k8s\under{}resources]]: a small python script that returns a list of kubernetes resources.
|
|
@ -1,51 +0,0 @@
|
|||
.. title: Welcome to the DevOps blog
|
||||
.. slug: index
|
||||
.. date: 2019-06-23
|
||||
.. tags:
|
||||
.. category:
|
||||
.. description:
|
||||
.. type: text
|
||||
|
||||
|
||||
What is this ?
|
||||
==============
|
||||
|
||||
This is my humble blog where I post things related to DevOps in hope that I or someone else might benefit from it.
|
||||
|
||||
|
||||
Wait what ? What is DevOps ?
|
||||
============================
|
||||
|
||||
`Duckduckgo <https://duckduckgo.com/?q=what+is+devops+%3F&t=ffab&ia=web&iax=about>`_ define DevOps as:
|
||||
|
||||
DevOps is a software engineering culture and practice that aims at unifying
|
||||
software development and software operation. The main characteristic of the
|
||||
DevOps movement is to strongly advocate automation and monitoring at all
|
||||
steps of software construction, from integration, testing, releasing to
|
||||
deployment and infrastructure management. DevOps aims at shorter development
|
||||
cycles, increased deployment frequency, and more dependable releases,
|
||||
in close alignment with business objectives.
|
||||
|
||||
In short, we build an infrastructure that is easily deployable, maintainable and, in all forms, makes the lives of the developers a breeze.
|
||||
|
||||
|
||||
What do you blog about ?
|
||||
========================
|
||||
|
||||
Anything and everything related to DevOps. The field is very big and complex with a lot of different tools and technologies implemented.
|
||||
I try to blog about interesting and new things as much as possible, when time permits.
|
||||
|
||||
Here's a short list of the latest posts.
|
||||
|
||||
.. post-list::
|
||||
:start: 0
|
||||
:stop: 3
|
||||
|
||||
|
||||
Projects
|
||||
========
|
||||
|
||||
- `blog.lazkani.io <https://gitea.project42.io/Elia/blog.lazkani.io>`_: The DevOps `blog <https://blog.lazkani.io>`_.
|
||||
- `weenotify <https://gitlab.com/elazkani/weenotify>`_: an official `weechat <https://weechat.org>`_ notification plugin.
|
||||
- `rundeck-resources <https://gitlab.com/elazkani/rundeck-resources>`_: python tool to query resources from different sources and export them into a data structure that `Rundeck <https://www.rundeck.com/open-source>`_ can consume. This tool can be found on `PyPI <https://pypi.org/project/rundeck-resources/>`_.
|
||||
- `get_k8s_resources <https://gitlab.com/elazkani/get-k8s-resources>`_: a small python script that returns a list of kubernetes resources.
|
15
pages/not-found.org
Normal file
|
@ -0,0 +1,15 @@
|
|||
#+BEGIN_COMMENT
|
||||
.. title: Not Found
|
||||
.. slug: not-found
|
||||
.. date: 2020-02-08
|
||||
.. tags:
|
||||
.. category:
|
||||
.. description:
|
||||
.. type: text
|
||||
#+END_COMMENT
|
||||
|
||||
Oops... We don't know how you ended up here.
|
||||
|
||||
There is nothing here to look at...
|
||||
|
||||
The main site is over @ {{% doc name="test" %}}index{{% /doc %}}.
|
|
@ -1,12 +0,0 @@
|
|||
.. title: Not Found
|
||||
.. slug: not-found
|
||||
.. date: 2020-02-08
|
||||
.. tags:
|
||||
.. category:
|
||||
.. description:
|
||||
.. type: text
|
||||
|
||||
|
||||
Oops... We don't know how you ended up here.
|
||||
|
||||
There is nothing here to look at, try the main :doc:`site <index>`.
|
1
plugins/__init__.py
Normal file
|
@ -0,0 +1 @@
|
|||
# Plugin modules go here.
|
52
plugins/orgmode/README.md
Normal file
|
@ -0,0 +1,52 @@
|
|||
This plugin implements an Emacs Org-mode based compiler for Nikola.
|
||||
|
||||
## Setup
|
||||
|
||||
If your emacs does not ship with org-mode (>=8.x), you will have to edit the
|
||||
`init.el` file supplied with this plugin, and load a newer version of org-mode.
|
||||
|
||||
You will also need to add the orgmode compiler to your list of compilers, and
|
||||
modify your POSTS & PAGES variables. (See the sample conf file provided.)
|
||||
|
||||
### Syntax highlighting with pygments
|
||||
|
||||
By default, the plugin uses `pygments` for syntax highlighting. You can disable
|
||||
this by setting `nikola-use-pygments` to `nil` in `init.el` or `conf.el` (see
|
||||
Customization section below).
|
||||
|
||||
To get proper syntax highlighting, you will need to add custom CSS to your
|
||||
theme. You can generate this CSS using the `pygmentize` command as follows:
|
||||
|
||||
mkdir -p files/assets/css
|
||||
pygmentize -S <PYGMENTS_STYLE> -a .highlight -f html >> files/assets/css/custom.css
|
||||
|
||||
and make sure that `custom.css` is included in your site by your
|
||||
theme. The various available style options for `<PYGMENTS_STYLE>` can be found
|
||||
using the command `pygmentize -L style`.
|
||||
|
||||
## Customization
|
||||
|
||||
You can add any customization variables that you wish to add, to modify the
|
||||
output generated by org-mode to `conf.el` inside the plugin directory. This
|
||||
lets you have custom configuration, that doesn't get wiped out each time the
|
||||
plugin is updated.
|
||||
|
||||
## Teasers
|
||||
|
||||
You may use teasers by enabling `INDEX_TEASERS = True` in conf.py, and
|
||||
use `{{{TEASER_END}}}` to generate `<!-- TEASER_END -->` in org posts.
|
||||
|
||||
## Image URLs
|
||||
|
||||
The image url in ox-html is a little fuzzy. For example, `[[/images/test.jpg]]` will be
|
||||
generated as `<img src="file:///images/test.jpg" alt="test.jpg">`
|
||||
because the path is considered as an absolute file path.
|
||||
|
||||
If you add inline images with built-in file scheme to see them on your
|
||||
Emacs orgmode, the references are resolved. For examples, you may
|
||||
refer with `[[file:../images/test.jpg]]` to review an image stored on
|
||||
images directory during writing, then it will be generated as `<img
|
||||
src="images/test.jpg" alt="test.jpg">`.
|
||||
|
||||
Alternatively, you may write `[[img-url:/images/test.jpg]]`, and then
|
||||
it should be generated as `<img src="/images/test.jpg" alt="test.jpg">`.
|
8
plugins/orgmode/conf.py.sample
Normal file
|
@ -0,0 +1,8 @@
|
|||
# NOTE: Needs additional configuration in init.el file.
|
||||
|
||||
# Add the orgmode compiler to your COMPILERS dict.
|
||||
COMPILERS["orgmode"] = ['.org']
|
||||
|
||||
# Add org files to your POSTS, PAGES
|
||||
POSTS = POSTS + (("posts/*.org", "posts", "post.tmpl"),)
|
||||
PAGES = PAGES + (("pages/*.org", "pages", "page.tmpl"),)
|
1
plugins/orgmode/emacs-htmlize
Submodule
|
@ -0,0 +1 @@
|
|||
Subproject commit 49205105898ba8993b5253beec55d8bddd820a70
|
136
plugins/orgmode/init.el
Normal file
|
@ -0,0 +1,136 @@
|
|||
;; Init file to use with the orgmode plugin.
|
||||
|
||||
;; Load org-mode
|
||||
;; Requires org-mode v8.x
|
||||
|
||||
(require 'package)
|
||||
(add-to-list 'load-path "~/.emacs.d/.local/straight/build/htmlize")
|
||||
(setq package-load-list '((htmlize t)))
|
||||
(package-initialize)
|
||||
|
||||
(require 'org)
|
||||
(require 'ox-html)
|
||||
|
||||
;;; Custom configuration for the export.
|
||||
|
||||
;;; Add any custom configuration that you would like to 'conf.el'.
|
||||
(setq nikola-use-pygments t
|
||||
org-export-with-toc nil
|
||||
org-export-with-section-numbers nil
|
||||
org-startup-folded 'showeverything)
|
||||
|
||||
;; Load additional configuration from conf.el
|
||||
(let ((conf (expand-file-name "conf.el" (file-name-directory load-file-name))))
|
||||
(if (file-exists-p conf)
|
||||
(load-file conf)))
|
||||
|
||||
;;; Macros
|
||||
|
||||
;; Load Nikola macros
|
||||
(setq nikola-macro-templates
|
||||
(with-current-buffer
|
||||
(find-file
|
||||
(expand-file-name "macros.org" (file-name-directory load-file-name)))
|
||||
(org-macro--collect-macros)))
|
||||
|
||||
;;; Code highlighting
|
||||
(defun org-html-decode-plain-text (text)
|
||||
"Convert HTML character to plain TEXT. i.e. do the inversion of
|
||||
`org-html-encode-plain-text`. Possible conversions are set in
|
||||
`org-html-protect-char-alist'."
|
||||
(mapc
|
||||
(lambda (pair)
|
||||
(setq text (replace-regexp-in-string (cdr pair) (car pair) text t t)))
|
||||
(reverse org-html-protect-char-alist))
|
||||
text)
|
||||
|
||||
;; Use pygments highlighting for code
|
||||
(defun pygmentize (lang code)
|
||||
"Use Pygments to highlight the given code and return the output"
|
||||
(with-temp-buffer
|
||||
(insert code)
|
||||
(let ((lang (or (cdr (assoc lang org-pygments-language-alist)) "text")))
|
||||
(shell-command-on-region (point-min) (point-max)
|
||||
(format "pygmentize -f html -l %s" lang)
|
||||
(buffer-name) t))
|
||||
(buffer-string)))
|
||||
|
||||
(defconst org-pygments-language-alist
|
||||
'(("asymptote" . "asymptote")
|
||||
("awk" . "awk")
|
||||
("c" . "c")
|
||||
("c++" . "cpp")
|
||||
("cpp" . "cpp")
|
||||
("clojure" . "clojure")
|
||||
("css" . "css")
|
||||
("d" . "d")
|
||||
("emacs-lisp" . "scheme")
|
||||
("F90" . "fortran")
|
||||
("gnuplot" . "gnuplot")
|
||||
("groovy" . "groovy")
|
||||
("haskell" . "haskell")
|
||||
("java" . "java")
|
||||
("js" . "js")
|
||||
("julia" . "julia")
|
||||
("latex" . "latex")
|
||||
("lisp" . "lisp")
|
||||
("makefile" . "makefile")
|
||||
("matlab" . "matlab")
|
||||
("mscgen" . "mscgen")
|
||||
("ocaml" . "ocaml")
|
||||
("octave" . "octave")
|
||||
("perl" . "perl")
|
||||
("picolisp" . "scheme")
|
||||
("python" . "python")
|
||||
("r" . "r")
|
||||
("ruby" . "ruby")
|
||||
("sass" . "sass")
|
||||
("scala" . "scala")
|
||||
("scheme" . "scheme")
|
||||
("sh" . "sh")
|
||||
("sql" . "sql")
|
||||
("sqlite" . "sqlite3")
|
||||
("tcl" . "tcl"))
|
||||
"Alist between org-babel languages and Pygments lexers.
|
||||
lang is downcased before assoc, so use lowercase to describe language available.
|
||||
See: http://orgmode.org/worg/org-contrib/babel/languages.html and
|
||||
http://pygments.org/docs/lexers/ for adding new languages to the mapping.")
|
||||
|
||||
;; Override the html export function to use pygments
|
||||
(defun org-html-src-block (src-block contents info)
|
||||
"Transcode a SRC-BLOCK element from Org to HTML.
|
||||
CONTENTS holds the contents of the item. INFO is a plist holding
|
||||
contextual information."
|
||||
(if (org-export-read-attribute :attr_html src-block :textarea)
|
||||
(org-html--textarea-block src-block)
|
||||
(let ((lang (org-element-property :language src-block))
|
||||
(code (org-element-property :value src-block))
|
||||
(code-html (org-html-format-code src-block info)))
|
||||
(if nikola-use-pygments
|
||||
(progn
|
||||
(unless lang (setq lang ""))
|
||||
(pygmentize (downcase lang) (org-html-decode-plain-text code)))
|
||||
code-html))))
|
||||
|
||||
;; Export images with custom link type
|
||||
(defun org-custom-link-img-url-export (path desc format)
|
||||
(cond
|
||||
((eq format 'html)
|
||||
(format "<img src=\"%s\" alt=\"%s\"/>" path desc))))
|
||||
(org-add-link-type "img-url" nil 'org-custom-link-img-url-export)
|
||||
|
||||
;; Export images with built-in file scheme
|
||||
(defun org-file-link-img-url-export (path desc format)
|
||||
(cond
|
||||
((eq format 'html)
|
||||
(format "<img src=\"/%s\" alt=\"%s\"/>" path desc))))
|
||||
(org-add-link-type "file" nil 'org-file-link-img-url-export)
|
||||
|
||||
;; Export function used by Nikola.
|
||||
(defun nikola-html-export (infile outfile)
|
||||
"Export the body only of the input file and write it to
|
||||
specified location."
|
||||
(with-current-buffer (find-file infile)
|
||||
(org-macro-replace-all nikola-macro-templates)
|
||||
(org-html-export-as-html nil nil t t)
|
||||
(write-file outfile nil)))
|
19
plugins/orgmode/macros.org
Normal file
|
@ -0,0 +1,19 @@
|
|||
# Macros for embedding media into org-mode posts.
|
||||
|
||||
#+MACRO: TEASER_END #+HTML: <!-- TEASER_END -->
|
||||
{{{TEASER_END}}}
|
||||
|
||||
#+MACRO: gist #+HTML: <script src="https://gist.github.com/$1.js"></script>
|
||||
{{{gist(2395294)}}}
|
||||
|
||||
#+MACRO: soundcloud #+HTML: <iframe width="$3" height="$2" scrolling="no" frameborder="no" src="https://w.soundcloud.com/player/?url=https://api.soundcloud.com/tracks/$1"> </iframe>
|
||||
{{{soundcloud(31824842,240,320)}}}
|
||||
|
||||
#+MACRO: soundcloud_playlist #+HTML: <iframe width="$3" height="$2" scrolling="no" frameborder="no" src="https://w.soundcloud.com/player/?url=https://api.soundcloud.com/playlists/$1"> </iframe>
|
||||
{{{soundcloud_playlist(694081,800,400)}}}
|
||||
|
||||
#+MACRO: vimeo #+HTML: <iframe src="https://player.vimeo.com/video/$1" width="$3" height="$2" frameborder="0" webkitAllowFullScreen mozallowfullscreen allowFullScreen> </iframe>
|
||||
{{{vimeo(85360039,240,320)}}}
|
||||
|
||||
#+MACRO: youtube #+HTML: <iframe width="$3" height="$2" src="https://www.youtube.com/embed/$1?rel=0&hd=1&wmode=transparent"></iframe>
|
||||
{{{youtube(8N_tupPBtWQ,240,320)}}}
|
15
plugins/orgmode/orgmode.plugin
Normal file
|
@ -0,0 +1,15 @@
|
|||
[Core]
|
||||
Name = orgmode
|
||||
Module = orgmode
|
||||
|
||||
|
||||
[Nikola]
|
||||
MinVersion = 6.0.0
|
||||
PluginCategory = PageCompiler
|
||||
|
||||
[Documentation]
|
||||
Author = Puneeth Chaganti
|
||||
Version = 0.3
|
||||
Website = http://plugins.getnikola.com/#orgmode
|
||||
Description = Compile org-mode markup into HTML using emacs.
|
||||
|
118
plugins/orgmode/orgmode.py
Normal file
|
@ -0,0 +1,118 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright © 2012-2013 Puneeth Chaganti and others.
|
||||
|
||||
# Permission is hereby granted, free of charge, to any
|
||||
# person obtaining a copy of this software and associated
|
||||
# documentation files (the "Software"), to deal in the
|
||||
# Software without restriction, including without limitation
|
||||
# the rights to use, copy, modify, merge, publish,
|
||||
# distribute, sublicense, and/or sell copies of the
|
||||
# Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice
|
||||
# shall be included in all copies or substantial portions of
|
||||
# the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
|
||||
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
|
||||
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
|
||||
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
|
||||
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
""" Implementation of compile_html based on Emacs Org-mode.
|
||||
|
||||
You will need to install emacs and org-mode (v8.x or greater).
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import unicode_literals
|
||||
import io
|
||||
import os
|
||||
from os.path import abspath, dirname, join
|
||||
import shlex
|
||||
import subprocess
|
||||
|
||||
try:
|
||||
from collections import OrderedDict
|
||||
except ImportError:
|
||||
OrderedDict = dict # NOQA
|
||||
|
||||
from nikola.plugin_categories import PageCompiler
|
||||
from nikola.utils import req_missing, makedirs
|
||||
|
||||
# v6 compat
|
||||
try:
|
||||
from nikola.utils import write_metadata
|
||||
except ImportError:
|
||||
write_metadata = None # NOQA
|
||||
|
||||
|
||||
class CompileOrgmode(PageCompiler):
    """Compile org-mode markup into HTML using emacs.

    Runs a batch emacs process loaded with the plugin's ``init.el`` to
    export the org source to HTML, then post-processes the emitted HTML
    through Nikola's shortcode machinery before writing it back in place.
    """

    name = "orgmode"

    def compile(self, source, dest, is_two_file=True, post=None, lang=None):
        """Compile the source file into HTML and save as dest.

        :param source: path of the org-mode input file.
        :param dest: path the rendered HTML is written to.
        :param post: post object owning this source, if known; used to
            record shortcode dependencies in its depfile.
        :raises Exception: if emacs exits with a non-zero return code.
        """
        makedirs(os.path.dirname(dest))
        try:
            command = [
                'emacs', '--batch',
                '-l', join(dirname(abspath(__file__)), 'init.el'),
                '--eval', '(nikola-html-export "{0}" "{1}")'.format(
                    abspath(source), abspath(dest))
            ]

            # Workaround for Windows: paths contain backslashes, which
            # must be doubled inside the elisp string in the --eval form.
            if os.name == 'nt':
                command[5] = command[5].replace("\\", "\\\\")

            subprocess.check_call(command)
            # Emacs wrote the raw HTML to dest; re-read it, expand Nikola
            # shortcodes, and write the final output back over the file.
            with io.open(dest, 'r', encoding='utf-8') as inf:
                output, shortcode_deps = self.site.apply_shortcodes(
                    inf.read(), extra_context={'post': post})
            with io.open(dest, 'w', encoding='utf-8') as outf:
                outf.write(output)
            if post is None:
                if shortcode_deps:
                    self.logger.error(
                        "Cannot save dependencies for post {0} (post unknown)",
                        source)
            else:
                post._depfile[dest] += shortcode_deps
        except OSError as e:
            import errno
            if e.errno == errno.ENOENT:
                # emacs binary not found -- report the missing non-Python
                # requirements instead of a bare traceback.
                req_missing(['emacs', 'org-mode'],
                            'use the orgmode compiler', python=False)
            else:
                # BUG FIX: previously any other OSError (permissions,
                # etc.) was silently swallowed and compile() returned as
                # if it had succeeded; re-raise so real failures surface.
                raise
        except subprocess.CalledProcessError as e:
            raise Exception('''Cannot compile {0} -- bad org-mode configuration (return code {1})
The command is {2}'''.format(source, e.returncode, ' '.join(shlex.quote(arg) for arg in e.cmd)))

    def create_post(self, path, content=None, onefile=False, is_page=False, **kw):
        """Create post file with optional metadata."""
        metadata = OrderedDict()
        metadata.update(self.default_metadata)
        metadata.update(kw)
        makedirs(os.path.dirname(path))

        with io.open(path, "w+", encoding="utf-8") as fd:
            if onefile:
                # Wrap metadata in an org comment block so it is not
                # rendered into the exported HTML.
                fd.write("#+BEGIN_COMMENT\n")
                if write_metadata:
                    fd.write(write_metadata(metadata))
                else:
                    # v6 compat: write_metadata is unavailable; emit the
                    # reST-style field lines by hand.
                    for k, v in metadata.items():
                        fd.write('.. {0}: {1}\n'.format(k, v))
                fd.write("#+END_COMMENT\n")
                fd.write("\n\n")

            if content:
                fd.write(content)
            else:
                fd.write('Write your post here.')
|
2
plugins/orgmode/requirements-nonpy.txt
Normal file
|
@ -0,0 +1,2 @@
|
|||
Emacs::https://www.gnu.org/software/emacs/
|
||||
Org-mode::http://orgmode.org/
|
237
posts/backup/automating-borg.org
Normal file
|
@ -0,0 +1,237 @@
|
|||
#+BEGIN_COMMENT
|
||||
.. title: Automating Borg
|
||||
.. date: 2020-02-02
|
||||
.. slug: automating-borg
|
||||
.. updated: 2020-02-02
|
||||
.. status: published
|
||||
.. tags: backup, borgbackup, borg, borgmatic
|
||||
.. category: backup
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: We've had a look at **Borg** before, let's find out how to automate it.
|
||||
.. type: text
|
||||
#+END_COMMENT
|
||||
|
||||
In the previous blog post entitled "{{% doc %}}borgbackup{{% /doc %}}", I talked about *borg*.
|
||||
If you read that post, you would've noticed that *borg* has a lot of features.
|
||||
With a lot of features come a lot of automation.
|
||||
|
||||
If you were thinking about using *borg*, you should either make a /simple cron/ or you're gonna have to write an elaborate script to take care of all the different steps.
|
||||
|
||||
What if I told you there's another way ? An easier way ! The *Borgmatic* way... What would you say ?
|
||||
|
||||
* Borgmatic
|
||||
*Borgmatic* is defined on their [[https://torsion.org/borgmatic/][website]] as follows.
|
||||
|
||||
#+BEGIN_QUOTE
|
||||
borgmatic is simple, configuration-driven backup software for servers
|
||||
and workstations. Protect your files with client-side encryption.
|
||||
Backup your databases too. Monitor it all with integrated third-party
|
||||
services.
|
||||
#+END_QUOTE
|
||||
|
||||
If you go down to it, *borgmatic* uses *borg*'s /API/ to automate a list of configurable /tasks/.
|
||||
This way, it saves you the trouble of writing your own scripts to automate these steps.
|
||||
|
||||
*Borgmatic* uses a /YAML/ configuration file. Let's configure a few tasks.
|
||||
|
||||
* Location
|
||||
First, let's start by configuring the locations that *borg* is going to be working with.
|
||||
|
||||
#+BEGIN_SRC yaml
|
||||
location:
|
||||
source_directories:
|
||||
- /home/
|
||||
|
||||
repositories:
|
||||
- user@backupserver:sourcehostname.borg
|
||||
|
||||
one_file_system: true
|
||||
|
||||
exclude_patterns:
|
||||
- /home/*/.cache
|
||||
- '*.pyc'
|
||||
#+END_SRC
|
||||
|
||||
This tells *borg* that we need to backup our =/home= directories excluding a few patterns.
|
||||
Let's not forget that we told *borg* where the repository is located at.
|
||||
|
||||
* Storage
|
||||
We need to configure the storage next.
|
||||
|
||||
#+BEGIN_SRC yaml
|
||||
storage:
|
||||
# Recommended
|
||||
# encryption_passcommand: secret-tool lookup borg-repository repo-name
|
||||
|
||||
encryption_passphrase: "ReallyStrongPassphrase"
|
||||
compression: zstd,15
|
||||
ssh_command: ssh -i /path/to/private/key
|
||||
borg_security_directory: /path/to/base/config/security
|
||||
archive_name_format: 'borgmatic-{hostname}-{now}'
|
||||
#+END_SRC
|
||||
|
||||
In this section, we tell borg a little bit of information about our repository.
|
||||
What are the credentials, where it can find them, etc.
|
||||
|
||||
The easy way is to go with a =passphrase=, but I recommend using an =encryption_passcommand= instead.
|
||||
I also use =zstd= for compression instead of =lz4=; you'd better do your research before changing the default.
|
||||
I also recommend, just as they do, the use of a security directory as well.
|
||||
|
||||
* Retention
|
||||
We can configure a retention for our backups, if we like.
|
||||
|
||||
#+BEGIN_SRC yaml
|
||||
retention:
|
||||
keep_hourly: 7
|
||||
keep_daily: 7
|
||||
keep_weekly: 4
|
||||
keep_monthly: 6
|
||||
keep_yearly: 2
|
||||
|
||||
prefix: "borgmatic-"
|
||||
#+END_SRC
|
||||
|
||||
The part of what to keep from /hourly/ to /daily/ is self explanatory.
|
||||
I would like to point out the =prefix= part as it is important.
|
||||
This is the /prefix/ that *borgmatic* uses to consider backups for *pruning*.
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<div class="admonition warning">
|
||||
<p class="admonition-title">warning</p>
|
||||
#+END_EXPORT
|
||||
Watch out for the retention =prefix=
|
||||
#+BEGIN_EXPORT html
|
||||
</div>
|
||||
#+END_EXPORT
|
||||
|
||||
* Consistency
|
||||
After the updates, we'd like to check our backups.
|
||||
|
||||
#+BEGIN_SRC yaml
|
||||
consistency:
|
||||
checks:
|
||||
- repository
|
||||
- archives
|
||||
|
||||
check_last: 3
|
||||
|
||||
prefix: "borgmatic-"
|
||||
#+END_SRC
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<div class="admonition warning">
|
||||
<p class="admonition-title">warning</p>
|
||||
#+END_EXPORT
|
||||
Watch out, again, for the consistency =prefix=
|
||||
#+BEGIN_EXPORT html
|
||||
</div>
|
||||
#+END_EXPORT
|
||||
|
||||
* Hooks
|
||||
Finally, hooks.
|
||||
|
||||
I'm going to talk about hooks a bit. Hooks can be used to backup *MySQL*, *PostgreSQL* or *MariaDB*.
|
||||
They can also be hooks for =on_error=, =before_backup=, =after_backup=, =before_everything= and =after_everything=.
|
||||
You can also hook to third party services which you can check on their webpage.
|
||||
|
||||
I deployed my own, so I configured my own.
|
||||
|
||||
* Borgmatic Configuration
|
||||
Let's put everything together now.
|
||||
|
||||
#+BEGIN_SRC yaml
|
||||
location:
|
||||
source_directories:
|
||||
- /home/
|
||||
|
||||
repositories:
|
||||
- user@backupserver:sourcehostname.borg
|
||||
|
||||
one_file_system: true
|
||||
|
||||
exclude_patterns:
|
||||
- /home/*/.cache
|
||||
- '*.pyc'
|
||||
|
||||
storage:
|
||||
# Recommended
|
||||
# encryption_passcommand: secret-tool lookup borg-repository repo-name
|
||||
|
||||
encryption_passphrase: "ReallyStrongPassphrase"
|
||||
compression: zstd,15
|
||||
ssh_command: ssh -i /path/to/private/key
|
||||
borg_security_directory: /path/to/base/config/security
|
||||
archive_name_format: 'borgmatic-{hostname}-{now}'
|
||||
|
||||
retention:
|
||||
keep_hourly: 7
|
||||
keep_daily: 7
|
||||
keep_weekly: 4
|
||||
keep_monthly: 6
|
||||
keep_yearly: 2
|
||||
|
||||
prefix: "borgmatic-"
|
||||
|
||||
consistency:
|
||||
checks:
|
||||
- repository
|
||||
- archives
|
||||
|
||||
check_last: 3
|
||||
|
||||
prefix: "borgmatic-"
|
||||
#+END_SRC
|
||||
|
||||
Now that we have everything together, let's save it in =/etc/borgmatic.d/home.yaml=.
|
||||
|
||||
* Usage
|
||||
If you have *borg* and *borgmatic* already installed on your system and the *borgmatic* configuration file in place, you can test it out.
|
||||
|
||||
You can create the repository.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
# borgmatic init -v 2
|
||||
#+END_EXAMPLE
|
||||
|
||||
You can list the backups for the repository.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
# borgmatic list --last 5
|
||||
borgmatic-home-2020-01-30T22:01:30 Thu, 2020-01-30 22:01:42 [0000000000000000000000000000000000000000000000000000000000000000]
|
||||
borgmatic-home-2020-01-31T22:02:12 Fri, 2020-01-31 22:02:24 [0000000000000000000000000000000000000000000000000000000000000000]
|
||||
borgmatic-home-2020-02-01T22:01:34 Sat, 2020-02-01 22:01:45 [0000000000000000000000000000000000000000000000000000000000000000]
|
||||
borgmatic-home-2020-02-02T16:01:22 Sun, 2020-02-02 16:01:32 [0000000000000000000000000000000000000000000000000000000000000000]
|
||||
borgmatic-home-2020-02-02T18:01:36 Sun, 2020-02-02 18:01:47 [0000000000000000000000000000000000000000000000000000000000000000]
|
||||
#+END_EXAMPLE
|
||||
|
||||
You could run a check.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
# borgmatic check -v 1
|
||||
/etc/borgmatic.d/home.yaml: Pinging Healthchecks start
|
||||
/borg/home: Running consistency checks
|
||||
Remote: Starting repository check
|
||||
Remote: Starting repository index check
|
||||
Remote: Completed repository check, no problems found.
|
||||
Starting archive consistency check...
|
||||
Analyzing archive borgmatic-home-2020-02-01T22:01:34 (1/3)
|
||||
Analyzing archive borgmatic-home-2020-02-02T16:01:22 (2/3)
|
||||
Analyzing archive borgmatic-home-2020-02-02T18:01:36 (3/3)
|
||||
Orphaned objects check skipped (needs all archives checked).
|
||||
Archive consistency check complete, no problems found.
|
||||
|
||||
summary:
|
||||
/etc/borgmatic.d/home.yaml: Successfully ran configuration file
|
||||
#+END_EXAMPLE
|
||||
|
||||
But most of all, if you simply run =borgmatic= without any parameters, it will run through the whole configuration and apply all the steps.
|
||||
|
||||
At this point, you can simply add the =borgmatic= command in a *cron* to run on an interval.
|
||||
The other options would be to configure a =systemd= *timer* and *service* to run this on an interval.
|
||||
The latter is usually provided to you if you used your *package manager* to install *borgmatic*.
|
||||
|
||||
* Conclusion
|
||||
If you've checked *borg* and found it too much work to script, give *borgmatic* a try.
|
||||
I've been using borgmatic for a few weeks now with no issues at all.
|
||||
I recently hooked it to a monitoring system so I will have a better view on when it runs, how much time each run takes.
|
||||
Also, if any of my backups fail I get notified by email. I hope you enjoy *borg* and *borgmatic* as much as I am.
|
|
@ -1,247 +0,0 @@
|
|||
.. title: Automating Borg
|
||||
.. date: 2020-02-02
|
||||
.. slug: automating-borg
|
||||
.. updated: 2020-02-02
|
||||
.. status: published
|
||||
.. tags: backup, borgbackup, borg, borgmatic
|
||||
.. category: backup
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: We've had a look at **Borg** before, let's find out how to automate it.
|
||||
.. type: text
|
||||
|
||||
|
||||
In the :doc:`previous blog post <borgbackup>`, I talked about **borg**.
|
||||
If you read that post, you would've noticed that **borg** has a lot of features.
|
||||
With a lot of features come a lot of automation.
|
||||
|
||||
If you were thinking about using **borg**, you should either make a *simple cron*
|
||||
or you're gonna have to write an elaborate script to take care of all the different steps.
|
||||
|
||||
What if I told you there's another way ? An easier way ! The **Borgmatic** way...
|
||||
What would you say ?
|
||||
|
||||
.. TEASER_END
|
||||
|
||||
Borgmatic
|
||||
=========
|
||||
|
||||
**Borgmatic** is defined on their `website <https://torsion.org/borgmatic/>`_ as follows.
|
||||
|
||||
borgmatic is simple, configuration-driven backup software for servers and workstations.
|
||||
Protect your files with client-side encryption. Backup your databases too.
|
||||
Monitor it all with integrated third-party services.
|
||||
|
||||
If you go down to it, **borgmatic** uses **borg**'s *API* to automate a list of configurable *tasks*.
|
||||
This way, it saves you the trouble of writing your own scripts to automate these steps.
|
||||
|
||||
**Borgmatic** uses a *YAML* configuration file. Let's configure a few tasks.
|
||||
|
||||
Location
|
||||
========
|
||||
|
||||
First, let's start by configuring the locations that **borg** is going to be working with.
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
location:
|
||||
source_directories:
|
||||
- /home/
|
||||
|
||||
repositories:
|
||||
- user@backupserver:sourcehostname.borg
|
||||
|
||||
one_file_system: true
|
||||
|
||||
exclude_patterns:
|
||||
- /home/*/.cache
|
||||
- '*.pyc'
|
||||
|
||||
This tells **borg** that we need to backup our ``/home`` directories excluding a few patterns.
|
||||
Let's not forget that we told **borg** where the repository is located at.
|
||||
|
||||
Storage
|
||||
=======
|
||||
|
||||
We need to configure the storage next.
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
storage:
|
||||
# Recommended
|
||||
# encryption_passcommand: secret-tool lookup borg-repository repo-name
|
||||
|
||||
encryption_passphrase: "ReallyStrongPassphrase"
|
||||
compression: zstd,15
|
||||
ssh_command: ssh -i /path/to/private/key
|
||||
borg_security_directory: /path/to/base/config/security
|
||||
archive_name_format: 'borgmatic-{hostname}-{now}'
|
||||
|
||||
In this section, we tell borg a little big of information about our repository.
|
||||
What are the credentials, where it can find them, etc.
|
||||
|
||||
The easy way is to go with a ``passphrase``, but I recommend using an ``encryption_passcommand`` instead.
|
||||
I also use ``zstd`` for encryption instead of ``lz4``, you better do your research before you change the default.
|
||||
I also recommend, just as they do, the use of a security directory as well.
|
||||
|
||||
Retention
|
||||
=========
|
||||
|
||||
We can configure a retention for our backups, if we like.
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
retention:
|
||||
keep_hourly: 7
|
||||
keep_daily: 7
|
||||
keep_weekly: 4
|
||||
keep_monthly: 6
|
||||
keep_yearly: 2
|
||||
|
||||
prefix: "borgmatic-"
|
||||
|
||||
The part of what to keep from *hourly* to *daily* is self explanatory.
|
||||
I would like to point out the ``prefix`` part as it is important.
|
||||
This is the *prefix* that **borgmatic** uses to consider backups for **pruning**.
|
||||
|
||||
.. warning::
|
||||
|
||||
Watch out for the retention ``prefix``
|
||||
|
||||
Consistency
|
||||
===========
|
||||
|
||||
After the updates, we'd like to check our backups.
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
consistency:
|
||||
checks:
|
||||
- repository
|
||||
- archives
|
||||
|
||||
check_last: 3
|
||||
|
||||
prefix: "borgmatic-"
|
||||
|
||||
.. warning::
|
||||
|
||||
Watch out, again, for the consistency ``prefix``
|
||||
|
||||
Hooks
|
||||
=====
|
||||
|
||||
Finally, hooks.
|
||||
|
||||
I'm going to talk about hooks a bit. Hooks can be used to backup **MySQL**, **PostgreSQL** or **MariaDB**.
|
||||
They can also be hooks for ``on_error``, ``before_backup``, ``after_backup``, ``before_everything`` and ``after_everything``.
|
||||
You can also hook to third party services which you can check on their webpage.
|
||||
|
||||
I deployed my own, so I configured my own.
|
||||
|
||||
Borgmatic Configuration
|
||||
=======================
|
||||
|
||||
Let's put everything together now.
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
location:
|
||||
source_directories:
|
||||
- /home/
|
||||
|
||||
repositories:
|
||||
- user@backupserver:sourcehostname.borg
|
||||
|
||||
one_file_system: true
|
||||
|
||||
exclude_patterns:
|
||||
- /home/*/.cache
|
||||
- '*.pyc'
|
||||
|
||||
storage:
|
||||
# Recommended
|
||||
# encryption_passcommand: secret-tool lookup borg-repository repo-name
|
||||
|
||||
encryption_passphrase: "ReallyStrongPassphrase"
|
||||
compression: zstd,15
|
||||
ssh_command: ssh -i /path/to/private/key
|
||||
borg_security_directory: /path/to/base/config/security
|
||||
archive_name_format: 'borgmatic-{hostname}-{now}'
|
||||
|
||||
retention:
|
||||
keep_hourly: 7
|
||||
keep_daily: 7
|
||||
keep_weekly: 4
|
||||
keep_monthly: 6
|
||||
keep_yearly: 2
|
||||
|
||||
prefix: "borgmatic-"
|
||||
|
||||
consistency:
|
||||
checks:
|
||||
- repository
|
||||
- archives
|
||||
|
||||
check_last: 3
|
||||
|
||||
prefix: "borgmatic-"
|
||||
|
||||
|
||||
Now that we have everything together, let's save it in ``/etc/borgmatic.d/home.yaml``.
|
||||
|
||||
Usage
|
||||
=====
|
||||
|
||||
If you have **borg** and **borgmatic** already installed on your system and the **borgmatic** configuration file in place,
|
||||
you can test it out.
|
||||
|
||||
You can create the repository.
|
||||
|
||||
.. code:: text
|
||||
|
||||
# borgmatic init -v 2
|
||||
|
||||
You can list the backups for the repository.
|
||||
|
||||
.. code:: text
|
||||
|
||||
# borgmatic list --last 5
|
||||
borgmatic-home-2020-01-30T22:01:30 Thu, 2020-01-30 22:01:42 [0000000000000000000000000000000000000000000000000000000000000000]
|
||||
borgmatic-home-2020-01-31T22:02:12 Fri, 2020-01-31 22:02:24 [0000000000000000000000000000000000000000000000000000000000000000]
|
||||
borgmatic-home-2020-02-01T22:01:34 Sat, 2020-02-01 22:01:45 [0000000000000000000000000000000000000000000000000000000000000000]
|
||||
borgmatic-home-2020-02-02T16:01:22 Sun, 2020-02-02 16:01:32 [0000000000000000000000000000000000000000000000000000000000000000]
|
||||
borgmatic-home-2020-02-02T18:01:36 Sun, 2020-02-02 18:01:47 [0000000000000000000000000000000000000000000000000000000000000000]
|
||||
|
||||
You could run a check.
|
||||
|
||||
.. code:: text
|
||||
|
||||
# borgmatic check -v 1
|
||||
/etc/borgmatic.d/home.yaml: Pinging Healthchecks start
|
||||
/borg/home: Running consistency checks
|
||||
Remote: Starting repository check
|
||||
Remote: Starting repository index check
|
||||
Remote: Completed repository check, no problems found.
|
||||
Starting archive consistency check...
|
||||
Analyzing archive borgmatic-home-2020-02-01T22:01:34 (1/3)
|
||||
Analyzing archive borgmatic-home-2020-02-02T16:01:22 (2/3)
|
||||
Analyzing archive borgmatic-home-2020-02-02T18:01:36 (3/3)
|
||||
Orphaned objects check skipped (needs all archives checked).
|
||||
Archive consistency check complete, no problems found.
|
||||
|
||||
summary:
|
||||
/etc/borgmatic.d/home.yaml: Successfully ran configuration file
|
||||
|
||||
But most of all, if you simply run ``borgmatic`` without any parameters, it will run through the whole configuration and apply all the steps.
|
||||
|
||||
At this point, you can simply add the ``borgmatic`` command in a **cron** to run on an interval.
|
||||
The other options would be to configure a ``systemd`` **timer** and **service** to run this on an interval.
|
||||
The latter is usually provided to you if you used your **package manager** to install **borgmatic**.
|
||||
|
||||
Conclusion
|
||||
==========
|
||||
|
||||
If you've checked **borg** and found it too much work to script, give **borgmatic** a try.
|
||||
I've been using borgmatic for few weeks now with no issues at all.
|
||||
I recently hooked it to a monitoring system so I will have a better view on when it runs, how much time each run takes.
|
||||
Also, if any of my backups fail I get notified by email. I hope you enjoy **borg** and **borgmatic** as much as I am.
|
117
posts/backup/borgbackup.org
Normal file
|
@ -0,0 +1,117 @@
|
|||
#+BEGIN_COMMENT
|
||||
.. title: BorgBackup
|
||||
.. date: 2020-01-30
|
||||
.. slug: borgbackup
|
||||
.. updated: 2020-01-30
|
||||
.. status: published
|
||||
.. tags: backup, borgbackup, borg
|
||||
.. category: backup
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: It has been called the Holy Grail of backup! BorgBackup is coming to town.
|
||||
.. type: text
|
||||
#+END_COMMENT
|
||||
|
||||
I usually lurk around *Freenode* in a few projects that I use, can learn from and/or help with. This is a great opportunity to learn new things /all the time/.
|
||||
|
||||
This story is familiar in that manner, but that's where similarities diverge. Someone asked around =#Weechat= a question that caught my attention because it was, sort of, out of topic. The question was around how do you backup your stuff ?
|
||||
|
||||
{{{TEASER_END}}}
|
||||
|
||||
I mean if I were asked that, I would've mentioned revision controlled off-site repositories for the code that I have.
|
||||
For the personal stuff on the other hand, I would've admitted simple rudimentary solutions like =rsync=, =tar= and external drives.
|
||||
So I was sort of happy with my backup solution, it has worked. Plain and simple.
|
||||
|
||||
I have to admit that, by modern standards it might not offer the ability to go back in time to a certain point.
|
||||
But I use /file systems/ that offer /snapshot/ capabilities. I can recover from previous snapshots and send them somewhere safe.
|
||||
Archiving and encrypting those is not a simple process, wish it was. That limits storage possibilities if you care to keep your data private.
|
||||
|
||||
But if you know me, you'd know that I'm always open to new ways of doing things.
|
||||
|
||||
I can't remember exactly the conversation but the name *BorgBackup* was mentioned (thank you, whoever you are). That's when things changed.
|
||||
|
||||
* BorgBackup
|
||||
[[https://www.borgbackup.org/][Borg]] is defined as a
|
||||
|
||||
#+BEGIN_QUOTE
|
||||
Deduplicating archiver with compression and encryption
|
||||
#+END_QUOTE
|
||||
|
||||
Although this is a very accurate and encompassing definition, it doesn't really show you how /AWESOME/ this thing is.
|
||||
|
||||
I had to go to the docs first before I stumbled upon this video.
|
||||
|
||||
#+BEGIN_HTML
|
||||
<div class="custom-center" style="
|
||||
width: 50%;
|
||||
margin: 0 auto;
|
||||
">
|
||||
<script id="asciicast-133292" src="https://asciinema.org/a/133292.js" async></script>
|
||||
</div>
|
||||
#+END_HTML
|
||||
|
||||
It can be a bit difficult to follow the video, I understand.
|
||||
|
||||
This is why I decided to write this post, to sort of explain to you how *Borg* can backup your stuff.
|
||||
|
||||
* Encryption
|
||||
Oh yeah, that's the *first* thing I look at when I consider any suggested backup solution. *Borg* offers built-in /encryption/ and /authentication/. You can read about it in details in the [[https://borgbackup.readthedocs.io/en/stable/usage/init.html#encryption-modes][docs]].
|
||||
|
||||
So that's a check.
|
||||
|
||||
* Compression
|
||||
This is another thing I look for in a suggested backup solution. And I'm happy to report that *Borg* has this under the belt as well.
|
||||
*Borg* currently supports /LZ4/, /zlib/, /LZMA/ and /zstd/. You can also tune the level of compression. Pretty neat !
|
||||
|
||||
* Full Backup
|
||||
I've watched a few videos and read a bit of their documentation and they talk about *FULL BACKUP*.
|
||||
Which means every time you run *Borg*, it will take a full backup of your stuff. A full backup at that point in time, don't forget.
|
||||
The implication of this is that you have a versioned list of your backups, and you can go back in time to any of them.
|
||||
|
||||
Yes, you read that right. *Borg* does a full backup every time you run it. That's a pretty neat feature.
|
||||
|
||||
If you're a bit ahead of me, you were gonna say woooow there bud ! I have *Gigabytes* of data, what do you mean *FULL BACKUP*, you keep saying *FULL BACKUP*.
|
||||
|
||||
I mean *FULL BACKUP*, wait until you hear about the next feature.
|
||||
|
||||
* Deduplication
|
||||
Booyah ! It has deduplication. Ain't that awesome. I've watched a presentation by the project's original maintainer explain this.
|
||||
I have one thing to say. It's pretty good. How good, you may ask ?
|
||||
|
||||
My answer would be, good enough to fool me into thinking that it was taking snapshots of my data.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
-----------------------------------------------------------------------------
|
||||
Original size Compressed size Deduplicated size
|
||||
All archives: 34.59 GB 9.63 GB 1.28 GB
|
||||
Unique chunks Total chunks
|
||||
Chunk index: 47772 469277
|
||||
#+END_EXAMPLE
|
||||
|
||||
It wasn't until I dug in deeper into the matter that I understood that it was a full backup and the deduping taking care of the rest.
|
||||
|
||||
* Check
|
||||
*Borg* offers a way to verify the consistency of the repository and the archives within. This way, you can make sure that your backups haven't been corrupted.
|
||||
|
||||
This is a very good feature, and a must in my opinion from a backup solution. *Borg* has /YOU/ covered.
|
||||
|
||||
* Restore
|
||||
A backup solution is nothing if you can't get your data back.
|
||||
*Borg* has a few ways for you to get your data.
|
||||
You can either create an /archive/ file out of a backup. You can export a file, a directory or the whole directory tree from a backup.
|
||||
You can also, if you like, mount a backup and get stuff out.
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<div class="admonition warning">
|
||||
<p class="admonition-title">warning</p>
|
||||
#+END_EXPORT
|
||||
Mounting a *Borg* backup is done using /fuse/
|
||||
#+BEGIN_EXPORT html
|
||||
</div>
|
||||
#+END_EXPORT
|
||||
|
||||
* Conclusion
|
||||
*Borg* is a great tool for backup. It comes in an easily installable self-contained binary so you can use it, pretty much, anywhere giving you no excuse /whatsoever/ not to use it.
|
||||
Their documentation is very good, and *Borg* is easy to use.
|
||||
It offers you all the features you need to do off-site and on-site backups of all your important data.
|
||||
|
||||
I'll be testing *Borg* moving forward for my data. I'll make sure to report back anything I find, in the future, related to the subject.
|
|
@ -1,131 +0,0 @@
|
|||
.. title: BorgBackup
|
||||
.. date: 2020-01-30
|
||||
.. slug: borgbackup
|
||||
.. updated: 2020-01-30
|
||||
.. status: published
|
||||
.. tags: backup, borgbackup, borg
|
||||
.. category: backup
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: It has been called the Holy Grail of backup! BorgBackup is coming to town.
|
||||
.. type: text
|
||||
|
||||
I usually lurk around **Freenode** in a few projects that I use, can learn from and/or help with.
|
||||
This is a great opportunity to learn new things *all the time*.
|
||||
|
||||
This story is familiar in that manner, but that's where similarities diverge.
|
||||
Someone asked around ``#Weechat`` a question that caught my attention because it was, sort of, out of topic. The question was around how do you backup your stuff ?
|
||||
|
||||
.. TEASER_END
|
||||
|
||||
I mean if I were asked that, I would've mentioned revision controlled off-site repositories for the code that I have.
|
||||
For the personal stuff on the other hand, I would've admitted simple rudimentary solutions like ``rsync``, ``tar`` and external drives.
|
||||
So I was sort of happy with my backup solution, it has worked. Plain and simple.
|
||||
|
||||
I have to admit that, by modern standards it might not offer the ability to go back in time to a certain point.
|
||||
But I use *file systems* that offer *snapshot* capabilities. I can recover from previous snapshots and send them somewhere safe.
|
||||
Archiving and encrypting those is not a simple process, wish it was. That limits storage possibilities if you care to keep your data private.
|
||||
|
||||
But if you know me, you'd know that I'm always open to new ways of doing things.
|
||||
|
||||
I can't remember exactly the conversation but the name **BorgBackup** was mentioned (thank you however you are). That's when things changed.
|
||||
|
||||
|
||||
BorgBackup
|
||||
==========
|
||||
|
||||
`Borg <https://www.borgbackup.org/>`_ is defined as a
|
||||
|
||||
Deduplicating archiver with compression and encryption
|
||||
|
||||
Although this is a very accurate and encompassing definition, it doesn't really show you how *AWESOME* this thing is.
|
||||
|
||||
I had to go to the docs first before I stumbled upon this video.
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<div class="custom-center" style="
|
||||
width: 50%;
|
||||
margin: 0 auto;
|
||||
">
|
||||
<script id="asciicast-133292" src="https://asciinema.org/a/133292.js" async></script>
|
||||
</div>
|
||||
|
||||
It can be a bit difficult to follow the video, I understand.
|
||||
|
||||
This is why I decided to write this post, to sort of explain to you how **Borg** can backup your stuff.
|
||||
|
||||
Encryption
|
||||
==========
|
||||
|
||||
Oh yeah, that's the **first** thing I look at when I consider any suggested backup solution.
|
||||
**Borg** offers built-in *encryption* and *authentication*.
|
||||
You can read about it in details in the `docs <https://borgbackup.readthedocs.io/en/stable/usage/init.html#encryption-modes>`_.
|
||||
|
||||
So that's a check.
|
||||
|
||||
Compression
|
||||
===========
|
||||
|
||||
This is another thing I look for in a suggested backup solution. And I'm happy to report that **Borg** has this under the belt as well.
|
||||
**Borg** currently supports *LZ4*, *zlib*, *LZMA* and *zstd*. You can also tune the level of compression. Pretty neat !
|
||||
|
||||
Full Backup
|
||||
===========
|
||||
|
||||
I've watched a few videos and read a bit of their documentation and they talk about **FULL BACKUP**.
|
||||
Which means every time you run **Borg**, it will take a full backup of your stuff. A full backup at that point in time, don't forget.
|
||||
The implication of this is that you have a versioned list of your backups, and you can go back in time to any of them.
|
||||
|
||||
Yes, you read that right. **Borg** does a full backup every time you run it. That's a pretty neat feature.
|
||||
|
||||
If you're a bit ahead of me, you were gonna say woooow there bud ! I have **Gigabytes** of data, what do you mean **FULL BACKUP**, you keep saying **FULL BACKUP**.
|
||||
|
||||
I mean **FULL BACKUP**, wait until you hear about the next feature.
|
||||
|
||||
Deduplication
|
||||
=============
|
||||
|
||||
Booyah ! It has deduplication. Ain't that awesome. I've watched a presentation by the project's original maintainer explain this.
|
||||
I have one thing to say. It's pretty good. How good, you may ask ?
|
||||
|
||||
My answer would be, good enough to fool me into thinking that it was taking snapshots of my data.
|
||||
|
||||
.. code:: text
|
||||
|
||||
-----------------------------------------------------------------------------
|
||||
Original size Compressed size Deduplicated size
|
||||
All archives: 34.59 GB 9.63 GB 1.28 GB
|
||||
Unique chunks Total chunks
|
||||
Chunk index: 47772 469277
|
||||
|
||||
|
||||
It wasn't until I dug in deeper into the matter that I understood that it was a full backup and the deduping taking care of the rest.
|
||||
|
||||
Check
|
||||
=====
|
||||
|
||||
**Borg** offers a way to vefiry the consistency of the repository and the archives within.
|
||||
This way, you can make sure that your backups haven't been corrupted.
|
||||
|
||||
This is a very good feature, and a must in my opinion from a backup solution. **Borg** has *YOU* covered.
|
||||
|
||||
Restore
|
||||
=======
|
||||
|
||||
A backup solution is nothing if you can't get your data backup.
|
||||
**Borg** has a few ways for you to get your data. You can either create an *archive* file out of a backup.
|
||||
You can export a file, a directory or the whole directory tree from a backup.
|
||||
You can also, if you like, mount a backup and get stuff out.
|
||||
|
||||
.. warning::
|
||||
|
||||
Mounting a **Borg** backup is done using *fuse*
|
||||
|
||||
Conclusion
|
||||
==========
|
||||
|
||||
**Borg** is a great tool for backup. It comes in an easily installable self-contained binary so you can use it, pretty much,
|
||||
anywhere giving you no excuse *whatsoever* not to use it. Their documentation is very good, and **Borg** is easy to use.
|
||||
It offers you all the features you need to do off-site and on-site backups of all your important data.
|
||||
|
||||
I'll be testing **Borg** moving forward for my data. I'll make sure to report back anything I find, in the future, related to the subject.
|
480
posts/configuration-management/ansible-testing-with-molecule.org
Normal file
|
@ -0,0 +1,480 @@
|
|||
#+BEGIN_COMMENT
|
||||
.. title: Ansible testing with Molecule
|
||||
.. date: 2019-01-11
|
||||
.. slug: ansible-testing-with-molecule
|
||||
.. updated: 2019-06-21
|
||||
.. status: published
|
||||
.. tags: configuration management, ansible, molecule,
|
||||
.. category: configuration management
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: A fast way to create a testable ansible role using molecule.
|
||||
.. type: text
|
||||
#+END_COMMENT
|
||||
|
||||
When I first started using [[https://www.ansible.com/][ansible]], I did not know about [[https://molecule.readthedocs.io/en/latest/][molecule]]. It was a bit daunting to start a /role/ from scratch and trying to develop it without having the ability to test it. Then a co-worker of mine told me about molecule and everything changed.
|
||||
|
||||
{{{TEASER_END}}}
|
||||
|
||||
I do not have any of the tools I need installed on this machine, so I will go through, step by step, how I set up ansible and molecule on any new machine I come across for writing ansible roles.
|
||||
|
||||
* Requirements
|
||||
What we are trying to achieve in this post, is a working ansible role that can be tested inside a docker container. To be able to achieve that, we need to install docker on the system. Follow the instructions on [[https://docs.docker.com/install/][installing docker]] found on the docker website.
|
||||
|
||||
* Good Practices
|
||||
First things first. Let's start by making sure that we have python installed properly on the system.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ python --version
|
||||
Python 3.7.1
|
||||
#+END_EXAMPLE
|
||||
|
||||
Because in this case I have /python3/ installed, I can create a /virtualenv/ easier without the use of external tools.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
# Create the directory to work with
|
||||
$ mkdir -p sandbox/test-roles
|
||||
# Navigate to the directory
|
||||
$ cd sandbox/test-roles/
|
||||
# Create the virtualenv
|
||||
~/sandbox/test-roles $ python -m venv .ansible-venv
|
||||
# Activate the virtualenv
|
||||
~/sandbox/test-roles $ source .ansible-venv/bin/activate
|
||||
# Check that your virtualenv activated properly
|
||||
(.ansible-venv) ~/sandbox/test-roles $ which python
|
||||
/home/elijah/sandbox/test-roles/.ansible-venv/bin/python
|
||||
#+END_EXAMPLE
|
||||
|
||||
At this point, we can install the required dependencies.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ pip install ansible molecule docker
|
||||
Collecting ansible
|
||||
Downloading https://files.pythonhosted.org/packages/56/fb/b661ae256c5e4a5c42859860f59f9a1a0b82fbc481306b30e3c5159d519d/ansible-2.7.5.tar.gz (11.8MB)
|
||||
100% |████████████████████████████████| 11.8MB 3.8MB/s
|
||||
Collecting molecule
|
||||
Downloading https://files.pythonhosted.org/packages/84/97/e5764079cb7942d0fa68b832cb9948274abb42b72d9b7fe4a214e7943786/molecule-2.19.0-py3-none-any.whl (180kB)
|
||||
100% |████████████████████████████████| 184kB 2.2MB/s
|
||||
|
||||
...
|
||||
|
||||
Successfully built ansible ansible-lint anyconfig cerberus psutil click-completion tabulate tree-format pathspec future pycparser arrow
|
||||
Installing collected packages: MarkupSafe, jinja2, PyYAML, six, pycparser, cffi, pynacl, idna, asn1crypto, cryptography, bcrypt, paramiko, ansible, pbr, git-url-parse, monotonic, fasteners, click, colorama, sh, python-gilt, ansible-lint, pathspec, yamllint, anyconfig, cerberus, psutil, more-itertools, py, attrs, pluggy, atomicwrites, pytest, testinfra, ptyprocess, pexpect, click-completion, tabulate, future, chardet, binaryornot, poyo, urllib3, certifi, requests, python-dateutil, arrow, jinja2-time, whichcraft, cookiecutter, tree-format, molecule, docker-pycreds, websocket-client, docker
|
||||
Successfully installed MarkupSafe-1.1.0 PyYAML-3.13 ansible-2.7.5 ansible-lint-3.4.23 anyconfig-0.9.7 arrow-0.13.0 asn1crypto-0.24.0 atomicwrites-1.2.1 attrs-18.2.0 bcrypt-3.1.5 binaryornot-0.4.4 cerberus-1.2 certifi-2018.11.29 cffi-1.11.5 chardet-3.0.4 click-6.7 click-completion-0.3.1 colorama-0.3.9 cookiecutter-1.6.0 cryptography-2.4.2 docker-3.7.0 docker-pycreds-0.4.0 fasteners-0.14.1 future-0.17.1 git-url-parse-1.1.0 idna-2.8 jinja2-2.10 jinja2-time-0.2.0 molecule-2.19.0 monotonic-1.5 more-itertools-5.0.0 paramiko-2.4.2 pathspec-0.5.9 pbr-4.1.0 pexpect-4.6.0 pluggy-0.8.1 poyo-0.4.2 psutil-5.4.6 ptyprocess-0.6.0 py-1.7.0 pycparser-2.19 pynacl-1.3.0 pytest-4.1.0 python-dateutil-2.7.5 python-gilt-1.2.1 requests-2.21.0 sh-1.12.14 six-1.11.0 tabulate-0.8.2 testinfra-1.16.0 tree-format-0.1.2 urllib3-1.24.1 websocket-client-0.54.0 whichcraft-0.5.2 yamllint-1.11.1
|
||||
#+END_EXAMPLE
|
||||
|
||||
* Creating your first ansible role
|
||||
Once all the steps above are complete, we can start by creating our first ansible role.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ molecule init role -r example-role
|
||||
--> Initializing new role example-role...
|
||||
Initialized role in /home/elijah/sandbox/test-roles/example-role successfully.
|
||||
|
||||
$ tree example-role/
|
||||
example-role/
|
||||
├── defaults
|
||||
│ └── main.yml
|
||||
├── handlers
|
||||
│ └── main.yml
|
||||
├── meta
|
||||
│ └── main.yml
|
||||
├── molecule
|
||||
│ └── default
|
||||
│ ├── Dockerfile.j2
|
||||
│ ├── INSTALL.rst
|
||||
│ ├── molecule.yml
|
||||
│ ├── playbook.yml
|
||||
│ └── tests
|
||||
│ ├── __pycache__
|
||||
│ │ └── test_default.cpython-37.pyc
|
||||
│ └── test_default.py
|
||||
├── README.md
|
||||
├── tasks
|
||||
│ └── main.yml
|
||||
└── vars
|
||||
└── main.yml
|
||||
|
||||
9 directories, 12 files
|
||||
#+END_EXAMPLE
|
||||
|
||||
You can find what each directory is for and how ansible works by visiting [[https://docs.ansible.com][docs.ansible.com]].
|
||||
|
||||
** =meta/main.yml=
|
||||
The meta file needs to be modified and filled with information about the role. This is not a required file to modify if you are keeping this for yourself, for example. But it is a good idea to have as much information as possible if this is going to be released. In my case, I don't need any fanciness as this is just sample code.
|
||||
|
||||
#+BEGIN_SRC yaml
|
||||
---
|
||||
galaxy_info:
|
||||
author: Elia el Lazkani
|
||||
description: This is an example ansible role to showcase molecule at work
|
||||
license: license (BDS-2)
|
||||
min_ansible_version: 2.7
|
||||
galaxy_tags: []
|
||||
dependencies: []
|
||||
#+END_SRC
|
||||
|
||||
** =tasks/main.yml=
|
||||
This is where the magic is set in motion. Tasks are the smallest entities in a role that do small and idempotent actions. Let's write a few simple tasks to create a user and install a service.
|
||||
|
||||
#+BEGIN_SRC yaml
|
||||
---
|
||||
# Create the user example
|
||||
- name: Create 'example' user
|
||||
user:
|
||||
name: example
|
||||
comment: Example user
|
||||
shell: /bin/bash
|
||||
state: present
|
||||
create_home: yes
|
||||
home: /home/example
|
||||
|
||||
# Install nginx
|
||||
- name: Install nginx
|
||||
apt:
|
||||
name: nginx
|
||||
state: present
|
||||
update_cache: yes
|
||||
notify: Restart nginx
|
||||
#+END_SRC
|
||||
|
||||
** =handlers/main.yml=
|
||||
If you noticed, we are notifying a handler to be called after installing /nginx/. All handlers notified will run after all the tasks complete and each handler will only run once. This is a good way to make sure that you don't restart /nginx/ multiple times if you call the handler more than once.
|
||||
|
||||
#+BEGIN_SRC yaml
|
||||
---
|
||||
# Handler to restart nginx
|
||||
- name: Restart nginx
|
||||
service:
|
||||
name: nginx
|
||||
state: restarted
|
||||
#+END_SRC
|
||||
|
||||
** =molecule/default/molecule.yml=
|
||||
It's time to configure molecule to do what we need. We need to start an ubuntu docker container, so we need to specify that in the molecule YAML file. All we need to do is change the image line to specify that we want an =ubuntu:bionic= image.
|
||||
|
||||
#+BEGIN_SRC yaml
|
||||
---
|
||||
dependency:
|
||||
name: galaxy
|
||||
driver:
|
||||
name: docker
|
||||
lint:
|
||||
name: yamllint
|
||||
platforms:
|
||||
- name: instance
|
||||
image: ubuntu:bionic
|
||||
provisioner:
|
||||
name: ansible
|
||||
lint:
|
||||
name: ansible-lint
|
||||
scenario:
|
||||
name: default
|
||||
verifier:
|
||||
name: testinfra
|
||||
lint:
|
||||
name: flake8
|
||||
#+END_SRC
|
||||
|
||||
** =molecule/default/playbook.yml=
|
||||
This is the playbook that molecule will run. Make sure that you have all the steps that you need here. I will keep this as is.
|
||||
|
||||
#+BEGIN_SRC yaml
|
||||
---
|
||||
- name: Converge
|
||||
hosts: all
|
||||
roles:
|
||||
- role: example-role
|
||||
#+END_SRC
|
||||
|
||||
* First Role Pass
|
||||
It is time to test our role and see what's going on.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
(.ansible-role) ~/sandbox/test-roles/example-role/ $ molecule converge
|
||||
--> Validating schema /home/elijah/sandbox/test-roles/example-role/molecule/default/molecule.yml.
|
||||
Validation completed successfully.
|
||||
--> Test matrix
|
||||
|
||||
└── default
|
||||
├── dependency
|
||||
├── create
|
||||
├── prepare
|
||||
└── converge
|
||||
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'dependency'
|
||||
Skipping, missing the requirements file.
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'create'
|
||||
|
||||
PLAY [Create] ******************************************************************
|
||||
|
||||
TASK [Log into a Docker registry] **********************************************
|
||||
skipping: [localhost] => (item=None)
|
||||
|
||||
TASK [Create Dockerfiles from image names] *************************************
|
||||
changed: [localhost] => (item=None)
|
||||
changed: [localhost]
|
||||
|
||||
TASK [Discover local Docker images] ********************************************
|
||||
ok: [localhost] => (item=None)
|
||||
ok: [localhost]
|
||||
|
||||
TASK [Build an Ansible compatible image] ***************************************
|
||||
changed: [localhost] => (item=None)
|
||||
changed: [localhost]
|
||||
|
||||
TASK [Create docker network(s)] ************************************************
|
||||
|
||||
TASK [Create molecule instance(s)] *********************************************
|
||||
changed: [localhost] => (item=None)
|
||||
changed: [localhost]
|
||||
|
||||
TASK [Wait for instance(s) creation to complete] *******************************
|
||||
changed: [localhost] => (item=None)
|
||||
changed: [localhost]
|
||||
|
||||
PLAY RECAP *********************************************************************
|
||||
localhost : ok=5 changed=4 unreachable=0 failed=0
|
||||
|
||||
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'prepare'
|
||||
Skipping, prepare playbook not configured.
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'converge'
|
||||
|
||||
PLAY [Converge] ****************************************************************
|
||||
|
||||
TASK [Gathering Facts] *********************************************************
|
||||
ok: [instance]
|
||||
|
||||
TASK [example-role : Create 'example' user] ************************************
|
||||
changed: [instance]
|
||||
|
||||
TASK [example-role : Install nginx] ********************************************
|
||||
changed: [instance]
|
||||
|
||||
RUNNING HANDLER [example-role : Restart nginx] *********************************
|
||||
changed: [instance]
|
||||
|
||||
PLAY RECAP *********************************************************************
|
||||
instance : ok=4 changed=3 unreachable=0 failed=0
|
||||
#+END_EXAMPLE
|
||||
|
||||
It looks like the *converge* step succeeded.
|
||||
|
||||
* Writing Tests
|
||||
It is always a good practice to write unittests when you're writing code. Ansible roles should not be an exception. Molecule offers a way to run tests, which you can think of as unittest, to make sure that what the role gives you is what you were expecting. This helps future development of the role and keeps you from falling into previously solved traps.
|
||||
|
||||
** =molecule/default/tests/test_default.py=
|
||||
Molecule leverages the [[https://testinfra.readthedocs.io/en/latest/][testinfra]] project to run its tests. You can use other tools if you so wish, and there are many. In this example we will be using /testinfra/.
|
||||
|
||||
#+BEGIN_SRC python
|
||||
import os
|
||||
|
||||
import testinfra.utils.ansible_runner
|
||||
|
||||
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
|
||||
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
|
||||
|
||||
|
||||
def test_hosts_file(host):
|
||||
f = host.file('/etc/hosts')
|
||||
|
||||
assert f.exists
|
||||
assert f.user == 'root'
|
||||
assert f.group == 'root'
|
||||
|
||||
|
||||
def test_user_created(host):
|
||||
user = host.user("example")
|
||||
assert user.name == "example"
|
||||
assert user.home == "/home/example"
|
||||
|
||||
|
||||
def test_user_home_exists(host):
|
||||
user_home = host.file("/home/example")
|
||||
assert user_home.exists
|
||||
assert user_home.is_directory
|
||||
|
||||
|
||||
def test_nginx_is_installed(host):
|
||||
nginx = host.package("nginx")
|
||||
assert nginx.is_installed
|
||||
|
||||
|
||||
def test_nginx_running_and_enabled(host):
|
||||
nginx = host.service("nginx")
|
||||
assert nginx.is_running
|
||||
#+END_SRC
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<div class="admonition warning">
|
||||
<p class="admonition-title">warning</p>
|
||||
#+END_EXPORT
|
||||
Uncomment =truthy: disable= in =.yamllint= found at the base of the role.
|
||||
#+BEGIN_EXPORT html
|
||||
</div>
|
||||
#+END_EXPORT
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
(.ansible_venv) ~/sandbox/test-roles/example-role $ molecule test
|
||||
--> Validating schema /home/elijah/sandbox/test-roles/example-role/molecule/default/molecule.yml.
|
||||
Validation completed successfully.
|
||||
--> Test matrix
|
||||
|
||||
└── default
|
||||
├── lint
|
||||
├── destroy
|
||||
├── dependency
|
||||
├── syntax
|
||||
├── create
|
||||
├── prepare
|
||||
├── converge
|
||||
├── idempotence
|
||||
├── side_effect
|
||||
├── verify
|
||||
└── destroy
|
||||
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'lint'
|
||||
--> Executing Yamllint on files found in /home/elijah/sandbox/test-roles/example-role/...
|
||||
Lint completed successfully.
|
||||
--> Executing Flake8 on files found in /home/elijah/sandbox/test-roles/example-role/molecule/default/tests/...
|
||||
/home/elijah/.virtualenvs/world/lib/python3.7/site-packages/pycodestyle.py:113: FutureWarning: Possible nested set at position 1
|
||||
EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]')
|
||||
Lint completed successfully.
|
||||
--> Executing Ansible Lint on /home/elijah/sandbox/test-roles/example-role/molecule/default/playbook.yml...
|
||||
Lint completed successfully.
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'destroy'
|
||||
|
||||
PLAY [Destroy] *****************************************************************
|
||||
|
||||
TASK [Destroy molecule instance(s)] ********************************************
|
||||
changed: [localhost] => (item=None)
|
||||
changed: [localhost]
|
||||
|
||||
TASK [Wait for instance(s) deletion to complete] *******************************
|
||||
ok: [localhost] => (item=None)
|
||||
ok: [localhost]
|
||||
|
||||
TASK [Delete docker network(s)] ************************************************
|
||||
|
||||
PLAY RECAP *********************************************************************
|
||||
localhost : ok=2 changed=1 unreachable=0 failed=0
|
||||
|
||||
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'dependency'
|
||||
Skipping, missing the requirements file.
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'syntax'
|
||||
|
||||
playbook: /home/elijah/sandbox/test-roles/example-role/molecule/default/playbook.yml
|
||||
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'create'
|
||||
|
||||
PLAY [Create] ******************************************************************
|
||||
|
||||
TASK [Log into a Docker registry] **********************************************
|
||||
skipping: [localhost] => (item=None)
|
||||
|
||||
TASK [Create Dockerfiles from image names] *************************************
|
||||
changed: [localhost] => (item=None)
|
||||
changed: [localhost]
|
||||
|
||||
TASK [Discover local Docker images] ********************************************
|
||||
ok: [localhost] => (item=None)
|
||||
ok: [localhost]
|
||||
|
||||
TASK [Build an Ansible compatible image] ***************************************
|
||||
changed: [localhost] => (item=None)
|
||||
changed: [localhost]
|
||||
|
||||
TASK [Create docker network(s)] ************************************************
|
||||
|
||||
TASK [Create molecule instance(s)] *********************************************
|
||||
changed: [localhost] => (item=None)
|
||||
changed: [localhost]
|
||||
|
||||
TASK [Wait for instance(s) creation to complete] *******************************
|
||||
changed: [localhost] => (item=None)
|
||||
changed: [localhost]
|
||||
|
||||
PLAY RECAP *********************************************************************
|
||||
localhost : ok=5 changed=4 unreachable=0 failed=0
|
||||
|
||||
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'prepare'
|
||||
Skipping, prepare playbook not configured.
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'converge'
|
||||
|
||||
PLAY [Converge] ****************************************************************
|
||||
|
||||
TASK [Gathering Facts] *********************************************************
|
||||
ok: [instance]
|
||||
|
||||
TASK [example-role : Create 'example' user] ************************************
|
||||
changed: [instance]
|
||||
|
||||
TASK [example-role : Install nginx] ********************************************
|
||||
changed: [instance]
|
||||
|
||||
RUNNING HANDLER [example-role : Restart nginx] *********************************
|
||||
changed: [instance]
|
||||
|
||||
PLAY RECAP *********************************************************************
|
||||
instance : ok=4 changed=3 unreachable=0 failed=0
|
||||
|
||||
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'idempotence'
|
||||
Idempotence completed successfully.
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'side_effect'
|
||||
Skipping, side effect playbook not configured.
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'verify'
|
||||
--> Executing Testinfra tests found in /home/elijah/sandbox/test-roles/example-role/molecule/default/tests/...
|
||||
============================= test session starts ==============================
|
||||
platform linux -- Python 3.7.1, pytest-4.1.0, py-1.7.0, pluggy-0.8.1
|
||||
rootdir: /home/elijah/sandbox/test-roles/example-role/molecule/default, inifile:
|
||||
plugins: testinfra-1.16.0
|
||||
collected 5 items
|
||||
|
||||
tests/test_default.py ..... [100%]
|
||||
|
||||
=============================== warnings summary ===============================
|
||||
|
||||
...
|
||||
|
||||
==================== 5 passed, 7 warnings in 27.37 seconds =====================
|
||||
Verifier completed successfully.
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'destroy'
|
||||
|
||||
PLAY [Destroy] *****************************************************************
|
||||
|
||||
TASK [Destroy molecule instance(s)] ********************************************
|
||||
changed: [localhost] => (item=None)
|
||||
changed: [localhost]
|
||||
|
||||
TASK [Wait for instance(s) deletion to complete] *******************************
|
||||
changed: [localhost] => (item=None)
|
||||
changed: [localhost]
|
||||
|
||||
TASK [Delete docker network(s)] ************************************************
|
||||
|
||||
PLAY RECAP *********************************************************************
|
||||
localhost : ok=2 changed=2 unreachable=0 failed=0
|
||||
#+END_EXAMPLE
|
||||
|
||||
I have a few warning messages (that's likely because I am using /python 3.7/ and some of the libraries still don't fully support the new standards released with it) but all my tests passed.
|
||||
|
||||
* Conclusion
|
||||
Molecule is a great tool to test ansible roles quickly and while developing them. It also comes bundled with a bunch of other features from different projects that will test all aspects of your ansible code. I suggest you start using it when writing new ansible roles.
|
|
@ -1,500 +0,0 @@
|
|||
.. title: Ansible testing with Molecule
|
||||
.. date: 2019-01-11
|
||||
.. slug: ansible-testing-with-molecule
|
||||
.. updated: 2019-06-21
|
||||
.. status: published
|
||||
.. tags: configuration management, ansible, molecule,
|
||||
.. category: configuration management
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: A fast way to create a testable ansible role using molecule.
|
||||
.. type: text
|
||||
|
||||
|
||||
When I first started using `ansible <https://www.ansible.com/>`_, I did not know about `molecule <https://molecule.readthedocs.io/en/latest/>`_. It was a bit daunting to start a *role* from scratch and trying to develop it without having the ability to test it. Then a co-worker of mine told me about molecule and everything changed.
|
||||
|
||||
.. TEASER_END
|
||||
|
||||
I do not have any of the tools I need installed on this machine, so I will go through, step by step, how I set up ansible and molecule on any new machine I come across for writing ansible roles.
|
||||
|
||||
Requirements
|
||||
============
|
||||
|
||||
What we are trying to achieve in this post, is a working ansible role that can be tested inside a docker container. To be able to achieve that, we need to install docker on the system. Follow the instructions on `installing docker <https://docs.docker.com/install/>`_ found on the docker website.
|
||||
|
||||
Good Practices
|
||||
==============
|
||||
|
||||
First things first. Let's start by making sure that we have python installed properly on the system.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ python --version
|
||||
Python 3.7.1
|
||||
|
||||
Because in this case I have *python3* installed, I can create a *virtualenv* easier without the use of external tools.
|
||||
|
||||
.. code:: text
|
||||
|
||||
# Create the directory to work with
|
||||
$ mkdir -p sandbox/test-roles
|
||||
# Navigate to the directory
|
||||
$ cd sandbox/test-roles/
|
||||
# Create the virtualenv
|
||||
~/sandbox/test-roles $ python -m venv .ansible-venv
|
||||
# Activate the virtualenv
|
||||
~/sandbox/test-roles $ source .ansible-venv/bin/activate
|
||||
# Check that your virtualenv activated properly
|
||||
(.ansible-venv) ~/sandbox/test-roles $ which python
|
||||
/home/elijah/sandbox/test-roles/.ansible-venv/bin/python
|
||||
|
||||
At this point, we can install the required dependencies.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ pip install ansible molecule docker
|
||||
Collecting ansible
|
||||
Downloading https://files.pythonhosted.org/packages/56/fb/b661ae256c5e4a5c42859860f59f9a1a0b82fbc481306b30e3c5159d519d/ansible-2.7.5.tar.gz (11.8MB)
|
||||
100% |████████████████████████████████| 11.8MB 3.8MB/s
|
||||
Collecting molecule
|
||||
Downloading https://files.pythonhosted.org/packages/84/97/e5764079cb7942d0fa68b832cb9948274abb42b72d9b7fe4a214e7943786/molecule-2.19.0-py3-none-any.whl (180kB)
|
||||
100% |████████████████████████████████| 184kB 2.2MB/s
|
||||
|
||||
...
|
||||
|
||||
Successfully built ansible ansible-lint anyconfig cerberus psutil click-completion tabulate tree-format pathspec future pycparser arrow
|
||||
Installing collected packages: MarkupSafe, jinja2, PyYAML, six, pycparser, cffi, pynacl, idna, asn1crypto, cryptography, bcrypt, paramiko, ansible, pbr, git-url-parse, monotonic, fasteners, click, colorama, sh, python-gilt, ansible-lint, pathspec, yamllint, anyconfig, cerberus, psutil, more-itertools, py, attrs, pluggy, atomicwrites, pytest, testinfra, ptyprocess, pexpect, click-completion, tabulate, future, chardet, binaryornot, poyo, urllib3, certifi, requests, python-dateutil, arrow, jinja2-time, whichcraft, cookiecutter, tree-format, molecule, docker-pycreds, websocket-client, docker
|
||||
Successfully installed MarkupSafe-1.1.0 PyYAML-3.13 ansible-2.7.5 ansible-lint-3.4.23 anyconfig-0.9.7 arrow-0.13.0 asn1crypto-0.24.0 atomicwrites-1.2.1 attrs-18.2.0 bcrypt-3.1.5 binaryornot-0.4.4 cerberus-1.2 certifi-2018.11.29 cffi-1.11.5 chardet-3.0.4 click-6.7 click-completion-0.3.1 colorama-0.3.9 cookiecutter-1.6.0 cryptography-2.4.2 docker-3.7.0 docker-pycreds-0.4.0 fasteners-0.14.1 future-0.17.1 git-url-parse-1.1.0 idna-2.8 jinja2-2.10 jinja2-time-0.2.0 molecule-2.19.0 monotonic-1.5 more-itertools-5.0.0 paramiko-2.4.2 pathspec-0.5.9 pbr-4.1.0 pexpect-4.6.0 pluggy-0.8.1 poyo-0.4.2 psutil-5.4.6 ptyprocess-0.6.0 py-1.7.0 pycparser-2.19 pynacl-1.3.0 pytest-4.1.0 python-dateutil-2.7.5 python-gilt-1.2.1 requests-2.21.0 sh-1.12.14 six-1.11.0 tabulate-0.8.2 testinfra-1.16.0 tree-format-0.1.2 urllib3-1.24.1 websocket-client-0.54.0 whichcraft-0.5.2 yamllint-1.11.1
|
||||
|
||||
Creating your first ansible role
|
||||
================================
|
||||
|
||||
Once all the steps above are complete, we can start by creating our first ansible role.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ molecule init role -r example-role
|
||||
--> Initializing new role example-role...
|
||||
Initialized role in /home/elijah/sandbox/test-roles/example-role successfully.
|
||||
|
||||
$ tree example-role/
|
||||
example-role/
|
||||
├── defaults
|
||||
│ └── main.yml
|
||||
├── handlers
|
||||
│ └── main.yml
|
||||
├── meta
|
||||
│ └── main.yml
|
||||
├── molecule
|
||||
│ └── default
|
||||
│ ├── Dockerfile.j2
|
||||
│ ├── INSTALL.rst
|
||||
│ ├── molecule.yml
|
||||
│ ├── playbook.yml
|
||||
│ └── tests
|
||||
│ ├── __pycache__
|
||||
│ │ └── test_default.cpython-37.pyc
|
||||
│ └── test_default.py
|
||||
├── README.md
|
||||
├── tasks
|
||||
│ └── main.yml
|
||||
└── vars
|
||||
└── main.yml
|
||||
|
||||
9 directories, 12 files
|
||||
|
||||
You can find what each directory is for and how ansible works by visiting docs.ansible.com.
|
||||
|
||||
``meta/main.yml``
|
||||
-----------------
|
||||
|
||||
The meta file needs to be modified and filled with information about the role. This is not a required file to modify if you are keeping this for yourself, for example. But it is a good idea to have as much information as possible if this is going to be released. In my case, I don't need any fanciness as this is just sample code.
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
---
|
||||
galaxy_info:
|
||||
author: Elia el Lazkani
|
||||
description: This is an example ansible role to showcase molecule at work
|
||||
license: license (BDS-2)
|
||||
min_ansible_version: 2.7
|
||||
galaxy_tags: []
|
||||
dependencies: []
|
||||
|
||||
``tasks/main.yml``
|
||||
------------------
|
||||
|
||||
This is where the magic is set in motion. Tasks are the smallest entities in a role that do small and idempotent actions. Let's write a few simple tasks to create a user and install a service.
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
---
|
||||
# Create the user example
|
||||
- name: Create 'example' user
|
||||
user:
|
||||
name: example
|
||||
comment: Example user
|
||||
shell: /bin/bash
|
||||
state: present
|
||||
create_home: yes
|
||||
home: /home/example
|
||||
|
||||
# Install nginx
|
||||
- name: Install nginx
|
||||
apt:
|
||||
name: nginx
|
||||
state: present
|
||||
update_cache: yes
|
||||
notify: Restart nginx
|
||||
|
||||
``handlers/main.yml``
|
||||
---------------------
|
||||
|
||||
If you noticed, we are notifying a handler to be called after installing *nginx*. All handlers notified will run after all the tasks complete and each handler will only run once. This is a good way to make sure that you don't restart *nginx* multiple times if you call the handler more than once.
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
---
|
||||
# Handler to restart nginx
|
||||
- name: Restart nginx
|
||||
service:
|
||||
name: nginx
|
||||
state: restarted
|
||||
|
||||
``molecule/default/molecule.yml``
|
||||
---------------------------------
|
||||
|
||||
It's time to configure molecule to do what we need. We need to start an ubuntu docker container, so we need to specify that in the molecule YAML file. All we need to do is change the image line to specify that we want an ``ubuntu:bionic`` image.
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
---
|
||||
dependency:
|
||||
name: galaxy
|
||||
driver:
|
||||
name: docker
|
||||
lint:
|
||||
name: yamllint
|
||||
platforms:
|
||||
- name: instance
|
||||
image: ubuntu:bionic
|
||||
provisioner:
|
||||
name: ansible
|
||||
lint:
|
||||
name: ansible-lint
|
||||
scenario:
|
||||
name: default
|
||||
verifier:
|
||||
name: testinfra
|
||||
lint:
|
||||
name: flake8
|
||||
|
||||
``molecule/default/playbook.yml``
|
||||
---------------------------------
|
||||
|
||||
This is the playbook that molecule will run. Make sure that you have all the steps that you need here. I will keep this as is.
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
---
|
||||
- name: Converge
|
||||
hosts: all
|
||||
roles:
|
||||
- role: example-role
|
||||
|
||||
First Role Pass
|
||||
===============
|
||||
|
||||
It is time to test our role and see what's going on.
|
||||
|
||||
.. code:: text
|
||||
|
||||
(.ansible-role) ~/sandbox/test-roles/example-role/ $ molecule converge
|
||||
--> Validating schema /home/elijah/sandbox/test-roles/example-role/molecule/default/molecule.yml.
|
||||
Validation completed successfully.
|
||||
--> Test matrix
|
||||
|
||||
└── default
|
||||
├── dependency
|
||||
├── create
|
||||
├── prepare
|
||||
└── converge
|
||||
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'dependency'
|
||||
Skipping, missing the requirements file.
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'create'
|
||||
|
||||
PLAY [Create] ******************************************************************
|
||||
|
||||
TASK [Log into a Docker registry] **********************************************
|
||||
skipping: [localhost] => (item=None)
|
||||
|
||||
TASK [Create Dockerfiles from image names] *************************************
|
||||
changed: [localhost] => (item=None)
|
||||
changed: [localhost]
|
||||
|
||||
TASK [Discover local Docker images] ********************************************
|
||||
ok: [localhost] => (item=None)
|
||||
ok: [localhost]
|
||||
|
||||
TASK [Build an Ansible compatible image] ***************************************
|
||||
changed: [localhost] => (item=None)
|
||||
changed: [localhost]
|
||||
|
||||
TASK [Create docker network(s)] ************************************************
|
||||
|
||||
TASK [Create molecule instance(s)] *********************************************
|
||||
changed: [localhost] => (item=None)
|
||||
changed: [localhost]
|
||||
|
||||
TASK [Wait for instance(s) creation to complete] *******************************
|
||||
changed: [localhost] => (item=None)
|
||||
changed: [localhost]
|
||||
|
||||
PLAY RECAP *********************************************************************
|
||||
localhost : ok=5 changed=4 unreachable=0 failed=0
|
||||
|
||||
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'prepare'
|
||||
Skipping, prepare playbook not configured.
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'converge'
|
||||
|
||||
PLAY [Converge] ****************************************************************
|
||||
|
||||
TASK [Gathering Facts] *********************************************************
|
||||
ok: [instance]
|
||||
|
||||
TASK [example-role : Create 'example' user] ************************************
|
||||
changed: [instance]
|
||||
|
||||
TASK [example-role : Install nginx] ********************************************
|
||||
changed: [instance]
|
||||
|
||||
RUNNING HANDLER [example-role : Restart nginx] *********************************
|
||||
changed: [instance]
|
||||
|
||||
PLAY RECAP *********************************************************************
|
||||
instance : ok=4 changed=3 unreachable=0 failed=0
|
||||
|
||||
It looks like the **converge** step succeeded.
|
||||
|
||||
Writing Tests
|
||||
=============
|
||||
|
||||
It is always a good practice to write unittests when you're writing code. Ansible roles should not be an exception. Molecule offers a way to run tests, which you can think of as unittest, to make sure that what the role gives you is what you were expecting. This helps future development of the role and keeps you from falling into previously solved traps.
|
||||
|
||||
``molecule/default/tests/test_default.py``
|
||||
------------------------------------------
|
||||
|
||||
Molecule leverages the `testinfra <https://testinfra.readthedocs.io/en/latest/>`_ project to run its tests. You can use other tools if you so wish, and there are many. In this example we will be using *testinfra*.
|
||||
|
||||
.. code:: python
|
||||
|
||||
import os
|
||||
|
||||
import testinfra.utils.ansible_runner
|
||||
|
||||
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
|
||||
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
|
||||
|
||||
|
||||
def test_hosts_file(host):
|
||||
f = host.file('/etc/hosts')
|
||||
|
||||
assert f.exists
|
||||
assert f.user == 'root'
|
||||
assert f.group == 'root'
|
||||
|
||||
|
||||
def test_user_created(host):
|
||||
user = host.user("example")
|
||||
assert user.name == "example"
|
||||
assert user.home == "/home/example"
|
||||
|
||||
|
||||
def test_user_home_exists(host):
|
||||
user_home = host.file("/home/example")
|
||||
assert user_home.exists
|
||||
assert user_home.is_directory
|
||||
|
||||
|
||||
def test_nginx_is_installed(host):
|
||||
nginx = host.package("nginx")
|
||||
assert nginx.is_installed
|
||||
|
||||
|
||||
def test_nginx_running_and_enabled(host):
|
||||
nginx = host.service("nginx")
|
||||
assert nginx.is_running
|
||||
|
||||
|
||||
|
||||
.. warning::
|
||||
|
||||
Uncomment ``truthy: disable`` in ``.yamllint`` found at the base of the role.
|
||||
|
||||
.. code:: text
|
||||
|
||||
(.ansible_venv) ~/sandbox/test-roles/example-role $ molecule test
|
||||
--> Validating schema /home/elijah/sandbox/test-roles/example-role/molecule/default/molecule.yml.
|
||||
Validation completed successfully.
|
||||
--> Test matrix
|
||||
|
||||
└── default
|
||||
├── lint
|
||||
├── destroy
|
||||
├── dependency
|
||||
├── syntax
|
||||
├── create
|
||||
├── prepare
|
||||
├── converge
|
||||
├── idempotence
|
||||
├── side_effect
|
||||
├── verify
|
||||
└── destroy
|
||||
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'lint'
|
||||
--> Executing Yamllint on files found in /home/elijah/sandbox/test-roles/example-role/...
|
||||
Lint completed successfully.
|
||||
--> Executing Flake8 on files found in /home/elijah/sandbox/test-roles/example-role/molecule/default/tests/...
|
||||
/home/elijah/.virtualenvs/world/lib/python3.7/site-packages/pycodestyle.py:113: FutureWarning: Possible nested set at position 1
|
||||
EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]')
|
||||
Lint completed successfully.
|
||||
--> Executing Ansible Lint on /home/elijah/sandbox/test-roles/example-role/molecule/default/playbook.yml...
|
||||
Lint completed successfully.
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'destroy'
|
||||
|
||||
PLAY [Destroy] *****************************************************************
|
||||
|
||||
TASK [Destroy molecule instance(s)] ********************************************
|
||||
changed: [localhost] => (item=None)
|
||||
changed: [localhost]
|
||||
|
||||
TASK [Wait for instance(s) deletion to complete] *******************************
|
||||
ok: [localhost] => (item=None)
|
||||
ok: [localhost]
|
||||
|
||||
TASK [Delete docker network(s)] ************************************************
|
||||
|
||||
PLAY RECAP *********************************************************************
|
||||
localhost : ok=2 changed=1 unreachable=0 failed=0
|
||||
|
||||
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'dependency'
|
||||
Skipping, missing the requirements file.
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'syntax'
|
||||
|
||||
playbook: /home/elijah/sandbox/test-roles/example-role/molecule/default/playbook.yml
|
||||
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'create'
|
||||
|
||||
PLAY [Create] ******************************************************************
|
||||
|
||||
TASK [Log into a Docker registry] **********************************************
|
||||
skipping: [localhost] => (item=None)
|
||||
|
||||
TASK [Create Dockerfiles from image names] *************************************
|
||||
changed: [localhost] => (item=None)
|
||||
changed: [localhost]
|
||||
|
||||
TASK [Discover local Docker images] ********************************************
|
||||
ok: [localhost] => (item=None)
|
||||
ok: [localhost]
|
||||
|
||||
TASK [Build an Ansible compatible image] ***************************************
|
||||
changed: [localhost] => (item=None)
|
||||
changed: [localhost]
|
||||
|
||||
TASK [Create docker network(s)] ************************************************
|
||||
|
||||
TASK [Create molecule instance(s)] *********************************************
|
||||
changed: [localhost] => (item=None)
|
||||
changed: [localhost]
|
||||
|
||||
TASK [Wait for instance(s) creation to complete] *******************************
|
||||
changed: [localhost] => (item=None)
|
||||
changed: [localhost]
|
||||
|
||||
PLAY RECAP *********************************************************************
|
||||
localhost : ok=5 changed=4 unreachable=0 failed=0
|
||||
|
||||
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'prepare'
|
||||
Skipping, prepare playbook not configured.
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'converge'
|
||||
|
||||
PLAY [Converge] ****************************************************************
|
||||
|
||||
TASK [Gathering Facts] *********************************************************
|
||||
ok: [instance]
|
||||
|
||||
TASK [example-role : Create 'example' user] ************************************
|
||||
changed: [instance]
|
||||
|
||||
TASK [example-role : Install nginx] ********************************************
|
||||
changed: [instance]
|
||||
|
||||
RUNNING HANDLER [example-role : Restart nginx] *********************************
|
||||
changed: [instance]
|
||||
|
||||
PLAY RECAP *********************************************************************
|
||||
instance : ok=4 changed=3 unreachable=0 failed=0
|
||||
|
||||
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'idempotence'
|
||||
Idempotence completed successfully.
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'side_effect'
|
||||
Skipping, side effect playbook not configured.
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'verify'
|
||||
--> Executing Testinfra tests found in /home/elijah/sandbox/test-roles/example-role/molecule/default/tests/...
|
||||
============================= test session starts ==============================
|
||||
platform linux -- Python 3.7.1, pytest-4.1.0, py-1.7.0, pluggy-0.8.1
|
||||
rootdir: /home/elijah/sandbox/test-roles/example-role/molecule/default, inifile:
|
||||
plugins: testinfra-1.16.0
|
||||
collected 5 items
|
||||
|
||||
tests/test_default.py ..... [100%]
|
||||
|
||||
=============================== warnings summary ===============================
|
||||
|
||||
...
|
||||
|
||||
==================== 5 passed, 7 warnings in 27.37 seconds =====================
|
||||
Verifier completed successfully.
|
||||
--> Scenario: 'default'
|
||||
--> Action: 'destroy'
|
||||
|
||||
PLAY [Destroy] *****************************************************************
|
||||
|
||||
TASK [Destroy molecule instance(s)] ********************************************
|
||||
changed: [localhost] => (item=None)
|
||||
changed: [localhost]
|
||||
|
||||
TASK [Wait for instance(s) deletion to complete] *******************************
|
||||
changed: [localhost] => (item=None)
|
||||
changed: [localhost]
|
||||
|
||||
TASK [Delete docker network(s)] ************************************************
|
||||
|
||||
PLAY RECAP *********************************************************************
|
||||
localhost : ok=2 changed=2 unreachable=0 failed=0
|
||||
|
||||
I have a few warning messages (that's likely because I am using Python 3.7 and some of the libraries still don't fully support the new standards released with it), but all my tests passed.
|
||||
|
||||
Conclusion
|
||||
==========
|
||||
|
||||
Molecule is a great tool to test ansible roles quickly and while developing them. It also comes bundled with a bunch of other features from different projects that will test all aspects of your ansible code. I suggest you start using it when writing new ansible roles.
|
98
posts/irc/weechat-ssh-and-notification.org
Normal file
|
@ -0,0 +1,98 @@
|
|||
#+BEGIN_COMMENT
|
||||
.. title: Weechat, SSH and Notification
|
||||
.. date: 2019-01-01
|
||||
.. updated: 2019-07-02
|
||||
.. status: published
|
||||
.. tags: irc, ssh, weechat, notification,
|
||||
.. category: irc
|
||||
.. slug: weechat-ssh-and-notification
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: A way to patch weechat notifications through your system's libnotify over ssh.
|
||||
.. type: text
|
||||
#+END_COMMENT
|
||||
|
||||
I have been on IRC for as long as I have been using /Linux/ and that is a long time. Throughout the years, I have moved between /terminal IRC/ clients. In this current iteration, I am using [[https://weechat.org/][Weechat]].
|
||||
|
||||
There are many ways one can use /weechat/ and the one I chose is to run it in /tmux/ on a /cloud server/. In other words, I have a /Linux/ server running on one of the many cloud providers on which I have /tmux/ and /weechat/ installed and configured the way I like them. If you run a setup like mine, then you might face the same issue I have with IRC notifications.
|
||||
|
||||
{{{TEASER_END}}}
|
||||
|
||||
* Why?
|
||||
/Weechat/ can cause a terminal bell which will show on some /terminals/ and /window managers/ as a notification. But you only know that /weechat/ pinged. Furthermore, if this is happening on a server that you are /ssh/'ing to, and with various shell configurations, this system might not even work. I wanted something more useful than that so I went on the hunt for the plugins available to see if any one of them could offer me a solution. I found many official plugins that did things in a similar fashion and each in a different and interesting way but none the way I want them to work.
|
||||
|
||||
* Solution
|
||||
After trying multiple solutions offered online which included various plugins, I decided to write my own. That's when /weenotify/ was born. If you know my background then you know, already, that I am big on open source so /weenotify/ was first released on [[https://gitlab.com/elazkani/weenotify][Gitlab]]. After a few changes, requested by a weechat developer (*FlashCode* in *#weechat* on [[https://freenode.net/][Freenode]]), /weenotify/ became an [[https://weechat.org/scripts/source/weenotify.py.html/][official weechat plugin]].
|
||||
|
||||
* Weenotify
|
||||
Without getting into too many details, /weenotify/ acts as both a weechat plugin and a server. The main function is to intercept weechat notifications and patch them through the system's notification system. In simple terms, if someone mentions your name, you will get a pop-up notification on your system with information about that. The script can be configured to work locally, if you run weechat on your own machine, or to open a socket and send the notification to /weenotify/ running as a server. In the latter configuration, /weenotify/ will display the notification on the system the server is running on.
|
||||
|
||||
* Configuration
|
||||
Let's look at the configuration to accomplish this... As mentioned in the beginning of the post, I run weechat in /tmux/ on a server. So I /ssh/ to the server before attaching /tmux/. The safest way to do this is to *port forward over ssh* and this can be done easily by /ssh/'ing using the following example.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ ssh -R 5431:localhost:5431 server.example.com
|
||||
#+END_EXAMPLE
|
||||
|
||||
At this point, you should have port *5431* forwarded between the server and your machine.
|
||||
|
||||
Once the previous step is done, you can test if it works by trying to run the /weenotify/ script in server mode on your machine using the following command.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ python weenotify.py -s
|
||||
Starting server...
|
||||
Server listening locally on port 5431...
|
||||
#+END_EXAMPLE
|
||||
|
||||
The server is now running, you can test port forwarding from the server to make sure everything is working as expected.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ telnet localhost 5431
|
||||
Trying ::1...
|
||||
Connected to localhost.
|
||||
Escape character is '^]'.
|
||||
#+END_EXAMPLE
|
||||
|
||||
If the connection is successful then you know that port forwarding is working as expected. You can close the connection by hitting =Ctrl= + =]=.
|
||||
|
||||
Now we are ready to install the plugin in weechat and configure it. In weechat, run the following command.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
/script search weenotify
|
||||
#+END_EXAMPLE
|
||||
|
||||
At which point, you should be greeted with the buffer shown in the screenshot below.
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<a class="reference" href="/images/weechat-ssh-and-notification/01-weechat-weenotify.png" alt="weenotify" align="center">
|
||||
<img src="/images/weechat-ssh-and-notification/01-weechat-weenotify.thumbnail.png">
|
||||
#+END_EXPORT
|
||||
#+BEGIN_EXPORT html
|
||||
</a>
|
||||
#+END_EXPORT
|
||||
|
||||
You can install the plugin with =Alt= + =i= and make sure it autoloads with =Alt= + =A=.
|
||||
You can get more information about working with weechat scripts by reading the help menu.
|
||||
You can get the scripts help menu by running the following in weechat.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
/help script
|
||||
#+END_EXAMPLE
|
||||
|
||||
The /weenotify/ plugin is installed at this stage and only needs to be configured. The plugin has a list of values that can be configured. My configuration looks like the following.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
plugins.var.python.weenotify.enable string "on"
|
||||
plugins.var.python.weenotify.host string "localhost"
|
||||
plugins.var.python.weenotify.mode string "remote"
|
||||
plugins.var.python.weenotify.port string "5431"
|
||||
#+END_EXAMPLE
|
||||
|
||||
Each one of those configuration options can be set as shown in the example below in weechat.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
/set plugins.var.python.weenotify.enable on
|
||||
#+END_EXAMPLE
|
||||
|
||||
Make sure that the plugin *enable* value is *on* and that the *mode* is *remote*, if you're following this post and using ssh with port forwarding. Otherwise, If you want the plugin to work locally, make sure you set the *mode* to *local*.
|
||||
|
||||
If you followed this post so far, then whenever someone highlights you on weechat you should get a pop-up on your system notifying you about it.
|
|
@ -1,99 +0,0 @@
|
|||
.. title: Weechat, SSH and Notification
|
||||
.. date: 2019-01-01
|
||||
.. updated: 2019-07-02
|
||||
.. status: published
|
||||
.. tags: irc, ssh, weechat, notification,
|
||||
.. category: irc
|
||||
.. slug: weechat-ssh-and-notification
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: A way to patch weechat notifications through your system's libnotify over ssh.
|
||||
.. type: text
|
||||
|
||||
|
||||
I have been on IRC for as long as I have been using *Linux* and that is a long time. Throughout the years, I have moved between *terminal IRC* clients. In this current iteration, I am using `Weechat <https://weechat.org/>`_.
|
||||
|
||||
There are many ways one can use *weechat* and the one I chose is to run it in *tmux* on a *cloud server*. In other words, I have a *Linux* server running on one of the many cloud providers on which I have *tmux* and *weechat* installed and configured the way I like them. If you run a setup like mine, then you might face the same issue I have with IRC notifications.
|
||||
|
||||
.. TEASER_END
|
||||
|
||||
Why?
|
||||
====
|
||||
|
||||
*Weechat* can cause a terminal bell which will show on some *terminals* and *window managers* as a notification. But you only know that *weechat* pinged. Furthermore, if this is happening on a server that you are *ssh*'ing to, and with various shell configurations, this system might not even work. I wanted something more useful than that so I went on the hunt for the plugins available to see if any one of them could offer me a solution. I found many official plugins that did things in a similar fashion and each in a different and interesting way but none the way I want them to work.
|
||||
|
||||
Solution
|
||||
========
|
||||
|
||||
After trying multiple solutions offered online which included various plugins, I decided to write my own. That's when *weenotify* was born. If you know my background then you know, already, that I am big on open source so *weenotify* was first released on `Gitlab <https://gitlab.com/elazkani/weenotify>`_. After a few changes, requested by a weechat developer (**FlashCode** in **#weechat** on `Freenode <https://freenode.net/>`_), *weenotify* became an `official weechat plugin <https://weechat.org/scripts/source/weenotify.py.html/>`_.
|
||||
|
||||
Weenotify
|
||||
=========
|
||||
|
||||
Without getting into too many details, *weenotify* acts as both a weechat plugin and a server. The main function is to intercept weechat notifications and patch them through the system's notification system. In simple terms, if someone mentions your name, you will get a pop-up notification on your system with information about that. The script can be configured to work locally, if you run weechat on your own machine, or to open a socket and send the notification to *weenotify* running as a server. In the latter configuration, *weenotify* will display the notification on the system the server is running on.
|
||||
|
||||
Configuration
|
||||
=============
|
||||
|
||||
Let's look at the configuration to accomplish this... As mentioned in the beginning of the post, I run weechat in *tmux* on a server. So I *ssh* to the server before attaching *tmux*. The safest way to do this is to **port forward over ssh** and this can be done easily by *ssh*'ing using the following example.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ ssh -R 5431:localhost:5431 server.example.com
|
||||
|
||||
At this point, you should have port **5431** forwarded between the server and your machine.
|
||||
|
||||
Once the previous step is done, you can test if it works by trying to run the *weenotify* script in server mode on your machine using the following command.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ python weenotify.py -s
|
||||
Starting server...
|
||||
Server listening locally on port 5431...
|
||||
|
||||
The server is now running, you can test port forwarding from the server to make sure everything is working as expected.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ telnet localhost 5431
|
||||
Trying ::1...
|
||||
Connected to localhost.
|
||||
Escape character is '^]'.
|
||||
|
||||
If the connection is successful then you know that port forwarding is working as expected. You can close the connection by hitting **Ctrl + ]**.
|
||||
|
||||
Now we are ready to install the plugin in weechat and configure it. In weechat, run the following command.
|
||||
|
||||
.. code:: text
|
||||
|
||||
/script search weenotify
|
||||
|
||||
At which point, you should be greeted with the buffer shown in the screenshot below.
|
||||
|
||||
.. thumbnail:: /images/weechat_ssh_and_notification/01-weechat_weenotify.png
|
||||
:align: center
|
||||
:alt: weenotify
|
||||
|
||||
You can install the plugin with **Alt + i** and make sure it autoloads with **Alt + A**. You can get more information about working with weechat scripts by reading the help menu. You can get the scripts help menu by running the following in weechat.
|
||||
|
||||
.. code:: text
|
||||
|
||||
/help script
|
||||
|
||||
The *weenotify* plugin is installed at this stage and only needs to be configured. The plugin has a list of values that can be configured. My configuration looks like the following.
|
||||
|
||||
.. code:: text
|
||||
|
||||
plugins.var.python.weenotify.enable string "on"
|
||||
plugins.var.python.weenotify.host string "localhost"
|
||||
plugins.var.python.weenotify.mode string "remote"
|
||||
plugins.var.python.weenotify.port string "5431"
|
||||
|
||||
Each one of those configuration options can be set as shown in the example below in weechat.
|
||||
|
||||
.. code:: text
|
||||
|
||||
/set plugins.var.python.weenotify.enable on
|
||||
|
||||
Make sure that the plugin **enable** value is **on** and that the **mode** is **remote**, if you're following this post and using ssh with port forwarding. Otherwise, If you want the plugin to work locally, make sure you set the **mode** to **local**.
|
||||
|
||||
If you followed this post so far, then whenever someone highlights you on weechat you should get a pop-up on your system notifying you about it.
|
163
posts/k3s/building-k3s-on-a-pi.org
Normal file
|
@ -0,0 +1,163 @@
|
|||
#+BEGIN_COMMENT
|
||||
.. title: Building k3s on a Pi
|
||||
.. date: 2020-08-09
|
||||
.. slug: building-k3s-on-a-pi
|
||||
.. updated: 2020-08-09
|
||||
.. status: published
|
||||
.. tags: kubernetes, k3s, arm
|
||||
.. category: k3s
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: I have decided to make a better use of my pi, k3s came next.
|
||||
.. type: text
|
||||
#+END_COMMENT
|
||||
|
||||
I have had a *Pi* laying around used for a simple task for a while now.
|
||||
A few days ago, I was browsing the web, learning more about privacy, when I stumbled upon [[https://adguard.com/en/welcome.html][AdGuard Home]].
|
||||
|
||||
I have been using it as my internal DNS on top of the security and privacy layers I add to my machine.
|
||||
Its benefits can be argued but it is a DNS after all and I wanted to see what else it can do for me.
|
||||
Anyway, I digress. I searched to see if I could find a container for *AdGuard Home* and I did.
|
||||
|
||||
At this point, I started thinking about what I could do to make the [[https://www.raspberrypi.org/][Pi]] more useful.
|
||||
|
||||
That's when [[https://k3s.io/][k3s]] came into the picture.
|
||||
|
||||
{{{TEASER_END}}}
|
||||
|
||||
* Pre-requisites
|
||||
As this is not a *Pi* tutorial, I am going to be assuming that you have a /Raspberry Pi/ with *Raspberry Pi OS* /Buster/ installed on it.
|
||||
The assumption does not mean you cannot install any other OS on the Pi and run this setup.
|
||||
It only means that I have tested this on /Buster/ and that your mileage will vary.
|
||||
|
||||
* Prepare the Pi
|
||||
Now that you have /Buster/ already installed, let's go ahead and [[https://rancher.com/docs/k3s/latest/en/advanced/#enabling-legacy-iptables-on-raspbian-buster][fix]] a small default configuration issue with it.
|
||||
|
||||
*K3s* uses =iptables= to route things around correctly. /Buster/ uses =nftables= by default, let's switch it to =iptables=.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ sudo iptables -F
|
||||
$ sudo update-alternatives --set iptables /usr/sbin/iptables-legacy
|
||||
$ sudo update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
|
||||
$ sudo reboot
|
||||
#+END_EXAMPLE
|
||||
|
||||
At this point, your /Pi/ should reboot. Your *OS* is configured for the next step.
|
||||
|
||||
* Pre-install Configuration
|
||||
After testing *k3s* a few times, I found out that by /default/ it will deploy a few extra services like [[https://docs.traefik.io/][Traefik]].
|
||||
|
||||
Unfortunately, just like anything the /default/ configuration is just that. It's plain and not very useful from the start. You will need to tweak it.
|
||||
|
||||
This step could be done either /post/ or /pre/ deploy. Figuring out the /pre-deploy/ is a bit more involving but a bit more fun as well.
|
||||
|
||||
The first thing you need to know is that the normal behavior of *k3s* is to deploy anything found in =/var/lib/rancher/k3s/server/manifests/=.
|
||||
So a good first step is, of course, to proceed with creating that.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ mkdir -p /var/lib/rancher/k3s/server/manifests/
|
||||
#+END_EXAMPLE
|
||||
|
||||
The other thing to know is that *k3s* can deploy /Helm Charts/.
|
||||
It will create the /manifests/ it will deploy by default, before beginning the setup, in the manifest path I mentioned.
|
||||
If you would like to see what it deployed and how, visit that path after *k3s* runs.
|
||||
I did, and I took their configuration of *Traefik* which I was unhappy with its /defaults/.
|
||||
|
||||
My next step was securing the /defaults/ as much as possible and I found out that *Traefik* can do [[https://docs.traefik.io/v2.0/middlewares/basicauth/][basic authentication]].
|
||||
As a starting point, that's great. Let's create the credentials.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ htpasswd -c ./auth myUser
|
||||
#+END_EXAMPLE
|
||||
|
||||
That was easy so far. Let's turn up the notch and create the manifest for *k3s*.
|
||||
|
||||
Create =traefik.yaml= in =/var/lib/rancher/k3s/server/manifests/= with the following content.
|
||||
|
||||
#+BEGIN_SRC yaml
|
||||
---
|
||||
apiVersion: helm.cattle.io/v1
|
||||
kind: HelmChart
|
||||
metadata:
|
||||
name: traefik
|
||||
namespace: kube-system
|
||||
spec:
|
||||
chart: https://%{KUBERNETES_API}%/static/charts/traefik-1.81.0.tgz
|
||||
valuesContent: |-
|
||||
rbac:
|
||||
enabled: true
|
||||
ssl:
|
||||
enabled: true
|
||||
dashboard:
|
||||
enabled: true
|
||||
domain: traefik-ui.example.com
|
||||
auth:
|
||||
basic:
|
||||
myUser: $ars3$4A5tdstr$trSDDa4467Tsa54sTs.
|
||||
metrics:
|
||||
prometheus:
|
||||
enabled: false
|
||||
kubernetes:
|
||||
ingressEndpoint:
|
||||
useDefaultPublishedService: true
|
||||
image: "rancher/library-traefik"
|
||||
tolerations:
|
||||
- key: "CriticalAddonsOnly"
|
||||
operator: "Exists"
|
||||
- key: "node-role.kubernetes.io/master"
|
||||
operator: "Exists"
|
||||
effect: "NoSchedule"
|
||||
#+END_SRC
|
||||
|
||||
It's a *Pi*, I don't need prometheus so I disabled it.
|
||||
I also enabled the dashboard and added the credentials we created in the previous step.
|
||||
|
||||
Now, the /Helm Chart/ will deploy an ingress and expose the dashboard for you on the value of =domain=.
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<div class="admonition note">
|
||||
<p class="admonition-title">Note</p>
|
||||
#+END_EXPORT
|
||||
I figured out the values to set in =valuesContent= by reading the /Helm Chart/
|
||||
#+BEGIN_EXPORT html
|
||||
</div>
|
||||
#+END_EXPORT
|
||||
|
||||
* K3s
|
||||
If everything is in place, you are ready to proceed.
|
||||
You can install *k3s*, now, but before I get to that step, I will say a few things about *k3s*.
|
||||
|
||||
*K3s* has a smaller feature set than *k8s*, hence the smaller footprint.
|
||||
Read the documentation to see if you need any of the missing features.
|
||||
The second thing to mention is that *k3s* is a one binary deploy that uses *containerd*.
|
||||
That's why we will use the script installation method as it adds the necessary *systemd* configuration for us.
|
||||
It is a nice gesture.
|
||||
|
||||
Let's do that, shall we?
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ curl -sfL https://get.k3s.io | sh -s - --no-deploy traefik
|
||||
#+END_EXAMPLE
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<div class="admonition note">
|
||||
<p class="admonition-title">Note</p>
|
||||
#+END_EXPORT
|
||||
We need to make sure that *k3s* does not deploy its own *traefik* but ours.
|
||||
Make sure to add =--no-deploy traefik= to our deployment command.
|
||||
#+BEGIN_EXPORT html
|
||||
</div>
|
||||
#+END_EXPORT
|
||||
|
||||
Point =traefik.example.com= to your *Pi* =IP= in =/etc/hosts= on your machine.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
traefik.example.com 192.168.0.5
|
||||
#+END_EXAMPLE
|
||||
|
||||
When the installation command is done, you should be able to visit [[http://traefik.example.com/][http://traefik.example.com/]]
|
||||
|
||||
You can get the /kubeconfig/ from the /Raspberry Pi/, you can find it in =/etc/rancher/k3s/k3s.yaml=. You will need to change the =server= *IP*.
|
||||
|
||||
* Conclusion
|
||||
If you've made it so far, you should have a *k3s* cluster running on a single /Raspberry Pi/.
|
||||
The next step you might want to look into is disabling the /metrics/ server and using the resources for other things.
|
|
@ -1,162 +0,0 @@
|
|||
.. title: Building k3s on a Pi
|
||||
.. date: 2020-08-09
|
||||
.. slug: building-k3s-on-a-pi
|
||||
.. updated: 2020-08-09
|
||||
.. status: published
|
||||
.. tags: kubernetes, k3s, arm
|
||||
.. category: k3s
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: I have decided to make a better use of my pi, k3s came next.
|
||||
.. type: text
|
||||
|
||||
I have had a **Pi** laying around used for a simple task for a while now.
|
||||
A few days ago, I was browsing the web, learning more about privacy, when I stumbled upon `AdGuard Home <https://adguard.com/en/welcome.html>`_.
|
||||
|
||||
I have been using it as my internal DNS on top of the security and privacy layers I add to my machine.
|
||||
Its benefits can be argued but it is a DNS after all and I wanted to see what else it can do for me.
|
||||
Anyway, I digress. I searched to see if I could find a container for **AdGuard Home** and I did.
|
||||
|
||||
At this point, I started thinking about what I could do to make the `Pi <https://www.raspberrypi.org/>`_ more useful.
|
||||
|
||||
That's when `k3s <https://k3s.io/>`_ came into the picture.
|
||||
|
||||
.. TEASER_END
|
||||
|
||||
Pre-requisites
|
||||
==============
|
||||
|
||||
As this is not a **Pi** tutorial, I am going to be assuming that you have a *Raspberry Pi* with **Raspberry Pi OS** *Buster* installed on it.
|
||||
The assumption does not mean you cannot install any other OS on the Pi and run this setup.
|
||||
It only means that I have tested this on *Buster* and that your mileage will vary.
|
||||
|
||||
Prepare the Pi
|
||||
==============
|
||||
|
||||
Now that you have *Buster* already installed, let's go ahead and `fix <https://rancher.com/docs/k3s/latest/en/advanced/#enabling-legacy-iptables-on-raspbian-buster>`_ a small default configuration issue with it.
|
||||
|
||||
**K3s** uses ``iptables`` to route things around correctly. *Buster* uses ``nftables`` by default, let's switch it to ``iptables``.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ sudo iptables -F
|
||||
$ sudo update-alternatives --set iptables /usr/sbin/iptables-legacy
|
||||
$ sudo update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
|
||||
$ sudo reboot
|
||||
|
||||
At this point, your *Pi* should reboot. Your **OS** is configured for the next step.
|
||||
|
||||
Pre-install Configuration
|
||||
=========================
|
||||
|
||||
After testing **k3s** a few times, I found out that by *default* it will deploy a few extra services like `Traefik <https://docs.traefik.io/>`_.
|
||||
|
||||
Unfortunately, just like anything the *default* configuration is just that. It's plain and not very useful from the start. You will need to tweak it.
|
||||
|
||||
This step could be done either *post* or *pre* deploy. Figuring out the *pre-deploy* is a bit more involved but a bit more fun as well.
|
||||
|
||||
The first thing you need to know is that the normal behavior of **k3s** is to deploy anything found in ``/var/lib/rancher/k3s/server/manifests/``.
|
||||
So a good first step is, of course, to proceed with creating that.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ mkdir -p /var/lib/rancher/k3s/server/manifests/
|
||||
|
||||
The other thing to know is that **k3s** can deploy *Helm Charts*.
|
||||
It will create the *manifests* it will deploy by default, before beginning the setup, in the manifest path I mentioned.
|
||||
If you would like to see what it deployed and how, visit that path after **k3s** runs.
|
||||
I did, and I took their configuration of **Traefik** which I was unhappy with its *defaults*.
|
||||
|
||||
My next step was securing the *defaults* as much as possible and I found out that **Traefik** can do `basic authentication <https://docs.traefik.io/v2.0/middlewares/basicauth/>`_.
|
||||
As a starting point, that's great. Let's create the credentials.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ htpasswd -c ./auth myUser
|
||||
|
||||
That was easy so far. Let's turn up the notch and create the manifest for **k3s**.
|
||||
|
||||
Create ``traefik.yaml`` in ``/var/lib/rancher/k3s/server/manifests/`` with the following content.
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
---
|
||||
apiVersion: helm.cattle.io/v1
|
||||
kind: HelmChart
|
||||
metadata:
|
||||
name: traefik
|
||||
namespace: kube-system
|
||||
spec:
|
||||
chart: https://%{KUBERNETES_API}%/static/charts/traefik-1.81.0.tgz
|
||||
valuesContent: |-
|
||||
rbac:
|
||||
enabled: true
|
||||
ssl:
|
||||
enabled: true
|
||||
dashboard:
|
||||
enabled: true
|
||||
domain: traefik-ui.example.com
|
||||
auth:
|
||||
basic:
|
||||
myUser: $ars3$4A5tdstr$trSDDa4467Tsa54sTs.
|
||||
metrics:
|
||||
prometheus:
|
||||
enabled: false
|
||||
kubernetes:
|
||||
ingressEndpoint:
|
||||
useDefaultPublishedService: true
|
||||
image: "rancher/library-traefik"
|
||||
tolerations:
|
||||
- key: "CriticalAddonsOnly"
|
||||
operator: "Exists"
|
||||
- key: "node-role.kubernetes.io/master"
|
||||
operator: "Exists"
|
||||
effect: "NoSchedule"
|
||||
|
||||
It's a **Pi**, I don't need prometheus so I disabled it.
|
||||
I also enabled the dashboard and added the credentials we created in the previous step.
|
||||
|
||||
Now, the *Helm Chart* will deploy an ingress and expose the dashboard for you on the value of ``domain``.
|
||||
|
||||
.. note::
|
||||
|
||||
I figured out the values to set in ``valuesContent`` by reading the *Helm Chart*
|
||||
|
||||
K3s
|
||||
===
|
||||
|
||||
If everything is in place, you are ready to proceed.
|
||||
You can install **k3s**, now, but before I get to that step, I will say a few things about **k3s**.
|
||||
|
||||
**K3s** has a smaller feature set than **k8s**, hence the smaller footprint.
|
||||
Read the documentation to see if you need any of the missing features.
|
||||
The second thing to mention is that **k3s** is a one binary deploy that uses **containerd**.
|
||||
That's why we will use the script installation method as it adds the necessary **systemd** configuration for us.
|
||||
It is a nice gesture.
|
||||
|
||||
Let's do that, shall we ?
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ curl -sfL https://get.k3s.io | sh -s - --no-deploy traefik
|
||||
|
||||
.. note::
|
||||
|
||||
We need to make sure that **k3s** does not deploy its own **traefik** but ours.
|
||||
Make sure to add ``--no-deploy traefik`` to our deployment command.
|
||||
|
||||
Point ``traefik.example.com`` to your **Pi** ``IP`` in ``/etc/hosts`` on your machine.
|
||||
|
||||
.. code:: text
|
||||
|
||||
traefik.example.com 192.168.0.5
|
||||
|
||||
When the installation command is done, you should be able to visit `http://traefik.example.com/ <http://traefik.example.com/>`_
|
||||
|
||||
You can get the *kubeconfig* from the *Raspberry Pi*, you can find it in ``/etc/rancher/k3s/k3s.yaml``.
|
||||
You will need to change the ``server`` **IP**.
|
||||
|
||||
Conclusion
|
||||
==========
|
||||
|
||||
If you've made it so far, you should have a **k3s** cluster running on a single *Raspberry Pi*.
|
||||
The next step you might want to look into is disabling the *metrics* server and using the resources for other things.
|
144
posts/kubernetes/deploying-helm-in-your-kubernetes-cluster.org
Normal file
|
@ -0,0 +1,144 @@
|
|||
#+BEGIN_COMMENT
|
||||
.. title: Deploying Helm in your Kubernetes Cluster
|
||||
.. date: 2019-03-16
|
||||
.. updated: 2019-07-02
|
||||
.. status: published
|
||||
.. tags: kubernetes, helm, tiller,
|
||||
.. category: kubernetes
|
||||
.. slug: deploying-helm-in-your-kubernetes-cluster
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: Post explaining how to deploy helm in your kubernetes cluster.
|
||||
.. type: text
|
||||
#+END_COMMENT
|
||||
|
||||
In the previous post in the /kubernetes/ series, we deployed a small /kubernetes/ cluster locally on /KVM/. In future posts we will be deploying more things into the cluster. This will enable us to test different projects, ingresses, service meshes, and more from the open source community, built specifically for /kubernetes/. To help with this future quest, we will be leveraging a kubernetes package manager. You've read it right, helm is a kubernetes package manager. Let's get started shall we ?
|
||||
|
||||
{{{TEASER_END}}}
|
||||
|
||||
* Helm
|
||||
As mentioned above, helm is a kubernetes package manager. You can read more about the helm project on their [[https://helm.sh/][homepage]]. It offers a way to Go template the deployments of service and package them into a portable package that can be installed using the helm command line.
|
||||
|
||||
Generally, you would install the helm binary on your machine and install it into the cluster. In our case, the /RBACs/ deployed in the kubernetes cluster by rancher prevent the default installation from working. Not a problem, we can go around the problem and we will in this post. This is a win for us because this will give us the opportunity to learn more about helm and kubernetes.
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<div class="admonition note">
|
||||
<p class="admonition-title">Note</p>
|
||||
#+END_EXPORT
|
||||
This is not a production recommended way to deploy helm. I would *NOT* deploy helm this way on a production cluster. I would restrict the permissions of any =ServiceAccount= deployed in the cluster to its bare minimum requirements.
|
||||
#+BEGIN_EXPORT html
|
||||
</div>
|
||||
#+END_EXPORT
|
||||
|
||||
* What are we going to do ?
|
||||
We need to understand a bit of what's going on and what we are trying to do. To be able to do that, we need to understand how /helm/ works. From a high level, the =helm= command line tool will deploy a service called /Tiller/ as a =Deployment=.
|
||||
|
||||
The /Tiller/ service talks to the /kubernetes/ /API/ and manages the deployment process while the =helm= command line tool talks to /Tiller/ from its end. So a proper deployment of /Tiller/ in a /kubernetes/ sense is to create a =ServiceAccount=, give the =ServiceAccount= the proper permissions to be able to do what it needs to do and you got yourself a working /Tiller/.
|
||||
|
||||
* Service Account
|
||||
This is where we start by creating a =ServiceAccount=. The =ServiceAccount= looks like this.
|
||||
|
||||
#+BEGIN_SRC yaml
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: tiller
|
||||
namespace: kube-system
|
||||
#+END_SRC
|
||||
|
||||
We now deploy the =ServiceAccount= to the cluster. Save it to =ServiceAccount.yaml=.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ kubectl apply -f ServiceAccount.yaml
|
||||
serviceaccount/tiller created
|
||||
#+END_EXAMPLE
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<div class="admonition note">
|
||||
<p class="admonition-title">Note</p>
|
||||
#+END_EXPORT
|
||||
To read more about =ServiceAccount= and their uses please visit the /kubernetes/ documentation page on the [[https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/][topic]].
|
||||
#+BEGIN_EXPORT html
|
||||
</div>
|
||||
#+END_EXPORT
|
||||
|
||||
* Cluster Role Binding
|
||||
We have /Tiller/ (=ServiceAccount=) deployed in =kube-system= (=namespace=). We need to give it access.
|
||||
|
||||
** Option 1
|
||||
We have the option of either creating a =Role= which would restrict /Tiller/ to the current =namespace=, then tie them together with a =RoleBinding=.
|
||||
|
||||
This option will restrict /Tiller/ to that =namespace= and that =namespace= only.
|
||||
|
||||
** Option 2
|
||||
Another option is to create a =ClusterRole= and tie the =ServiceAccount= to that =ClusterRole= with a =ClusterRoleBinding= and this will give /Tiller/ access across /namespaces/.
|
||||
|
||||
** Option 3
|
||||
In our case, we already know that the =ClusterRole= =cluster-admin= already exists in the cluster so we are going to give /Tiller/ =cluster-admin= access.
|
||||
|
||||
#+BEGIN_SRC yaml
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: tiller
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: cluster-admin
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: tiller
|
||||
namespace: kube-system
|
||||
#+END_SRC
|
||||
|
||||
Save the following in =ClusterRoleBinding.yaml= and then
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ kubectl apply -f ClusterRoleBinding.yaml
|
||||
clusterrolebinding.rbac.authorization.k8s.io/tiller created
|
||||
#+END_EXAMPLE
|
||||
|
||||
* Deploying Tiller
|
||||
Now that we have all the basics deployed, we can finally deploy /Tiller/ in the cluster.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ helm init --service-account tiller --tiller-namespace kube-system --history-max 10
|
||||
Creating ~/.helm
|
||||
Creating ~/.helm/repository
|
||||
Creating ~/.helm/repository/cache
|
||||
Creating ~/.helm/repository/local
|
||||
Creating ~/.helm/plugins
|
||||
Creating ~/.helm/starters
|
||||
Creating ~/.helm/cache/archive
|
||||
Creating ~/.helm/repository/repositories.yaml
|
||||
Adding stable repo with URL: https://kubernetes-charts.storage.googleapis.com
|
||||
Adding local repo with URL: http://127.0.0.1:8879/charts
|
||||
$HELM_HOME has been configured at ~/.helm.
|
||||
|
||||
Tiller (the Helm server-side component) has been installed into your Kubernetes Cluster.
|
||||
|
||||
Please note: by default, Tiller is deployed with an insecure 'allow unauthenticated users' policy.
|
||||
To prevent this, run `helm init` with the --tiller-tls-verify flag.
|
||||
For more information on securing your installation see: https://docs.helm.sh/using_helm/#securing-your-helm-installation
|
||||
Happy Helming!
|
||||
#+END_EXAMPLE
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<div class="admonition note">
|
||||
<p class="admonition-title">Note</p>
|
||||
#+END_EXPORT
|
||||
Please make sure you read the helm installation documentation if you are deploying this in a production environment. You can find how you can make it more secure [[https://helm.sh/docs/using_helm/#securing-your-helm-installation][there]].
|
||||
#+BEGIN_EXPORT html
|
||||
</div>
|
||||
#+END_EXPORT
|
||||
|
||||
After a few minutes, your /Tiller/ deployment — or, as it's commonly known, a =helm install= or a =helm init= — should be complete. If you want to check that everything has been deployed properly you can run.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ helm version
|
||||
Client: &version.Version{SemVer:"v2.13.0", GitCommit:"79d07943b03aea2b76c12644b4b54733bc5958d6", GitTreeState:"clean"}
|
||||
Server: &version.Version{SemVer:"v2.13.0", GitCommit:"79d07943b03aea2b76c12644b4b54733bc5958d6", GitTreeState:"clean"}
|
||||
#+END_EXAMPLE
|
||||
|
||||
Everything seems to be working properly. In future posts, we will be leveraging the power and convenience of helm to expand our cluster's capabilities and learn more about what we can do with kubernetes.
|
|
@ -1,145 +0,0 @@
|
|||
.. title: Deploying Helm in your Kubernetes Cluster
|
||||
.. date: 2019-03-16
|
||||
.. updated: 2019-07-02
|
||||
.. status: published
|
||||
.. tags: kubernetes, helm, tiller,
|
||||
.. category: kubernetes
|
||||
.. slug: deploying-helm-in-your-kubernetes-cluster
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: Post explaining how to deploy helm in your kubernetes cluster.
|
||||
.. type: text
|
||||
|
||||
|
||||
In the previous post in the *kubernetes* series, we deployed a small *kubernetes* cluster locally on *KVM*. In future posts we will be deploying more things into the cluster. This will enable us to test different projects, ingresses, service meshes, and more from the open source community, build specifically for *kubernetes*. To help with this future quest, we will be leveraging a kubernetes package manager. You've read it right, helm is a kubernetes package manager. Let's get started shall we ?
|
||||
|
||||
.. TEASER_END
|
||||
|
||||
Helm
|
||||
====
|
||||
|
||||
As mentioned above, helm is a kubernetes package manager. You can read more about the helm project on their `homepage <https://helm.sh/>`_. It offers a way to Go template the deployments of service and package them into a portable package that can be installed using the helm command line.
|
||||
|
||||
Generally, you would install the helm binary on your machine and install it into the cluster. In our case, the *RBACs* deployed in the kubernetes cluster by rancher prevent the default installation from working. Not a problem, we can go around the problem and we will in this post. This is a win for us because this will give us the opportunity to learn more about helm and kubernetes.
|
||||
|
||||
.. note::
|
||||
|
||||
This is not a production recommended way to deploy helm. I would **NOT** deploy helm this way on a production cluster. I would restrict the permissions of any ``ServiceAccount`` deployed in the cluster to its bare minimum requirements.
|
||||
|
||||
What are we going to do ?
|
||||
=========================
|
||||
|
||||
We need to understand a bit of what's going on and what we are trying to do. To be able to do that, we need to understand how *helm* works. From a high level, the ``helm`` command line tool will deploy a service called *Tiller* as a ``Deployment``.
|
||||
|
||||
The *Tiller* service talks to the *kubernetes* *API* and manages the deployment process while the ``helm`` command line tool talks to *Tiller* from its end. So a proper deployment of *Tiller* in a *kubernetes* sense is to create a ``ServiceAccount``, give the ``ServiceAccount`` the proper permissions to be able to do what it needs to do and you got yourself a working *Tiller*.
|
||||
|
||||
Service Account
|
||||
===============
|
||||
|
||||
This is where we start by creating a ``ServiceAccount``. The ``ServiceAccount`` looks like this.
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: tiller
|
||||
namespace: kube-system
|
||||
|
||||
We now deploy the ``ServiceAccount`` to the cluster. Save it to ``ServiceAccount.yaml``.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ kubectl apply -f ServiceAccount.yaml
|
||||
serviceaccount/tiller created
|
||||
|
||||
.. note::
|
||||
|
||||
To read more about ``ServiceAccount`` and their uses please visit the *kubernetes* documentation page on the `topic <https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/>`_.
|
||||
|
||||
Cluster Role Binding
|
||||
====================
|
||||
|
||||
We have *Tiller* (``ServiceAccount``) deployed in ``kube-system`` (``namespace``). We need to give it access.
|
||||
|
||||
Option 1
|
||||
--------
|
||||
|
||||
We have the option of either creating a ``Role`` which would restrict *Tiller* to the current ``namespace``, then tie them together with a ``RoleBinding``.
|
||||
|
||||
This option will restrict *Tiller* to that ``namespace`` and that ``namespace`` only.
|
||||
|
||||
Option 2
|
||||
--------
|
||||
|
||||
Another option is to create a ``ClusterRole`` and tie the ``ServiceAccount`` to that ``ClusterRole`` with a ``ClusterRoleBinding`` and this will give *Tiller* access across *namespaces*.
|
||||
|
||||
Option 3
|
||||
--------
|
||||
|
||||
In our case, we already know that the ``ClusterRole`` ``cluster-admin`` already exists in the cluster so we are going to give *Tiller* ``cluster-admin`` access.
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: tiller
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: cluster-admin
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: tiller
|
||||
namespace: kube-system
|
||||
|
||||
Save the following in ``ClusterRoleBinding.yaml`` and then
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ kubectl apply -f ClusterRoleBinding.yaml
|
||||
clusterrolebinding.rbac.authorization.k8s.io/tiller created
|
||||
|
||||
|
||||
Deploying Tiller
|
||||
================
|
||||
|
||||
Now that we have all the basics deployed, we can finally deploy *Tiller* in the cluster.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ helm init --service-account tiller --tiller-namespace kube-system --history-max 10
|
||||
Creating ~/.helm
|
||||
Creating ~/.helm/repository
|
||||
Creating ~/.helm/repository/cache
|
||||
Creating ~/.helm/repository/local
|
||||
Creating ~/.helm/plugins
|
||||
Creating ~/.helm/starters
|
||||
Creating ~/.helm/cache/archive
|
||||
Creating ~/.helm/repository/repositories.yaml
|
||||
Adding stable repo with URL: https://kubernetes-charts.storage.googleapis.com
|
||||
Adding local repo with URL: http://127.0.0.1:8879/charts
|
||||
$HELM_HOME has been configured at ~/.helm.
|
||||
|
||||
Tiller (the Helm server-side component) has been installed into your Kubernetes Cluster.
|
||||
|
||||
Please note: by default, Tiller is deployed with an insecure 'allow unauthenticated users' policy.
|
||||
To prevent this, run `helm init` with the --tiller-tls-verify flag.
|
||||
For more information on securing your installation see: https://docs.helm.sh/using_helm/#securing-your-helm-installation
|
||||
Happy Helming!
|
||||
|
||||
.. note::
|
||||
|
||||
Please make sure you read the helm installation documentation if you are deploying this in a production environment. You can find how you can make it more secure `there <https://helm.sh/docs/using_helm/#securing-your-helm-installation>`_.
|
||||
|
||||
After a few minutes, your *Tiller* deployment or as it's commonly known as a ``helm install`` or a ``helm init``. If you want to check that everything has been deployed properly you can run.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ helm version
|
||||
Client: &version.Version{SemVer:"v2.13.0", GitCommit:"79d07943b03aea2b76c12644b4b54733bc5958d6", GitTreeState:"clean"}
|
||||
Server: &version.Version{SemVer:"v2.13.0", GitCommit:"79d07943b03aea2b76c12644b4b54733bc5958d6", GitTreeState:"clean"}
|
||||
|
||||
Everything seems to be working properly. In future posts, we will be leveraging the power and convenience of helm to expand our cluster's capabilities and learn more about what we can do with kubernetes.
|
229
posts/kubernetes/local-kubernetes-cluster-on-kvm.org
Normal file
|
@ -0,0 +1,229 @@
|
|||
#+BEGIN_COMMENT
|
||||
.. title: Local Kubernetes Cluster on KVM
|
||||
.. date: 2019-02-17
|
||||
.. updated: 2019-06-21
|
||||
.. status: published
|
||||
.. tags: kubernetes, rancher, rancheros, kvm, libvirt,
|
||||
.. category: kubernetes
|
||||
.. slug: local-kubernetes-cluster-on-kvm
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: Deploying a kubernetes cluster locally on KVM.
|
||||
.. type: text
|
||||
#+END_COMMENT
|
||||
|
||||
I wanted to explore /kubernetes/ even more for myself and for this blog. I've worked on pieces of this at work but not the totality of the work which I would like to understand for myself. I wanted, also to explore new tools and ways to leverage the power of /kubernetes/.
|
||||
|
||||
So far, I have been using /minikube/ to do the deployments but there is an inherent restriction that comes with using a single bundled node. Sure, it is easy to get it up and running but at some point I had to use =nodePort= to go around the IP restriction. This is a restriction that you will have in an actual /kubernetes/ cluster but I will show you later how to go around it. For now, let's just get a local cluster up and running.
|
||||
|
||||
{{{TEASER_END}}}
|
||||
|
||||
* Objective
|
||||
I needed a local /kubernetes/ cluster using all open source tools and easy to deploy. So I went with using /KVM/ as the hypervisor layer and installed =virt-manager= for shallow management. As an OS, I wanted something light and made for /kubernetes/. As I already know of Rancher (being an easy way to deploy /kubernetes/ and they have done a great job so far since the launch of their Rancher 2.0) I decided to try /RancherOS/. So let's see how all that works together.
|
||||
|
||||
* Requirements
|
||||
Let's start by thinking about what we actually need. Rancher, the dashboard they offer is going to need a VM by itself and they [[https://rancher.com/docs/rancher/v2.x/en/quick-start-guide/deployment/quickstart-vagrant/][recommend]] /4GB of RAM/. I only have /16GB of RAM/ on my machine so I'll have to do the math to see how much I can afford to give this /dashboard/ and /manager/. By looking at the /RancherOS/ hardware [[https://rancher.com/docs/os/v1.x/en/][requirements]], I can tell that by giving each node /2GB/ of RAM I should be able to host a /3 node cluster/ and with /2/ more for the /dashboard/ that puts me right on /8GB of RAM/. So we need to create /4 VMs/ with /2GB of RAM/ each.
|
||||
|
||||
* Installing RancherOS
|
||||
Once all 4 nodes have been created, when you boot into the /RancherOS/ [[https://rancher.com/docs/os/v1.x/en/installation/running-rancheros/workstation/boot-from-iso/][ISO]] do the following.
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<div class="admonition note">
|
||||
<p class="admonition-title">Note</p>
|
||||
#+END_EXPORT
|
||||
Because I was using /libvirt/, I was able to do =virsh console <vm>= and
|
||||
run these commands.
|
||||
#+BEGIN_EXPORT html
|
||||
</div>
|
||||
#+END_EXPORT
|
||||
|
||||
* Virsh Console
|
||||
If you are running these VMs on /libvirt/, then you can console into the box and run =vi=.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
# virsh list
|
||||
Id Name State
|
||||
-------------------------
|
||||
21 kube01 running
|
||||
22 kube02 running
|
||||
23 kube03 running
|
||||
24 rancher running
|
||||
|
||||
# virsh console rancher
|
||||
#+END_EXAMPLE
|
||||
|
||||
* Configuration
|
||||
If you read the /RancherOS/ [[https://rancher.com/docs/os/v1.x/en/][documentation]], you'll find out that you can configure the /OS/ with a =YAML= configuration file so let's do that.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ vi cloud-config.yml
|
||||
#+END_EXAMPLE
|
||||
|
||||
And that file should hold.
|
||||
|
||||
#+BEGIN_SRC yaml
|
||||
---
|
||||
hostname: rancher.kube.loco
|
||||
ssh_authorized_keys:
|
||||
- ssh-rsa AAA...
|
||||
rancher:
|
||||
network:
|
||||
interfaces:
|
||||
eth0:
|
||||
address: 192.168.122.5/24
|
||||
dhcp: false
|
||||
gateway: 192.168.122.1
|
||||
mtu: 1500
|
||||
#+END_SRC
|
||||
|
||||
Make sure that your *public* /ssh key/ is replaced in the example above and if you have a different network configuration for your VMs, change the network configuration here.
|
||||
|
||||
After you save that file, install the /OS/.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ sudo ros install -c cloud-config.yml -d /dev/sda
|
||||
#+END_EXAMPLE
|
||||
|
||||
Do the same for the rest of the servers and their names and IPs should be as follows (if you are following this tutorial):
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
192.168.122.5 rancher.kube.loco
|
||||
192.168.122.10 kube01.kube.loco
|
||||
192.168.122.11 kube02.kube.loco
|
||||
192.168.122.12 kube03.kube.loco
|
||||
#+END_EXAMPLE
|
||||
|
||||
* Post Installation Configuration
|
||||
After /RancherOS/ has been installed, one will need to configure =/etc/hosts= and it should look like the following if one is working off of the /Rancher/ box.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ sudo vi /etc/hosts
|
||||
#+END_EXAMPLE
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
127.0.0.1 rancher.kube.loco
|
||||
192.168.122.5 rancher.kube.loco
|
||||
192.168.122.10 kube01.kube.loco
|
||||
192.168.122.11 kube02.kube.loco
|
||||
192.168.122.12 kube03.kube.loco
|
||||
#+END_EXAMPLE
|
||||
|
||||
Do the same on the rest of the servers while changing the =127.0.0.1= hostname to the host of the server.
|
||||
|
||||
* Installing Rancher
|
||||
At this point, I have to stress a few facts:
|
||||
|
||||
- This is not the Rancher recommended way to deploy /kubernetes/.
|
||||
- The recommended way is of course [[https://rancher.com/docs/rke/v0.1.x/en/][RKE]].
|
||||
- This is for testing, so I did not take into consideration backup of anything.
|
||||
- There are ways to backup Rancher configuration by mounting storage from the =rancher= docker container.
|
||||
|
||||
If those points are understood, let's go ahead and deploy Rancher.
|
||||
First, =$ ssh rancher@192.168.122.5= then:
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
[rancher@rancher ~]$ docker run -d --restart=unless-stopped -p 80:80 -p 443:443 rancher/rancher
|
||||
#+END_EXAMPLE
|
||||
|
||||
Give it a few minutes for the container to come up and the application as well. Meanwhile configure your =/etc/hosts= file on your machine.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
192.168.122.5 rancher.kube.loco
|
||||
#+END_EXAMPLE
|
||||
|
||||
Now that all that is out of the way, you can login to [[https://rancher.kube.loco]] and set your =admin= password and the =url= for Rancher.
|
||||
|
||||
* Deploying Kubernetes
|
||||
Now that everything is ready, let's deploy /kubernetes/ the easy way.
|
||||
|
||||
At this point you should be greeted with a page that looks like the
|
||||
following.
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<a class="reference" href="/images/local-kubernetes-cluster-on-kvm/01-add-cluster.png" alt="Add Cluster Page" align="center">
|
||||
<img src="/images/local-kubernetes-cluster-on-kvm/01-add-cluster.thumbnail.png">
|
||||
#+END_EXPORT
|
||||
#+BEGIN_EXPORT html
|
||||
</a>
|
||||
#+END_EXPORT
|
||||
|
||||
Click on *Add Cluster*
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<a class="reference" href="/images/local-kubernetes-cluster-on-kvm/02-custom-cluster.png" alt="Custom Cluster Page" align="center">
|
||||
<img src="
|
||||
/images/local-kubernetes-cluster-on-kvm/02-custom-cluster.thumbnail.png">
|
||||
#+END_EXPORT
|
||||
#+BEGIN_EXPORT html
|
||||
</a>
|
||||
#+END_EXPORT
|
||||
|
||||
Make sure you choose *Custom* as a /provider/. Then fill in the *Cluster Name*; in our case we'll call it *kube*.
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<a class="reference" href="/images/local-kubernetes-cluster-on-kvm/03-calico-networkProvider.png" alt="Network Provider: Calico (Optional)">
|
||||
<img src="/images/local-kubernetes-cluster-on-kvm/03-calico-networkProvider.thumbnail.png">
|
||||
#+END_EXPORT
|
||||
#+BEGIN_EXPORT html
|
||||
</a>
|
||||
#+END_EXPORT
|
||||
|
||||
Optionally, you can choose your *Network Provider*, in my case I chose *Calico*. Then I clicked on *show advanced* at the bottom right corner then expanded the /newly shown tab/ *Advanced Cluster Options*.
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<a class="reference" href="/images/local-kubernetes-cluster-on-kvm/04-nginx-ingressDisabled.png" alt="Nginx Ingress: Disabled" align="center">
|
||||
<img src="/images/local-kubernetes-cluster-on-kvm/04-nginx-ingressDisabled.thumbnail.png">
|
||||
#+END_EXPORT
|
||||
#+BEGIN_EXPORT html
|
||||
</a>
|
||||
#+END_EXPORT
|
||||
|
||||
We will disable the *Nginx Ingress* and the *Pod Security Policy Support* for the time being. This will become more apparent why in the future, hopefully. Then hit *Next*.
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<a class="reference" href="/images/local-kubernetes-cluster-on-kvm/05-customize-nodes.png" alt="Customize Nodes" align="center">
|
||||
<img src="/images/local-kubernetes-cluster-on-kvm/05-customize-nodes.thumbnail.png">
|
||||
#+END_EXPORT
|
||||
#+BEGIN_EXPORT html
|
||||
</a>
|
||||
#+END_EXPORT
|
||||
|
||||
Make sure that you select all *3 Node Roles*. Set the *Public Address* and the *Node Name* to the first node and then copy the command and paste it on the /first/ node.
|
||||
|
||||
Do the same for /all the rest/. Once the first docker image gets downloaded and ran you should see a message pop at the bottom.
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<a class="reference" href="/images/local-kubernetes-cluster-on-kvm/06-registered-nodes.png" alt="Registered Nodes" align="center">
|
||||
<img src="/images/local-kubernetes-cluster-on-kvm/06-registered-nodes.thumbnail.png">
|
||||
#+END_EXPORT
|
||||
#+BEGIN_EXPORT html
|
||||
</a>
|
||||
#+END_EXPORT
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<div class="admonition warning">
|
||||
<p class="admonition-title">warning</p>
|
||||
#+END_EXPORT
|
||||
Do *NOT* click /done/ until you see all /3 nodes registered/.
|
||||
#+BEGIN_EXPORT html
|
||||
</div>
|
||||
#+END_EXPORT
|
||||
|
||||
* Finalizing
|
||||
Now that you have /3 registered nodes/, click *Done* and go grab yourself a cup of coffee. Maybe take a long walk, this will take time. Or if you are curious like me, you'd be looking at the logs, checking the containers in a quad pane =tmux= session.
|
||||
|
||||
After a long time has passed, our story ends with a refresh and a welcome with this page.
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<a class="reference" href="/images/local-kubernetes-cluster-on-kvm/07-kubernetes-cluster.png" alt="Kubernetes Cluster" align="center">
|
||||
<img src="/images/local-kubernetes-cluster-on-kvm/07-kubernetes-cluster.thumbnail.png">
|
||||
#+END_EXPORT
|
||||
#+BEGIN_EXPORT html
|
||||
</a>
|
||||
#+END_EXPORT
|
||||
|
||||
Welcome to your Kubernetes Cluster.
|
||||
|
||||
* Conclusion
|
||||
At this point, you can check that all the nodes are healthy and you got yourself a kubernetes cluster. In future blog posts we will explore an avenue to deploy /multiple ingress controllers/ on the same cluster on the same =port: 80= by giving them each an IP external to the cluster.
|
||||
|
||||
But for now, you got yourself a kubernetes cluster to play with. Enjoy.
|
|
@ -1,224 +0,0 @@
|
|||
.. title: Local Kubernetes Cluster on KVM
|
||||
.. date: 2019-02-17
|
||||
.. updated: 2019-06-21
|
||||
.. status: published
|
||||
.. tags: kubernetes, rancher, rancheros, kvm, libvirt,
|
||||
.. category: kubernetes
|
||||
.. slug: local-kubernetes-cluster-on-kvm
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: Deploying a kubernetes cluster locally on KVM.
|
||||
.. type: text
|
||||
|
||||
|
||||
I wanted to explore *kubernetes* even more for myself and for this blog. I've worked on pieces of this at work but not the totality of the work which I would like to understand for myself. I wanted, also to explore new tools and ways to leverage the power of *kubernetes*.
|
||||
|
||||
So far, I have been using *minikube* to do the deployments but there is an inherit restriction that comes with using a single bundled node. Sure, it is easy to get it up and running but at some point I had to use ``nodePort`` to go around the IP restriction. This is a restriction that you will have in an actual *kubernetes* cluster but I will show you later how to go around it. For now, let's just get a local cluster up and running.
|
||||
|
||||
.. TEASER_END
|
||||
|
||||
Objective
|
||||
=========
|
||||
|
||||
I needed a local *kubernetes* cluster using all open source tools and easy to deploy. So I went with using *KVM* as the hypervisor layer and installed ``virt-manager`` for shallow management. As an OS, I wanted something light and made for *kubernetes*. As I already know of Rancher (being an easy way to deploy *kubernetes* and they have done a great job so far since the launch of their Rancer 2.0) I decided to try *RancherOS*. So let's see how all that works together.
|
||||
|
||||
Requirements
|
||||
============
|
||||
|
||||
Let's start by thinking about what we actually need. Rancher, the dashboard they offer is going to need a VM by itself and they `recommend <https://rancher.com/docs/rancher/v2.x/en/quick-start-guide/deployment/quickstart-vagrant/>`_ *4GB of RAM*. I only have *16GB of RAM* on my machine so I'll have to do the math to see how much I can afford to give this *dashboard* and *manager*. By looking at the *RancherOS* hardware `requirements <https://rancher.com/docs/os/v1.x/en/>`_, I can tell that by giving a each node *2GB* of RAM I should be able to host a *3 node cluster* and with *2* more for the *dashboard* that puts me right on *8GB of RAM*. So we need to create *4 VMs* with *2GB of RAM* each.
|
||||
|
||||
Installing RancherOS
|
||||
====================
|
||||
|
||||
Once all 4 nodes have been created, when you boot into the *RancherOS* `ISO <https://rancher.com/docs/os/v1.x/en/installation/running-rancheros/workstation/boot-from-iso/>`_ do the following.
|
||||
|
||||
.. note::
|
||||
|
||||
Because I was using *libvirt*, I was able to do ``virsh console <vm>`` and run these commands.
|
||||
|
||||
Virsh Console
|
||||
=============
|
||||
|
||||
If you are running these VMs on *libvirt*, then you can console into the box and run ``vi``.
|
||||
|
||||
.. code:: text
|
||||
|
||||
# virsh list
|
||||
Id Name State
|
||||
-------------------------
|
||||
21 kube01 running
|
||||
22 kube02 running
|
||||
23 kube03 running
|
||||
24 rancher running
|
||||
|
||||
# virsh console rancher
|
||||
|
||||
Configuration
|
||||
=============
|
||||
|
||||
If you read the *RancherOS* `documentation <https://rancher.com/docs/os/v1.x/en/>`_, you'll find out that you can configure the *OS* with a ``YAML`` configuration file so let's do that.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ vi cloud-config.yml
|
||||
|
||||
And that file should hold.
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
---
|
||||
hostname: rancher.kube.loco
|
||||
ssh_authorized_keys:
|
||||
- ssh-rsa AAA...
|
||||
rancher:
|
||||
network:
|
||||
interfaces:
|
||||
eth0:
|
||||
address: 192.168.122.5/24
|
||||
dhcp: false
|
||||
gateway: 192.168.122.1
|
||||
mtu: 1500
|
||||
|
||||
Make sure that your **public** *ssh key* is replaced in the example before and if you have a different network configuration for your VMs, change the network configuration here.
|
||||
|
||||
After you save that file, install the *OS*.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ sudo ros install -c cloud-config.yml -d /dev/sda
|
||||
|
||||
Do the same for the rest of the servers and their names and IPs should be as follows (if you are following this tutorial):
|
||||
|
||||
.. code:: text
|
||||
|
||||
192.168.122.5 rancher.kube.loco
|
||||
192.168.122.10 kube01.kube.loco
|
||||
192.168.122.11 kube02.kube.loco
|
||||
192.168.122.12 kube03.kube.loco
|
||||
|
||||
Post Installation Configuration
|
||||
===============================
|
||||
|
||||
After *RancherOS* has been installed, one will need to configure ``/etc/hosts`` and it should look like the following if one is working off of the *Rancher* box.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ sudo vi /etc/hosts
|
||||
|
||||
.. code:: text
|
||||
|
||||
127.0.0.1 rancher.kube.loco
|
||||
192.168.122.5 rancher.kube.loco
|
||||
192.168.122.10 kube01.kube.loco
|
||||
192.168.122.11 kube02.kube.loco
|
||||
192.168.122.12 kube03.kube.loco
|
||||
|
||||
Do the same on the rest of the servers while changing the ``127.0.0.1`` hostname to the host of the server.
|
||||
|
||||
Installing Rancher
|
||||
==================
|
||||
|
||||
At this point, I have to stress a few facts:
|
||||
|
||||
- This is not the Rancher recommended way to deploy *kubernetes*.
|
||||
|
||||
- The recommended way is of course `RKE <https://rancher.com/docs/rke/v0.1.x/en/>`_.
|
||||
|
||||
- This is for testing, so I did not take into consideration backup of anything.
|
||||
|
||||
- There are ways to backup Rancher configuration by mounting storage from the ``rancher`` docker container.
|
||||
|
||||
If those points are understood, let's go ahead and deploy Rancher.
|
||||
First, ``$ ssh rancher@192.168.122.5`` then:
|
||||
|
||||
.. code:: text
|
||||
|
||||
[rancher@rancher ~]$ docker run -d --restart=unless-stopped -p 80:80 -p 443:443 rancher/rancher
|
||||
|
||||
Give it a few minutes for the container to come up and the application as well. Meanwhile configure your ``/etc/hosts`` file on your machine.
|
||||
|
||||
.. code:: text
|
||||
|
||||
192.168.122.5 rancher.kube.loco
|
||||
|
||||
Now that all that is out of the way, you can login to https://rancher.kube.loco and set your ``admin`` password and the ``url`` for Rancher.
|
||||
|
||||
Deploying Kubernetes
|
||||
====================
|
||||
|
||||
Now that everything is ready, let's deploy *kubernetes* the easy way.
|
||||
|
||||
At this point you should be greeted with a page that looks like the following.
|
||||
|
||||
.. thumbnail:: /images/local_kubernetes_cluster_on_kvm/01-add_cluster.png
|
||||
:align: center
|
||||
:alt: Add Cluster Page
|
||||
|
||||
|
||||
Click on the **Add Cluser**
|
||||
|
||||
|
||||
.. thumbnail:: /images/local_kubernetes_cluster_on_kvm/02-custom_cluster.png
|
||||
:align: center
|
||||
:alt: Custom Cluster Page
|
||||
|
||||
|
||||
Make sure you choose **Custom** as a *provider*. Then fill in the **Cluser Name** in our case we'll call it **kube**.
|
||||
|
||||
|
||||
.. thumbnail:: /images/local_kubernetes_cluster_on_kvm/03-calico_networkProvider.png
|
||||
:align: center
|
||||
:alt: Network Provider: Calico (Optional)
|
||||
|
||||
|
||||
Optionally, you can choose your **Network Providor**, in my case I chose **Calico**. Then I clicked on **show advanced** at the bottom right corner then expanded the *newly shown tab* **Advanced Cluster Options**.
|
||||
|
||||
|
||||
.. thumbnail:: /images/local_kubernetes_cluster_on_kvm/04-nginx_ingressDisabled.png
|
||||
:align: center
|
||||
:alt: Nginx Ingress: Disabled
|
||||
|
||||
|
||||
We will disable the **Nginx Ingress** and the **Pod Security Policy Support** for the time being. This will become more apparent why in the future, hopefully. Then hit **Next**.
|
||||
|
||||
|
||||
.. thumbnail:: /images/local_kubernetes_cluster_on_kvm/05-customer_nodes.png
|
||||
:align: center
|
||||
:alt: Customize Nodes
|
||||
|
||||
|
||||
Make sure that you select all **3 Node Roles**. Set the **Public Address** and the **Node Name** to the first node and then copy the command and paste it on the *first* node.
|
||||
|
||||
Do the same for *all the rest*. Once the first docker image gets downloaded and ran you should see a message pop at the bottom.
|
||||
|
||||
|
||||
.. thumbnail:: /images/local_kubernetes_cluster_on_kvm/06-registered_nodes.png
|
||||
:align: center
|
||||
:alt: Registered Nodes
|
||||
|
||||
|
||||
.. warning::
|
||||
|
||||
Do **NOT** click *done* until you see all *3 nodes registered*.
|
||||
|
||||
|
||||
Finalizing
|
||||
==========
|
||||
|
||||
Now that you have *3 registered nodes*, click **Done** and go grab yourself a cup of coffee. Maybe take a long walk, this will take time. Or if you are curious like me, you'd be looking at the logs, checking the containers in a quad pane ``tmux`` session.
|
||||
|
||||
After a long time has passed, our story ends with a refresh and a welcome with this page.
|
||||
|
||||
.. thumbnail:: /images/local_kubernetes_cluster_on_kvm/07-kubernetes_cluster.png
|
||||
:align: center
|
||||
:alt: Kubernetes Cluster
|
||||
|
||||
|
||||
Welcome to your Kubernetes Cluster.
|
||||
|
||||
Conclusion
|
||||
==========
|
||||
|
||||
At this point, you can check that all the nodes are healthy and you got yourself a kubernetes cluster. In future blog posts we will explore an avenue to deploy *multiple ingress controllers* on the same cluster on the same ``port: 80`` by giving them each an IP external to the cluster.
|
||||
|
||||
But for now, you got yourself a kubernetes cluster to play with. Enjoy.
|
||||
|
173
posts/kubernetes/minikube-setup.org
Normal file
|
@ -0,0 +1,173 @@
|
|||
#+BEGIN_COMMENT
|
||||
.. title: Minikube Setup
|
||||
.. date: 2019-02-09
|
||||
.. updated: 2019-07-02
|
||||
.. status: published
|
||||
.. tags: minikube, kubernetes, ingress, ingress-controller,
|
||||
.. category: kubernetes
|
||||
.. slug: minikube-setup
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: A quick and dirty minikube setup.
|
||||
.. type: text
|
||||
#+END_COMMENT
|
||||
|
||||
If you have ever worked with /kubernetes/, you'd know that minikube out of the box does not give you what you need for a quick setup. I'm sure you can go =minikube start=, everything's up... Great... =kubectl get pods -n kube-system=... It works, let's move on...
|
||||
|
||||
But what if it's not a case of "let's move on to something else"? We need to look at this as a local test environment and what it is capable of. We can learn so much from it before applying to the lab. But, as always, there are a few tweaks we need to perform to give it the magic it needs to be a real environment.
|
||||
|
||||
{{{TEASER_END}}}
|
||||
|
||||
* Prerequisites
|
||||
If you are looking into /kubernetes/, I would suppose that you know your linux's ABCs and you can install and configure /minikube/ and its prerequisites prior to the beginning of this tutorial.
|
||||
|
||||
You can find the guide to install /minikube/ and configure it on the /minikube/ [[https://kubernetes.io/docs/setup/minikube/][webpage]].
|
||||
|
||||
Anyway, make sure you have /minikube/ installed, /kubectl/ and whatever driver dependencies you need to run it under that driver. In my case, I am using /kvm2/ which will be reflected in the commands given to start /minikube/.
|
||||
|
||||
* Starting /minikube/
|
||||
Let's start minikube.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ minikube start --vm-driver=kvm2
|
||||
Starting local Kubernetes v1.13.2 cluster...
|
||||
Starting VM...
|
||||
Getting VM IP address...
|
||||
Moving files into cluster...
|
||||
Setting up certs...
|
||||
Connecting to cluster...
|
||||
Setting up kubeconfig...
|
||||
Stopping extra container runtimes...
|
||||
Starting cluster components...
|
||||
Verifying apiserver health ...
|
||||
Kubectl is now configured to use the cluster.
|
||||
Loading cached images from config file.
|
||||
|
||||
|
||||
Everything looks great. Please enjoy minikube!
|
||||
#+END_EXAMPLE
|
||||
|
||||
Great... At this point we have a cluster that's running, let's verify.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
# Id Name State
|
||||
--------------------------
|
||||
3 minikube running
|
||||
#+END_EXAMPLE
|
||||
|
||||
For me, I can check =virsh=. If you used /VirtualBox/ you can check that.
|
||||
|
||||
We can also test with =kubectl=.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ kubectl version
|
||||
Client Version: version.Info{Major:"1", Minor:"13", GitVersion:"v1.13.3", GitCommit:"721bfa751924da8d1680787490c54b9179b1fed0", GitTreeState:"clean", BuildDate:"2019-02-01T20:08:12Z", GoVersion:"go1.11.5", Compiler:"gc", Platform:"linux/amd64"}
|
||||
Server Version: version.Info{Major:"1", Minor:"13", GitVersion:"v1.13.2", GitCommit:"cff46ab41ff0bb44d8584413b598ad8360ec1def", GitTreeState:"clean", BuildDate:"2019-01-10T23:28:14Z", GoVersion:"go1.11.4", Compiler:"gc", Platform:"linux/amd64"}
|
||||
#+END_EXAMPLE
|
||||
|
||||
Now what ? Well, now we deploy a few addons that we need to deploy in production as well for a functioning /kubernetes/ cluster.
|
||||
|
||||
Let's check the list of add-ons available out of the box.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ minikube addons list
|
||||
- addon-manager: enabled
|
||||
- dashboard: enabled
|
||||
- default-storageclass: enabled
|
||||
- efk: disabled
|
||||
- freshpod: disabled
|
||||
- gvisor: disabled
|
||||
- heapster: enabled
|
||||
- ingress: enabled
|
||||
- kube-dns: disabled
|
||||
- metrics-server: enabled
|
||||
- nvidia-driver-installer: disabled
|
||||
- nvidia-gpu-device-plugin: disabled
|
||||
- registry: disabled
|
||||
- registry-creds: disabled
|
||||
- storage-provisioner: enabled
|
||||
- storage-provisioner-gluster: disabled
|
||||
#+END_EXAMPLE
|
||||
|
||||
Make sure you have /dashboard/, /heapster/, /ingress/ and /metrics-server/ *enabled*. You can enable add-ons with =minikube addons enable=.
|
||||
|
||||
* What's the problem then ?
|
||||
Here's the problem that comes next. How do you access the dashboard or anything running in the cluster ? Everyone online suggests you proxy a port and you access the dashboard. Is that really how it should work ? Is that how production systems do it ?
|
||||
|
||||
The answer is of course not. They use different types of /ingresses/ at their disposal. In this case, /minikube/ was kind enough to provide one for us, the default /kubernetes ingress controller/. It's a great option for an ingress controller that's solid enough for production use. Fine, a lot of babble. Yes sure but this babble is important. So how do we access stuff on a cluster ?
|
||||
|
||||
To answer that question we need to understand a few things. Yes, you can use a =NodePort= on your service and access it that way. But do you really want to manage these ports ? What's in use and what's not ? Besides, wouldn't it be better if you can use one port for all of the services ? How you may ask ?
|
||||
|
||||
We've been doing it for years, and by we I mean /ops/ and /devops/ people. You have to understand that the kubernetes ingress controller is simply an /nginx/ under the covers. We've always been able to configure /nginx/ to listen for a specific /hostname/ and redirect it where we want to. It shouldn't be that hard to do right ?
|
||||
|
||||
Well this is what an ingress controller does. It uses the default ports to route traffic from the outside according to hostname called. Let's look at our cluster and see what we need.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ kubectl get services --all-namespaces
|
||||
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
default kubernetes ClusterIP 10.96.0.1 443/TCP 17m
|
||||
kube-system default-http-backend NodePort 10.96.77.15 80:30001/TCP 17m
|
||||
kube-system heapster ClusterIP 10.100.193.109 80/TCP 17m
|
||||
kube-system kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP 17m
|
||||
kube-system kubernetes-dashboard ClusterIP 10.106.156.91 80/TCP 17m
|
||||
kube-system metrics-server ClusterIP 10.103.137.86 443/TCP 17m
|
||||
kube-system monitoring-grafana NodePort 10.109.127.87 80:30002/TCP 17m
|
||||
kube-system monitoring-influxdb ClusterIP 10.106.174.177 8083/TCP,8086/TCP 17m
|
||||
#+END_EXAMPLE
|
||||
|
||||
In my case, you can see that I have a few things that are in =NodePort= configuration and you can access them on those ports. But the /kubernetes-dashboard/ is a =ClusterIP= and we can't get to it. So let's change that by adding an ingress to the service.
|
||||
|
||||
* Ingress
|
||||
An ingress is an object of kind =ingress= that configures the ingress controller of your choice.
|
||||
|
||||
#+BEGIN_SRC yaml
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
namespace: kube-system
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/rewrite-target: /
|
||||
spec:
|
||||
rules:
|
||||
- host: dashboard.kube.local
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: kubernetes-dashboard
|
||||
servicePort: 80
|
||||
#+END_SRC
|
||||
|
||||
Save that to a file =kube-dashboard-ingress.yaml= or something then run.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ kubectl apply -f kube-dashboard-ingress.yaml
|
||||
ingress.extensions/kubernetes-dashboard created
|
||||
#+END_EXAMPLE
|
||||
|
||||
And now we get this.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ kubectl get ingress --all-namespaces
|
||||
NAMESPACE NAME HOSTS ADDRESS PORTS AGE
|
||||
kube-system kubernetes-dashboard dashboard.kube.local 80 17s
|
||||
#+END_EXAMPLE
|
||||
|
||||
Now all we need to know is the IP of our kubernetes cluster of /one/.
|
||||
Don't worry, /minikube/ makes it easy for us.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ minikube ip
|
||||
192.168.39.79
|
||||
#+END_EXAMPLE
|
||||
|
||||
Now let's add that host to our =/etc/hosts= file.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
192.168.39.79 dashboard.kube.local
|
||||
#+END_EXAMPLE
|
||||
|
||||
Now if you go to [[http://dashboard.kube.local]] in your browser, you will be welcomed with the dashboard. How is that so ? Well as I explained, point it to the nodes of the cluster with the proper hostname and it works.
|
||||
|
||||
You can deploy multiple services that can be accessed this way, you can also integrate this with a service mesh or a service discovery which could find the up and running nodes that can redirect you to point to at all times. But this is the clean way to expose services outside the cluster.
|
|
@ -1,179 +0,0 @@
|
|||
.. title: Minikube Setup
|
||||
.. date: 2019-02-09
|
||||
.. updated: 2019-07-02
|
||||
.. status: published
|
||||
.. tags: minikube, kubernetes, ingress, ingress-controller,
|
||||
.. category: kubernetes
|
||||
.. slug: minikube-setup
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: A quick and dirty minikube setup.
|
||||
.. type: text
|
||||
|
||||
|
||||
If you have ever worked with *kubernetes*, you'd know that minikube out of the box does not give you what you need for a quick setup. I'm sure you can go ``minikube start``, everything's up... Great... ``kubectl get pods -n kube-system``... It works, let's move on...
|
||||
|
||||
But what if it's not let's move on to something else. We need to look at this as a local test environment in capabilities. We can learn so much from it before applying to the lab. But, as always, there are a few tweaks we need to perform to give it the magic it needs to be a real environment.
|
||||
|
||||
.. TEASER_END
|
||||
|
||||
Prerequisites
|
||||
=============
|
||||
|
||||
If you are looking into *kubernetes*, I would suppose that you know your linux's ABCs and you can install and configure *minikube* and its prerequisites prior to the beginning of this tutorial.
|
||||
|
||||
You can find the guide to install *minikube* and configure it on the *minikube* `webpage <https://kubernetes.io/docs/setup/minikube/>`_.
|
||||
|
||||
Anyway, make sure you have *minikube* installed, *kubectl* and whatever driver dependencies you need to run it under that driver. In my case, I am using kvm2 which will be reflected in the commands given to start *minikube*.
|
||||
|
||||
Starting *minikube*
|
||||
===================
|
||||
|
||||
Let's start minikube.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ minikube start --vm-driver=kvm2
|
||||
Starting local Kubernetes v1.13.2 cluster...
|
||||
Starting VM...
|
||||
Getting VM IP address...
|
||||
Moving files into cluster...
|
||||
Setting up certs...
|
||||
Connecting to cluster...
|
||||
Setting up kubeconfig...
|
||||
Stopping extra container runtimes...
|
||||
Starting cluster components...
|
||||
Verifying apiserver health ...
|
||||
Kubectl is now configured to use the cluster.
|
||||
Loading cached images from config file.
|
||||
|
||||
|
||||
Everything looks great. Please enjoy minikube!
|
||||
|
||||
Great... At this point we have a cluster that's running, let's verify.
|
||||
|
||||
.. code:: text
|
||||
|
||||
# Id Name State
|
||||
--------------------------
|
||||
3 minikube running
|
||||
|
||||
For me, I can check ``virsh``. If you used *VirtualBox* you can check that.
|
||||
|
||||
We can also test with ``kubectl``.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ kubectl version
|
||||
Client Version: version.Info{Major:"1", Minor:"13", GitVersion:"v1.13.3", GitCommit:"721bfa751924da8d1680787490c54b9179b1fed0", GitTreeState:"clean", BuildDate:"2019-02-01T20:08:12Z", GoVersion:"go1.11.5", Compiler:"gc", Platform:"linux/amd64"}
|
||||
Server Version: version.Info{Major:"1", Minor:"13", GitVersion:"v1.13.2", GitCommit:"cff46ab41ff0bb44d8584413b598ad8360ec1def", GitTreeState:"clean", BuildDate:"2019-01-10T23:28:14Z", GoVersion:"go1.11.4", Compiler:"gc", Platform:"linux/amd64"}
|
||||
|
||||
Now what ? Well, now we deploy a few addons that we need to deploy in production as well for a functioning *kubernetes* cluster.
|
||||
|
||||
Let's check the list of add-ons available out of the box.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ minikube addons list
|
||||
- addon-manager: enabled
|
||||
- dashboard: enabled
|
||||
- default-storageclass: enabled
|
||||
- efk: disabled
|
||||
- freshpod: disabled
|
||||
- gvisor: disabled
|
||||
- heapster: enabled
|
||||
- ingress: enabled
|
||||
- kube-dns: disabled
|
||||
- metrics-server: enabled
|
||||
- nvidia-driver-installer: disabled
|
||||
- nvidia-gpu-device-plugin: disabled
|
||||
- registry: disabled
|
||||
- registry-creds: disabled
|
||||
- storage-provisioner: enabled
|
||||
- storage-provisioner-gluster: disabled
|
||||
|
||||
Make sure you have *dashboard*, *heapster*, *ingress* and *metrics-server* **enabled**. You can enable add-ons with ``kubectl addons enable``.
|
||||
|
||||
What's the problem then ?
|
||||
=========================
|
||||
|
||||
Here's the problem that comes next. How do you access the dashboard or anything running in the cluster ? Everyone online suggests you proxy a port and you access the dashboard. Is that really how it should work ? Is that how production system do it ?
|
||||
|
||||
The answer is of course not. They use different types of *ingresses* at their disposal. In this case, *minikube* was kind enough to provide one for us, the default *kubernetes ingress controller*, It's a great option for an ingress controller that's solid enough for production use. Fine, a lot of babble. Yes sure but this babble is important. So how do we access stuff on a cluster ?
|
||||
|
||||
To answer that question we need to understand a few things. Yes, you can use a ``NodePort`` on your service and access it that way. But do you really want to manage these ports ? What's in use and what's not ? Besides, wouldn't it be better if you can use one port for all of the services ? How you may ask ?
|
||||
|
||||
We've been doing it for years, and by we I mean *ops* and *devops* people. You have to understand that the kubernetes ingress controller is simply an *nginx* under the covers. We've always been able to configure *nginx* to listen for a specific *hostname* and redirect it where we want to. It shouldn't be that hard to do right ?
|
||||
|
||||
Well this is what an ingress controller does. It uses the default ports to route traffic from the outside according to hostname called. Let's look at our cluster and see what we need.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ kubectl get services --all-namespaces
|
||||
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
default kubernetes ClusterIP 10.96.0.1 443/TCP 17m
|
||||
kube-system default-http-backend NodePort 10.96.77.15 80:30001/TCP 17m
|
||||
kube-system heapster ClusterIP 10.100.193.109 80/TCP 17m
|
||||
kube-system kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP 17m
|
||||
kube-system kubernetes-dashboard ClusterIP 10.106.156.91 80/TCP 17m
|
||||
kube-system metrics-server ClusterIP 10.103.137.86 443/TCP 17m
|
||||
kube-system monitoring-grafana NodePort 10.109.127.87 80:30002/TCP 17m
|
||||
kube-system monitoring-influxdb ClusterIP 10.106.174.177 8083/TCP,8086/TCP 17m
|
||||
|
||||
In my case, you can see that I have a few things that are in ``NodePort`` configuration and you can access them on those ports. But the *kubernetes-dashboard* is a ``ClusterIP`` and we can't get to it. So let's change that by adding an ingress to the service.
|
||||
|
||||
Ingress
|
||||
=======
|
||||
|
||||
An ingress is an object of kind ``ingress`` that configures the ingress controller of your choice.
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
namespace: kube-system
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/rewrite-target: /
|
||||
spec:
|
||||
rules:
|
||||
- host: dashboard.kube.local
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: kubernetes-dashboard
|
||||
servicePort: 80
|
||||
|
||||
Save that to a file ``kube-dashboard-ingress.yaml`` or something then run.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ kubectl apply -f kube-bashboard-ingress.yaml
|
||||
ingress.extensions/kubernetes-dashboard created
|
||||
|
||||
And now we get this.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ kubectl get ingress --all-namespaces
|
||||
NAMESPACE NAME HOSTS ADDRESS PORTS AGE
|
||||
kube-system kubernetes-dashboard dashboard.kube.local 80 17s
|
||||
|
||||
Now all we need to know is the IP of our kubernetes cluster of *one*. Don't worry *minikube* makes it easy for us.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ minikube ip
|
||||
192.168.39.79
|
||||
|
||||
Now let's add that host to our ``/etc/hosts`` file.
|
||||
|
||||
.. code:: text
|
||||
|
||||
192.168.39.79 dashboard.kube.local
|
||||
|
||||
Now if you go to http://dashboard.kube.local in your browser, you will be welcomed with the dashboard. How is that so ? Well as I explained, point it to the nodes of the cluster with the proper hostname and it works.
|
||||
|
||||
You can deploy multiple services that can be accessed this way, you can also integrate this with a service mesh or a service discovery which could find the up and running nodes that can redirect you to point to at all times. But this is the clean way to expose services outside the cluster.
|
388
posts/kubernetes/your-first-minikube-helm-deployment.org
Normal file
|
@ -0,0 +1,388 @@
|
|||
#+BEGIN_COMMENT
|
||||
.. title: Your First Minikube Helm Deployment
|
||||
.. date: 2019-02-10
|
||||
.. updated: 2019-06-21
|
||||
.. status: published
|
||||
.. tags: minikube, kubernetes, ingress, helm, prometheus, grafana,
|
||||
.. category: kubernetes
|
||||
.. slug: your-first-minikube-helm-deployment
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: Deploying your first minikube helm charts.
|
||||
.. type: text
|
||||
#+END_COMMENT
|
||||
|
||||
In the last post, we have configured a basic /minikube/ cluster. In this post we will deploy a few items we will need in a cluster and maybe in the future, experiment with it a bit.
|
||||
|
||||
{{{TEASER_END}}}
|
||||
|
||||
* Prerequisite
|
||||
During this post and probably during future posts, we will be using /helm/ to deploy to our /minikube/ cluster. Some offered by the helm team, others by the community and maybe our own. We need to install =helm= on our machine. It should be as easy as downloading the binary but if you can find it in your package manager go that route.
|
||||
|
||||
* Deploying Tiller
|
||||
Before we can start with the deployments using =helm=, we need to deploy /tiller/. It's a service that manages communications with the client and deployments.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ helm init --history-max=10
|
||||
Creating ~/.helm
|
||||
Creating ~/.helm/repository
|
||||
Creating ~/.helm/repository/cache
|
||||
Creating ~/.helm/repository/local
|
||||
Creating ~/.helm/plugins
|
||||
Creating ~/.helm/starters
|
||||
Creating ~/.helm/cache/archive
|
||||
Creating ~/.helm/repository/repositories.yaml
|
||||
Adding stable repo with URL: https://kubernetes-charts.storage.googleapis.com
|
||||
Adding local repo with URL: http://127.0.0.1:8879/charts
|
||||
$HELM_HOME has been configured at ~/.helm.
|
||||
|
||||
Tiller (the Helm server-side component) has been installed into your Kubernetes Cluster.
|
||||
|
||||
Please note: by default, Tiller is deployed with an insecure 'allow unauthenticated users' policy.
|
||||
To prevent this, run ``helm init`` with the --tiller-tls-verify flag.
|
||||
For more information on securing your installation see: https://docs.helm.sh/using_helm/#securing-your-helm-installation
|
||||
#+END_EXAMPLE
|
||||
|
||||
/Tiller/ is deployed, give it a few minutes for the pods to come up.
|
||||
|
||||
* Deploy Prometheus
|
||||
We often need to monitor multiple aspects of the cluster easily. Sometimes maybe even write our applications to (let's say) publish metrics to prometheus. And I said 'let's say' because technically we offer an endpoint that a prometheus exporter will consume regularly and publish to the prometheus server. Anyway, let's deploy prometheus.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ helm install stable/prometheus-operator --name prometheus-operator --namespace kube-prometheus
|
||||
NAME: prometheus-operator
|
||||
LAST DEPLOYED: Sat Feb 9 18:09:43 2019
|
||||
NAMESPACE: kube-prometheus
|
||||
STATUS: DEPLOYED
|
||||
|
||||
RESOURCES:
|
||||
==> v1/Secret
|
||||
NAME TYPE DATA AGE
|
||||
prometheus-operator-grafana Opaque 3 4s
|
||||
alertmanager-prometheus-operator-alertmanager Opaque 1 4s
|
||||
|
||||
==> v1beta1/ClusterRole
|
||||
NAME AGE
|
||||
prometheus-operator-kube-state-metrics 3s
|
||||
psp-prometheus-operator-kube-state-metrics 3s
|
||||
psp-prometheus-operator-prometheus-node-exporter 3s
|
||||
|
||||
==> v1/Service
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
prometheus-operator-grafana ClusterIP 10.107.125.114 80/TCP 3s
|
||||
prometheus-operator-kube-state-metrics ClusterIP 10.99.250.30 8080/TCP 3s
|
||||
prometheus-operator-prometheus-node-exporter ClusterIP 10.111.99.199 9100/TCP 3s
|
||||
prometheus-operator-alertmanager ClusterIP 10.96.49.73 9093/TCP 3s
|
||||
prometheus-operator-coredns ClusterIP None 9153/TCP 3s
|
||||
prometheus-operator-kube-controller-manager ClusterIP None 10252/TCP 3s
|
||||
prometheus-operator-kube-etcd ClusterIP None 4001/TCP 3s
|
||||
prometheus-operator-kube-scheduler ClusterIP None 10251/TCP 3s
|
||||
prometheus-operator-operator ClusterIP 10.101.253.101 8080/TCP 3s
|
||||
prometheus-operator-prometheus ClusterIP 10.107.117.120 9090/TCP 3s
|
||||
|
||||
==> v1beta1/DaemonSet
|
||||
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
|
||||
prometheus-operator-prometheus-node-exporter 1 1 0 1 0 3s
|
||||
|
||||
==> v1/Deployment
|
||||
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
|
||||
prometheus-operator-operator 1 1 1 0 3s
|
||||
|
||||
==> v1/ServiceMonitor
|
||||
NAME AGE
|
||||
prometheus-operator-alertmanager 2s
|
||||
prometheus-operator-coredns 2s
|
||||
prometheus-operator-apiserver 2s
|
||||
prometheus-operator-kube-controller-manager 2s
|
||||
prometheus-operator-kube-etcd 2s
|
||||
prometheus-operator-kube-scheduler 2s
|
||||
prometheus-operator-kube-state-metrics 2s
|
||||
prometheus-operator-kubelet 2s
|
||||
prometheus-operator-node-exporter 2s
|
||||
prometheus-operator-operator 2s
|
||||
prometheus-operator-prometheus 2s
|
||||
|
||||
==> v1/Pod(related)
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
prometheus-operator-prometheus-node-exporter-fntpx 0/1 ContainerCreating 0 3s
|
||||
prometheus-operator-grafana-8559d7df44-vrm8d 0/3 ContainerCreating 0 2s
|
||||
prometheus-operator-kube-state-metrics-7769f5bd54-6znvh 0/1 ContainerCreating 0 2s
|
||||
prometheus-operator-operator-7967865bf5-cbd6r 0/1 ContainerCreating 0 2s
|
||||
|
||||
==> v1beta1/PodSecurityPolicy
|
||||
NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES
|
||||
prometheus-operator-grafana false RunAsAny RunAsAny RunAsAny RunAsAny false configMap,emptyDir,projected,secret,downwardAPI,persistentVolumeClaim
|
||||
prometheus-operator-kube-state-metrics false RunAsAny MustRunAsNonRoot MustRunAs MustRunAs false secret
|
||||
prometheus-operator-prometheus-node-exporter false RunAsAny RunAsAny MustRunAs MustRunAs false configMap,emptyDir,projected,secret,downwardAPI,persistentVolumeClaim,hostPath
|
||||
prometheus-operator-alertmanager false RunAsAny RunAsAny MustRunAs MustRunAs false configMap,emptyDir,projected,secret,downwardAPI,persistentVolumeClaim
|
||||
prometheus-operator-operator false RunAsAny RunAsAny MustRunAs MustRunAs false configMap,emptyDir,projected,secret,downwardAPI,persistentVolumeClaim
|
||||
prometheus-operator-prometheus false RunAsAny RunAsAny MustRunAs MustRunAs false configMap,emptyDir,projected,secret,downwardAPI,persistentVolumeClaim
|
||||
|
||||
==> v1/ConfigMap
|
||||
NAME DATA AGE
|
||||
prometheus-operator-grafana-config-dashboards 1 4s
|
||||
prometheus-operator-grafana 1 4s
|
||||
prometheus-operator-grafana-datasource 1 4s
|
||||
prometheus-operator-etcd 1 4s
|
||||
prometheus-operator-grafana-coredns-k8s 1 4s
|
||||
prometheus-operator-k8s-cluster-rsrc-use 1 4s
|
||||
prometheus-operator-k8s-node-rsrc-use 1 4s
|
||||
prometheus-operator-k8s-resources-cluster 1 4s
|
||||
prometheus-operator-k8s-resources-namespace 1 4s
|
||||
prometheus-operator-k8s-resources-pod 1 4s
|
||||
prometheus-operator-nodes 1 4s
|
||||
prometheus-operator-persistentvolumesusage 1 4s
|
||||
prometheus-operator-pods 1 4s
|
||||
prometheus-operator-statefulset 1 4s
|
||||
|
||||
==> v1/ClusterRoleBinding
|
||||
NAME AGE
|
||||
prometheus-operator-grafana-clusterrolebinding 3s
|
||||
prometheus-operator-alertmanager 3s
|
||||
prometheus-operator-operator 3s
|
||||
prometheus-operator-operator-psp 3s
|
||||
prometheus-operator-prometheus 3s
|
||||
prometheus-operator-prometheus-psp 3s
|
||||
|
||||
==> v1beta1/Role
|
||||
NAME AGE
|
||||
prometheus-operator-grafana 3s
|
||||
|
||||
==> v1beta1/Deployment
|
||||
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
|
||||
prometheus-operator-kube-state-metrics 1 1 1 0 3s
|
||||
|
||||
==> v1/Alertmanager
|
||||
NAME AGE
|
||||
prometheus-operator-alertmanager 3s
|
||||
|
||||
==> v1/ServiceAccount
|
||||
NAME SECRETS AGE
|
||||
prometheus-operator-grafana 1 4s
|
||||
prometheus-operator-kube-state-metrics 1 4s
|
||||
prometheus-operator-prometheus-node-exporter 1 4s
|
||||
prometheus-operator-alertmanager 1 4s
|
||||
prometheus-operator-operator 1 4s
|
||||
prometheus-operator-prometheus 1 4s
|
||||
|
||||
==> v1/ClusterRole
|
||||
NAME AGE
|
||||
prometheus-operator-grafana-clusterrole 4s
|
||||
prometheus-operator-alertmanager 3s
|
||||
prometheus-operator-operator 3s
|
||||
prometheus-operator-operator-psp 3s
|
||||
prometheus-operator-prometheus 3s
|
||||
prometheus-operator-prometheus-psp 3s
|
||||
|
||||
==> v1/Role
|
||||
NAME AGE
|
||||
prometheus-operator-prometheus-config 3s
|
||||
prometheus-operator-prometheus 2s
|
||||
prometheus-operator-prometheus 2s
|
||||
|
||||
==> v1beta1/RoleBinding
|
||||
NAME AGE
|
||||
prometheus-operator-grafana 3s
|
||||
|
||||
==> v1beta2/Deployment
|
||||
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
|
||||
prometheus-operator-grafana 1 1 1 0 3s
|
||||
|
||||
==> v1/Prometheus
|
||||
NAME AGE
|
||||
prometheus-operator-prometheus 2s
|
||||
|
||||
==> v1beta1/ClusterRoleBinding
|
||||
NAME AGE
|
||||
prometheus-operator-kube-state-metrics 3s
|
||||
psp-prometheus-operator-kube-state-metrics 3s
|
||||
psp-prometheus-operator-prometheus-node-exporter 3s
|
||||
|
||||
==> v1/RoleBinding
|
||||
NAME AGE
|
||||
prometheus-operator-prometheus-config 3s
|
||||
prometheus-operator-prometheus 2s
|
||||
prometheus-operator-prometheus 2s
|
||||
|
||||
==> v1/PrometheusRule
|
||||
NAME AGE
|
||||
prometheus-operator-alertmanager.rules 2s
|
||||
prometheus-operator-etcd 2s
|
||||
prometheus-operator-general.rules 2s
|
||||
prometheus-operator-k8s.rules 2s
|
||||
prometheus-operator-kube-apiserver.rules 2s
|
||||
prometheus-operator-kube-prometheus-node-alerting.rules 2s
|
||||
prometheus-operator-kube-prometheus-node-recording.rules 2s
|
||||
prometheus-operator-kube-scheduler.rules 2s
|
||||
prometheus-operator-kubernetes-absent 2s
|
||||
prometheus-operator-kubernetes-apps 2s
|
||||
prometheus-operator-kubernetes-resources 2s
|
||||
prometheus-operator-kubernetes-storage 2s
|
||||
prometheus-operator-kubernetes-system 2s
|
||||
prometheus-operator-node.rules 2s
|
||||
prometheus-operator-prometheus-operator 2s
|
||||
prometheus-operator-prometheus.rules 2s
|
||||
|
||||
NOTES: The Prometheus Operator has been installed. Check its status by
|
||||
running: kubectl --namespace kube-prometheus get pods -l
|
||||
"release=prometheus-operator"
|
||||
|
||||
Visit [[https://github.com/coreos/prometheus-operator]] for
|
||||
instructions on how to create & configure Alertmanager and Prometheus
|
||||
instances using the Operator.
|
||||
#+END_EXAMPLE
|
||||
|
||||
At this point, prometheus has been deployed to the cluster. Give it a few minutes for all the pods to come up. Let's keep on working to get access to the rest of the consoles offered by the prometheus deployment.
|
||||
|
||||
* Prometheus Console
|
||||
Let's write an ingress configuration to expose the prometheus console.
|
||||
First off we need to list all the service deployed for prometheus.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ kubectl get service prometheus-operator-prometheus -o yaml -n kube-prometheus
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
creationTimestamp: "2019-02-09T23:09:55Z"
|
||||
labels:
|
||||
app: prometheus-operator-prometheus
|
||||
chart: prometheus-operator-2.1.6
|
||||
heritage: Tiller
|
||||
release: prometheus-operator
|
||||
name: prometheus-operator-prometheus
|
||||
namespace: kube-prometheus
|
||||
resourceVersion: "10996"
|
||||
selfLink: /api/v1/namespaces/kube-prometheus/services/prometheus-operator-prometheus
|
||||
uid: d038d6fa-2cbf-11e9-b74f-48ea5bb87c0b
|
||||
spec:
|
||||
clusterIP: 10.107.117.120
|
||||
ports:
|
||||
- name: web
|
||||
port: 9090
|
||||
protocol: TCP
|
||||
targetPort: web
|
||||
selector:
|
||||
app: prometheus
|
||||
prometheus: prometheus-operator-prometheus
|
||||
sessionAffinity: None
|
||||
type: ClusterIP
|
||||
status:
|
||||
loadBalancer: {}
|
||||
#+END_EXAMPLE
|
||||
|
||||
As we can see from the service above, its name is =prometheus-operator-prometheus= and it's listening on port =9090=.
|
||||
So let's write the ingress configuration for it.
|
||||
|
||||
#+BEGIN_SRC yaml
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: prometheus-dashboard
|
||||
namespace: kube-prometheus
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/rewrite-target: /
|
||||
spec:
|
||||
rules:
|
||||
- host: prometheus.kube.local
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: prometheus-operator-prometheus
|
||||
servicePort: 9090
|
||||
#+END_SRC
|
||||
|
||||
Save the file as =kube-prometheus-ingress.yaml= or some such and deploy.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ kubectl apply -f kube-prometheus-ingress.yaml
|
||||
ingress.extensions/prometheus-dashboard created
|
||||
#+END_EXAMPLE
|
||||
|
||||
And then add the service host to our =/etc/hosts=.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
192.168.39.78 prometheus.kube.local
|
||||
#+END_EXAMPLE
|
||||
|
||||
Now you can access [[http://prometheus.kube.local]] from your browser.
|
||||
|
||||
* Grafana Console
|
||||
Much like what we did with the prometheus console previously, we need to do the same to the grafana dashboard.
|
||||
|
||||
First step, let's check the service.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ kubectl get service prometheus-operator-grafana -o yaml -n kube-prometheus
|
||||
#+END_EXAMPLE
|
||||
|
||||
Gives you the following output.
|
||||
|
||||
#+BEGIN_SRC yaml
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
creationTimestamp: "2019-02-09T23:09:55Z"
|
||||
labels:
|
||||
app: grafana
|
||||
chart: grafana-1.25.0
|
||||
heritage: Tiller
|
||||
release: prometheus-operator
|
||||
name: prometheus-operator-grafana
|
||||
namespace: kube-prometheus
|
||||
resourceVersion: "10973"
|
||||
selfLink: /api/v1/namespaces/kube-prometheus/services/prometheus-operator-grafana
|
||||
uid: cffe169b-2cbf-11e9-b74f-48ea5bb87c0b
|
||||
spec:
|
||||
clusterIP: 10.107.125.114
|
||||
ports:
|
||||
- name: service
|
||||
port: 80
|
||||
protocol: TCP
|
||||
targetPort: 3000
|
||||
selector:
|
||||
app: grafana
|
||||
release: prometheus-operator
|
||||
sessionAffinity: None
|
||||
type: ClusterIP
|
||||
status:
|
||||
loadBalancer: {}
|
||||
#+END_SRC
|
||||
|
||||
We get =prometheus-operator-grafana= and port =80=. Next is the ingress configuration.
|
||||
|
||||
#+BEGIN_SRC yaml
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: prometheus-grafana
|
||||
namespace: kube-prometheus
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/rewrite-target: /
|
||||
spec:
|
||||
rules:
|
||||
- host: grafana.kube.local
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: prometheus-operator-grafana
|
||||
servicePort: 80
|
||||
#+END_SRC
|
||||
|
||||
Then we deploy.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ kubectl apply -f kube-grafana-ingress.yaml
|
||||
  ingress.extensions/prometheus-grafana created
|
||||
#+END_EXAMPLE
|
||||
|
||||
And let's not forget =/etc/hosts=.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
192.168.39.78 grafana.kube.local
|
||||
#+END_EXAMPLE
|
||||
|
||||
And the grafana dashboard should appear if you visit [[http://grafana.kube.local]].
|
|
@ -1,389 +0,0 @@
|
|||
.. title: Your First Minikube Helm Deployment
|
||||
.. date: 2019-02-10
|
||||
.. updated: 2019-06-21
|
||||
.. status: published
|
||||
.. tags: minikube, kubernetes, ingress, helm, prometheus, grafana,
|
||||
.. category: kubernetes
|
||||
.. slug: your-first-minikube-helm-deployment
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: Deploying your first minikube helm charts.
|
||||
.. type: text
|
||||
|
||||
|
||||
In the last post, we have configured a basic *minikube* cluster. In this post we will deploy a few items we will need in a cluster and maybe in the future, experiment with it a bit.
|
||||
|
||||
.. TEASER_END
|
||||
|
||||
Prerequisite
|
||||
============
|
||||
|
||||
During this post and probably during future posts, we will be using *helm* to deploy to our *minikube* cluster. Some offered by the helm team, others by the community and maybe our own. We need to install ``helm`` on our machine. It should be as easy as downloading the binary but if you can find it in your package manager go that route.
|
||||
|
||||
Deploying Tiller
|
||||
================
|
||||
|
||||
Before we can start with the deployments using ``helm``, we need to deploy *tiller*. It's a service that manages communications with the client and deployments.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ helm init --history-max=10
|
||||
Creating ~/.helm
|
||||
Creating ~/.helm/repository
|
||||
Creating ~/.helm/repository/cache
|
||||
Creating ~/.helm/repository/local
|
||||
Creating ~/.helm/plugins
|
||||
Creating ~/.helm/starters
|
||||
Creating ~/.helm/cache/archive
|
||||
Creating ~/.helm/repository/repositories.yaml
|
||||
Adding stable repo with URL: https://kubernetes-charts.storage.googleapis.com
|
||||
Adding local repo with URL: http://127.0.0.1:8879/charts
|
||||
$HELM_HOME has been configured at ~/.helm.
|
||||
|
||||
Tiller (the Helm server-side component) has been installed into your Kubernetes Cluster.
|
||||
|
||||
Please note: by default, Tiller is deployed with an insecure 'allow unauthenticated users' policy.
|
||||
To prevent this, run ``helm init`` with the --tiller-tls-verify flag.
|
||||
For more information on securing your installation see: https://docs.helm.sh/using_helm/#securing-your-helm-installation
|
||||
|
||||
*Tiller* is deployed, give it a few minutes for the pods to come up.
|
||||
|
||||
Deploy Prometheus
|
||||
=================
|
||||
|
||||
We often need to monitor multiple aspects of the cluster easily. Sometimes maybe even write our applications to (let's say) publish metrics to prometheus. And I said 'let's say' because technically we offer an endpoint that a prometheus exporter will consume regularly and publish to the prometheus server. Anyway, let's deploy prometheus.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ helm install stable/prometheus-operator --name prometheus-operator --namespace kube-prometheus
|
||||
NAME: prometheus-operator
|
||||
LAST DEPLOYED: Sat Feb 9 18:09:43 2019
|
||||
NAMESPACE: kube-prometheus
|
||||
STATUS: DEPLOYED
|
||||
|
||||
RESOURCES:
|
||||
==> v1/Secret
|
||||
NAME TYPE DATA AGE
|
||||
prometheus-operator-grafana Opaque 3 4s
|
||||
alertmanager-prometheus-operator-alertmanager Opaque 1 4s
|
||||
|
||||
==> v1beta1/ClusterRole
|
||||
NAME AGE
|
||||
prometheus-operator-kube-state-metrics 3s
|
||||
psp-prometheus-operator-kube-state-metrics 3s
|
||||
psp-prometheus-operator-prometheus-node-exporter 3s
|
||||
|
||||
==> v1/Service
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
prometheus-operator-grafana ClusterIP 10.107.125.114 80/TCP 3s
|
||||
prometheus-operator-kube-state-metrics ClusterIP 10.99.250.30 8080/TCP 3s
|
||||
prometheus-operator-prometheus-node-exporter ClusterIP 10.111.99.199 9100/TCP 3s
|
||||
prometheus-operator-alertmanager ClusterIP 10.96.49.73 9093/TCP 3s
|
||||
prometheus-operator-coredns ClusterIP None 9153/TCP 3s
|
||||
prometheus-operator-kube-controller-manager ClusterIP None 10252/TCP 3s
|
||||
prometheus-operator-kube-etcd ClusterIP None 4001/TCP 3s
|
||||
prometheus-operator-kube-scheduler ClusterIP None 10251/TCP 3s
|
||||
prometheus-operator-operator ClusterIP 10.101.253.101 8080/TCP 3s
|
||||
prometheus-operator-prometheus ClusterIP 10.107.117.120 9090/TCP 3s
|
||||
|
||||
==> v1beta1/DaemonSet
|
||||
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
|
||||
prometheus-operator-prometheus-node-exporter 1 1 0 1 0 3s
|
||||
|
||||
==> v1/Deployment
|
||||
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
|
||||
prometheus-operator-operator 1 1 1 0 3s
|
||||
|
||||
==> v1/ServiceMonitor
|
||||
NAME AGE
|
||||
prometheus-operator-alertmanager 2s
|
||||
prometheus-operator-coredns 2s
|
||||
prometheus-operator-apiserver 2s
|
||||
prometheus-operator-kube-controller-manager 2s
|
||||
prometheus-operator-kube-etcd 2s
|
||||
prometheus-operator-kube-scheduler 2s
|
||||
prometheus-operator-kube-state-metrics 2s
|
||||
prometheus-operator-kubelet 2s
|
||||
prometheus-operator-node-exporter 2s
|
||||
prometheus-operator-operator 2s
|
||||
prometheus-operator-prometheus 2s
|
||||
|
||||
==> v1/Pod(related)
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
prometheus-operator-prometheus-node-exporter-fntpx 0/1 ContainerCreating 0 3s
|
||||
prometheus-operator-grafana-8559d7df44-vrm8d 0/3 ContainerCreating 0 2s
|
||||
prometheus-operator-kube-state-metrics-7769f5bd54-6znvh 0/1 ContainerCreating 0 2s
|
||||
prometheus-operator-operator-7967865bf5-cbd6r 0/1 ContainerCreating 0 2s
|
||||
|
||||
==> v1beta1/PodSecurityPolicy
|
||||
NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP READONLYROOTFS VOLUMES
|
||||
prometheus-operator-grafana false RunAsAny RunAsAny RunAsAny RunAsAny false configMap,emptyDir,projected,secret,downwardAPI,persistentVolumeClaim
|
||||
prometheus-operator-kube-state-metrics false RunAsAny MustRunAsNonRoot MustRunAs MustRunAs false secret
|
||||
prometheus-operator-prometheus-node-exporter false RunAsAny RunAsAny MustRunAs MustRunAs false configMap,emptyDir,projected,secret,downwardAPI,persistentVolumeClaim,hostPath
|
||||
prometheus-operator-alertmanager false RunAsAny RunAsAny MustRunAs MustRunAs false configMap,emptyDir,projected,secret,downwardAPI,persistentVolumeClaim
|
||||
prometheus-operator-operator false RunAsAny RunAsAny MustRunAs MustRunAs false configMap,emptyDir,projected,secret,downwardAPI,persistentVolumeClaim
|
||||
prometheus-operator-prometheus false RunAsAny RunAsAny MustRunAs MustRunAs false configMap,emptyDir,projected,secret,downwardAPI,persistentVolumeClaim
|
||||
|
||||
==> v1/ConfigMap
|
||||
NAME DATA AGE
|
||||
prometheus-operator-grafana-config-dashboards 1 4s
|
||||
prometheus-operator-grafana 1 4s
|
||||
prometheus-operator-grafana-datasource 1 4s
|
||||
prometheus-operator-etcd 1 4s
|
||||
prometheus-operator-grafana-coredns-k8s 1 4s
|
||||
prometheus-operator-k8s-cluster-rsrc-use 1 4s
|
||||
prometheus-operator-k8s-node-rsrc-use 1 4s
|
||||
prometheus-operator-k8s-resources-cluster 1 4s
|
||||
prometheus-operator-k8s-resources-namespace 1 4s
|
||||
prometheus-operator-k8s-resources-pod 1 4s
|
||||
prometheus-operator-nodes 1 4s
|
||||
prometheus-operator-persistentvolumesusage 1 4s
|
||||
prometheus-operator-pods 1 4s
|
||||
prometheus-operator-statefulset 1 4s
|
||||
|
||||
==> v1/ClusterRoleBinding
|
||||
NAME AGE
|
||||
prometheus-operator-grafana-clusterrolebinding 3s
|
||||
prometheus-operator-alertmanager 3s
|
||||
prometheus-operator-operator 3s
|
||||
prometheus-operator-operator-psp 3s
|
||||
prometheus-operator-prometheus 3s
|
||||
prometheus-operator-prometheus-psp 3s
|
||||
|
||||
==> v1beta1/Role
|
||||
NAME AGE
|
||||
prometheus-operator-grafana 3s
|
||||
|
||||
==> v1beta1/Deployment
|
||||
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
|
||||
prometheus-operator-kube-state-metrics 1 1 1 0 3s
|
||||
|
||||
==> v1/Alertmanager
|
||||
NAME AGE
|
||||
prometheus-operator-alertmanager 3s
|
||||
|
||||
==> v1/ServiceAccount
|
||||
NAME SECRETS AGE
|
||||
prometheus-operator-grafana 1 4s
|
||||
prometheus-operator-kube-state-metrics 1 4s
|
||||
prometheus-operator-prometheus-node-exporter 1 4s
|
||||
prometheus-operator-alertmanager 1 4s
|
||||
prometheus-operator-operator 1 4s
|
||||
prometheus-operator-prometheus 1 4s
|
||||
|
||||
==> v1/ClusterRole
|
||||
NAME AGE
|
||||
prometheus-operator-grafana-clusterrole 4s
|
||||
prometheus-operator-alertmanager 3s
|
||||
prometheus-operator-operator 3s
|
||||
prometheus-operator-operator-psp 3s
|
||||
prometheus-operator-prometheus 3s
|
||||
prometheus-operator-prometheus-psp 3s
|
||||
|
||||
==> v1/Role
|
||||
NAME AGE
|
||||
prometheus-operator-prometheus-config 3s
|
||||
prometheus-operator-prometheus 2s
|
||||
prometheus-operator-prometheus 2s
|
||||
|
||||
==> v1beta1/RoleBinding
|
||||
NAME AGE
|
||||
prometheus-operator-grafana 3s
|
||||
|
||||
==> v1beta2/Deployment
|
||||
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
|
||||
prometheus-operator-grafana 1 1 1 0 3s
|
||||
|
||||
==> v1/Prometheus
|
||||
NAME AGE
|
||||
prometheus-operator-prometheus 2s
|
||||
|
||||
==> v1beta1/ClusterRoleBinding
|
||||
NAME AGE
|
||||
prometheus-operator-kube-state-metrics 3s
|
||||
psp-prometheus-operator-kube-state-metrics 3s
|
||||
psp-prometheus-operator-prometheus-node-exporter 3s
|
||||
|
||||
==> v1/RoleBinding
|
||||
NAME AGE
|
||||
prometheus-operator-prometheus-config 3s
|
||||
prometheus-operator-prometheus 2s
|
||||
prometheus-operator-prometheus 2s
|
||||
|
||||
==> v1/PrometheusRule
|
||||
NAME AGE
|
||||
prometheus-operator-alertmanager.rules 2s
|
||||
prometheus-operator-etcd 2s
|
||||
prometheus-operator-general.rules 2s
|
||||
prometheus-operator-k8s.rules 2s
|
||||
prometheus-operator-kube-apiserver.rules 2s
|
||||
prometheus-operator-kube-prometheus-node-alerting.rules 2s
|
||||
prometheus-operator-kube-prometheus-node-recording.rules 2s
|
||||
prometheus-operator-kube-scheduler.rules 2s
|
||||
prometheus-operator-kubernetes-absent 2s
|
||||
prometheus-operator-kubernetes-apps 2s
|
||||
prometheus-operator-kubernetes-resources 2s
|
||||
prometheus-operator-kubernetes-storage 2s
|
||||
prometheus-operator-kubernetes-system 2s
|
||||
prometheus-operator-node.rules 2s
|
||||
prometheus-operator-prometheus-operator 2s
|
||||
prometheus-operator-prometheus.rules 2s
|
||||
|
||||
|
||||
NOTES:
|
||||
The Prometheus Operator has been installed. Check its status by running:
|
||||
kubectl --namespace kube-prometheus get pods -l "release=prometheus-operator"
|
||||
|
||||
Visit https://github.com/coreos/prometheus-operator for instructions on how
|
||||
to create & configure Alertmanager and Prometheus instances using the Operator.
|
||||
|
||||
At this point, prometheus has been deployed to the cluster. Give it a few minutes for all the pods to come up. Let's keep on working to get access to the rest of the consoles offered by the prometheus deployment.
|
||||
|
||||
Prometheus Console
|
||||
==================
|
||||
|
||||
Let's write an ingress configuration to expose the prometheus console. First off we need to list all the service deployed for prometheus.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ kubectl get service prometheus-operator-prometheus -o yaml -n kube-prometheus
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
creationTimestamp: "2019-02-09T23:09:55Z"
|
||||
labels:
|
||||
app: prometheus-operator-prometheus
|
||||
chart: prometheus-operator-2.1.6
|
||||
heritage: Tiller
|
||||
release: prometheus-operator
|
||||
name: prometheus-operator-prometheus
|
||||
namespace: kube-prometheus
|
||||
resourceVersion: "10996"
|
||||
selfLink: /api/v1/namespaces/kube-prometheus/services/prometheus-operator-prometheus
|
||||
uid: d038d6fa-2cbf-11e9-b74f-48ea5bb87c0b
|
||||
spec:
|
||||
clusterIP: 10.107.117.120
|
||||
ports:
|
||||
- name: web
|
||||
port: 9090
|
||||
protocol: TCP
|
||||
targetPort: web
|
||||
selector:
|
||||
app: prometheus
|
||||
prometheus: prometheus-operator-prometheus
|
||||
sessionAffinity: None
|
||||
type: ClusterIP
|
||||
status:
|
||||
loadBalancer: {}
|
||||
|
||||
As we can see from the service above, its name is ``prometheus-operator-prometheus`` and it's listening on port ``9090``. So let's write the ingress configuration for it.
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: prometheus-dashboard
|
||||
namespace: kube-prometheus
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/rewrite-target: /
|
||||
spec:
|
||||
rules:
|
||||
- host: prometheus.kube.local
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: prometheus-operator-prometheus
|
||||
servicePort: 9090
|
||||
|
||||
Save the file as ``kube-prometheus-ingress.yaml`` or some such and deploy.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ kubectl apply -f kube-prometheus-ingress.yaml
|
||||
ingress.extensions/prometheus-dashboard created
|
||||
|
||||
And then add the service host to our ``/etc/hosts``.
|
||||
|
||||
.. code:: text
|
||||
|
||||
192.168.39.78 prometheus.kube.local
|
||||
|
||||
Now you can access http://prometheus.kube.local from your browser.
|
||||
|
||||
Grafana Console
|
||||
===============
|
||||
|
||||
Much like what we did with the prometheus console previously, we need to do the same to the grafana dashboard.
|
||||
|
||||
First step, let's check the service.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ kubectl get service prometheus-operator-grafana -o yaml -n kube-prometheus
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
creationTimestamp: "2019-02-09T23:09:55Z"
|
||||
labels:
|
||||
app: grafana
|
||||
chart: grafana-1.25.0
|
||||
heritage: Tiller
|
||||
release: prometheus-operator
|
||||
name: prometheus-operator-grafana
|
||||
namespace: kube-prometheus
|
||||
resourceVersion: "10973"
|
||||
selfLink: /api/v1/namespaces/kube-prometheus/services/prometheus-operator-grafana
|
||||
uid: cffe169b-2cbf-11e9-b74f-48ea5bb87c0b
|
||||
spec:
|
||||
clusterIP: 10.107.125.114
|
||||
ports:
|
||||
- name: service
|
||||
port: 80
|
||||
protocol: TCP
|
||||
targetPort: 3000
|
||||
selector:
|
||||
app: grafana
|
||||
release: prometheus-operator
|
||||
sessionAffinity: None
|
||||
type: ClusterIP
|
||||
status:
|
||||
loadBalancer: {}
|
||||
|
||||
We get ``prometheus-operator-grafana`` and port ``80``. Next is the ingress configuration.
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: prometheus-grafana
|
||||
namespace: kube-prometheus
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/rewrite-target: /
|
||||
spec:
|
||||
rules:
|
||||
- host: grafana.kube.local
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: prometheus-operator-grafana
|
||||
servicePort: 80
|
||||
|
||||
Then we deploy.
|
||||
|
||||
.. code:: text
|
||||
|
||||
kubectl apply -f kube-grafana-ingress.yaml
|
||||
ingress.extensions/prometheus-grafana created
|
||||
|
||||
And let's not forget ``/etc/hosts``.
|
||||
|
||||
.. code:: text
|
||||
|
||||
192.168.39.78 grafana.kube.local
|
||||
|
||||
And the grafana dashboard should appear if you visit http://grafana.kube.local.
|
166
posts/misc/a-quick-zfs-overview.org
Normal file
|
@ -0,0 +1,166 @@
|
|||
#+BEGIN_COMMENT
|
||||
.. title: A quick ZFS overview on Linux
|
||||
.. date: 2020-01-27
|
||||
.. slug: a-quick-zfs-overview-on-linux
|
||||
.. updated: 2020-01-27
|
||||
.. status: published
|
||||
.. tags: misc, zfs, file system
|
||||
.. category: misc
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: An overview of Linux on ZFS root
|
||||
.. type: text
|
||||
#+END_COMMENT
|
||||
|
||||
I have, for years, been interested in /file systems/. Specifically a /file system/ to run my personal systems on. For most people *Ext4* is good enough and that is totally fine. But, as a power user, I like to have more control, more features and more options out of my file system.
|
||||
|
||||
I have played with most of the file systems on Linux, and have been using *Btrfs* for a few years now. I have worked with NAS systems running on *ZFS* and have been very impressed by it. The only problem is that *ZFS* wasn't well supported on Linux at the time. *Btrfs* promised to be the *ZFS* replacement for Linux natively, especially that it was backed by a bunch of the giants like Oracle and RedHat. My decision at that point was made, and yes that was before RedHat's support for *XFS* which is impressive on its own. Recently though, a new project gave everyone hope. [[http://www.open-zfs.org/wiki/Main_Page][OpenZFS]] came to life and so did [[https://zfsonlinux.org/][ZFS on Linux]].
|
||||
|
||||
{{{TEASER_END}}}
|
||||
|
||||
Linux has had *ZFS* support for a while now but mostly to manage a *ZFS* /file system/, so I kept watching until I saw a blog post by *Ubuntu* entitled [[https://ubuntu.com/blog/enhancing-our-zfs-support-on-ubuntu-19-10-an-introduction][Enhancing our ZFS support on Ubuntu 19.10 -- an introduction]].
|
||||
|
||||
In the blog post above, I read the following:
|
||||
|
||||
#+BEGIN_QUOTE
|
||||
We want to support ZFS on root as an experimental installer option, initially for desktop, but keeping the layout extensible for server later on. The desktop will be the first beneficiary in Ubuntu 19.10. Note the use of the term ‘experimental' though!
|
||||
#+END_QUOTE
|
||||
|
||||
My eyes widened at this point. I know that *Ubuntu* has had native *ZFS* support since 2016 but now I could install it with one click. At that point I was all in, and I went back to *Ubuntu*.
|
||||
|
||||
* Ubuntu on root ZFS
|
||||
You heard me right, the *Ubuntu* installer offers an 'experimental' install on *ZFS*. I made the decision based on the well tested stability of *ZFS* in production environments and its ability to offer me the flexibility and the ability to backup and recover my data easily.
|
||||
In other words, if *Ubuntu* doesn't work, *ZFS* is there and I can install whatever I like on top and if you are familiar with *ZFS* you know exactly what I mean and I have barely scratched the ice on its capabilities.
|
||||
|
||||
So here I was with *Ubuntu* installed on my laptop on root *ZFS*. So I had to do it.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
# zpool status -v
|
||||
pool: bpool
|
||||
state: ONLINE
|
||||
status: The pool is formatted using a legacy on-disk format. The pool can
|
||||
still be used, but some features are unavailable.
|
||||
action: Upgrade the pool using 'zpool upgrade'. Once this is done, the
|
||||
pool will no longer be accessible on software that does not support
|
||||
feature flags.
|
||||
scan: none requested
|
||||
config:
|
||||
|
||||
NAME STATE READ WRITE CKSUM
|
||||
bpool ONLINE 0 0 0
|
||||
nvme0n1p4 ONLINE 0 0 0
|
||||
|
||||
errors: No known data errors
|
||||
|
||||
pool: rpool
|
||||
state: ONLINE
|
||||
scan: none requested
|
||||
config:
|
||||
|
||||
NAME STATE READ WRITE CKSUM
|
||||
rpool ONLINE 0 0 0
|
||||
nvme0n1p5 ONLINE 0 0 0
|
||||
|
||||
errors: No known data errors
|
||||
#+END_EXAMPLE
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<div class="admonition note">
|
||||
<p class="admonition-title">Note</p>
|
||||
#+END_EXPORT
|
||||
I have read somewhere in a blog about *Ubuntu* that I should not run an upgrade on the boot pool.
|
||||
#+BEGIN_EXPORT html
|
||||
</div>
|
||||
#+END_EXPORT
|
||||
|
||||
and it's running on...
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
# uname -s -v -i -o
|
||||
Linux #28-Ubuntu SMP Wed Dec 18 05:37:46 UTC 2019 x86_64 GNU/Linux
|
||||
#+END_EXAMPLE
|
||||
|
||||
Well that was pretty easy.
|
||||
|
||||
* ZFS Pools
|
||||
Let's take a look at how the installer has configured the /pools/.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
# zpool list
|
||||
NAME SIZE ALLOC FREE CKPOINT EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT
|
||||
bpool 1,88G 158M 1,72G - - - 8% 1.00x ONLINE -
|
||||
rpool 472G 7,91G 464G - - 0% 1% 1.00x ONLINE -
|
||||
#+END_EXAMPLE
|
||||
|
||||
So it creates a /boot/ pool and a /root/ pool. Maybe looking at the
|
||||
*datasets* would give us a better idea.
|
||||
|
||||
* ZFS Datasets
|
||||
Let's look at the sanitized version of the datasets.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
# zfs list
|
||||
NAME USED AVAIL REFER MOUNTPOINT
|
||||
bpool 158M 1,60G 176K /boot
|
||||
bpool/BOOT 157M 1,60G 176K none
|
||||
bpool/BOOT/ubuntu_xxxxxx 157M 1,60G 157M /boot
|
||||
rpool 7,92G 449G 96K /
|
||||
rpool/ROOT 4,53G 449G 96K none
|
||||
rpool/ROOT/ubuntu_xxxxxx 4,53G 449G 3,37G /
|
||||
rpool/ROOT/ubuntu_xxxxxx/srv 96K 449G 96K /srv
|
||||
rpool/ROOT/ubuntu_xxxxxx/usr 208K 449G 96K /usr
|
||||
rpool/ROOT/ubuntu_xxxxxx/usr/local 112K 449G 112K /usr/local
|
||||
rpool/ROOT/ubuntu_xxxxxx/var 1,16G 449G 96K /var
|
||||
rpool/ROOT/ubuntu_xxxxxx/var/games 96K 449G 96K /var/games
|
||||
rpool/ROOT/ubuntu_xxxxxx/var/lib 1,15G 449G 1,04G /var/lib
|
||||
rpool/ROOT/ubuntu_xxxxxx/var/lib/AccountServices 96K 449G 96K /var/lib/AccountServices
|
||||
rpool/ROOT/ubuntu_xxxxxx/var/lib/NetworkManager 152K 449G 152K /var/lib/NetworkManager
|
||||
rpool/ROOT/ubuntu_xxxxxx/var/lib/apt 75,2M 449G 75,2M /var/lib/apt
|
||||
rpool/ROOT/ubuntu_xxxxxx/var/lib/dpkg 36,5M 449G 36,5M /var/lib/dpkg
|
||||
rpool/ROOT/ubuntu_xxxxxx/var/log 11,0M 449G 11,0M /var/log
|
||||
rpool/ROOT/ubuntu_xxxxxx/var/mail 96K 449G 96K /var/mail
|
||||
rpool/ROOT/ubuntu_xxxxxx/var/snap 128K 449G 128K /var/snap
|
||||
rpool/ROOT/ubuntu_xxxxxx/var/spool 112K 449G 112K /var/spool
|
||||
rpool/ROOT/ubuntu_xxxxxx/var/www 96K 449G 96K /var/www
|
||||
rpool/USERDATA 3,38G 449G 96K /
|
||||
rpool/USERDATA/user_yyyyyy 3,37G 449G 3,37G /home/user
|
||||
rpool/USERDATA/root_yyyyyy 7,52M 449G 7,52M /root
|
||||
#+END_EXAMPLE
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<div class="admonition note">
|
||||
<p class="admonition-title">Note</p>
|
||||
#+END_EXPORT
|
||||
The installer has created some random IDs that I have not figured out if they are totally random or mapped to something so I have sanitized them.
|
||||
I also sanitized the user, of course. ;)
|
||||
#+BEGIN_EXPORT html
|
||||
</div>
|
||||
#+END_EXPORT
|
||||
|
||||
It looks like the installer created a bunch of datasets with their respective mountpoints.
|
||||
|
||||
* ZFS Properties
|
||||
*ZFS* has a list of features and they are tunable in different ways, one of them is through the properties, let's have a look.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
# zfs get all rpool
|
||||
NAME PROPERTY VALUE SOURCE
|
||||
rpool type filesystem -
|
||||
rpool creation vr jan 24 23:04 2020 -
|
||||
rpool used 7,91G -
|
||||
rpool available 449G -
|
||||
rpool referenced 96K -
|
||||
rpool compressratio 1.43x -
|
||||
rpool mounted no -
|
||||
rpool quota none default
|
||||
rpool reservation none default
|
||||
rpool recordsize 128K default
|
||||
rpool mountpoint / local
|
||||
...
|
||||
#+END_EXAMPLE
|
||||
|
||||
This gives us an idea on properties set on the dataset specified, in this case, the /rpool/ root dataset.
|
||||
|
||||
* Conclusion
|
||||
I read in a blog post that the *Ubuntu* team responsible for the *ZFS* support has followed all the *ZFS* best practices in the installer.
|
||||
I have no way of verifying that as I am not a *ZFS* expert but I'll be happy to take their word for it until I learn more.
|
||||
What is certain for now is that I am running on *ZFS*, and I will be enjoying its features to the fullest.
|
|
@ -1,173 +0,0 @@
|
|||
.. title: A quick ZFS overview on Linux
|
||||
.. date: 2020-01-27
|
||||
.. slug: a-quick-zfs-overview-on-linux
|
||||
.. updated: 2020-01-27
|
||||
.. status: published
|
||||
.. tags: misc, zfs, file system
|
||||
.. category: misc
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: A overview of Linux on ZFS root
|
||||
.. type: text
|
||||
|
||||
I have, for years, been interested in *file systems*. Specifically a *file system* to run my personal systems on.
|
||||
For most people **Ext4** is good enough and that is totally fine. But, as a power user, I like to have more control, more features and more options out of my file system.
|
||||
|
||||
I have played with most of file sytsems on Linux, and have been using **Btrfs** for a few years now. I have worked with NAS systems running on **ZFS** and have been very impressed by it.
|
||||
The only problem is that **ZFS** wasn't been well suppored on Linux at the time. **Btrfs** promissed to be the **ZFS** replacement for Linux nativetly, especially that it was backed up by a bunch of the giants like Oracle and RedHat.
|
||||
My decision at that point was made, and yes that was before RedHat's support for **XFS** which is impressive on its own.
|
||||
Recently though, a new project gave everyone hope. `OpenZFS <http://www.open-zfs.org/wiki/Main_Page>`_ came to life and so did `ZFS on Linux <https://zfsonlinux.org/>`_.
|
||||
|
||||
.. TEASER_END
|
||||
|
||||
Linux has had **ZFS** support for a while now but mostly to manage a **ZFS** *file system*, so I kept watching until I saw a blog post by **Ubuntu** entitled `Enhancing our ZFS support on Ubuntu 19.10 – an introduction <https://ubuntu.com/blog/enhancing-our-zfs-support-on-ubuntu-19-10-an-introduction>`_.
|
||||
|
||||
In the blog post above, I read the following:
|
||||
|
||||
We want to support ZFS on root as an experimental installer option, initially for desktop,
|
||||
but keeping the layout extensible for server later on. The desktop will be the first
|
||||
beneficiary in Ubuntu 19.10. Note the use of the term ‘experimental’ though!
|
||||
|
||||
My eyes widened at this point. I know that **Ubuntu** has had native **ZFS** support since 2016 but now I could install it with one click. At that point I was all in, and I went back to **Ubuntu**.
|
||||
|
||||
|
||||
Ubuntu on root ZFS
|
||||
==================
|
||||
|
||||
You heard me right, the **Ubuntu** installer offers an 'experimental' install on **ZFS**. I made the decision based on the well tested stability of **ZFS** in production environments and its ability to offer me the flexibility and the ability to backup and recover my data easily.
|
||||
In other words, if **Ubuntu** doesn't work, **ZFS** is there and I can install whatever I like on top and if you are familiar with **ZFS** you know exactly what I mean and I have barely scratched the ice on its capabilities.
|
||||
|
||||
|
||||
So here I was with **Ubuntu** installed on my laptop on root **ZFS**. So I had to do it.
|
||||
|
||||
.. code:: text
|
||||
|
||||
# zpool status -v
|
||||
pool: bpool
|
||||
state: ONLINE
|
||||
status: The pool is formatted using a legacy on-disk format. The pool can
|
||||
still be used, but some features are unavailable.
|
||||
action: Upgrade the pool using 'zpool upgrade'. Once this is done, the
|
||||
pool will no longer be accessible on software that does not support
|
||||
feature flags.
|
||||
scan: none requested
|
||||
config:
|
||||
|
||||
NAME STATE READ WRITE CKSUM
|
||||
bpool ONLINE 0 0 0
|
||||
nvme0n1p4 ONLINE 0 0 0
|
||||
|
||||
errors: No known data errors
|
||||
|
||||
pool: rpool
|
||||
state: ONLINE
|
||||
scan: none requested
|
||||
config:
|
||||
|
||||
NAME STATE READ WRITE CKSUM
|
||||
rpool ONLINE 0 0 0
|
||||
nvme0n1p5 ONLINE 0 0 0
|
||||
|
||||
errors: No known data errors
|
||||
|
||||
.. note::
|
||||
|
||||
I have read somewhere in a blog about **Ubuntu** that I should not run an upgrade on the boot pool.
|
||||
|
||||
and it's running on...
|
||||
|
||||
.. code:: text
|
||||
|
||||
# uname -s -v -i -o
|
||||
Linux #28-Ubuntu SMP Wed Dec 18 05:37:46 UTC 2019 x86_64 GNU/Linux
|
||||
|
||||
Well that was pretty easy.
|
||||
|
||||
|
||||
ZFS Pools
|
||||
=========
|
||||
|
||||
Let's take a look at how the installer has configured the *pools*.
|
||||
|
||||
.. code:: text
|
||||
|
||||
# zpool list zo 23:21
|
||||
NAME SIZE ALLOC FREE CKPOINT EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT
|
||||
bpool 1,88G 158M 1,72G - - - 8% 1.00x ONLINE -
|
||||
rpool 472G 7,91G 464G - - 0% 1% 1.00x ONLINE -
|
||||
|
||||
So it creates a *boot* pool and a *root* pool. Maybe looking at the **datasets** would give us a better idea.
|
||||
|
||||
|
||||
ZFS Datasets
|
||||
============
|
||||
|
||||
Let's look at the sanitized version of the datasets.
|
||||
|
||||
.. code:: text
|
||||
|
||||
# zfs list
|
||||
NAME USED AVAIL REFER MOUNTPOINT
|
||||
bpool 158M 1,60G 176K /boot
|
||||
bpool/BOOT 157M 1,60G 176K none
|
||||
bpool/BOOT/ubuntu_xxxxxx 157M 1,60G 157M /boot
|
||||
rpool 7,92G 449G 96K /
|
||||
rpool/ROOT 4,53G 449G 96K none
|
||||
rpool/ROOT/ubuntu_xxxxxx 4,53G 449G 3,37G /
|
||||
rpool/ROOT/ubuntu_xxxxxx/srv 96K 449G 96K /srv
|
||||
rpool/ROOT/ubuntu_xxxxxx/usr 208K 449G 96K /usr
|
||||
rpool/ROOT/ubuntu_xxxxxx/usr/local 112K 449G 112K /usr/local
|
||||
rpool/ROOT/ubuntu_xxxxxx/var 1,16G 449G 96K /var
|
||||
rpool/ROOT/ubuntu_xxxxxx/var/games 96K 449G 96K /var/games
|
||||
rpool/ROOT/ubuntu_xxxxxx/var/lib 1,15G 449G 1,04G /var/lib
|
||||
rpool/ROOT/ubuntu_xxxxxx/var/lib/AccountServices 96K 449G 96K /var/lib/AccountServices
|
||||
rpool/ROOT/ubuntu_xxxxxx/var/lib/NetworkManager 152K 449G 152K /var/lib/NetworkManager
|
||||
rpool/ROOT/ubuntu_xxxxxx/var/lib/apt 75,2M 449G 75,2M /var/lib/apt
|
||||
rpool/ROOT/ubuntu_xxxxxx/var/lib/dpkg 36,5M 449G 36,5M /var/lib/dpkg
|
||||
rpool/ROOT/ubuntu_xxxxxx/var/log 11,0M 449G 11,0M /var/log
|
||||
rpool/ROOT/ubuntu_xxxxxx/var/mail 96K 449G 96K /var/mail
|
||||
rpool/ROOT/ubuntu_xxxxxx/var/snap 128K 449G 128K /var/snap
|
||||
rpool/ROOT/ubuntu_xxxxxx/var/spool 112K 449G 112K /var/spool
|
||||
rpool/ROOT/ubuntu_xxxxxx/var/www 96K 449G 96K /var/www
|
||||
rpool/USERDATA 3,38G 449G 96K /
|
||||
rpool/USERDATA/user_yyyyyy 3,37G 449G 3,37G /home/user
|
||||
rpool/USERDATA/root_yyyyyy 7,52M 449G 7,52M /root
|
||||
|
||||
.. note::
|
||||
|
||||
The installer have created some random IDs that I have not figured out if they are totally random or mapped to something so I have sanitized them.
|
||||
I also sanitized the user, of course. ;)
|
||||
|
||||
It looks like the installer created a bunch of datasets with their respective mountpoints.
|
||||
|
||||
|
||||
ZFS Properties
|
||||
==============
|
||||
|
||||
**ZFS** has a list of features and they are tunable in different ways, one of them is through the properties, let's have a look.
|
||||
|
||||
.. code:: text
|
||||
|
||||
# zfs get all rpool
|
||||
NAME PROPERTY VALUE SOURCE
|
||||
rpool type filesystem -
|
||||
rpool creation vr jan 24 23:04 2020 -
|
||||
rpool used 7,91G -
|
||||
rpool available 449G -
|
||||
rpool referenced 96K -
|
||||
rpool compressratio 1.43x -
|
||||
rpool mounted no -
|
||||
rpool quota none default
|
||||
rpool reservation none default
|
||||
rpool recordsize 128K default
|
||||
rpool mountpoint / local
|
||||
...
|
||||
|
||||
This gives us an idea on properties set on the dataset specified, in this case, the *rpool* root dataset.
|
||||
|
||||
|
||||
Conclusion
|
||||
==========
|
||||
|
||||
I read in a blog post that the **Ubuntu** team responsible for the **ZFS** support has followed all the **ZFS** best practices in the installer.
|
||||
I have no way of verifying that as I am not a **ZFS** expert but I'll be happy to take their word for it until I learn more.
|
||||
What is certain for now is that I am running on **ZFS**, and I will be enjoying its features to the fullest.
|
|
@ -0,0 +1,112 @@
|
|||
#+BEGIN_COMMENT
|
||||
.. title: My path down the road of cloudflare's redirect loop
|
||||
.. date: 2020-01-27 22:00:00
|
||||
.. slug: my_path_down_the_road_of_cloudflare_s_redirect_loop
|
||||
.. updated: 2020-01-27 22:00:00
|
||||
.. status: published
|
||||
.. tags: misc, cloudflare, cdn
|
||||
.. category: misc
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: I have had issues with cloudflare's CDN causing redirect loop errors, here's how I solved it.
|
||||
.. type: text
|
||||
#+END_COMMENT
|
||||
|
||||
I have used *Cloudflare* as my /DNS manager/ for years, specifically because it offers *API* that works with *certbot*.
|
||||
This setup has worked very well for me so far.
|
||||
The only thing that kept bothering me is that every time I turn on the /CDN/ capability on my *Cloudflare*, I get a loop error.
|
||||
That's weird.
|
||||
|
||||
* Setup
|
||||
Let's talk about my setup for a little bit.
|
||||
I use *certbot* to generate and maintain my fleet of certificates.
|
||||
I use *Nginx* as a web-server.
|
||||
|
||||
Let's say I want to host a static content off of my server.
|
||||
My *nginx* configuration would look something like the following.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
server {
|
||||
listen 443 ssl;
|
||||
server_name server.example.com;
|
||||
|
||||
ssl_certificate /path/to/the/fullchain.pem;
|
||||
ssl_certificate_key /path/to/the/privkey.pem;
|
||||
|
||||
root /path/to/data/root/;
|
||||
index index.html;
|
||||
|
||||
location / {
|
||||
try_files $uri $uri/ =404;
|
||||
}
|
||||
}
|
||||
#+END_EXAMPLE
|
||||
|
||||
This is a static site, of course.
|
||||
Now you may ask about /non-SSL/.
|
||||
Well, I don't do /non-SSL/.
|
||||
In other words, I have something like this in my config.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
server {
|
||||
listen 80;
|
||||
server_name _;
|
||||
|
||||
location / {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
}
|
||||
#+END_EXAMPLE
|
||||
|
||||
So, all /http/ traffic gets redirected to /https/.
|
||||
|
||||
* Problem
|
||||
Considering the regular setup above, once I enable the "proxy" feature of *Cloudflare* I get the following error.
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<a class="reference" href="/images/my-path-down-the-road-of-cloudflare-s-redirect-loop/too-many-redirects.png" alt="Too Many Redirects Error" align="center">
|
||||
<img src="/images/my-path-down-the-road-of-cloudflare-s-redirect-loop/too-many-redirects.thumbnail.png">
|
||||
#+END_EXPORT
|
||||
#+BEGIN_EXPORT html
|
||||
</a>
|
||||
#+END_EXPORT
|
||||
|
||||
|
||||
|
||||
That baffled me for a bit.
|
||||
There is no reason for this to happen.
|
||||
I decided to dig deeper.
|
||||
|
||||
* Solution
|
||||
As I was digging through the *Cloudflare* configuration, I stumbled upon this page.
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<a class="reference" href="/images/my-path-down-the-road-of-cloudflare-s-redirect-loop/flexible-encryption.png" alt="Flexible Encryption" align="center">
|
||||
<img src="/images/my-path-down-the-road-of-cloudflare-s-redirect-loop/flexible-encryption.thumbnail.png">
|
||||
#+END_EXPORT
|
||||
#+BEGIN_EXPORT html
|
||||
</a>
|
||||
#+END_EXPORT
|
||||
|
||||
This is interesting.
|
||||
It says that the connection is encrypted between the browser and *Cloudflare*.
|
||||
Does that mean that between *Cloudflare* and my server, the connection is unencrypted ?
|
||||
|
||||
If that's the case, it means that the request coming from *Cloudflare* to my server is coming on /http/.
|
||||
If it is coming on /http/, it is getting redirected to /https/ which goes back to *Cloudflare* and so on.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
THIS IS IT ! I FOUND MY ANSWER...
|
||||
#+END_EXAMPLE
|
||||
|
||||
Alright, let's move this to what they call "Full Encryption", which calls my server on /https/ as it should.
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<a class="reference" href="/images/my-path-down-the-road-of-cloudflare-s-redirect-loop/full-encryption.png" alt="Full Encryption" align="center">
|
||||
<img src="/images/my-path-down-the-road-of-cloudflare-s-redirect-loop/full-encryption.thumbnail.png">
|
||||
#+END_EXPORT
|
||||
#+BEGIN_EXPORT html
|
||||
</a>
|
||||
#+END_EXPORT
|
||||
|
||||
After this change, all the errors cleared up and got my blog up and
|
||||
running again.
|
|
@ -1,104 +0,0 @@
|
|||
.. title: My path down the road of cloudflare's redirect loop
|
||||
.. date: 2020-01-27 22:00:00
|
||||
.. slug: my_path_down_the_road_of_cloudflare_s_redirect_loop
|
||||
.. updated: 2020-01-27 22:00:00
|
||||
.. status: published
|
||||
.. tags: misc, cloudflare, cdn
|
||||
.. category: misc
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: I have had issues with cloudflare's CDN causing redirect loop errors, here's how I solved it.
|
||||
.. type: text
|
||||
|
||||
I have used **Cloudflare** as my *DNS manager* for years, specifically because it offers **API** that works with **certbot**. This setup has worked very well for me so far.
|
||||
The only thing that kept bothering me is that every time I turn on the *CDN* capability on my **Cloudflare** , I get a loor error. That's weird.
|
||||
|
||||
.. TEASER_END
|
||||
|
||||
Setup
|
||||
=====
|
||||
|
||||
Let's talk about my setup for a little bit. I use **certbot** to generate and maintain my fleet of certificates.
|
||||
I use **Nginx** as a web-server.
|
||||
|
||||
Let's say I want to host a static content off of my server. My **nginx** configuration would look something like the following.
|
||||
|
||||
.. code:: text
|
||||
|
||||
server {
|
||||
listen 443 ssl;
|
||||
server_name server.example.com;
|
||||
|
||||
ssl_certificate /path/to/the/fullchain.pem;
|
||||
ssl_certificate_key /path/to/the/privkey.pem;
|
||||
|
||||
root /path/to/data/root/;
|
||||
index index.html;
|
||||
|
||||
location / {
|
||||
try_files $uri $uri/ =404;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
This is a static site, of course. Now you may ask about *non-SSL*. Well, I don't do *non-SSL*.
|
||||
In other words, I have something like this in my config.
|
||||
|
||||
.. code:: text
|
||||
|
||||
server {
|
||||
listen 80;
|
||||
server_name _;
|
||||
|
||||
location / {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
So, all *http* traffic gets redirected to *https*.
|
||||
|
||||
|
||||
Problem
|
||||
=======
|
||||
|
||||
Considering the regular setup above, once I enable the "proxy" feature of **Cloudflare** I get the following error.
|
||||
|
||||
.. thumbnail:: /images/my_path_down_the_road_of_cloudflare_s_redirect_loop/too_many_redirects.png
|
||||
:align: center
|
||||
:alt: Too Many Redirects Error
|
||||
|
||||
|
||||
That baffled me for a bit. There is no reason for this to happen. I decided to dig deeper.
|
||||
|
||||
|
||||
Solution
|
||||
========
|
||||
|
||||
As I was digging through the **Cloudflare** configuration, I stumbled upon this page.
|
||||
|
||||
|
||||
.. thumbnail:: /images/my_path_down_the_road_of_cloudflare_s_redirect_loop/flexible_encryption.png
|
||||
:align: center
|
||||
:alt: Flexible Encryption
|
||||
|
||||
|
||||
This is interesting. It says that the connection is encrypted between the broswer and **Cloudflare**.
|
||||
Does that mean that between **Cloudflare** and my server, the connection is unencrypted ?
|
||||
|
||||
If that's the case, it means that the request coming from **Cloudflare** to my server is coming on *http*.
|
||||
If it is coming on *http*, it is getting redirected to *https* which goes back to **Cloudflare** and so on.
|
||||
|
||||
::
|
||||
|
||||
THIS IS IT ! I FOUND MY ANSWER...
|
||||
|
||||
|
||||
Alright, let's move this to what they call "Full Encryption", which calls my server on *https* as it should.
|
||||
|
||||
|
||||
.. thumbnail:: /images/my_path_down_the_road_of_cloudflare_s_redirect_loop/full_encryption.png
|
||||
:align: center
|
||||
:alt: Full Encryption
|
||||
|
||||
|
||||
After this change, all the errors cleared up and got my blog up and running again.
|
152
posts/misc/the-story-behind-cmw.org
Normal file
|
@ -0,0 +1,152 @@
|
|||
#+BEGIN_COMMENT
|
||||
.. title: The story behind cmw
|
||||
.. date: 2019-08-31
|
||||
.. slug: the-story-behind-cmw
|
||||
.. updated: 2019-08-31
|
||||
.. status: published
|
||||
.. tags: misc, python, development
|
||||
.. category: misc
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: The story behind cmw, the command line weather application written in python.
|
||||
.. type: text
|
||||
#+END_COMMENT
|
||||
|
||||
A few days ago, [[https://kushaldas.in][Kushal Das]] shared a curl command.
|
||||
|
||||
The command was as follows:
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ curl https://wttr.in/
|
||||
#+END_EXAMPLE
|
||||
|
||||
I, obviously, was curious.
|
||||
I ran it and it was interesting.
|
||||
So it returns the weather right ? Pretty cool huh!
|
||||
|
||||
* The interest
|
||||
That got me interested to learn how this works exactly.
|
||||
|
||||
* The investigation
|
||||
I looked at [[https://wttr.in/][https://wttr.in/]] and it seemed to have a GitHub [[https://github.com/chubin/wttr.in][link]] and a repository.
|
||||
That is very interesting.
|
||||
This is a Python application, one can tell by the code or if you prefer the GitHub bar at the top.
|
||||
|
||||
Anyway, one can also tell that this is a [[https://palletsprojects.com/p/flask/][Flask]] application from the following code in the bin/srv.py.
|
||||
|
||||
#+BEGIN_SRC python
|
||||
from flask import Flask, request, send_from_directory
|
||||
APP = Flask(__name__)
|
||||
#+END_SRC
|
||||
|
||||
By reading the README.md of the repository one can read.
|
||||
|
||||
#+BEGIN_QUOTE
|
||||
wttr.in uses [[http://github.com/schachmat/wego][wego]] for
|
||||
visualization and various data sources for weather forecast
|
||||
information.
|
||||
#+END_QUOTE
|
||||
|
||||
Let's jump to the /wego/ repository then.
|
||||
|
||||
/wego/ seems to be a command line application to graph the weather in the terminal.
|
||||
|
||||
Great, so what I did with [[https://gitlab.com/elazkani/cmw][cmw]] is already done in Go and API'fied by a different project.
|
||||
|
||||
My answer to that accusation is obviously this post.
|
||||
|
||||
* The idea
|
||||
I played a bit more with [[https://wttr.in/][https://wttr.in/]] and I found it to be an interesting API.
|
||||
I am trying to work on my python development foo so to me that was a perfect little project to work on.
|
||||
From my perspective this was simply an API and I am to consume it to put it back in my terminal.
|
||||
|
||||
* The work
|
||||
The beginning work was very rough and hidden away in a private repository and was moved later [[https://gitlab.com/elazkani/cmw][here]].
|
||||
The only thing left from that work is the =--format= argument which allows you full control over what gets sent.
|
||||
But again, let's not forget what the real purpose of this project was.
|
||||
So I decided to make the whole API as accessible as possible from the command line tool I am writing.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ cmw --help
|
||||
usage: cmw [-h] [-L LOCATION] [-f FORMAT] [-l LANG] [-m] [-u] [-M] [-z] [-o]
|
||||
[-w] [-A] [-F] [-n] [-q] [-Q] [-N] [-P] [-p] [-T] [-t TRANSPARENCY]
|
||||
[--v2] [--version]
|
||||
|
||||
Get the weather!
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
-L LOCATION, --location LOCATION
|
||||
Location (look at epilog for more information)
|
||||
-f FORMAT, --format FORMAT
|
||||
Query formatting
|
||||
-l LANG, --lang LANG The language to use
|
||||
-m, --metric Units: Metric (SI) (default outside US)
|
||||
-u, --uscs Units: USCS (default in US)
|
||||
-M, --meter-second Units: Show wind speed in m/s
|
||||
-z, --zero View: Only current weather
|
||||
-o, --one View: Current weather & one day
|
||||
-w, --two View: Current weather & two days
|
||||
-A, --ignore-user-agent
|
||||
View: Force ANSI output format
|
||||
-F, --follow-link View: Show the 'Follow' line from upstream
|
||||
-n, --narrow View: Narrow version
|
||||
-q, --quiet View: Quiet version
|
||||
-Q, --super-quiet View: Super quiet version
|
||||
-N, --no-colors View: Switch terminal sequences off
|
||||
-P, --png PNG: Generate PNG file
|
||||
-p, --add-frame PNG: Add frame around output
|
||||
-T, --mid-transparency
|
||||
PNG: Make transparency 150
|
||||
-t TRANSPARENCY, --transparency TRANSPARENCY
|
||||
PNG: Set transparency between 0 and 255
|
||||
--v2 v2 interface of the day
|
||||
--version show program's version number and exit
|
||||
|
||||
Supported Location Types
|
||||
------------------------
|
||||
City name: Paris
|
||||
Unicode name: Москва
|
||||
Airport code (3 letters): muc
|
||||
Domain name: @stackoverflow.com
|
||||
Area code: 94107
|
||||
GPS coordinates: -78.46,106.79
|
||||
|
||||
Special Location
|
||||
----------------
|
||||
Moon phase (add ,+US
|
||||
or ,+France
|
||||
for these cities): moon
|
||||
Moon phase for a date: moon@2016-10-25
|
||||
|
||||
Supported languages
|
||||
-------------------
|
||||
|
||||
Supported: af da de el et fr fa hu id it nb nl pl pt-br ro ru tr uk vi
|
||||
#+END_EXAMPLE
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ cmw --location London --lang nl --one
|
||||
Weerbericht voor: London
|
||||
|
||||
\ / Zonnig
|
||||
.-. 20 °C
|
||||
― ( ) ― → 19 km/h
|
||||
`-’ 10 km
|
||||
/ \ 0.0 mm
|
||||
┌─────────────┐
|
||||
┌──────────────────────────────┬───────────────────────┤ za 31 aug ├───────────────────────┬──────────────────────────────┐
|
||||
│ 's Ochtends │ 's Middags └──────┬──────┘ 's Avonds │ 's Nachts │
|
||||
├──────────────────────────────┼──────────────────────────────┼──────────────────────────────┼──────────────────────────────┤
|
||||
│ \ / Gedeeltelijk b…│ \ / Gedeeltelijk b…│ Bewolkt │ \ / Gedeeltelijk b…│
|
||||
│ _ /"".-. 21 °C │ _ /"".-. 23..24 °C │ .--. 20 °C │ _ /"".-. 18 °C │
|
||||
│ \_( ). ↗ 12-14 km/h │ \_( ). ↗ 18-20 km/h │ .-( ). ↗ 20-25 km/h │ \_( ). → 16-19 km/h │
|
||||
│ /(___(__) 10 km │ /(___(__) 10 km │ (___.__)__) 10 km │ /(___(__) 10 km │
|
||||
│ 0.0 mm | 0% │ 0.0 mm | 0% │ 0.0 mm | 0% │ 0.0 mm | 0% │
|
||||
└──────────────────────────────┴──────────────────────────────┴──────────────────────────────┴──────────────────────────────┘
|
||||
Locatie: London [51.509648,-0.099076]
|
||||
#+END_EXAMPLE
|
||||
|
||||
* Conclusion
|
||||
All I got to say in conclusion is that it was a lot of fun working on [[https://gitlab.com/elazkani/cmw][cmw]] and I learned a lot.
|
||||
I'm not going to publish the package on [[https://pypi.org/][PyPI]] because seriously, what's the point.
|
||||
But if you are interested in making changes to the repository, make an MR.
|
|
@ -1,159 +0,0 @@
|
|||
.. title: The story behind cmw
|
||||
.. date: 2019-08-31
|
||||
.. slug: the-story-behind-cmw
|
||||
.. updated: 2019-08-31
|
||||
.. status: published
|
||||
.. tags: misc, python, development
|
||||
.. category: misc
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: The story behind cmw, the command line weather application written in python.
|
||||
.. type: text
|
||||
|
||||
A few days ago, `Kushal Das <https://kushaldas.in>`_ shared a `curl` command.
|
||||
|
||||
The command was as follows:
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ curl https://wttr.in/
|
||||
|
||||
I, obviously, was curious.
|
||||
I ran it and it was interesting.
|
||||
So it returns the weather right ? Pretty cool huh!
|
||||
|
||||
.. TEASER_END
|
||||
|
||||
The interest
|
||||
============
|
||||
|
||||
That got me interested to learn how does this work exactly.
|
||||
|
||||
|
||||
The investigation
|
||||
=================
|
||||
|
||||
I looked at `https://wttr.in/ <https://wttr.in/>`_ and it seemed to have a GitHub `link <https://github.com/chubin/wttr.in>`_ and a repository.
|
||||
That is very interesting.
|
||||
This is a Python application, one can tell by the code or if you prefer the GitHub bar at the top.
|
||||
|
||||
Anyway, one can also tell that this is a `Flask <https://palletsprojects.com/p/flask/>`_ application from the following code in the `bin/srv.py`.
|
||||
|
||||
.. code:: python
|
||||
|
||||
from flask import Flask, request, send_from_directory
|
||||
APP = Flask(__name__)
|
||||
|
||||
By reading the `README.md` of the repository one can read.
|
||||
|
||||
wttr.in uses `wego <http://github.com/schachmat/wego>`_ for visualization and various data sources for weather forecast information.
|
||||
|
||||
Let's jump to the ``wego`` repository then.
|
||||
|
||||
``wego`` seems to be a command line application to graph the weather in the terminal.
|
||||
|
||||
Great, so what I did with `cmw <https://gitlab.com/elazkani/cmw>`_ is already done in Go and API'fied by a different project.
|
||||
|
||||
My answer to that accusation is obviously this post.
|
||||
|
||||
The idea
|
||||
========
|
||||
|
||||
I played a bit more with `https://wttr.in/ <https://wttr.in/>`_ and I found it to be an interesting API.
|
||||
I am trying to work on my python development foo so to me that was a perfect little project to work on.
|
||||
From my perspective this was simply an API and I am to consume it to put it back in my terminal.
|
||||
|
||||
The work
|
||||
========
|
||||
|
||||
The beginning work was very rough and hidden away in a private repository and was moved later `here <https://gitlab.com/elazkani/cmw>`_.
|
||||
The only thing left from that work is the `--format` argument which allows you full control over what gets sent.
|
||||
But again, let's not forget what the real purpose of this project was.
|
||||
So I decided to make the whole API as accessible as possible from the command line tool I am writing.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ cmw --help
|
||||
usage: cmw [-h] [-L LOCATION] [-f FORMAT] [-l LANG] [-m] [-u] [-M] [-z] [-o]
|
||||
[-w] [-A] [-F] [-n] [-q] [-Q] [-N] [-P] [-p] [-T] [-t TRANSPARENCY]
|
||||
[--v2] [--version]
|
||||
|
||||
Get the weather!
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
-L LOCATION, --location LOCATION
|
||||
Location (look at epilog for more information)
|
||||
-f FORMAT, --format FORMAT
|
||||
Query formatting
|
||||
-l LANG, --lang LANG The language to use
|
||||
-m, --metric Units: Metric (SI) (default outside US)
|
||||
-u, --uscs Units: USCS (default in US)
|
||||
-M, --meter-second Units: Show wind speed in m/s
|
||||
-z, --zero View: Only current weather
|
||||
-o, --one View: Current weather & one day
|
||||
-w, --two View: Current weather & two days
|
||||
-A, --ignore-user-agent
|
||||
View: Force ANSI output format
|
||||
-F, --follow-link View: Show the 'Follow' line from upstream
|
||||
-n, --narrow View: Narrow version
|
||||
-q, --quiet View: Quiet version
|
||||
-Q, --super-quiet View: Super quiet version
|
||||
-N, --no-colors View: Switch terminal sequences off
|
||||
-P, --png PNG: Generate PNG file
|
||||
-p, --add-frame PNG: Add frame around output
|
||||
-T, --mid-transparency
|
||||
PNG: Make transparency 150
|
||||
-t TRANSPARENCY, --transparency TRANSPARENCY
|
||||
PNG: Set transparency between 0 and 255
|
||||
--v2 v2 interface of the day
|
||||
--version show program's version number and exit
|
||||
|
||||
Supported Location Types
|
||||
------------------------
|
||||
City name: Paris
|
||||
Unicode name: Москва
|
||||
Airport code (3 letters): muc
|
||||
Domain name: @stackoverflow.com
|
||||
Area code: 94107
|
||||
GPS coordinates: -78.46,106.79
|
||||
|
||||
Special Location
|
||||
----------------
|
||||
Moon phase (add ,+US
|
||||
or ,+France
|
||||
for these cities): moon
|
||||
Moon phase for a date: moon@2016-10-25
|
||||
|
||||
Supported languages
|
||||
-------------------
|
||||
|
||||
Supported: af da de el et fr fa hu id it nb nl pl pt-br ro ru tr uk vi
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ cmw --location London --lang nl --one
|
||||
Weerbericht voor: London
|
||||
|
||||
\ / Zonnig
|
||||
.-. 20 °C
|
||||
― ( ) ― → 19 km/h
|
||||
`-’ 10 km
|
||||
/ \ 0.0 mm
|
||||
┌─────────────┐
|
||||
┌──────────────────────────────┬───────────────────────┤ za 31 aug ├───────────────────────┬──────────────────────────────┐
|
||||
│ 's Ochtends │ 's Middags └──────┬──────┘ 's Avonds │ 's Nachts │
|
||||
├──────────────────────────────┼──────────────────────────────┼──────────────────────────────┼──────────────────────────────┤
|
||||
│ \ / Gedeeltelijk b…│ \ / Gedeeltelijk b…│ Bewolkt │ \ / Gedeeltelijk b…│
|
||||
│ _ /"".-. 21 °C │ _ /"".-. 23..24 °C │ .--. 20 °C │ _ /"".-. 18 °C │
|
||||
│ \_( ). ↗ 12-14 km/h │ \_( ). ↗ 18-20 km/h │ .-( ). ↗ 20-25 km/h │ \_( ). → 16-19 km/h │
|
||||
│ /(___(__) 10 km │ /(___(__) 10 km │ (___.__)__) 10 km │ /(___(__) 10 km │
|
||||
│ 0.0 mm | 0% │ 0.0 mm | 0% │ 0.0 mm | 0% │ 0.0 mm | 0% │
|
||||
└──────────────────────────────┴──────────────────────────────┴──────────────────────────────┴──────────────────────────────┘
|
||||
Locatie: London [51.509648,-0.099076]
|
||||
|
||||
Conclusion
|
||||
==========
|
||||
|
||||
All I got to say in conclusion is that it was a lot of fun working on `cmw <https://gitlab.com/elazkani/cmw>`_ and I learned a lot.
|
||||
I'm not going to publish the package on `PyPI <https://pypi.org/>`_ because seriously, what's the point.
|
||||
But if you are interested in making changes to the repository, make an MR.
|
|
@ -0,0 +1,126 @@
|
|||
#+BEGIN_COMMENT
|
||||
.. title: Building up simple monitoring on Healthchecks
|
||||
.. date: 2020-02-11
|
||||
.. slug: building-up-simple-monitoring-on-healthchecks
|
||||
.. updated: 2020-02-11
|
||||
.. status: published
|
||||
.. tags: monitoring, healthchecks, cron, curl
|
||||
.. category: monitoring
|
||||
.. authors: Elia el Lazkani
|
||||
.. description:
|
||||
.. type: text
|
||||
#+END_COMMENT
|
||||
|
||||
I talked previously in "{{% doc %}}simple-cron-monitoring-with-healthchecks{{% /doc %}}" about deploying my own simple monitoring system.
|
||||
|
||||
Now that it's up, I'm only using it for my backups. That's a good use, for sure, but I know I can do better.
|
||||
|
||||
So I went digging.
|
||||
|
||||
{{{TEASER_END}}}
|
||||
|
||||
* Introduction
|
||||
I host a list of services, some are public like my blog while others private.
|
||||
These services are not critical, some can be down for short periods of time.
|
||||
Some services might even be down for longer periods without causing any loss in functionality.
|
||||
|
||||
That being said, I'm a /DevOps engineer/. That means, I need to know.
|
||||
|
||||
Yea, it doesn't mean I'll do something about it right away, but I'd like to be in the know.
|
||||
|
||||
Which got me thinking...
|
||||
|
||||
* Healthchecks Endpoints
|
||||
Watching *borg* use its /healthchecks/ hook opened my eyes on another functionality of *Healthchecks*.
|
||||
|
||||
It seems that if you ping
|
||||
#+BEGIN_EXAMPLE
|
||||
https://healthchecks.example.com/ping/84b2a834-02f5-524f-4c27-a2f24562b219/start
|
||||
#+END_EXAMPLE
|
||||
|
||||
It will start a counter that will measure the time until you ping
|
||||
#+BEGIN_EXAMPLE
|
||||
https://healthchecks.example.com/ping/84b2a834-02f5-524f-4c27-a2f24562b219
|
||||
#+END_EXAMPLE
|
||||
|
||||
This way, you can find out how long it is taking you to check on the status of a service. Or maybe, how long a service is taking to backup.
|
||||
|
||||
It turns out that /healthchecks/ also offers a different endpoint to ping. You can report a failure straight away by pinging
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
https://healthchecks.example.com/ping/84b2a834-02f5-524f-4c27-a2f24562b219/fail
|
||||
#+END_EXAMPLE
|
||||
|
||||
This way, you do not have to wait until the time expires before you get notified of a failure.
|
||||
|
||||
With those pieces of knowledge, we can do a lot.
|
||||
|
||||
* A lot ?
|
||||
Yes, a lot...
|
||||
|
||||
Let's put what we have learned so far into action.
|
||||
|
||||
#+BEGIN_SRC sh :noeval
|
||||
#!/bin/bash
|
||||
|
||||
WEB_HOST=$1
|
||||
CHECK_ID=$2
|
||||
|
||||
HEALTHCHECKS_HOST="https://healthchecks.example.com/ping"
|
||||
|
||||
curl -fsS --retry 3 "${HEALTHCHECKS_HOST}/${CHECK_ID}/start" > /dev/null
|
||||
|
||||
OUTPUT=`curl -sS "${WEB_HOST}"`
|
||||
STATUS=$?
|
||||
|
||||
if [[ $STATUS -eq 0 ]]; then
|
||||
curl -fsS --retry 3 "${HEALTHCHECKS_HOST}/${CHECK_ID}" > /dev/null
|
||||
else
|
||||
curl -fsS --retry 3 "${HEALTHCHECKS_HOST}/${CHECK_ID}/fail" > /dev/null
|
||||
fi
|
||||
#+END_SRC
|
||||
|
||||
We start by defining a few variables for the website hostname to monitor, the check ID provided by /healthchecks/ and finally the /healthchecks/ base link for the monitors.
|
||||
|
||||
Once those are set, we simply use =curl= with a couple of special flags to make sure that it fails properly if something goes wrong.
|
||||
|
||||
We start the /healthchecks/ timer, run the website check and either call the passing or the failing /healthchecks/ endpoint depending on the outcomes.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ chmod +x https_healthchecks_monitor.sh
|
||||
$ ./https_healthchecks_monitor.sh https://healthchecks.example.com 84b2a834-02f5-524f-4c27-a2f24562b219
|
||||
#+END_EXAMPLE
|
||||
|
||||
Test it out.
|
||||
|
||||
* Okay, that's nice but now what !
|
||||
Now, let's hook it up to our cron.
|
||||
|
||||
Start with =crontab -e= which should open your favorite text editor.
|
||||
|
||||
Then create a cron entry (a new line) like the following:
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
*/15 * * * * /path/to/https_healthchecks_monitor.sh https://healthchecks.example.com 84b2a834-02f5-524f-4c27-a2f24562b219
|
||||
#+END_EXAMPLE
|
||||
|
||||
This will run the script every 15 minutes. Make sure that your timeout is 15 minutes for this check, with a grace period of 5 minutes.
|
||||
That configuration will guarantee that you will get notified 20 minutes after any failure, at the worst.
|
||||
|
||||
Be aware, I said any failure.
|
||||
Getting notified does not guarantee that your website is down.
|
||||
It can only guarantee that /healthchecks/ wasn't pinged on time.
|
||||
|
||||
Getting notified covers a bunch of cases. Some of them are:
|
||||
- The server running the cron is down
|
||||
- The cron service is not running
|
||||
- The server running the cron lost internet access
|
||||
- Your certificate expired
|
||||
- Your website is down
|
||||
|
||||
You can create checks to cover most of these if you care to make it a full monitoring system.
|
||||
If you want to go that far, maybe you should invest in a monitoring system with more features.
|
||||
|
||||
* Conclusion
|
||||
Don't judge something by its simplicity. Sometimes, out of simple components tied together, you can make something interesting and useful.
|
||||
With a little scripting, a couple of commands and the power of cron we were able to make /healthchecks/ monitor our websites.
|
|
@ -1,129 +0,0 @@
|
|||
.. title: Building up simple monitoring on Healthchecks
|
||||
.. date: 2020-02-11
|
||||
.. slug: building-up-simple-monitoring-on-healthchecks
|
||||
.. updated: 2020-02-11
|
||||
.. status: published
|
||||
.. tags: monitoring, healthchecks, cron, curl
|
||||
.. category: monitoring
|
||||
.. authors: Elia el Lazkani
|
||||
.. description:
|
||||
.. type: text
|
||||
|
||||
I talked :doc:`previously <simple-cron-monitoring-with-healthchecks>` about deploying my own simple monitoring system.
|
||||
|
||||
Now that it's up, I'm only using it for my backups. That's a good use, for sure, but I know I can do better.
|
||||
|
||||
So I went digging.
|
||||
|
||||
.. TEASER_END
|
||||
|
||||
|
||||
Introduction
|
||||
============
|
||||
|
||||
I host a list of services, some are public like my blog while others private.
|
||||
These services are not critical, some can be down for short periods of time.
|
||||
Some services might even be down for longer periods without causing any loss in functionality.
|
||||
|
||||
That being said, I'm a *DevOps engineer*. That means, I need to know.
|
||||
|
||||
Yea, it doesn't mean I'll do something about it right away, but I'd like to be in the know.
|
||||
|
||||
Which got me thinking...
|
||||
|
||||
|
||||
Healthchecks Endpoints
|
||||
======================
|
||||
|
||||
Watching **borg** use its *healthchecks* hook opened my eyes on another functionality of **Healthchecks**.
|
||||
|
||||
It seems that if you ping ``https://healthchecks.example.com/ping/84b2a834-02f5-524f-4c27-a2f24562b219/start``,
|
||||
it will start a counter that will measure the time until you ping ``https://healthchecks.example.com/ping/84b2a834-02f5-524f-4c27-a2f24562b219``.
|
||||
This way, you can find out how long it is taking you to check on the status of a service. Or maybe, how long a service is taking to backup.
|
||||
|
||||
It turns out that *healthchecks* also offers a different endpoint to ping.
|
||||
You can report a failure straight away by pinging ``https://healthchecks.example.com/ping/84b2a834-02f5-524f-4c27-a2f24562b219/fail``.
|
||||
This way, you do not have to wait until the time expires before you get notified of a failure.
|
||||
|
||||
With those pieces of knowledge, we can do a lot.
|
||||
|
||||
|
||||
A lot ?
|
||||
=======
|
||||
|
||||
Yes, a lot...
|
||||
|
||||
Let's put what we have learned so far into action.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
#!/bin/bash
|
||||
|
||||
WEB_HOST=$1
|
||||
CHECK_ID=$2
|
||||
|
||||
HEALTHCHECKS_HOST="https://healthchecks.example.com/ping"
|
||||
|
||||
curl -fsS --retry 3 "${HEALTHCHECKS_HOST}/${CHECK_ID}/start" > /dev/null
|
||||
|
||||
OUTPUT=`curl -sS "${WEB_HOST}"`
|
||||
STATUS=$?
|
||||
|
||||
if [[ $STATUS -eq 0 ]]; then
|
||||
curl -fsS --retry 3 "${HEALTHCHECKS_HOST}/${CHECK_ID}" > /dev/null
|
||||
else
|
||||
curl -fsS --retry 3 "${HEALTHCHECKS_HOST}/${CHECK_ID}/fail" > /dev/null
|
||||
fi
|
||||
|
||||
|
||||
We start by defining a few variables for the website hostname to monitor, the check ID provided by *healthchecks* and finally
|
||||
the *healthchecks* base link for the monitors.
|
||||
|
||||
Once those are set, we simply use ``curl`` with a couple of special flags to make sure that it fails properly if something goes wrong.
|
||||
|
||||
We start the *healthchecks* timer, run the website check and either call the passing or the failing *healthchecks* endpoint depending on the outcomes.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ chmod +x https_healthchecks_monitor.sh
|
||||
$ ./https_healthchecks_monitor.sh https://healthchecks.example.com 84b2a834-02f5-524f-4c27-a2f24562b219
|
||||
|
||||
Test it out.
|
||||
|
||||
|
||||
Okay, that's nice but now what !
|
||||
================================
|
||||
|
||||
Now, let's hook it up to our cron.
|
||||
|
||||
Start with ``crontab -e`` which should open your favorite text editor.
|
||||
|
||||
Then create a cron entry (a new line) like the following:
|
||||
|
||||
.. code:: text
|
||||
|
||||
*/15 * * * * /path/to/https_healthchecks_monitor.sh https://healthchecks.example.com 84b2a834-02f5-524f-4c27-a2f24562b219
|
||||
|
||||
This will run the script every 15 minutes. Make sure that your timeout is 15 minutes for this check, with a grace period of 5 minutes.
|
||||
That configuration will guarantee that you will get notified 20 minutes after any failure, at the worst.
|
||||
|
||||
Be aware, I said any failure.
|
||||
Getting notified does not guarantee that your website is down.
|
||||
It can only guarantee that *healthchecks* wasn't pinged on time.
|
||||
|
||||
Getting notified covers a bunch of cases. Some of them are:
|
||||
* The server running the cron is down
|
||||
* The cron service is not running
|
||||
* The server running the cron lost internet access
|
||||
* Your certificate expired
|
||||
* Your website is down
|
||||
|
||||
You can create checks to cover most of these if you care to make it a full monitoring system.
|
||||
If you want to go that far, maybe you should invest in a monitoring system with more features.
|
||||
|
||||
|
||||
Conclusion
|
||||
==========
|
||||
|
||||
Don't judge something by its simplicity. Sometimes, out of simple components tied together, you can make something interesting and useful.
|
||||
With a little scripting, a couple of commands and the power of cron we were able to make *healthchecks* monitor our websites.
|
154
posts/monitoring/simple-cron-monitoring-with-healthchecks.org
Normal file
|
@ -0,0 +1,154 @@
|
|||
#+BEGIN_COMMENT
|
||||
.. title: Simple cron monitoring with HealthChecks
|
||||
.. date: 2020-02-09
|
||||
.. slug: simple-cron-monitoring-with-healthchecks
|
||||
.. updated: 2020-02-09
|
||||
.. status: published
|
||||
.. tags: monitoring, healthchecks, cron
|
||||
.. category: monitoring
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: Ever needed to monitor simple things ? Well, HealthChecks seems perfect for that.
|
||||
.. type: text
|
||||
#+END_COMMENT
|
||||
|
||||
In a previous post entitled "{{% doc %}}automating-borg{{% /doc %}}", I showed you how you can automate your *borg* backups with *borgmatic*.
|
||||
|
||||
After I started using *borgmatic* for my backups and hooked it to a /cron/ running every 2 hours, I got interested into knowing what's happening to my backups at all times.
|
||||
|
||||
My experience comes handy in here, I know I need a monitoring system. I also know that traditional monitoring systems are too complex for my use case.
|
||||
|
||||
I need something simple. I need something I can deploy myself.
|
||||
|
||||
{{{TEASER_END}}}
|
||||
|
||||
* Choosing a monitoring system
|
||||
I already know I don't want a traditional monitoring system like /nagios/ or /sensu/ or /prometheus/. It is not needed, it's an overkill.
|
||||
|
||||
I went through the list of hooks that *borgmatic* offers out of the box and checked each project.
|
||||
|
||||
I came across [[https://healthchecks.io/][HealthChecks]].
|
||||
|
||||
* HealthChecks
|
||||
The [[https://healthchecks.io/][HealthChecks]] project works in a simple manner.
|
||||
It simply offers you an endpoint which you need to ping within a certain period, otherwise you get paged.
|
||||
|
||||
It has a lot of integrations from simple emails to other third party services that will call or message you or even trigger push notifications to your phone.
|
||||
|
||||
In my case, a simple email is enough. After all, they are simply backups and if they failed now, they will work when cron runs again in 2 hours.
|
||||
|
||||
* Deploy
|
||||
Let's create a docker-compose service configuration that looks like the
|
||||
following:
|
||||
|
||||
#+BEGIN_SRC yaml
|
||||
healthchecks:
|
||||
container_name: healthchecks
|
||||
image: linuxserver/healthchecks:v1.12.0-ls48
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "127.0.0.1:8000:8000"
|
||||
volumes:
|
||||
- "./healthchecks/data:/config"
|
||||
environment:
|
||||
PUID: "5000"
|
||||
PGID: "5000"
|
||||
SECRET_KEY: "super-secret-key"
|
||||
ALLOWED_HOSTS: '["*"]'
|
||||
DEBUG: "False"
|
||||
DEFAULT_FROM_EMAIL: "noreply@healthchecks.example.com"
|
||||
USE_PAYMENTS: "False"
|
||||
REGISTRATION_OPEN: "False"
|
||||
EMAIL_HOST: "smtp.example.com"
|
||||
EMAIL_PORT: "587"
|
||||
EMAIL_HOST_USER: "smtp@healthchecks.example.com"
|
||||
EMAIL_HOST_PASSWORD: "super-secret-password"
|
||||
EMAIL_USE_TLS: "True"
|
||||
SITE_ROOT: "https://healthchecks.example.com"
|
||||
SITE_NAME: "HealthChecks"
|
||||
MASTER_BADGE_LABEL: "HealthChecks"
|
||||
PING_ENDPOINT: "https://healthchecks.example.com/ping/"
|
||||
PING_EMAIL_DOMAIN: "healthchecks.example.com"
|
||||
TWILIO_ACCOUNT: "None"
|
||||
TWILIO_AUTH: "None"
|
||||
TWILIO_FROM: "None"
|
||||
PD_VENDOR_KEY: "None"
|
||||
TRELLO_APP_KEY: "None"
|
||||
#+END_SRC
|
||||
|
||||
This will create a docker container exposing it locally on =127.0.0.1:8000=.
|
||||
Let's point nginx to it and expose it using something similar to the following.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
server {
|
||||
listen 443 ssl;
|
||||
server_name healthchecks.example.com;
|
||||
|
||||
ssl_certificate /path/to/the/fullchain.pem;
|
||||
ssl_certificate_key /path/to/the/privkey.pem;
|
||||
|
||||
location / {
|
||||
proxy_pass http://127.0.0.1:8000;
|
||||
|
||||
add_header X-Frame-Options SAMEORIGIN;
|
||||
add_header X-XSS-Protection "1; mode=block";
|
||||
proxy_redirect off;
|
||||
proxy_buffering off;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Forwarded-Port $server_port;
|
||||
proxy_read_timeout 90;
|
||||
}
|
||||
|
||||
}
|
||||
#+END_EXAMPLE
|
||||
|
||||
This should do nicely.
|
||||
|
||||
* Usage
|
||||
Now it's a simple matter of creating a check.
|
||||
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<a class="reference" href="/images/simple-cron-monitoring-with-healthchecks/borgbackup-healthchecks.png" alt="HealthChecks monitoring for BorgBackup" align="center">
|
||||
<img src="/images/simple-cron-monitoring-with-healthchecks/borgbackup-healthchecks.thumbnail.png">
|
||||
#+END_EXPORT
|
||||
#+BEGIN_EXPORT html
|
||||
</a>
|
||||
#+END_EXPORT
|
||||
|
||||
This will give you a link that looks like the following
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
https://healthchecks.example.com/ping/84b2a834-02f5-524f-4c27-a2f24562b219
|
||||
#+END_EXAMPLE
|
||||
|
||||
Let's feed it to *borgmatic*.
|
||||
|
||||
#+BEGIN_SRC yaml
|
||||
hooks:
|
||||
healthchecks: https://healthchecks.example.com/ping/84b2a834-02f5-524f-4c27-a2f24562b219
|
||||
#+END_SRC
|
||||
|
||||
After you configure the *borgmatic* hook to keep /HealthChecks/ in the know of what's going on.
|
||||
We can take a look at the log to see what happened and when.
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<a class="reference" href="/images/simple-cron-monitoring-with-healthchecks/borgbackup-healthchecks-logs.png" alt="HealthChecks monitoring log for BorgBackup" align="center">
|
||||
<img src="/images/simple-cron-monitoring-with-healthchecks/borgbackup-healthchecks-logs.thumbnail.png">
|
||||
#+END_EXPORT
|
||||
#+BEGIN_EXPORT html
|
||||
</a>
|
||||
#+END_EXPORT
|
||||
|
||||
|
||||
* Conclusion
|
||||
As we saw in the blog post, now I am always in the know about my backups.
|
||||
If my backup fails, I get an email to notify me of a failure.
|
||||
I can also monitor how much time it takes my backups to run.
|
||||
This is a very important feature for me to have.
|
||||
|
||||
The question of deploying one's own monitoring system is a personal choice.
|
||||
After all, one can use free third party services if they would like.
|
||||
The correct answer though is to always monitor.
|
|
@ -1,154 +0,0 @@
|
|||
.. title: Simple cron monitoring with HealthChecks
|
||||
.. date: 2020-02-09
|
||||
.. slug: simple-cron-monitoring-with-healthchecks
|
||||
.. updated: 2020-02-09
|
||||
.. status: published
|
||||
.. tags: monitoring, healthchecks, cron
|
||||
.. category: monitoring
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: Ever needed to monitor simple things ? Well, HealthChecks seems perfect for that.
|
||||
.. type: text
|
||||
|
||||
In a :doc:`previous post <automating-borg>`, I showed you how you can automate your **borg** backups with **borgmatic**.
|
||||
|
||||
After I started using **borgmatic** for my backups and hooked it to a *cron* running every 2 hours, I got interested into knowing what's happening to my backups at all times.
|
||||
|
||||
My experience comes handy in here, I know I need a monitoring system.
|
||||
I also know that traditional monitoring systems are too complex for my use case.
|
||||
|
||||
I need something simple. I need something I can deploy myself.
|
||||
|
||||
.. TEASER_END
|
||||
|
||||
Choosing a monitoring system
|
||||
============================
|
||||
|
||||
I already know I don't want a traditional monitoring system like *nagios* or *sensu* or *prometheus*.
|
||||
It is not needed, it's overkill.
|
||||
|
||||
I went through the list of hooks that **borgmatic** offers out of the box and checked each project.
|
||||
|
||||
I came across `HealthChecks <https://healthchecks.io/>`_.
|
||||
|
||||
HealthChecks
|
||||
============
|
||||
|
||||
The `HealthChecks <https://healthchecks.io/>`_ project works in a simple manner.
|
||||
It simply offers you an endpoint which you need to ping within a certain period, otherwise you get paged.
|
||||
|
||||
It has a lot of integrations from simple emails to other third party services that will call or message you or even trigger push notifications to your phone.
|
||||
|
||||
In my case, a simple email is enough. After all, they are simply backups and if they failed now, they will work when cron runs again in 2 hours.
|
||||
|
||||
Deploy
|
||||
======
|
||||
|
||||
Let's create a `docker-compose` service configuration that looks like the following:
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
healthchecks:
|
||||
container_name: healthchecks
|
||||
image: linuxserver/healthchecks:v1.12.0-ls48
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "127.0.0.1:8000:8000"
|
||||
volumes:
|
||||
- "./healthchecks/data:/config"
|
||||
environment:
|
||||
PUID: "5000"
|
||||
PGID: "5000"
|
||||
SECRET_KEY: "super-secret-key"
|
||||
ALLOWED_HOSTS: '["*"]'
|
||||
DEBUG: "False"
|
||||
DEFAULT_FROM_EMAIL: "noreply@healthchecks.example.com"
|
||||
USE_PAYMENTS: "False"
|
||||
REGISTRATION_OPEN: "False"
|
||||
EMAIL_HOST: "smtp.example.com"
|
||||
EMAIL_PORT: "587"
|
||||
EMAIL_HOST_USER: "smtp@healthchecks.example.com"
|
||||
EMAIL_HOST_PASSWORD: "super-secret-password"
|
||||
EMAIL_USE_TLS: "True"
|
||||
SITE_ROOT: "https://healthchecks.example.com"
|
||||
SITE_NAME: "HealthChecks"
|
||||
MASTER_BADGE_LABEL: "HealthChecks"
|
||||
PING_ENDPOINT: "https://healthchecks.example.com/ping/"
|
||||
PING_EMAIL_DOMAIN: "healthchecks.example.com"
|
||||
TWILIO_ACCOUNT: "None"
|
||||
TWILIO_AUTH: "None"
|
||||
TWILIO_FROM: "None"
|
||||
PD_VENDOR_KEY: "None"
|
||||
TRELLO_APP_KEY: "None"
|
||||
|
||||
This will create a docker container exposing it locally on ``127.0.0.1:8000``.
|
||||
Let's point nginx to it and expose it using something similar to the following.
|
||||
|
||||
.. code:: text
|
||||
|
||||
server {
|
||||
listen 443 ssl;
|
||||
server_name healthchecks.example.com;
|
||||
|
||||
ssl_certificate /path/to/the/fullchain.pem;
|
||||
ssl_certificate_key /path/to/the/privkey.pem;
|
||||
|
||||
location / {
|
||||
proxy_pass http://127.0.0.1:8000;
|
||||
|
||||
add_header X-Frame-Options SAMEORIGIN;
|
||||
add_header X-XSS-Protection "1; mode=block";
|
||||
proxy_redirect off;
|
||||
proxy_buffering off;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Forwarded-Port $server_port;
|
||||
proxy_read_timeout 90;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
This should do nicely.
|
||||
|
||||
|
||||
Usage
|
||||
=====
|
||||
|
||||
Now it's a simple matter of creating a check.
|
||||
|
||||
.. thumbnail:: /images/simple_cron_monitoring_with_healthchecks/borgbackup_healthchecks.png
|
||||
:align: center
|
||||
:alt: HealthChecks monitoring for BorgBackup
|
||||
|
||||
|
||||
This will give you a link that looks like the following ``https://healthchecks.example.com/ping/84b2a834-02f5-524f-4c27-a2f24562b219``
|
||||
|
||||
Let's feed it to **borgmatic**.
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
hooks:
|
||||
healthchecks: https://healthchecks.example.com/ping/84b2a834-02f5-524f-4c27-a2f24562b219
|
||||
|
||||
|
||||
After you configure the **borgmatic** hook to keep *HealthChecks* in the know of what's going on.
|
||||
We can take a look at the log to see what happened and when.
|
||||
|
||||
.. thumbnail:: /images/simple_cron_monitoring_with_healthchecks/borgbackup_healthchecks_logs.png
|
||||
:align: center
|
||||
:alt: HealthChecks monitoring log for BorgBackup
|
||||
|
||||
|
||||
|
||||
Conclusion
|
||||
==========
|
||||
|
||||
As we saw in the blog post, now I am always in the know about my backups.
|
||||
If my backup fails, I get an email to notify me of a failure.
|
||||
I can also monitor how much time it takes my backups to run.
|
||||
This is a very important feature for me to have.
|
||||
|
||||
The question of deploying one's own monitoring system is a personal choice.
|
||||
After all, one can use free third party services if they would like.
|
||||
The correct answer though is to always monitor.
|
230
posts/revision-control/git-branching-and-merging.org
Normal file
|
@ -0,0 +1,230 @@
|
|||
#+BEGIN_COMMENT
|
||||
.. title: Git! Branching and Merging
|
||||
.. date: 2019-08-01
|
||||
.. slug: git-branching-and-merging
|
||||
.. updated: 2019-08-01
|
||||
.. status: published
|
||||
.. tags: git, revision-control
|
||||
.. category: revision-control
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: Explaining branches, branching and merging strategies.
|
||||
.. type: text
|
||||
#+END_COMMENT
|
||||
|
||||
In the previous post about /git/, we had a look at what /git/ is and got our feet wet with a bit of it.
|
||||
In this post, I will be moving forward with the topic, I will be talking about branches, how to work with them and finally what merging is and how it works.
|
||||
|
||||
{{{TEASER_END}}}
|
||||
|
||||
* Requirements
|
||||
|
||||
The same requirement we had from the last post, obviously /git/.
|
||||
|
||||
* Branching and Merging
|
||||
|
||||
** What is a branch?
|
||||
|
||||
/git/ [[https://git-scm.com/book/en/v1/Git-Branching-What-a-Branch-Is][documentation]] describes it as:
|
||||
|
||||
#+BEGIN_QUOTE
|
||||
"A branch in Git is simply a lightweight movable pointer to one of the[se] commits."
|
||||
#+END_QUOTE
|
||||
|
||||
Usually, people coming from /svn/ think of *branches* differently. In /git/, a branch is simply a pointer to a commit.
|
||||
|
||||
So let's verify that claim to see if it's true.
|
||||
|
||||
Remember our example repository from the last post ? We'll be using it here.
|
||||
|
||||
First let's create a new branch.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git checkout -b mybranch
|
||||
Switched to a new branch 'mybranch'
|
||||
#+END_EXAMPLE
|
||||
|
||||
That was simple, wasn't it ?
|
||||
Alright, let's test our hypothesis.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git log
|
||||
commit 643a353370d74c26d7cbf5c80a0d73988a75e09e (HEAD -> mybranch, master)
|
||||
Author: John Doe <johndoe@example.com>
|
||||
Date: Thu Aug 1 19:50:45 2019 +0200
|
||||
|
||||
Second commit
|
||||
#+END_EXAMPLE
|
||||
|
||||
The commit is, of course, different because this is a different computer with a different repository from scratch. Anyway, it seems from the log message that both /mybranch/ and /master/ are pointing to the same commit /SHA/. Technically they are pointing to *HEAD*.
|
||||
|
||||
Now let's continue and add a new commit.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ echo "" >> README.md
|
||||
$ git add README.md
|
||||
$ git commit -m "Adding an empty line"
|
||||
[mybranch b30f4e0] Adding an empty line
|
||||
1 file changed, 1 insertion(+)
|
||||
#+END_EXAMPLE
|
||||
|
||||
After this last commit, let's check the log
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git log
|
||||
commit b30f4e0fa8f3b5c9f041c9ad1be982b2fed80851 (HEAD -> mybranch)
|
||||
Author: John Doe <johndoe@example.com>
|
||||
Date: Thu Aug 1 20:28:05 2019 +0200
|
||||
|
||||
Adding an empty line
|
||||
|
||||
commit 643a353370d74c26d7cbf5c80a0d73988a75e09e (master)
|
||||
Author: John Doe <johndoe@example.com>
|
||||
Date: Thu Aug 1 19:50:45 2019 +0200
|
||||
|
||||
Second commit
|
||||
#+END_EXAMPLE
|
||||
|
||||
From reading the output of log, we can see that the /master/ branch points to a different commit than /mybranch/.
|
||||
|
||||
To visualize this, let's look at it in a different way.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git log --graph --oneline --all
|
||||
* b30f4e0 (HEAD -> mybranch) Adding an empty line
|
||||
* 643a353 (master) Second commit
|
||||
#+END_EXAMPLE
|
||||
|
||||
What the above suggests is that our two branches have different contents at this stage. In other words, if I switch back to the /master/ branch what do you think we will find in =README.md= ?
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git checkout master
|
||||
Switched to branch 'master'
|
||||
$ cat README.md
|
||||
# Example
|
||||
|
||||
This is an example repository.
|
||||
This repository is trying to give you a hands on experience with git to complement the post.
|
||||
$
|
||||
#+END_EXAMPLE
|
||||
|
||||
And if we switch back to /mybranch/.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git checkout mybranch
|
||||
Switched to branch 'mybranch'
|
||||
$ cat README.md
|
||||
# Example
|
||||
|
||||
This is an example repository.
|
||||
This repository is trying to give you a hands on experience with git to complement the post.
|
||||
|
||||
$
|
||||
#+END_EXAMPLE
|
||||
|
||||
|
||||
Let's add another commit to make it easier to see the changes than an empty line.
|
||||
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ echo "Let's add a line to mybranch." >> README.md
|
||||
$ git add README.md
|
||||
$ git commit -m "Adding more commits to mybranch"
|
||||
[mybranch f25dd5d] Adding more commits to mybranch
|
||||
1 file changed, 1 insertion(+)
|
||||
#+END_EXAMPLE
|
||||
|
||||
Now let's check the tree again.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git log --graph --oneline --all
|
||||
* f25dd5d (HEAD -> mybranch) Adding more commits to mybranch
|
||||
* b30f4e0 Adding an empty line
|
||||
* 643a353 (master) Second commit
|
||||
#+END_EXAMPLE
|
||||
|
||||
Let's also check the difference between our /master/ branch and /mybranch/.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git diff master mybranch
|
||||
diff --git a/README.md b/README.md
|
||||
index b4734ad..f07e71e 100644
|
||||
--- a/README.md
|
||||
+++ b/README.md
|
||||
@@ -2,3 +2,5 @@
|
||||
|
||||
This is an example repository.
|
||||
This repository is trying to give you a hands on experience with git to complement the post.
|
||||
+
|
||||
+Let's add a line to mybranch.
|
||||
#+END_EXAMPLE
|
||||
|
||||
|
||||
The =+= suggests an addition and =-= suggests a deletion of a line. As we can see from the =+= shown before the two lines added to the =README.md= file, /mybranch/ has these additions.
|
||||
|
||||
You can read more about /git/ branches in the /git/ [[https://git-scm.com/book/en/v1/Git-Branching-What-a-Branch-Is][documentation]] page.
|
||||
|
||||
** What is merging ?
|
||||
That's all fine so far, but how do I get these changes from /mybranch/ to the /master/ branch ?
|
||||
|
||||
The answer to that is also as easy as all the steps taken so far. /git/ merges *from* a branch you specify *to* the branch you are currently on.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ # Checking which branch we are on
|
||||
$ git branch
|
||||
master
|
||||
* mybranch
|
||||
$ # We are on mybranch and we need to put these changes into master
|
||||
$ # First we need to move to our master branch
|
||||
$ git checkout master
|
||||
Switched to branch 'master'
|
||||
$ # Now we can merge from mybranch
|
||||
$ git merge mybranch
|
||||
Updating 643a353..f25dd5d
|
||||
Fast-forward
|
||||
README.md | 2 ++
|
||||
1 file changed, 2 insertions(+)
|
||||
#+END_EXAMPLE
|
||||
|
||||
As we can see. The changes in /mybranch/ have been merged into the /master/ branch.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git log
|
||||
commit f25dd5da3e6f91d117177782a5811d5086f66799 (HEAD -> master, mybranch)
|
||||
Author: John Doe <johndoe@example.com>
|
||||
Date: Thu Aug 1 20:43:57 2019 +0200
|
||||
|
||||
Adding more commits to mybranch
|
||||
|
||||
commit b30f4e0fa8f3b5c9f041c9ad1be982b2fed80851
|
||||
Author: John Doe <johndoe@example.com>
|
||||
Date: Thu Aug 1 20:28:05 2019 +0200
|
||||
|
||||
Adding an empty line
|
||||
|
||||
commit 643a353370d74c26d7cbf5c80a0d73988a75e09e
|
||||
Author: John Doe <johndoe@example.com>
|
||||
Date: Thu Aug 1 19:50:45 2019 +0200
|
||||
|
||||
Second commit
|
||||
#+END_EXAMPLE
|
||||
|
||||
|
||||
* Merging Strategies
|
||||
I'll explain to you how I like to work and my personal merging strategy. I will keep out some details as they use concepts that are more advanced than what has been discussed so far.
|
||||
|
||||
** /master/ branch
|
||||
To me, the /master/ branch stays always up to date with the *remote* /master/ branch. In other words, I do not make commits against the /master/ branch in the project I'm working on.
|
||||
|
||||
** branch
|
||||
If I want to work on the project, I start by updating the /master/ branch and then branching it as we've seen before. The name of the branch is always indicative on what it holds, or what kind of work I am doing on it.
|
||||
|
||||
As long as I am working on my dev branch, I keep updating the /master/ branch and then porting the changes into my dev branch. This way, at the end the code is compatible and I am testing with the latest version of the code. This is very helpful and makes merging later a breeze.
|
||||
|
||||
** merging
|
||||
After my work is done, I push my branch to the remote server and ask for the maintainer of the project to merge my changes into the /master/ branch after reviewing it, of course. To explain this in a very simple manner, all that mumbo jumpo talk previously simply means someone else did the merge into master.
|
||||
|
||||
|
||||
* Conclusion
|
||||
In this post, I talked about what are branches. We went ahead and worked a little bit with branches and then mentioned merging. At the end of the post I talked a bit about my merging strategy.
|
||||
|
||||
In the next post, I will be talking about remotes.
|
198
posts/revision-control/git-first-steps.org
Normal file
|
@ -0,0 +1,198 @@
|
|||
#+BEGIN_COMMENT
|
||||
.. title: Git! First Steps...
|
||||
.. date: 2019-07-22
|
||||
.. slug: git-first-steps
|
||||
.. updated: 2019-07-23
|
||||
.. status: published
|
||||
.. tags: git, revision-control
|
||||
.. category: revision-control
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: Getting your feet wet with git from the beginning.
|
||||
.. type: text
|
||||
#+END_COMMENT
|
||||
|
||||
The topic of /git/ came up recently a lot at work. Questions were asked about why I like to do what I do and the reasoning behind it.
|
||||
Today, I joined =#dgplug= on [[https://freenode.net/][freenode]] and it turns out it was class time and the topic is /git/ and writing a post on it.
|
||||
|
||||
Which got me thinking... Why not do that ?
|
||||
|
||||
{{{TEASER_END}}}
|
||||
|
||||
* Requirements
|
||||
I'd like to start my post with a requirement, /git/. It has to be installed on your machine, obviously, for you to be able to follow along.
|
||||
|
||||
* A Few Concepts
|
||||
I'm going to try to explain a few concepts in a very simple way. That means I am sacrificing accuracy for ease of understanding.
|
||||
|
||||
** What is revision control?
|
||||
[[https://en.wikipedia.org/wiki/Version_control][Wikipedia]] describes it as:
|
||||
|
||||
#+BEGIN_QUOTE
|
||||
"A component of software configuration management, version control,
|
||||
also known as revision control or source control, is the management
|
||||
of changes to documents, computer programs, large web sites, and
|
||||
other collections of information."
|
||||
#+END_QUOTE
|
||||
|
||||
In simple terms, it keeps track of what you did and when, as long as you log that on every change that deserves to be saved.
|
||||
This is a very good way to keep backups of previous changes, also a way to have a history documenting who changed what and for what reason (NO! Not to blame, to understand why and how to fix it).
|
||||
|
||||
** What is a git commit?
|
||||
You can read all about what a commit is on the manual page of [[https://git-scm.com/docs/git-commit][git-commit]].
|
||||
But the simple way to understand this is, it takes a snapshot of your work and names it a /SHA/ number (very long string of letters and numbers). A /SHA/ is a unique name that is derived from information from the current commit and every commit that came before since the beginning of the tree.
|
||||
In other words, there is an extremely low chance that 2 commits would ever have the same /SHA/. Let's not also forget the security implication from this. If you have a clone of a repository and someone changed a commit somewhere in the tree history, every commit including the one changed and newer will have to change names. At that point, your fork will have a mismatch and you can know that the history was changed.
|
||||
|
||||
** What is the =git add= thingy for?
|
||||
Well the [[https://git-scm.com/docs/git-add][git-add]] manual page is very descriptive about the subject but, once again, I'll try to explain it in metaphors.
|
||||
Think of it this way, =git-commit= saves the changes, but what changes ? That's exactly the question to answer. What changes ?
|
||||
What if I want to commit some changes but not others ? What if I want to commit all the code in one commit and all the comments in another ?
|
||||
|
||||
That's where the "staging area" comes in play. You use =git-add= to stage files to be committed. And whenever you run the =git-commit= command, it will commit whatever is staged to be committed, right ?
|
||||
|
||||
* Practice
|
||||
Now that we've already explained a few concepts, let's see how this all fits together.
|
||||
|
||||
** Step 1: Basic git configuration
|
||||
The [[https://git-scm.com/book/en/v2/Getting-Started-First-Time-Git-Setup][Getting Started - First-Time Git Setup]] has more detailed setup but I took out what's quick and easy for now.
|
||||
|
||||
First setup your name and email.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git config --global user.name "John Doe"
|
||||
$ git config --global user.email johndoe@example.com
|
||||
#+END_EXAMPLE
|
||||
|
||||
You're done !
|
||||
|
||||
** Step 2: Creating a repository
|
||||
This is easy. If you want to be able to commit, you need to create a project to work on. A "project" can be translated to a repository and everything in that directory will be tracked.
|
||||
So let's create a repository
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ # Navigate to where you'd like to create the repository
|
||||
$ cd ~/Documents/Projects/
|
||||
$ # Create repository directory
|
||||
$ mkdir example
|
||||
$ # Navigate into the newly created directory
|
||||
$ cd example
|
||||
$ # Create the repository
|
||||
$ git init
|
||||
#+END_EXAMPLE
|
||||
|
||||
Yeah, it was only one command =git init=. Told you it was easy, didn't I?
|
||||
|
||||
** Step 3: Make a change
|
||||
Let's create a file called =README.md= in the current directory (=~/Documents/Projects/example=) and put the following in it.
|
||||
|
||||
#+BEGIN_SRC markdown
|
||||
# Example
|
||||
|
||||
This is an example repository.
|
||||
#+END_SRC
|
||||
|
||||
And save it of course.
|
||||
|
||||
** Step 4: Staging changes
|
||||
If you go back to the command line and check the following command, you'll see a similar result.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git status
|
||||
On branch master
|
||||
|
||||
No commits yet
|
||||
|
||||
Untracked files:
|
||||
(use "git add <file>..." to include in what will be committed)
|
||||
|
||||
README.md
|
||||
|
||||
nothing added to commit but untracked files present (use "git add" to track)
|
||||
#+END_EXAMPLE
|
||||
|
||||
and =README.md= is in red (if you have colors enabled). This means that there is a file that is not tracked in your repository. We would like to track that one, let's stage it.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git add README.md
|
||||
$ git status
|
||||
On branch master
|
||||
|
||||
No commits yet
|
||||
|
||||
Changes to be committed:
|
||||
(use "git rm --cached <file>..." to unstage)
|
||||
|
||||
new file: README.md
|
||||
#+END_EXAMPLE
|
||||
|
||||
And =README.md= would now become green (if you have colors enabled). This means that if you commit now, this new file will be added and tracked in the future for changes. Technically though, it is being tracked for changes right now.
|
||||
Let's prove it.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ echo "This repository is trying to give you a hands on experience with git to complement the post." >> README.md
|
||||
$ git status
|
||||
On branch master
|
||||
|
||||
No commits yet
|
||||
|
||||
Changes to be committed:
|
||||
(use "git rm --cached <file>..." to unstage)
|
||||
|
||||
new file: README.md
|
||||
|
||||
Changes not staged for commit:
|
||||
(use "git add <file>..." to update what will be committed)
|
||||
(use "git checkout -- <file>..." to discard changes in working directory)
|
||||
|
||||
modified: README.md
|
||||
#+END_EXAMPLE
|
||||
|
||||
As you can see, git figured out that the file has been changed. Now let's add these changes too and move forward.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git add README.md
|
||||
$ git status
|
||||
On branch master
|
||||
|
||||
No commits yet
|
||||
|
||||
Changes to be committed:
|
||||
(use "git rm --cached <file>..." to unstage)
|
||||
|
||||
new file: README.md
|
||||
#+END_EXAMPLE
|
||||
|
||||
|
||||
** Step 5: Committing
|
||||
This will be as easy as the rest. Let's commit these changes with a good commit message to describe the changes.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git commit -m "Second commit"
|
||||
[master (root-commit) 0bd01aa] Second commit
|
||||
1 file changed, 4 insertions(+)
|
||||
create mode 100644 README.md
|
||||
#+END_EXAMPLE
|
||||
|
||||
Very descriptive commit indeed !
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git status
|
||||
On branch master
|
||||
nothing to commit, working tree clean
|
||||
#+END_EXAMPLE
|
||||
|
||||
Of course ! There is nothing to commit !
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git log
|
||||
commit 0bd01aa6826675f339c3173d7665ebb44c3894a7 (HEAD -> master)
|
||||
Author: John Doe <johndoe@example.com>
|
||||
Date: Mon Jul 22 20:57:40 2019 +0200
|
||||
|
||||
Second commit
|
||||
#+END_EXAMPLE
|
||||
|
||||
You can definitely see who committed it, when and what the message was. You also have access to the changes made in this commit.
|
||||
|
||||
* Conclusion
|
||||
I'm going to end this post here, and will continue to build up the knowledge in new posts to come. For now, I think it's a good idea to simply work with commits.
|
||||
Next concepts to cover would be branching and merging.
|
157
posts/revision-control/git-rebase-and-strategies.org
Normal file
|
@ -0,0 +1,157 @@
|
|||
#+BEGIN_COMMENT
|
||||
.. title: Git! Rebase and Strategies
|
||||
.. date: 2019-08-10
|
||||
.. slug: git-rebase-and-strategies
|
||||
.. updated: 2019-08-10
|
||||
.. status: published
|
||||
.. tags: git, revision-control
|
||||
.. category: revision-control
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: Getting a little handle on git rebase
|
||||
.. type: text
|
||||
#+END_COMMENT
|
||||
|
||||
In the previous topic, I talked about git remotes because it felt
|
||||
natural after branching and merging.
|
||||
|
||||
Now, the time has come to talk a little bit about =rebase= and some good
|
||||
cases to use it for.
|
||||
|
||||
{{{TEASER_END}}}
|
||||
|
||||
* Requirements
|
||||
This has not changed people, it is still /git/.
|
||||
|
||||
* Rebase
|
||||
In /git/ there are 2 ways of integrating your changes from one branch
|
||||
into another.
|
||||
|
||||
We already talked about one; =git-merge=. For more information about =git-merge= consult the [[https://git-scm.com/book/en/v2/Git-Branching-Basic-Branching-and-Merging#_basic_merging][git basic branching and merging]] manual.
|
||||
|
||||
The other is =git-rebase=.
|
||||
|
||||
While =git-rebase= has a lot of different uses, the basic use of it is described in the [[https://git-scm.com/book/en/v2/Git-Branching-Rebasing][git branching rebasing]] manual as:
|
||||
|
||||
#+BEGIN_QUOTE
|
||||
"With the =rebase= command, you can take all the changes that were committed on one branch and replay them on a different branch."
|
||||
#+END_QUOTE
|
||||
|
||||
In other words, all the commits you have made into the branch you are on will be set aside.
|
||||
Then, all the changes in the branch you are rebasing from will be applied to your branch.
|
||||
Finally, all your changes, that were set aside previously, will be applied back to your branch.
|
||||
|
||||
The beauty about this process is that you can keep your branch updated with upstream, while coding your changes.
|
||||
By the end of the process of adding your feature, your changes are ready to be merged upstream straight away.
|
||||
This is due to the fact that all the conflicts would've been resolved in each rebase.
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<div class="admonition note">
|
||||
<p class="admonition-title">Note</p>
|
||||
#+END_EXPORT
|
||||
Branch and branch often!
|
||||
if you merge, merge and merge often!
|
||||
or rebase, and rebase often!
|
||||
#+BEGIN_EXPORT html
|
||||
</div>
|
||||
#+END_EXPORT
|
||||
|
||||
** Usage
|
||||
Rebase is used just like merge in our case.
|
||||
|
||||
First, let's create a branch and make a change in that branch.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git checkout -b rebasing-example
|
||||
Switched to a new branch 'rebasing-example'
|
||||
$ printf "\n# Rebase\n\nThis is a rebase branch.\n" >> README.md
|
||||
$ git add README.md
|
||||
$ git commit -m "Adding rebase section"
|
||||
[rebasing-example 4cd0ffe] Adding rebase section
|
||||
1 file changed, 4 insertions(+)
|
||||
$
|
||||
#+END_EXAMPLE
|
||||
|
||||
Now let's assume someone (or yourself) made a change to the =master= branch.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git checkout master
|
||||
Switched to branch 'master'
|
||||
Your branch is up to date with 'origin/master'.
|
||||
$ printf "# Master\n\nThis is a master branch" >> master.md
|
||||
$ git add master.md
|
||||
$ git commit -m "Adding master file"
|
||||
[master 7fbdab9] Adding master file
|
||||
1 file changed, 3 insertions(+)
|
||||
create mode 100644 master.md
|
||||
$
|
||||
#+END_EXAMPLE
|
||||
|
||||
I want to take a look at how the tree looks like before I attempt any changes.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git log --graph --oneline --all
|
||||
* 7fbdab9 (HEAD -> master) Adding master file
|
||||
| * 4cd0ffe (rebasing-example) Adding rebase section
|
||||
|/
|
||||
* 4f6bb31 (origin/master) Adding the git remote section
|
||||
* 0bd01aa Second commit
|
||||
#+END_EXAMPLE
|
||||
|
||||
After both of our commits, the tree diverged.
|
||||
We are pointing to the *master* branch, I know that because =HEAD= points to /master/.
|
||||
That commit is different than the commit that =rebase-example= branch points to.
|
||||
|
||||
These changes were introduced by someone else while I was adding the rebase section in the =README.md= file and they might be crucial for my application.
|
||||
In short, I want those changes in the code I am working on right now.
|
||||
Let's do that.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git checkout rebasing-example
|
||||
Switched to branch 'rebasing-example'
|
||||
$ git rebase master
|
||||
First, rewinding head to replay your work on top of it...
|
||||
Applying: Adding rebase section
|
||||
#+END_EXAMPLE
|
||||
|
||||
|
||||
And, let's look at the tree of course.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git log --graph --oneline --all
|
||||
* 1b2aa4a (HEAD -> rebasing-example) Adding rebase section
|
||||
* 7fbdab9 (master) Adding master file
|
||||
* 4f6bb31 (origin/master) Adding the git remote section
|
||||
* 0bd01aa Second commit
|
||||
#+END_EXAMPLE
|
||||
|
||||
The tree looks linear now. =HEAD= is pointing to our branch.
|
||||
That commit points to the =7fbdab9= commit which the /master/ branch also points to.
|
||||
So rebase set aside =1b2aa4a= to apply =7fbdab9= and then re-applied it back. Pretty neat huh ?!
|
||||
|
||||
* My Strategy
|
||||
I'm going to be honest with you. I do not know the different kinds of merge strategies.
|
||||
I've glanced at the names of a few but I've never looked at them closely enough to see which one is what.
|
||||
|
||||
What I use, I've used for a while. I learned it from somewhere and changed a few things in it to make it work for me.
|
||||
|
||||
First of all, I always fork a repository.
|
||||
I tend to stay away from creating a branch on the upstream repository unless it's my own personal project.
|
||||
On my fork, I freely roam. I am the king of my own fork and I create as many branches as I please.
|
||||
|
||||
I start with an assumption. The assumption is that my /master/ branch is, for all intents and purposes, upstream.
|
||||
This means I keep it up to date with upstream's main branch.
|
||||
|
||||
When I make a branch, I make a branch from /master/, this way I know it's up to date with upstream.
|
||||
I do my work on my branch. Every few hours, I update my /master/ branch. After I update my /master/
|
||||
branch, I /rebase/ the /master/ branch into my branch and voilà I'm up to date.
|
||||
|
||||
By the time my changes are ready to be merged back into upstream for any
|
||||
reason, they are ready to go.
|
||||
|
||||
That *MR* is gonna be ready to be merged in a jiffy.
|
||||
|
||||
* Conclusion
|
||||
From what I've read, I use one of those strategies described on some
|
||||
website. I don't know which one. But to me, it doesn't matter because it
|
||||
works for me. And if I need to adapt that for one reason or another, I
|
||||
can.
|
189
posts/revision-control/git-remote.org
Normal file
|
@ -0,0 +1,189 @@
|
|||
#+BEGIN_COMMENT
|
||||
.. title: Git! Remotes...
|
||||
.. date: 2019-08-07
|
||||
.. slug: git-remotes
|
||||
.. updated: 2019-08-07
|
||||
.. status: published
|
||||
.. tags: git, revision-control
|
||||
.. category: revision-control
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: Let's have a deeper look at remotes
|
||||
.. type: text
|
||||
#+END_COMMENT
|
||||
|
||||
In the previous post, we talked about branching and merging. We will say a few last words on branches in this post and dive into remotes.
|
||||
|
||||
What are remotes ? What are they for ? How are they used ?
|
||||
|
||||
Coming right up.
|
||||
|
||||
* Requirements
|
||||
In this post, we will need another requirement.
|
||||
|
||||
- First, you obviously need /git/.
|
||||
- Second, you will need a git repository on a git server. Easier way is to create an account on [[https://gitlab.com][Gitlab]], [[https://github.com][GitHub]] or other similar services.
|
||||
|
||||
* Branches
|
||||
I have a few more things I need to say about branches...
|
||||
|
||||
If you came to the same conclusion that branches in /git/ are /cheap/, you are correct.
|
||||
This is very important because this encourages you to create more branches.
|
||||
A lot of short living branches is a great way to work. Small features added here and there.
|
||||
Small projects to test new features, etc...
|
||||
|
||||
Second conclusion you can come up with from the previous post is that the /master/ branch is not a /special/ branch.
|
||||
People use it as a /special/ branch, or the branch of *truth* by convention /only/.
|
||||
|
||||
I should also note that some services like *Gitlab* offer master branch protection on their own which would not allow master history overwriting.
|
||||
|
||||
The best next topic that comes after /branches/ is a topic extremely similar to it, *remotes*.
|
||||
|
||||
* Remotes
|
||||
The description of =git-remote= from the [[https://git-scm.com/docs/git-remote][manual page]] is simply
|
||||
|
||||
#+BEGIN_QUOTE
|
||||
Manage the set of repositories ("remotes") whose branches you track.
|
||||
#+END_QUOTE
|
||||
|
||||
That's exactly what it is.
|
||||
A way to manage /remote/ repositories.
|
||||
Now we will be talking about managing them in a bit but let's talk about how to use them.
|
||||
I found the best way to think to work with them is that you can think of them as /branches/.
|
||||
That's exactly why I thought this would be best fit after that blog post.
|
||||
|
||||
** Listing
|
||||
Let's list them on our project and see what's what.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git remote -v
|
||||
#+END_EXAMPLE
|
||||
|
||||
Okay! Nothing...
|
||||
|
||||
Alright, let's change that.
|
||||
|
||||
We don't have a /remote/ repository we can manage.
|
||||
We need to create one.
|
||||
|
||||
** Adding a remote
|
||||
So I went to *Gitlab* and I created a new repository.
|
||||
After creating the repository, you will get a box with commands that look similar to the following.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ cd existing_repo
|
||||
$ git remote rename origin old-origin
|
||||
$ git remote add origin git@gitlab.com:elazkani/git-project.git
|
||||
$ git push -u origin --all
|
||||
$ git push -u origin --tags
|
||||
#+END_EXAMPLE
|
||||
|
||||
The first command is useless to us.
|
||||
The second is renaming a remote we do not have.
|
||||
Now the third command is interesting.
|
||||
This one is adding a remote called *origin*.
|
||||
We need that.
|
||||
The last two commands are there to push everything to the remote repository.
|
||||
|
||||
Let's copy that command and put it in our command line.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git remote add origin git@gitlab.com:elazkani/git-project.git
|
||||
$ git remote -v
|
||||
origin git@gitlab.com:elazkani/git-project.git (fetch)
|
||||
origin git@gitlab.com:elazkani/git-project.git (push)
|
||||
#+END_EXAMPLE
|
||||
|
||||
If you look at that output carefully, you will notice that there is a /fetch/ link and a /push/ link.
|
||||
|
||||
Anyway, let's push.
|
||||
|
||||
** Push
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git push -u origin --all
|
||||
Enumerating objects: 3, done.
|
||||
Counting objects: 100% (3/3), done.
|
||||
Delta compression using up to 4 threads
|
||||
Compressing objects: 100% (2/2), done.
|
||||
Writing objects: 100% (3/3), 317 bytes | 317.00 KiB/s, done.
|
||||
Total 3 (delta 0), reused 0 (delta 0)
|
||||
To gitlab.com:elazkani/git-project.git
|
||||
* [new branch] master -> master
|
||||
Branch 'master' set up to track remote branch 'master' from 'origin'.
|
||||
#+END_EXAMPLE
|
||||
|
||||
We have pushed all of our changes to the remote now.
|
||||
If you refresh the web page, you should see the repository.
|
||||
|
||||
So what happens if someone else made a change and pushed to it, or maybe it was you from another computer.
|
||||
|
||||
** Pulling from a remote
|
||||
Most people using git usually do =git pull= and call it a day.
|
||||
We will not, we will dissect what that command is doing.
|
||||
|
||||
You might not know that you can configure =git pull= to do a /rebase/ instead of a /merge/.
|
||||
That's not important for you at this stage but what's important is the clue it gives us.
|
||||
There is a /merge/ in it.
|
||||
|
||||
What =git pull= actually does is a =git fetch= followed by a =git merge=.
|
||||
So just like =git push=, =git fetch= will download the changes from the remote.
|
||||
|
||||
If the /fetch/ is followed by a /merge/, then where are we fetching to and merging from ?
|
||||
|
||||
This is where thinking about remotes as branches comes in.
|
||||
Think of =origin/master= as a branch, a local branch, because in some way it is.
|
||||
|
||||
So let's fetch.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git fetch origin master
|
||||
From gitlab.com:elazkani/git-project
|
||||
* branch master -> FETCH_HEAD
|
||||
#+END_EXAMPLE
|
||||
|
||||
But we don't see any changes to our code !
|
||||
|
||||
Ahaaa ! But it did get the new stuff.
|
||||
Let me show you.
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git diff master origin/master
|
||||
diff --git a/README.md b/README.md
|
||||
index b4734ad..a492bbb 100644
|
||||
--- a/README.md
|
||||
+++ b/README.md
|
||||
@@ -2,3 +2,7 @@
|
||||
|
||||
This is an example repository.
|
||||
This repository is trying to give you a hands on experience with git to complement the post.
|
||||
+
|
||||
+# Remote
|
||||
+
|
||||
+This is the section on git remotes.
|
||||
#+END_EXAMPLE
|
||||
|
||||
See ! Told you.
|
||||
Now let's get those changes into our master branch.
|
||||
You guessed it, we only need to merge from =origin/master=
|
||||
|
||||
#+BEGIN_EXAMPLE
|
||||
$ git merge origin/master
|
||||
Updating 0bd01aa..4f6bb31
|
||||
Fast-forward
|
||||
README.md | 4 ++++
|
||||
1 file changed, 4 insertions(+)
|
||||
#+END_EXAMPLE
|
||||
|
||||
That was easy wasn't it ?
|
||||
|
||||
* Let's have a little chat, you and me !
|
||||
You can have multiple remotes.
|
||||
Make a good use of them.
|
||||
Go through all the different methodologies online to work with /git/ and try them out.
|
||||
|
||||
Find what works for you.
|
||||
Make use of branches and remotes.
|
||||
Make use of merging.
|
||||
|
||||
* Conclusion
|
||||
After talking about remotes in this post, you have some reading to do. I
|
||||
hope I've made your journey much simpler moving forward with this topic.
|
|
@ -1,238 +0,0 @@
|
|||
.. title: Git! Branching and Merging
|
||||
.. date: 2019-08-01
|
||||
.. slug: git-branching-and-merging
|
||||
.. updated: 2019-08-01
|
||||
.. status: published
|
||||
.. tags: git, revision-control
|
||||
.. category: revision-control
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: Explaining branches, branching and merging strategies.
|
||||
.. type: text
|
||||
|
||||
In the previous post about ``git``, we had a look at what ``git`` is and got our feet wet with a bit of it.
|
||||
In this post, I will be moving forward with the topic, I will be talking about branches, how to work with them and finally what merging is and how it works.
|
||||
|
||||
.. TEASER_END
|
||||
|
||||
Requirements
|
||||
============
|
||||
|
||||
The same requirement we had from the last post, obviously ``git``.
|
||||
|
||||
Branching and Merging
|
||||
=====================
|
||||
|
||||
What is a branch?
|
||||
-----------------
|
||||
|
||||
``git`` `documentation <https://git-scm.com/book/en/v1/Git-Branching-What-a-Branch-Is>`_ describes it as:
|
||||
|
||||
"A branch in Git is simply a lightweight movable pointer to one of the[se] commits."
|
||||
|
||||
Usually, people coming from *svn* think of **branches** differently. In ``git``, a branch is simply a pointer to a commit.
|
||||
|
||||
So let's verify that claim to see if it's true.
|
||||
|
||||
Remember our example repository from the last post ? We'll be using it here.
|
||||
|
||||
First let's create a new branch.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git checkout -b mybranch
|
||||
Switched to a new branch 'mybranch'
|
||||
|
||||
That was simple, wasn't it ?
|
||||
Alright, let's test our hypothesis.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git log
|
||||
commit 643a353370d74c26d7cbf5c80a0d73988a75e09e (HEAD -> mybranch, master)
|
||||
Author: John Doe <johndoe@example.com>
|
||||
Date: Thu Aug 1 19:50:45 2019 +0200
|
||||
|
||||
Second commit
|
||||
|
||||
The commit is, of course, different because this is a different computer with a different repository from scratch. Anyway, it seems from the log message that both *mybranch* and *master* are pointing to same commit ``SHA``. Technically they are pointing to **HEAD**.
|
||||
|
||||
Now let's continue and add a new commit.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ echo "" >> README.md
|
||||
$ git add README.md
|
||||
$ git commit -m "Adding an empty line"
|
||||
[mybranch b30f4e0] Adding an empty line
|
||||
1 file changed, 1 insertion(+)
|
||||
|
||||
After this last commit, let's check the log
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git log
|
||||
commit b30f4e0fa8f3b5c9f041c9ad1be982b2fed80851 (HEAD -> mybranch)
|
||||
Author: John Doe <johndoe@example.com>
|
||||
Date: Thu Aug 1 20:28:05 2019 +0200
|
||||
|
||||
Adding an empty line
|
||||
|
||||
commit 643a353370d74c26d7cbf5c80a0d73988a75e09e (master)
|
||||
Author: John Doe <johndoe@example.com>
|
||||
Date: Thu Aug 1 19:50:45 2019 +0200
|
||||
|
||||
Second commit
|
||||
|
||||
From reading the output of log, we can see that the *master* branch points to a different commit than *mybranch*.
|
||||
|
||||
To visualize this, let's look at it in a different way.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git log --graph --oneline --all
|
||||
* b30f4e0 (HEAD -> mybranch) Adding an empty line
|
||||
* 643a353 (master) Second commit
|
||||
|
||||
What the above suggests is that our two branches have different contents at this stage. In other words, if I switch back to the *master* branch what do you think we will find in ``README.md`` ?
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git checkout master
|
||||
Switched to branch 'master'
|
||||
$ cat README.md
|
||||
# Example
|
||||
|
||||
This is an example repository.
|
||||
This repository is trying to give you a hands on experience with git to complement the post.
|
||||
$
|
||||
|
||||
And if we switch back to *mybranch*.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git checkout mybranch
|
||||
Switched to branch 'mybranch'
|
||||
$ cat README.md
|
||||
# Example
|
||||
|
||||
This is an example repository.
|
||||
This repository is trying to give you a hands on experience with git to complement the post.
|
||||
|
||||
$
|
||||
|
||||
|
||||
Let's add another commit to make it easier to see the changes than an empty line.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ echo "Let's add a line to mybranch." >> README.md
|
||||
$ git add README.md
|
||||
$ git commit -m "Adding more commits to mybranch"
|
||||
[mybranch f25dd5d] Adding more commits to mybranch
|
||||
1 file changed, 1 insertion(+)
|
||||
|
||||
Now let's check the tree again.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git log --graph --oneline --all
|
||||
* f25dd5d (HEAD -> mybranch) Adding more commits to mybranch
|
||||
* b30f4e0 Adding an empty line
|
||||
* 643a353 (master) Second commit
|
||||
|
||||
Let's also check the difference between our *master* branch and *mybranch*.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git diff master mybranch
|
||||
diff --git a/README.md b/README.md
|
||||
index b4734ad..f07e71e 100644
|
||||
--- a/README.md
|
||||
+++ b/README.md
|
||||
@@ -2,3 +2,5 @@
|
||||
|
||||
This is an example repository.
|
||||
This repository is trying to give you a hands on experience with git to complement the post.
|
||||
+
|
||||
+Let's add a line to mybranch.
|
||||
|
||||
The ``+`` suggests an addition and ``-`` suggests a deletion of a line. As we can see from the ``+`` shown before the two lines added to the ``README.md`` file, *mybranch* has these additions.
|
||||
|
||||
You can read more about ``git`` branches in the ``git`` `documentation <https://git-scm.com/book/en/v1/Git-Branching-What-a-Branch-Is>`_ page.
|
||||
|
||||
What is merging ?
|
||||
-----------------
|
||||
|
||||
That's all fine so far, but how do I get these changes from *mybranch* to the *master* branch ?
|
||||
|
||||
The answer to that is also as easy as all the steps taken so far. ``git`` merges **from** a branch you specify **to** the branch you are currently on.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ # Checking which branch we are on
|
||||
$ git branch
|
||||
master
|
||||
* mybranch
|
||||
$ # We are on mybranch and we need to put these changes into master
|
||||
$ # First we need to move to our master branch
|
||||
$ git checkout master
|
||||
Switched to branch 'master'
|
||||
$ # Now we can merge from mybranch
|
||||
$ git merge mybranch
|
||||
Updating 643a353..f25dd5d
|
||||
Fast-forward
|
||||
README.md | 2 ++
|
||||
1 file changed, 2 insertions(+)
|
||||
|
||||
As we can see. The changes in *mybranch* have been merged into the *master* branch.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git log
|
||||
commit f25dd5da3e6f91d117177782a5811d5086f66799 (HEAD -> master, mybranch)
|
||||
Author: John Doe <johndoe@example.com>
|
||||
Date: Thu Aug 1 20:43:57 2019 +0200
|
||||
|
||||
Adding more commits to mybranch
|
||||
|
||||
commit b30f4e0fa8f3b5c9f041c9ad1be982b2fed80851
|
||||
Author: John Doe <johndoe@example.com>
|
||||
Date: Thu Aug 1 20:28:05 2019 +0200
|
||||
|
||||
Adding an empty line
|
||||
|
||||
commit 643a353370d74c26d7cbf5c80a0d73988a75e09e
|
||||
Author: John Doe <johndoe@example.com>
|
||||
Date: Thu Aug 1 19:50:45 2019 +0200
|
||||
|
||||
Second commit
|
||||
|
||||
Merging Strategies
|
||||
==================
|
||||
|
||||
I'll explain to you how I like to work and my personal merging strategy. I will keep out some details as they use concepts that are more advanced than what has been discussed so far.
|
||||
|
||||
*master* branch
|
||||
---------------
|
||||
|
||||
To me, the *master* branch stays always up to date with the **remote** *master* branch. In other words, I do not make commits against the *master* branch in the project I'm working on.
|
||||
|
||||
branch
|
||||
------
|
||||
|
||||
If I want to work on the project, I start by updating the *master* branch and then branching it as we've seen before. The name of the branch is always indicative on what it holds, or what kind of work I am doing on it.
|
||||
|
||||
As long as I am working on my dev branch, I keep updating the *master* branch and then porting the changes into my dev branch. This way, at the end the code is compatible and I am testing with the latest version of the code. This is very helpful and makes merging later a breeze.
|
||||
|
||||
merging
|
||||
-------
|
||||
|
||||
After my work is done, I push my branch to the remote server and ask for the maintainer of the project to merge my changes into the *master* branch after reviewing it, of course. To explain this in a very simple manner, all that mumbo jumpo talk previously simply means someone else did the merge into master.
|
||||
|
||||
|
||||
Conclusion
|
||||
==========
|
||||
|
||||
In this post, I talked about what are branches. We went ahead and worked a little bit with branches and then mentioned merging. At the end of the post I talked a bit about my merging strategy.
|
||||
|
||||
In the next post, I will be talking about remotes.
|
|
@ -1,218 +0,0 @@
|
|||
.. title: Git! First Steps...
|
||||
.. date: 2019-07-22
|
||||
.. slug: git-first-steps
|
||||
.. updated: 2019-07-23
|
||||
.. status: published
|
||||
.. tags: git, revision-control
|
||||
.. category: revision-control
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: Getting your feet wet with git from the beginning.
|
||||
.. type: text
|
||||
|
||||
The topic of ``git`` came up recently a lot at work. Questions were asked about why I like to do what I do and the reasoning behind it.
|
||||
Today, I joined ``#dgplug`` on `freenode <https://freenode.net/>`_ and it turns out it was class time and the topic is ``git`` and writing a post on it.
|
||||
|
||||
Which got me thinking... Why not do that ?
|
||||
|
||||
.. TEASER_END
|
||||
|
||||
Requirements
|
||||
============
|
||||
|
||||
I'd like to start my post with a requirement, ``git``. It has to be installed on your machine, obviously, for you to be able to follow along.
|
||||
|
||||
A Few Concepts
|
||||
==============
|
||||
|
||||
I'm going to try to explain a few concepts in a very simple way. That means I am sacrificing accuracy for ease of understanding.
|
||||
|
||||
What is revision control?
|
||||
-------------------------
|
||||
|
||||
`Wikipedia <https://en.wikipedia.org/wiki/Version_control>`_ describes it as:
|
||||
|
||||
"A component of software configuration management, version control,
|
||||
also known as revision control or source control, is the management
|
||||
of changes to documents, computer programs, large web sites, and
|
||||
other collections of information."
|
||||
|
||||
In simple terms, it keeps track of what you did and when, as long as you log that on every change that deserves to be saved.
|
||||
This is a very good way to keep backups of previous changes, also a way to have a history documenting who changed what and for what reason (NO! Not to blame, to understand why and how to fix it).
|
||||
|
||||
What is a git commit?
|
||||
---------------------
|
||||
|
||||
You can read all about what a commit is on the manual page of `git-commit <https://git-scm.com/docs/git-commit>`_.
|
||||
But the simple way to understand this is, it takes a snapshot of your work and names it a ``SHA`` number (very long string of letters and numbers). A ``SHA`` is a unique name that is derived from information from the current commit and every commit that came before since the beginning of the tree.
|
||||
In other words, there is an extremely low chance that 2 commits would ever have the same ``SHA``. Let's not also forget the security implication from this. If you have a clone of a repository and someone changed a commit somewhere in the tree history, every commit including the one changed and newer will have to change names. At that point, your fork will have a mismatch and you can know that the history was changed.
|
||||
|
||||
What is the ``git add`` thingy for?
|
||||
-----------------------------------
|
||||
|
||||
Well the `git-add <https://git-scm.com/docs/git-add>`_ manual page is very descriptive about the subject but, once again, I'll try to explain it in metaphors.
|
||||
Think of it this way, ``git-commit`` saves the changes, but what changes ? That's exactly the question to answer. What changes ?
|
||||
What if I want to commit some changes but not others ? What if I want to commit all the code in one commit and all the comments in another ?
|
||||
|
||||
That's where the "staging area" comes in play. You use ``git-add`` to stage files to be committed. And whenever you run the ``git-commit`` command, it will commit whatever is staged to be committed, right ?
|
||||
|
||||
Practice
|
||||
========
|
||||
|
||||
Now that we've already explained a few concepts, let's see how this all fits together.
|
||||
|
||||
Step 1: Basic git configuration
|
||||
-------------------------------
|
||||
|
||||
The `Getting Started - First-Time Git Setup <https://git-scm.com/book/en/v2/Getting-Started-First-Time-Git-Setup>`_ has more detailed setup but I took out what's quick and easy for now.
|
||||
|
||||
First setup your name and email.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git config --global user.name "John Doe"
|
||||
$ git config --global user.email johndoe@example.com
|
||||
|
||||
You're done !
|
||||
|
||||
Step 2: Creating a repository
|
||||
------------------------------
|
||||
|
||||
This is easy. If you want to be able to commit, you need to create a project to work on. A "project" can be translated to a repository and everything in that directory will be tracked.
|
||||
So let's create a repository
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ # Navigate to where you'd like to create the repository
|
||||
$ cd ~/Documents/Projects/
|
||||
$ # Create repository directory
|
||||
$ mkdir example
|
||||
$ # Navigate into the newly created directory
|
||||
$ cd example
|
||||
$ # Create the repository
|
||||
$ git init
|
||||
|
||||
Yeah, it was only one command ``git init``. Told you it was easy, didn't I?
|
||||
|
||||
Step 3: Make a change
|
||||
---------------------
|
||||
|
||||
Let's create a file called ``README.md`` in the current directory (``~/Documents/Projects/example``) and put the following in it.
|
||||
|
||||
.. code:: md
|
||||
|
||||
# Example
|
||||
|
||||
This is an example repository.
|
||||
|
||||
And save it of course.
|
||||
|
||||
Step 4: Staging changes
|
||||
-----------------------
|
||||
|
||||
If you go back to the command line and check the following command, you'll see a similar result.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git status
|
||||
On branch master
|
||||
|
||||
No commits yet
|
||||
|
||||
Untracked files:
|
||||
(use "git add <file>..." to include in what will be committed)
|
||||
|
||||
README.md
|
||||
|
||||
nothing added to commit but untracked files present (use "git add" to track)
|
||||
|
||||
and ``README.md`` is in red (if you have colors enabled). This means that there is a file that is not tracked in your repository. We would like to track that one, let's stage it.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git add README.md
|
||||
$ git status
|
||||
On branch master
|
||||
|
||||
No commits yet
|
||||
|
||||
Changes to be committed:
|
||||
(use "git rm --cached <file>..." to unstage)
|
||||
|
||||
new file: README.md
|
||||
|
||||
And ``README.md`` would now become green (if you have colors enabled). This means that if you commit now, this new file will be added and tracked in the future for changes. Technically though, it is being tracked for changes right now.
|
||||
Let's prove it.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ echo "This repository is trying to give you a hands on experience with git to complement the post." >> README.md
|
||||
$ git status
|
||||
On branch master
|
||||
|
||||
No commits yet
|
||||
|
||||
Changes to be committed:
|
||||
(use "git rm --cached <file>..." to unstage)
|
||||
|
||||
new file: README.md
|
||||
|
||||
Changes not staged for commit:
|
||||
(use "git add <file>..." to update what will be committed)
|
||||
(use "git checkout -- <file>..." to discard changes in working directory)
|
||||
|
||||
modified: README.md
|
||||
|
||||
As you can see, git figured out that the file has been changed. Now let's add these changes too and move forward.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git add README.md
|
||||
$ git status
|
||||
On branch master
|
||||
|
||||
No commits yet
|
||||
|
||||
Changes to be committed:
|
||||
(use "git rm --cached <file>..." to unstage)
|
||||
|
||||
new file: README.md
|
||||
|
||||
|
||||
Step 5: Committing
|
||||
------------------
|
||||
|
||||
This will be as easy as the rest. Let's commit these changes with a good commit message to describe the changes.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git commit -m "Second commit"
|
||||
[master (root-commit) 0bd01aa] Second commit
|
||||
1 file changed, 4 insertions(+)
|
||||
create mode 100644 README.md
|
||||
|
||||
Very descriptive commit indeed !
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git status
|
||||
On branch master
|
||||
nothing to commit, working tree clean
|
||||
|
||||
Of course ! There is nothing to commit !
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git log
|
||||
commit 0bd01aa6826675f339c3173d7665ebb44c3894a7 (HEAD -> master)
|
||||
Author: John Doe <johndoe@example.com>
|
||||
Date: Mon Jul 22 20:57:40 2019 +0200
|
||||
|
||||
Second commit
|
||||
|
||||
You can definitely see who committed it, when and what the message was. You also have access to the changes made in this commit.
|
||||
|
||||
Conclusion
|
||||
==========
|
||||
|
||||
I'm going to end this post here, and will continue to build up the knowledge in new posts to come. For now, I think it's a good idea to simply work with commits.
|
||||
Next concepts to cover would be branching and merging.
|
|
@ -1,164 +0,0 @@
|
|||
.. title: Git! Rebase and Strategies
|
||||
.. date: 2019-08-10
|
||||
.. slug: git-rebase-and-strategies
|
||||
.. updated: 2019-08-10
|
||||
.. status: published
|
||||
.. tags: git, revision-control
|
||||
.. category: revision-control
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: Getting a little handle on git rebase
|
||||
.. type: text
|
||||
|
||||
In the previous topic, I talked about git remotes because it felt natural after branching and merging.
|
||||
|
||||
Now, the time has come to talk a little bit about ``rebase`` and some good cases to use it for.
|
||||
|
||||
.. TEASER_END
|
||||
|
||||
Requirements
|
||||
============
|
||||
|
||||
This has not changed people, it is still ``git``.
|
||||
|
||||
Rebase
|
||||
======
|
||||
|
||||
In ``git`` there are 2 ways of integrating your changes from one branch into another.
|
||||
|
||||
|
||||
We already talked about one; ``git-merge``. For more information about ``git-merge`` consult
|
||||
the `git basic branching and merging <https://git-scm.com/book/en/v2/Git-Branching-Basic-Branching-and-Merging#_basic_merging>`_
|
||||
manual.
|
||||
|
||||
|
||||
The other is ``git-rebase``.
|
||||
|
||||
While ``git-rebase`` has a lot of different uses,
|
||||
the basic use of it is described in the `git branching rebasing <https://git-scm.com/book/en/v2/Git-Branching-Rebasing>`_
|
||||
manual as:
|
||||
|
||||
"With the ``rebase`` command, you can take all the changes
|
||||
that were committed on one branch and replay them on
|
||||
a different branch."
|
||||
|
||||
In other words, all the commits you have made into the branch you are on will be set aside.
|
||||
Then, all the changes in the branch you are rebasing from will be applied to your branch.
|
||||
Finally, all your changes, that were set aside previously, will be applied back to your branch.
|
||||
|
||||
The beauty about this process is that you can keep your branch updated with upstream,
|
||||
while coding your changes. By the end of the process of adding your feature, your changes are
|
||||
ready to be merged upstream straight away. This is due to the fact that all the conflicts
|
||||
would've been resolved in each rebase.
|
||||
|
||||
.. note::
|
||||
|
||||
Branch and branch often!
|
||||
|
||||
if you merge, merge and merge often!
|
||||
|
||||
or rebase, and rebase often!
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
Rebase is used just like merge in our case.
|
||||
|
||||
First, let's create a branch and make a change in that branch.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git checkout -b rebasing-example
|
||||
Switched to a new branch 'rebasing-example'
|
||||
$ printf "\n# Rebase\n\nThis is a rebase branch.\n" >> README.md
|
||||
$ git add README.md
|
||||
$ git commit -m "Adding rebase section"
|
||||
[rebasing-example 4cd0ffe] Adding rebase section
|
||||
1 file changed, 4 insertions(+)
|
||||
$
|
||||
|
||||
Now let's assume someone (or yourself) made a change to the ``master`` branch.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git checkout master
|
||||
Switched to branch 'master'
|
||||
Your branch is up to date with 'origin/master'.
|
||||
$ printf "# Master\n\nThis is a master branch" >> master.md
|
||||
$ git add master.md
|
||||
$ git commit -m "Adding master file"
|
||||
[master 7fbdab9] Adding master file
|
||||
1 file changed, 3 insertions(+)
|
||||
create mode 100644 master.md
|
||||
$
|
||||
|
||||
I want to take a look at how the tree looks like before I attempt any changes.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git log --graph --oneline --all
|
||||
* 7fbdab9 (HEAD -> master) Adding master file
|
||||
| * 4cd0ffe (rebasing-example) Adding rebase section
|
||||
|/
|
||||
* 4f6bb31 (origin/master) Adding the git remote section
|
||||
* 0bd01aa Second commit
|
||||
|
||||
After both of our commits, the tree diverged.
|
||||
We are pointing to the ``master`` branch, I know that because ``HEAD`` points to ``master``.
|
||||
That commit is different than the commit that ``rebase-example`` branch points to.
|
||||
|
||||
These changes were introduced by someone else while I was adding the rebase section
|
||||
in the ``README.md`` file and they might be crucial for my application. In short,
|
||||
I want those changes in the code I am working on right now. Let's do that.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git checkout rebasing-example
|
||||
Switched to branch 'rebasing-example'
|
||||
$ git rebase master
|
||||
First, rewinding head to replay your work on top of it...
|
||||
Applying: Adding rebase section
|
||||
|
||||
And, let's look at the tree of course.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git log --graph --oneline --all
|
||||
* 1b2aa4a (HEAD -> rebasing-example) Adding rebase section
|
||||
* 7fbdab9 (master) Adding master file
|
||||
* 4f6bb31 (origin/master) Adding the git remote section
|
||||
* 0bd01aa Second commit
|
||||
|
||||
The tree looks linear now. ``HEAD`` is pointing to our branch.
|
||||
That commit points to the ``7fbdab9`` commit which the ``master`` branch
|
||||
also points to. So rebase set aside ``1b2aa4a`` to apply ``7fbdab9`` and then
|
||||
re-applied it back. Pretty neat huh ?!
|
||||
|
||||
My Strategy
|
||||
===========
|
||||
|
||||
I'm going to be honest with you. I do not know the different kinds of merge strategies.
|
||||
I've glanced at the names of a few but I've never looked at them closely enough to see which one is what.
|
||||
|
||||
What I use, I've used for a while. I learned it from somewhere and changed a few things in it to make it work for me.
|
||||
|
||||
First of all, I always fork a repository.
|
||||
I tend to stay away from creating a branch on the upstream repository unless it's my own personal project.
|
||||
On my fork, I freely roam. I am the king of my own fork and I create as many branches as I please.
|
||||
|
||||
I start with an assumption. The assumption is that my ``master`` branch is, for all intents and purposes,
|
||||
upstream. This means I keep it up to date with upstream's main branch.
|
||||
|
||||
When I make a branch, I make a branch from ``master``, this way I know it's up to date with upstream.
|
||||
I do my work on my branch. Every few hours, I update my ``master`` branch. After I update my ``master``
|
||||
branch, I ``rebase`` the ``master`` branch into my branch and voilà I'm up to date.
|
||||
|
||||
By the time my changes are ready to be merged back into upstream for any reason, they are ready to go.
|
||||
|
||||
That **MR** is gonna be ready to be merged in a jiffy.
|
||||
|
||||
Conclusion
|
||||
==========
|
||||
|
||||
From what I've read, I use one of those strategies described on some website. I don't know which one.
|
||||
But to me, it doesn't matter because it works for me. And if I need to adapt that for one reason or another,
|
||||
I can.
|
|
@ -1,193 +0,0 @@
|
|||
.. title: Git! Remotes...
|
||||
.. date: 2019-08-07
|
||||
.. slug: git-remotes
|
||||
.. updated: 2019-08-07
|
||||
.. status: published
|
||||
.. tags: git, revision-control
|
||||
.. category: revision-control
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: Let's have a deeper look at remotes
|
||||
.. type: text
|
||||
|
||||
In the previous post, we talked about branching and merging. We will say a few last words on branches in this post and dive into remotes.
|
||||
|
||||
What are remotes ? What are they for ? How are they used ?
|
||||
|
||||
Coming right up.
|
||||
|
||||
.. TEASER_END
|
||||
|
||||
Requirements
|
||||
============
|
||||
|
||||
In this post, we will need another requirement.
|
||||
|
||||
* First, you obviously need ``git``.
|
||||
* Second, you will need a git repository on a git server. Easier way is to create an account on `Gitlab <https://gitlab.com>`_, `GitHub <https://github.com>`_ or other similar services.
|
||||
|
||||
|
||||
Branches
|
||||
========
|
||||
|
||||
I have a few more things I need to say about branches...
|
||||
|
||||
If you came to the same conclusion that branches in ``git`` are *cheap*, you are correct.
|
||||
This is very important because this encourages you to create more branches.
|
||||
A lot of short living branches is a great way to work.
|
||||
Small features added here and there. Small projects to test new features, etc...
|
||||
|
||||
Second conclusion you can come up with from the previous post is that the ``master`` branch is not a *special* branch.
|
||||
People use it as a *special* branch, or the branch of **truth** by convention *only*.
|
||||
|
||||
I should also note that some services like **Gitlab** offer master branch protection on their own which would not allow master history overwriting.
|
||||
|
||||
The best next topic that comes after *branches* is a topic extremely similar to it, **remotes**.
|
||||
|
||||
Remotes
|
||||
=======
|
||||
|
||||
The description of ``git-remote`` from the `manual page <https://git-scm.com/docs/git-remote>`_ is simply
|
||||
|
||||
Manage the set of repositories ("remotes") whose branches you track.
|
||||
|
||||
That's exactly what it is. A way to manage *remote* repositories. Now we will be talking about managing
|
||||
them in a bit but let's talk about how to use them.
|
||||
I found the best way to think to work with them is that you can think of them as *branches*.
|
||||
That's exactly why I thought this would be best fit after that blog post.
|
||||
|
||||
Listing
|
||||
-------
|
||||
|
||||
Let's list them on our project and see what's what.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git remote -v
|
||||
|
||||
Okay! Nothing...
|
||||
|
||||
Alright, let's change that.
|
||||
|
||||
We don't have a *remote* repository we can manage. We need to create one.
|
||||
|
||||
Adding a remote
|
||||
---------------
|
||||
|
||||
So I went to **Gitlab** and I created a new repository.
|
||||
After creating the repository, you will get a box with commands that look similar to the following.
|
||||
|
||||
.. code:: text
|
||||
|
||||
cd existing_repo
|
||||
git remote rename origin old-origin
|
||||
git remote add origin git@gitlab.com:elazkani/git-project.git
|
||||
git push -u origin --all
|
||||
git push -u origin --tags
|
||||
|
||||
The first command is useless to us.
|
||||
The second is renaming a remote we do not have.
|
||||
Now the third command is interesting. This one is adding a remote called **origin**.
|
||||
We need that. The last two commands are there to push everything to the remote repository.
|
||||
|
||||
Let's copy that command and put it in our command line.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git remote add origin git@gitlab.com:elazkani/git-project.git
|
||||
$ git remote -v
|
||||
origin git@gitlab.com:elazkani/git-project.git (fetch)
|
||||
origin git@gitlab.com:elazkani/git-project.git (push)
|
||||
|
||||
If you look at that output carefully, you will notice that there is a *fetch* link and a *push* link.
|
||||
|
||||
Anyway, let's push.
|
||||
|
||||
Push
|
||||
----
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git push -u origin --all
|
||||
Enumerating objects: 3, done.
|
||||
Counting objects: 100% (3/3), done.
|
||||
Delta compression using up to 4 threads
|
||||
Compressing objects: 100% (2/2), done.
|
||||
Writing objects: 100% (3/3), 317 bytes | 317.00 KiB/s, done.
|
||||
Total 3 (delta 0), reused 0 (delta 0)
|
||||
To gitlab.com:elazkani/git-project.git
|
||||
* [new branch] master -> master
|
||||
Branch 'master' set up to track remote branch 'master' from 'origin'.
|
||||
|
||||
|
||||
We have pushed all of our changes to the remote now. If you refresh the web page, you should see the repository.
|
||||
|
||||
So what happens if someone else made a change and pushed to it, or maybe it was you from another computer.
|
||||
|
||||
Pulling from a remote
|
||||
---------------------
|
||||
|
||||
Most people using git usually do ``git pull`` and call it a day. We will not, we will dissect what that command is doing.
|
||||
|
||||
You might not know that you can configure ``git pull`` to do a *rebase* instead of a *merge*.
|
||||
That's not important for you at this stage but what's important is the clue it gives us. There is a *merge* in it.
|
||||
|
||||
What ``git pull`` actually does is a ``git fetch`` followed by a ``git merge``.
|
||||
So just like ``git push``, ``git fetch`` will download the changes from the remote.
|
||||
|
||||
If the *fetch* is followed by a *merge*, then where are we fetching to and merging from ?
|
||||
|
||||
This is where thinking about remotes as branches comes in.
|
||||
Think of ``origin/master`` as a branch, a local branch, because in some way it is.
|
||||
|
||||
So let's fetch.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git fetch origin master
|
||||
From gitlab.com:elazkani/git-project
|
||||
* branch master -> FETCH_HEAD
|
||||
|
||||
But we don't see any changes to our code !
|
||||
|
||||
Ahaaa ! But it did get the new stuff. Let me show you.
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git diff master origin/master
|
||||
diff --git a/README.md b/README.md
|
||||
index b4734ad..a492bbb 100644
|
||||
--- a/README.md
|
||||
+++ b/README.md
|
||||
@@ -2,3 +2,7 @@
|
||||
|
||||
This is an example repository.
|
||||
This repository is trying to give you a hands on experience with git to complement the post.
|
||||
+
|
||||
+# Remote
|
||||
+
|
||||
+This is the section on git remotes.
|
||||
|
||||
See ! Told you. Now let's get those changes into our master branch.
|
||||
You guessed it, we only need to merge from ``origin/master``
|
||||
|
||||
.. code:: text
|
||||
|
||||
$ git merge origin/master
|
||||
Updating 0bd01aa..4f6bb31
|
||||
Fast-forward
|
||||
README.md | 4 ++++
|
||||
1 file changed, 4 insertions(+)
|
||||
|
||||
That was easy wasn't it ?
|
||||
|
||||
Let's have a little chat, you and me !
|
||||
======================================
|
||||
|
||||
You can have multiple remotes. Make a good use of them. Go through all the different methodologies online to work with ``git`` and try them out.
|
||||
Find what works for you. Make use of branches and remotes. Make use of merging.
|
||||
|
||||
Conclusion
|
||||
==========
|
||||
|
||||
After talking about remotes in this post, you have some reading to do.
|
||||
I hope I've made your journey much simpler moving forward with this topic.
|
236
posts/text-editors/emacs-and-org-mode.org
Normal file
|
@ -0,0 +1,236 @@
|
|||
#+BEGIN_COMMENT
|
||||
.. title: Emacs and Org-mode
|
||||
.. date: 2020-08-22
|
||||
.. slug: emacs-and-org-mode
|
||||
.. updated: 2020-08-30
|
||||
.. status: published
|
||||
.. tags: emacs, org-mode, configuration,
|
||||
.. category: text-editors
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: I ditched VSCode and moved back to Emacs... You heard me !
|
||||
.. type: text
|
||||
#+END_COMMENT
|
||||
|
||||
I have recently found out, late I know, that the /VSCode/ distribution of the so called /Code - OSS/ is exactly that; a distribution.
|
||||
|
||||
Let me make it clear, the /VSCode/ binaries you download from *Microsoft* have as an upstream the *GitHub repository* named [[https://github.com/Microsoft/vscode][VSCode]] but are in fact not exactly the same code.
|
||||
*Microsoft* has already added a few gifts for you, including *telemetry*, not cool huh ?!
|
||||
Well, they tell you this in the documentation, urrrmmm [[https://github.com/microsoft/vscode/wiki/Differences-between-the-repository-and-Visual-Studio-Code][somewhere]].
|
||||
|
||||
At the same time, I was giving /Jupyter Notebook/ a try. I worked on my previous post in it before writing down the final result as a blog post.
|
||||
But at the back of my mind, there was always [[https://orgmode.org/][Org-mode]].
|
||||
|
||||
Putting one and one together, you've guessed it. I have moved to *Emacs*... again... for the umm I can't remember time.
|
||||
But this time, it is different ! I hope...
|
||||
|
||||
{{{TEASER_END}}}
|
||||
|
||||
* Back story
|
||||
I was using /Jupyter Notebooks/ as a way to write down notes. Organize things.
|
||||
I had a work around the /output/ and was able to clean it.
|
||||
But let's face it, it might work but it is designed more towards other goals.
|
||||
I want to write notes and the best way to work with notes is to keep them in the text, literally.
|
||||
I found a /VSCode/ extension that can handle /Org-mode/ in some capacity (I haven't tested it) so I decided to switch to /Emacs/ and keep the extension as a backup.
|
||||
|
||||
* Emacs Distribution of Doom
|
||||
Haha ! Very funny, I know. I went with [[https://github.com/hlissner/emacs-doom-themes][Doom]].
|
||||
Why? You may ask. I don't really have a good answer for you except the following.
|
||||
|
||||
* I didn't want to start from scratch, I wanted something with batteries included.
|
||||
* At the same time, I've tried /Doom/ before and I like how it does things.
|
||||
It is logical to me while at the same time very configurable.
|
||||
* I was able to get up and running very quickly. Granted, my needs are few.
|
||||
* I got /Python/ and /Golang/ auto-completion and /evil/ mode. I'm good to go !
|
||||
|
||||
Now let's dig down to my main focus here. Sure I switched editors but it was for a reason; *Org-mode*.
|
||||
|
||||
* Org-mode Configuration
|
||||
I will be talking about two different configuration options here.
|
||||
I am new to emacs so I will try to explain everything.
|
||||
|
||||
The two options are related to the difference between a /vanilla/ configuration and /Doom/'s version of the configuration.
|
||||
The differences are minor but they are worth talking about.
|
||||
|
||||
** New Org File
|
||||
If you've used /Org-mode/ before and created /org files/, you already know that you need to set a few values at the top of the file. These include the /title/, /author/, /description/ and a few other values to change settings and/or behavior.
|
||||
|
||||
It is a bit of a manual labor to write these few lines at the beginning of every file. I wanted to automate that. So I got inspiration from [[https://gitlab.com/shakthimaan/operation-blue-moon][shakthimaan]].
|
||||
|
||||
I used his method to create a small =define-skeleton= for a header.
|
||||
It looks something like this.
|
||||
|
||||
#+BEGIN_SRC emacs-lisp
|
||||
(define-skeleton generate-new-header-org
|
||||
"Prompt for title, description and tags"
|
||||
nil
|
||||
'(setq title (skeleton-read "Title: "))
|
||||
'(setq author (skeleton-read "Author: "))
|
||||
'(setq description (skeleton-read "Description: "))
|
||||
'(setq tags (skeleton-read "tags: "))
|
||||
"#+TITLE: " title \n
|
||||
"#+AUTHOR: " author \n
|
||||
"#+DESCRIPTION: " description \n
|
||||
"#+TAGS: " tags \n
|
||||
)
|
||||
#+END_SRC
|
||||
|
||||
You can use this later with =M-x= + =generate-new-header-org=.
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<div class="admonition note">
|
||||
<p class="admonition-title">Note</p>
|
||||
#+END_EXPORT
|
||||
=M-x= is the *Meta* key and *x* combination.
|
||||
Your *Meta* key can differ between the *Alt* on /Linux/ and *Command* on /Mac OS X/.
|
||||
|
||||
=M-x= will open a prompt for you to write in. Write the name you gave the skeleton, in this case it is =generate-new-header-org= and then hit the /Return/.
|
||||
#+BEGIN_EXPORT html
|
||||
</div>
|
||||
#+END_EXPORT
|
||||
|
||||
|
||||
** New Task
|
||||
[[https://gitlab.com/shakthimaan/operation-blue-moon][shakthimaan]] already created something for this. It looks like the following.
|
||||
|
||||
#+BEGIN_SRC emacs-lisp
|
||||
;; Create a new skeleton to generate a new =Task=
|
||||
(define-skeleton insert-org-entry
|
||||
"Prompt for task, estimate and category"
|
||||
nil
|
||||
'(setq task (skeleton-read "Task: "))
|
||||
'(setq estimate (skeleton-read "Estimate: "))
|
||||
'(setq owner (skeleton-read "Owner: "))
|
||||
'(setq category (skeleton-read "Category: "))
|
||||
'(setq timestamp (format-time-string "%s"))
|
||||
"** " task \n
|
||||
":PROPERTIES:" \n
|
||||
":ESTIMATED: " estimate \n
|
||||
":ACTUAL:" \n
|
||||
":OWNER: " owner \n
|
||||
":ID: " category "." timestamp \n
|
||||
":TASKID: " category "." timestamp \n
|
||||
":END:")
|
||||
#+END_SRC
|
||||
|
||||
This can also be used like the one above with =M-x= + =insert-org-entry=.
|
||||
|
||||
** Doom specific configuration
|
||||
Whatever is defined so far should work if you just add it to your configuration but if you use /Doom/ it would be a nice touch to integrate it with the workflow.
|
||||
|
||||
In =~/.doom.d/config.el=, wrap the previous definitions with =(after! org)=.
|
||||
It's a nice touch to add these skeletons after /Org-mode/ has loaded.
|
||||
|
||||
#+BEGIN_SRC emacs-lisp
|
||||
(after! org
|
||||
;; Create a skeleton to generate header org
|
||||
(define-skeleton generate-new-header-org
|
||||
"Prompt for title, description and tags"
|
||||
nil
|
||||
'(setq title (skeleton-read "Title: "))
|
||||
'(setq author (skeleton-read "Author: "))
|
||||
'(setq description (skeleton-read "Description: "))
|
||||
'(setq tags (skeleton-read "tags: "))
|
||||
"#+TITLE: " title \n
|
||||
"#+AUTHOR: " author \n
|
||||
"#+DESCRIPTION: " description \n
|
||||
"#+TAGS: " tags \n)
|
||||
|
||||
;; Create a new skeleton to generate a new =Task=
|
||||
(define-skeleton insert-org-entry
|
||||
"Prompt for task, estimate and category"
|
||||
nil
|
||||
'(setq task (skeleton-read "Task: "))
|
||||
'(setq estimate (skeleton-read "Estimate: "))
|
||||
'(setq owner (skeleton-read "Owner: "))
|
||||
'(setq category (skeleton-read "Category: "))
|
||||
'(setq timestamp (format-time-string "%s"))
|
||||
"** " task \n
|
||||
":PROPERTIES:" \n
|
||||
":ESTIMATED: " estimate \n
|
||||
":ACTUAL:" \n
|
||||
":OWNER: " owner \n
|
||||
":ID: " category "." timestamp \n
|
||||
":TASKID: " category "." timestamp \n
|
||||
":END:")
|
||||
)
|
||||
#+END_SRC
|
||||
|
||||
#+BEGIN_EXPORT html
|
||||
<div class="admonition warning">
|
||||
<p class="admonition-title">warning</p>
|
||||
#+END_EXPORT
|
||||
If you modify any file in =~/.doom.d/=, do not forget to run =doom sync= and =doom doctor= to update and check your configuration respectively.
|
||||
#+BEGIN_EXPORT html
|
||||
</div>
|
||||
#+END_EXPORT
|
||||
|
||||
** Final touches
|
||||
I wanted to add it to the menu system that comes with /Doom/ so I included the following in my =(after! ...)= block.
|
||||
|
||||
#+BEGIN_SRC emacs-lisp
|
||||
;; Add keybindings with the leader menu for everything above
|
||||
(map! :map org-mode-map
|
||||
(:leader
|
||||
(:prefix ("m", "+<localleader>")
|
||||
:n :desc "Generate New Header Org" "G" 'generate-new-header-org
|
||||
:n :desc "New Task Entry" "N" 'insert-org-entry
|
||||
))
|
||||
)
|
||||
#+END_SRC
|
||||
|
||||
Making the final configuration look like the following.
|
||||
|
||||
#+BEGIN_SRC emacs-lisp
|
||||
(after! org
|
||||
;; Create a skeleton to generate header org
|
||||
(define-skeleton generate-new-header-org
|
||||
"Prompt for title, description and tags"
|
||||
nil
|
||||
'(setq title (skeleton-read "Title: "))
|
||||
'(setq author (skeleton-read "Author: "))
|
||||
'(setq description (skeleton-read "Description: "))
|
||||
'(setq tags (skeleton-read "tags: "))
|
||||
"#+TITLE: " title \n
|
||||
"#+AUTHOR: " author \n
|
||||
"#+DESCRIPTION: " description \n
|
||||
"#+TAGS: " tags \n)
|
||||
|
||||
;; Create a new skeleton to generate a new =Task=
|
||||
(define-skeleton insert-org-entry
|
||||
"Prompt for task, estimate and category"
|
||||
nil
|
||||
'(setq task (skeleton-read "Task: "))
|
||||
'(setq estimate (skeleton-read "Estimate: "))
|
||||
'(setq owner (skeleton-read "Owner: "))
|
||||
'(setq category (skeleton-read "Category: "))
|
||||
'(setq timestamp (format-time-string "%s"))
|
||||
"** " task \n
|
||||
":PROPERTIES:" \n
|
||||
":ESTIMATED: " estimate \n
|
||||
":ACTUAL:" \n
|
||||
":OWNER: " owner \n
|
||||
":ID: " category "." timestamp \n
|
||||
":TASKID: " category "." timestamp \n
|
||||
":END:")
|
||||
|
||||
(map! (:when (featurep! :lang org)
|
||||
(:map org-mode-map
|
||||
(:localleader
|
||||
:n :desc "Generate New Header Org" "G" 'generate-new-header-org
|
||||
:n :desc "New Task Entry" "N" 'insert-org-entry
|
||||
))
|
||||
))
|
||||
)
|
||||
#+END_SRC
|
||||
|
||||
* What do I do now ?
|
||||
You might be asking yourself at this point, what does this all mean ?
|
||||
What do I do with this ? Where do I go ?
|
||||
|
||||
Well here's the thing. You find yourself wanting to create a new /org file/.
|
||||
You do so in emacs and follow it with =M-x= + =generate-new-header-org= (or =SPC m G= in *Doom*). /Emacs/ will ask you a few questions in the bottom left corner and once you answer them, your header should be all set.
|
||||
|
||||
You can follow that with =M-x= + =insert-org-entry= (or =SPC m N=) to generate a task. This will also ask you for input in the bottom left corner.
|
||||
|
||||
* Conclusion
|
||||
This should help me pick up the usage of /Org-mode/ faster. It is also a good idea if you've already configured your /Emacs/ to read all your /org files/ for a wider *agenda* view.
|
|
@ -1,241 +0,0 @@
|
|||
.. title: Emacs and Org-mode
|
||||
.. date: 2020-08-22
|
||||
.. slug: emacs-and-org-mode
|
||||
.. updated: 2020-08-22
|
||||
.. status: published
|
||||
.. tags: emacs, org-mode, configuration,
|
||||
.. category: text-editors
|
||||
.. authors: Elia el Lazkani
|
||||
.. description: I ditched VSCode and moved back to Emacs... You heard me !
|
||||
.. type: text
|
||||
|
||||
I have recently found out, late I know, that the *VSCode* distribution of the so called *Code - OSS* is exactly that; a distribution.
|
||||
|
||||
Let me make it clear, the *VSCode* binaries you download from **Microsoft** has an upstream the **GitHub repository*** named `VSCode <https://github.com/Microsoft/vscode>`_ but in fact is not exactly the same code.
|
||||
**Microsoft** has already added a few gifts for you, including *telemetry*, not cool huh ?!
|
||||
Well, they tell you this in the documentation, urrrmmm `somewhere <https://github.com/microsoft/vscode/wiki/Differences-between-the-repository-and-Visual-Studio-Code>`_.
|
||||
|
||||
At the same time, I was giving *Jupyter Notebook* a try. I worked on my previous post in it before writing down the final result as a blog post.
|
||||
But at the back of my mind, there was always `Org-mode <https://orgmode.org/>`_.
|
||||
|
||||
Putting one and one together, you've guessed it. I have moved to *Emacs*... again... for the umm I can't remember time.
|
||||
But this time, it is different ! I hope...
|
||||
|
||||
.. TEASER_END
|
||||
|
||||
Back story
|
||||
==========
|
||||
|
||||
I was using *Jupyter Notebooks* as a way to write down notes. Organize things.
|
||||
I had a work around the *output* and was able to clean it.
|
||||
But let's face it, it might work but it is designed more towards other goals.
|
||||
I want to write notes and the best way to work with notes is to keep in the text, literally.
|
||||
I found a *VSCode* extension that can handle *Org-mode* in some capacity (I haven't tested it) so I decided to switch to *Emacs* and keep the extention as a backup.
|
||||
|
||||
Emacs Distribution of Doom
|
||||
==========================
|
||||
|
||||
Haha ! Very funny, I know. I went with `Doom <https://github.com/hlissner/emacs-doom-themes>`_.
|
||||
Why? You may ask. I don't really have a good answer for you except the following.
|
||||
|
||||
* I didn't want to start from scratch, I wanted something with batteries included.
|
||||
* At the same time, I've tried *Doom* before and I like how it does things.
|
||||
It is logical to me while at the same time very configurable.
|
||||
* I was able to get up and running very quickly. Granted, my needs are few.
|
||||
* I got *Python* and *Golang* auto-completion and *evil* mode. I'm good to go !
|
||||
|
||||
Now let's dig down to my main focus here. Sure I switched editors but it was for a reason; **Org-mode**.
|
||||
|
||||
Org-mode Configuration
|
||||
======================
|
||||
|
||||
I will be talking about two different configuartion options here.
|
||||
I am new to emacs so I will try to explain everything.
|
||||
|
||||
The two options are related to the difference between a *vanilla* configuration and *Doom*'s version of the configuration.
|
||||
The differences are minor but they are worth talking about.
|
||||
|
||||
New Org File
|
||||
------------
|
||||
|
||||
If you've used *Org-mode* before and created *org files*, you already know that you need to set a few values at the top of the file. These include the *title*, *author*, *description* and a different other values to change setting and/or behavior.
|
||||
|
||||
It is a bit of a manual labor to write these few lines at the beginning of every file. I wanted to automate that. So I got inspiration from `shakthimaan <https://gitlab.com/shakthimaan/operation-blue-moon>`_.
|
||||
|
||||
I used his method to create a small ``define-skeleton`` for a header.
|
||||
It looks something like this.
|
||||
|
||||
.. code:: lisp
|
||||
|
||||
(define-skeleton generate-new-header-org
|
||||
"Prompt for title, description and tags"
|
||||
nil
|
||||
'(setq title (skeleton-read "Title: "))
|
||||
'(setq author (skeleton-read "Author: "))
|
||||
'(setq description (skeleton-read "Description: "))
|
||||
'(setq tags (skeleton-read "tags: "))
|
||||
"#+TITLE: " title \n
|
||||
"#+AUTHOR: " author \n
|
||||
"#+DESCRIPTION: " description \n
|
||||
"#+TAGS: " tags \n
|
||||
)
|
||||
|
||||
You can use this later with ``M-x`` + ``genrate-new-header-org``.
|
||||
|
||||
.. note::
|
||||
|
||||
``M-x`` is the **Meta** key and **x** combination.
|
||||
Your **Meta** key can differ between the **Alt** on *Linux* and **Command** on *Mac OS X*.
|
||||
|
||||
``M-x`` will open a prompt for you to write in. Write the name you gave the skeleton, in this case it is ``generate-new-header-org`` and then hit the *Return*.
|
||||
|
||||
New Task
|
||||
--------
|
||||
|
||||
`shakthimaan <https://gitlab.com/shakthimaan/operation-blue-moon>`_ already created something for this. It looks like the following.
|
||||
|
||||
.. code:: lisp
|
||||
|
||||
;; Create a new skeleton to generate a new =Task=
|
||||
(define-skeleton insert-org-entry
|
||||
"Prompt for task, estimate and category"
|
||||
nil
|
||||
'(setq task (skeleton-read "Task: "))
|
||||
'(setq estimate (skeleton-read "Estimate: "))
|
||||
'(setq owner (skeleton-read "Owner: "))
|
||||
'(setq category (skeleton-read "Category: "))
|
||||
'(setq timestamp (format-time-string "%s"))
|
||||
"** " task \n
|
||||
":PROPERTIES:" \n
|
||||
":ESTIMATED: " estimate \n
|
||||
":ACTUAL:" \n
|
||||
":OWNER: " owner \n
|
||||
":ID: " category "." timestamp \n
|
||||
":TASKID: " category "." timestamp \n
|
||||
":END:")
|
||||
|
||||
This can also be used like the one above with ``M-x`` + ``insert-org-entry``.
|
||||
|
||||
Doom specific configuration
|
||||
---------------------------
|
||||
|
||||
Whatever defined so far should work if you just add it to your configuration but if you use *Doom* it would a nice touch to integrate it with the workflow.
|
||||
|
||||
In ``~/.doom.d/config.el``, wrap the previous definitions with ``(after! org)``.
|
||||
It's a nice touch to add these skeletons after *Org-mode* has loaded.
|
||||
|
||||
.. code:: lisp
|
||||
|
||||
(after! org
|
||||
;; Create a skeleton to generate header org
|
||||
(define-skeleton generate-new-header-org
|
||||
"Prompt for title, description and tags"
|
||||
nil
|
||||
'(setq title (skeleton-read "Title: "))
|
||||
'(setq author (skeleton-read "Author: "))
|
||||
'(setq description (skeleton-read "Description: "))
|
||||
'(setq tags (skeleton-read "tags: "))
|
||||
"#+TITLE: " title \n
|
||||
"#+AUTHOR: " author \n
|
||||
"#+DESCRIPTION: " description \n
|
||||
"#+TAGS: " tags \n)
|
||||
|
||||
;; Create a new skeleton to generate a new =Task=
|
||||
(define-skeleton insert-org-entry
|
||||
"Prompt for task, estimate and category"
|
||||
nil
|
||||
'(setq task (skeleton-read "Task: "))
|
||||
'(setq estimate (skeleton-read "Estimate: "))
|
||||
'(setq owner (skeleton-read "Owner: "))
|
||||
'(setq category (skeleton-read "Category: "))
|
||||
'(setq timestamp (format-time-string "%s"))
|
||||
"** " task \n
|
||||
":PROPERTIES:" \n
|
||||
":ESTIMATED: " estimate \n
|
||||
":ACTUAL:" \n
|
||||
":OWNER: " owner \n
|
||||
":ID: " category "." timestamp \n
|
||||
":TASKID: " category "." timestamp \n
|
||||
":END:")
|
||||
)
|
||||
|
||||
|
||||
.. warning::
|
||||
|
||||
If you modify any file in ``~/.doom.d/``, do not forget to run ``doom sync`` and ``doom doctor`` to update and check your configuration respectively.
|
||||
|
||||
Final touches
|
||||
-------------
|
||||
|
||||
I wanted to add it to the menu system that comes with *Doom* so I included the following in my ``(after! ...)`` block.
|
||||
|
||||
.. code:: lisp
|
||||
|
||||
;; Add keybindings with the leader menu for everything above
|
||||
(map! :map org-mode-map
|
||||
(:leader
|
||||
(:prefix ("m", "+<localleader>")
|
||||
:n :desc "Generate New Header Org" "G" 'generate-new-header-org
|
||||
:n :desc "New Task Entry" "N" 'insert-org-entry
|
||||
))
|
||||
)
|
||||
|
||||
Making the final configuration look like the following.
|
||||
|
||||
.. code:: lisp
|
||||
|
||||
(after! org
|
||||
;; Create a skeleton to generate header org
|
||||
(define-skeleton generate-new-header-org
|
||||
"Prompt for title, description and tags"
|
||||
nil
|
||||
'(setq title (skeleton-read "Title: "))
|
||||
'(setq author (skeleton-read "Author: "))
|
||||
'(setq description (skeleton-read "Description: "))
|
||||
'(setq tags (skeleton-read "tags: "))
|
||||
"#+TITLE: " title \n
|
||||
"#+AUTHOR: " author \n
|
||||
"#+DESCRIPTION: " description \n
|
||||
"#+TAGS: " tags \n)
|
||||
|
||||
;; Create a new skeleton to generate a new =Task=
|
||||
(define-skeleton insert-org-entry
|
||||
"Prompt for task, estimate and category"
|
||||
nil
|
||||
'(setq task (skeleton-read "Task: "))
|
||||
'(setq estimate (skeleton-read "Estimate: "))
|
||||
'(setq owner (skeleton-read "Owner: "))
|
||||
'(setq category (skeleton-read "Category: "))
|
||||
'(setq timestamp (format-time-string "%s"))
|
||||
"** " task \n
|
||||
":PROPERTIES:" \n
|
||||
":ESTIMATED: " estimate \n
|
||||
":ACTUAL:" \n
|
||||
":OWNER: " owner \n
|
||||
":ID: " category "." timestamp \n
|
||||
":TASKID: " category "." timestamp \n
|
||||
":END:")
|
||||
|
||||
(map! :map org-mode-map
|
||||
(:leader
|
||||
(:prefix ("m", "+<localleader>")
|
||||
:n :desc "Generate New Header Org" "G" 'generate-new-header-org
|
||||
:n :desc "New Task Entry" "N" 'insert-org-entry
|
||||
)))
|
||||
)
|
||||
|
||||
What do I do now ?
|
||||
==================
|
||||
|
||||
You might be asking yourself at this point, what does this all mean ?
|
||||
What do I do with this ? Where do I go ?
|
||||
|
||||
Well here's the thing. You find yourself wanting to create a new *org file*.
|
||||
You do so in emacs and follow it with ``M-x`` + ``generate-new-header-org`` (or ``SPC m G`` in **Doom**). *Emacs* will ask you a few questions in the bottom left corner and once you answer then, your header should be all set.
|
||||
|
||||
You can follow that with ``M-x`` + ``insert-org-entry`` (or ``SPC m N``) to generate a task. This will also ask you for input in the bottom left corner.
|
||||
|
||||
Conclusion
|
||||
==========
|
||||
|
||||
This should help me pick up the usage of *Org-mode* faster. It is also a good idea if you've already configured your *Emacs* to read all your *org file* for a wider **agenda** view.
|
10751
themes/custom/assets/css/bootstrap.css
vendored
12
themes/custom/assets/css/bootstrap.min.css
vendored
|
@ -1,12 +0,0 @@
|
|||
[Theme]
|
||||
engine = mako
|
||||
parent = bootstrap4
|
||||
author = The Nikola Contributors
|
||||
author_url = https://getnikola.com/
|
||||
license = MIT
|
||||
based_on = Bootstrap 4 <http://getbootstrap.com/>
|
||||
tags = bootstrap
|
||||
|
||||
[Family]
|
||||
family = bootstrap4
|
||||
|
1
themes/willy-theme
Symbolic link
|
@ -0,0 +1 @@
|
|||
nikola-themes/v8/willy-theme
|