From 6a2393d8053ab9ff9651a6f9c3b243cced7e4439 Mon Sep 17 00:00:00 2001
From: Bob Gilligan
Date: Tue, 14 Oct 2008 13:54:35 -0700
Subject: Bugfix: 3744

When a new member is added to a RAID group that holds the root
filesystem, we need to re-install grub so that the new disk will be
bootable.  But this can only be done after the RAID set has completed
rebuilding.  Added a mechanism that uses the event-notification
infrastructure of "mdadm" to trigger the re-installation of grub after
the rebuild completes.
---
 scripts/vyatta-raid-event | 104 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 104 insertions(+)
 create mode 100644 scripts/vyatta-raid-event

diff --git a/scripts/vyatta-raid-event b/scripts/vyatta-raid-event
new file mode 100644
index 00000000..f279a57d
--- /dev/null
+++ b/scripts/vyatta-raid-event
@@ -0,0 +1,104 @@
+#!/bin/bash
+#
+# **** License ****
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# This code was originally developed by Vyatta, Inc.
+# Portions created by Vyatta are Copyright (C) 2006, 2007 Vyatta, Inc.
+# All Rights Reserved.
+#
+# Author: Bob Gilligan
+# Date: 2008
+# Description: A script to handle events from the Linux Software RAID
+#              subsystem.
+#
+# **** End License ****
+#
+# This script is called by the "mdadm" daemon running in "monitor" mode
+# whenever an event occurs in the RAID subsystem.  The script is called
+# with two or three arguments: The first argument is always the name of
+# the event, e.g. "RebuildFinished".  The second argument is the name of
+# the RAID set device that the event pertains to, e.g. "/dev/md0".  The
+# third argument is provided for some events, and gives the name of the
+# RAID set member that the event pertains to, e.g. "/dev/sda2".
+#
+# See the mdadm(8) man page for more details on the events that it provides.
+#
+
+# The script will be called with 2 or 3 arguments, depending on the event.
+if [ $# -lt 2 ]; then
+    logger -t "RAID" -p local0.warning "vyatta-raid-event: Error: Not enough args: $*"
+    # We can't do anything if we don't know the event and the RAID device
+    # it pertains to.
+    exit 1
+fi
+if [ $# -gt 3 ]; then
+    logger -t "RAID" -p local0.warning "vyatta-raid-event: Warning: too many args: $*"
+    # Be robust: try to complete the task with the args we know about.
+fi
+
+event=$1
+raid_set=$2
+
+case $event in
+
+    RebuildFinished)
+	logger -t "RAID" -p local0.warning "event ${event} ${raid_set}"
+
+	# We need to update grub at the time that a resync completes
+	# on the root filesystem so that the new member disk will be
+	# bootable.  The trailing space in the grep pattern keeps, e.g.,
+	# /dev/md1 from also matching /dev/md10.
+	mounted_on=`mount | grep "^${raid_set} " | awk '{ print $3 }'`
+	if [ "$mounted_on" = "/" ]; then
+	    raid_set_dev=${raid_set##*/}
+	    if [ -e /sys/block/${raid_set_dev}/md/degraded ]; then
+		degraded=`cat /sys/block/${raid_set_dev}/md/degraded`
+	    else
+		degraded=0
+	    fi
+	    if [ $degraded -eq 0 ]; then
+		logger -t "RAID" -p local0.warning \
+		    "RAID set ${raid_set} holds root filesystem. Updating grub."
+		touch /tmp/raid-grub-install-log
+		grub-install --no-floppy --recheck --root-directory=/ ${raid_set} \
+		    >> /tmp/raid-grub-install-log 2>&1
+		if [ $? -ne 0 ]; then
+		    logger -t "RAID" -p local0.warning \
+			"grub-install failed for $raid_set"
+		fi
+	    else
+		logger -t "RAID" -p local0.warning \
+		    "RAID set ${raid_set} is still degraded. No action taken."
+	    fi
+	else
+	    logger -t "RAID" -p local0.warning \
+		"RAID set ${raid_set} does not hold the root filesystem. No action taken."
+	fi
+	;;

+    DeviceDisappeared | RebuildStarted | Rebuild?? | NewArray | \
+    DegradedArray | MoveSpare | SparesMissing | TestMessage)
+	logger -t "RAID" -p local0.warning \
+	    "event ${event} ${raid_set}: No action taken"
+	;;

+    Fail | FailSpare | SpareActive)
+	member=$3
+	logger -t "RAID" -p local0.warning \
+	    "event ${event} ${raid_set} ${member}: No action taken"
+	;;

+    *)
+	logger -t "RAID" -p local0.warning \
+	    "event ${event} unknown. No action taken"
+	;;

+esac
-- 
cgit v1.2.3

From c9c4552ca7545d3916f64b5c9fbef553d80930f6 Mon Sep 17 00:00:00 2001
From: Mohit Mehta
Date: Sat, 11 Oct 2008 17:37:11 -0700
Subject: Redo internal snmpv3 user creation for linkUpDownNotifications
---
 scripts/snmp/vyatta-snmp.pl | 30 +++++++++++++++++++++---------
 1 file changed, 21 insertions(+), 9 deletions(-)

diff --git a/scripts/snmp/vyatta-snmp.pl b/scripts/snmp/vyatta-snmp.pl
index b43485b3..6de63fb0 100644
--- a/scripts/snmp/vyatta-snmp.pl
+++ b/scripts/snmp/vyatta-snmp.pl
@@ -49,11 +49,11 @@ sub snmp_init {
 }
 
 sub snmp_restart {
-    system("$snmp_init restart");
+    system("$snmp_init restart > /dev/null 2>&1 &");
 }
 
 sub snmp_stop {
-    system("$snmp_init stop");
+    system("$snmp_init stop > /dev/null 2>&1");
 }
 
 sub snmp_get_constants {
@@ -127,10 +127,18 @@ sub snmp_get_values {
     my @trap_targets = $config->returnValues("trap-target");
 
     if ($#trap_targets >= 0) {
-        # code for creating a snmpv3 user, setting access-level for it and use user to do internal snmpv3 requests
-        snmp_create_snmpv3_user();
-        snmp_write_snmpv3_user();
-        $output .= "iquerySecName vyatta\n";
+
+        # linkUpDownNotifications configures the Event MIB tables to monitor the ifTable for interfaces going up or down.
+        # To make the internal queries needed to retrieve that information, an snmpv3 user must be created;
+        # we write the appropriate values to /var/lib/snmp/snmpd.conf and /usr/share/snmp/snmpd.conf to do so.
+        # External snmpv3 queries (from localhost or any other IP) using this username will not be responded to.
+
+        my $generate_vyatta_user_append_string = join "", map { unpack "H*", chr(rand(256)) } 1..8; # generate a random 16-character hex string
+        # create an internal snmpv3 user of the form 'vyattaxxxxxxxxxxxxxxxx'
"$generate_vyatta_user_append_string"; + snmp_create_snmpv3_user($vyatta_user); + snmp_write_snmpv3_user($vyatta_user); + $output .= "iquerySecName $vyatta_user\n"; # code to activate link up down traps $output .= "linkUpDownNotifications yes\n"; } @@ -143,7 +151,9 @@ sub snmp_get_values { sub snmp_create_snmpv3_user { - my $createuser = "createUser vyatta MD5 \"vyatta\" DES"; + my $vyatta_user = shift; + my $passphrase = join "", map { unpack "H*", chr(rand(256)) } 1..16; #generate a random 32 character hex string + my $createuser = "createUser $vyatta_user MD5 \"$passphrase\" DES"; open(my $fh, '>>', $snmp_snmpv3_createuser_conf) || die "Couldn't open $snmp_snmpv3_createuser_conf - $!"; print $fh $createuser; close $fh; @@ -151,8 +161,10 @@ sub snmp_create_snmpv3_user { sub snmp_write_snmpv3_user { - my $user = "rwuser vyatta"; - open(my $fh, '>', $snmp_snmpv3_user_conf) || die "Couldn't open $snmp_snmpv3_user_conf - $!"; + my $vyatta_user = shift; + my $user = "rouser $vyatta_user\n"; + system ("sed -i '/user[[:space:]]*vyatta[[:alnum:]]*/d' $snmp_snmpv3_user_conf;"); + open(my $fh, '>>', $snmp_snmpv3_user_conf) || die "Couldn't open $snmp_snmpv3_user_conf - $!"; print $fh $user; close $fh; } -- cgit v1.2.3 From 523d1b674931a3bf8c97706d237da06b4b425e63 Mon Sep 17 00:00:00 2001 From: Bob Gilligan Date: Mon, 20 Oct 2008 10:57:33 -0700 Subject: Bugfix: 3775: Allow RAID-1 to be configured on disks with different sizes --- scripts/install-system | 61 +++++++++++++++++++++++++++++++++----------------- 1 file changed, 40 insertions(+), 21 deletions(-) (limited to 'scripts') diff --git a/scripts/install-system b/scripts/install-system index 9980e210..f5484e9a 100644 --- a/scripts/install-system +++ b/scripts/install-system @@ -305,9 +305,11 @@ check_for_new_raid () { numdrives=`echo $drives | wc -w` + # Need at least two drives for RAID-1. We don't yet have the code + # to handle selection of two from a set of 3 or more, so for now, we + # only support two drives. + # if [ $numdrives -ne 2 ]; then - # debug - echo "check_for_new_raid: don't have 2 drives" return fi @@ -317,13 +319,14 @@ check_for_new_raid () { drivesize1=$(get_drive_size $drive1) drivesize2=$(get_drive_size $drive2) - if [ $drivesize1 -ne $drivesize2 ]; then - # debug - echo "check_for_new_raid: have 2 drives, but different sizes" + # Both drives must have enough space to hold our minimum root filesystem + # + if [ $drivesize1 -lt $ROOT_MIN -o $drivesize2 -lt $ROOT_MIN ]; then return fi - echo "You have two identical disk drives:" + + echo "You have two disk drives:" echo -e "\t$drive1 \t$drivesize1 MB" echo -e "\t$drive2 \t$drivesize2 MB" @@ -334,6 +337,13 @@ check_for_new_raid () { return fi + if [ $drivesize1 -ne $drivesize2 ]; then + echo "Since the disks are not the same size, we will use the smaller" + echo "of the two sizes in configuring the RAID-1 set. This will" + echo "waste some space on the larger drive." + echo "" + fi + # Configure RAID-1 echo "This process will erase all data on both drives." echo -n "Are you sure you want to do this? (Yes/No) [No]: " @@ -358,27 +368,36 @@ check_for_new_raid () { part_start_offset=2 part_diag_size=60 - echo "Would you like me to create a $part_diag_size MB partition for diagnostics?" 
- echo -n "(Yes/No) [No]: " - diag_response=$(get_response "No" "Yes No Y N") - if [ "$diag_response" == "yes" ] || [ "$diag_response" == "y" ]; then - for drive in $drives - do - echo "Creating diag partition on drive $drive" - create_partitions "$drive" $part_diag_size $part_start_offset "no" - sfdisk --change-id /dev/$drive 1 0x6 - done - data_dev=2 - let part_start_offset+=$part_diag_size + if [ $drivesize1 -gt $drivesize2 ]; then + size=$drivesize1 else - data_dev=1 + size=$drivesize2 fi + let min_size_with_diag=${MIN_ROOT}+${part_diag_size} + if [ $size -ge $min_size_with_diag ]; then + echo "Would you like me to create a $part_diag_size MB partition for diagnostics?" + echo -n "(Yes/No) [No]: " + diag_response=$(get_response "No" "Yes No Y N") + if [ "$diag_response" == "yes" ] || [ "$diag_response" == "y" ]; then + for drive in $drives + do + echo "Creating diag partition on drive $drive" + create_partitions "$drive" $part_diag_size $part_start_offset "no" + sfdisk --change-id /dev/$drive 1 0x6 + done + data_dev=2 + let part_start_offset+=$part_diag_size + else + data_dev=1 + fi + fi + + let size-=$part_start_offset + for drive in $drives do echo "Creating data partition: /dev/${drive}${data_dev}" - size=$(get_drive_size $drive) - let size-=$part_start_offset create_partitions "$drive" $size $part_start_offset "no" sfdisk --change-id /dev/$drive $data_dev 0xfd done -- cgit v1.2.3 From 43c3c737ae7dc7c89d46359e8cd2ada2bc281f5b Mon Sep 17 00:00:00 2001 From: Bob Gilligan Date: Mon, 20 Oct 2008 14:31:52 -0700 Subject: Bugfix: 3687: Only start mdadm if we have a RAID group as root filesystem. --- scripts/install-system | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'scripts') diff --git a/scripts/install-system b/scripts/install-system index f5484e9a..ff7a5d41 100644 --- a/scripts/install-system +++ b/scripts/install-system @@ -1320,6 +1320,21 @@ if [ -z $UNION ]; then sed -i 's/requisite[ \t][ \t]*pam_securetty.so/required pam_securetty.so/' $rootfsdir/etc/pam.d/login fi +# +# Only start the mdadm daemon if we have the root filesystem running +# on a RAID set. Since this script is the only way that the root filesystem +# ever gets set up, we can do this configuration here. +# +MDADM_CONFIG_FILE=$rootfsdir/etc/default/mdadm +if [ -e $MDADM_CONFIG_FILE ]; then + if [ ${INSTALL_DRIVE:0:2} = "md" ]; then + sed -i 's/^START_DAEMON.*$/START_DAEMON=true/' $MDADM_CONFIG_FILE + else + sed -i 's/^START_DAEMON.*$/START_DAEMON=false/' $MDADM_CONFIG_FILE + fi +fi + + # postinst hook if [ -e /opt/vyatta/etc/install-system/postinst ]; then echo "running post-install script" -- cgit v1.2.3