Merge pull request !2 from hy/work
openeuler-ci-bot 2020-01-12 17:10:48 +08:00 committed by Gitee
commit 876f4004f9
12 changed files with 27 additions and 487 deletions


@@ -1,52 +0,0 @@
From c36baf7f81651a72be899d9eb62ed77db28cb608 Mon Sep 17 00:00:00 2001
From: openEuler Buildteam <buildteam@openeuler.org>
Date: Tue, 31 Dec 2019 14:00:22 +0000
Subject: [PATCH] use 65-md-incremental.rules to add block devices to
the array pool and handle cases such as the anaconda command line options
---
udev-md-raid-assembly.rules | 30 ------------------------------
1 file changed, 30 deletions(-)
diff --git a/udev-md-raid-assembly.rules b/udev-md-raid-assembly.rules
index 9f055ed..aa67af4 100644
--- a/udev-md-raid-assembly.rules
+++ b/udev-md-raid-assembly.rules
@@ -5,34 +5,4 @@
ENV{ANACONDA}=="?*", GOTO="md_inc_end"
# assemble md arrays
-SUBSYSTEM!="block", GOTO="md_inc_end"
-
-# skip non-initialized devices
-ENV{SYSTEMD_READY}=="0", GOTO="md_inc_end"
-
-# handle potential components of arrays (the ones supported by md)
-ENV{ID_FS_TYPE}=="linux_raid_member", GOTO="md_inc"
-
-# "noiswmd" on kernel command line stops mdadm from handling
-# "isw" (aka IMSM - Intel RAID).
-# "nodmraid" on kernel command line stops mdadm from handling
-# "isw" or "ddf".
-IMPORT{cmdline}="noiswmd"
-IMPORT{cmdline}="nodmraid"
-
-ENV{nodmraid}=="?*", GOTO="md_inc_end"
-ENV{ID_FS_TYPE}=="ddf_raid_member", GOTO="md_inc"
-ENV{noiswmd}=="?*", GOTO="md_inc_end"
-ENV{ID_FS_TYPE}=="isw_raid_member", GOTO="md_inc"
-GOTO="md_inc_end"
-
-LABEL="md_inc"
-
-# remember you can limit what gets auto/incrementally assembled by
-# mdadm.conf(5)'s 'AUTO' and selectively whitelist using 'ARRAY'
-ACTION=="add|change", IMPORT{program}="BINDIR/mdadm --incremental --export $devnode --offroot $env{DEVLINKS}"
-ACTION=="add|change", ENV{MD_STARTED}=="*unsafe*", ENV{MD_FOREIGN}=="no", ENV{SYSTEMD_WANTS}+="mdadm-last-resort@$env{MD_DEVICE}.timer"
-ACTION=="remove", ENV{ID_PATH}=="?*", RUN+="BINDIR/mdadm -If $name --path $env{ID_PATH}"
-ACTION=="remove", ENV{ID_PATH}!="?*", RUN+="BINDIR/mdadm -If $name"
-
LABEL="md_inc_end"
--
1.7.12.4
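As the removed rule comments note, what gets auto/incrementally assembled can still be limited through mdadm.conf(5). A minimal sketch of such a policy (the metadata types, array name, and UUID below are placeholders, not taken from this patch):

    # /etc/mdadm.conf -- hypothetical auto-assembly policy
    # Only auto-assemble native 1.x metadata arrays, ignore all other types.
    AUTO +1.x -all
    # Selectively whitelist a single array by UUID (placeholder value).
    ARRAY /dev/md0 UUID=00000000:00000000:00000000:00000000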


@@ -1,23 +0,0 @@
--- mdadm-3.2.1/Makefile.static 2011-03-27 22:31:20.000000000 -0400
+++ mdadm-3.2.1/Makefile 2011-03-28 10:16:55.277900184 -0400
@@ -238,16 +238,16 @@ install : mdadm mdmon install-man instal
$(INSTALL) -D $(STRIP) -m 755 mdmon $(DESTDIR)$(BINDIR)/mdmon
install-static : mdadm.static install-man
- $(INSTALL) -D $(STRIP) -m 755 mdadm.static $(DESTDIR)$(BINDIR)/mdadm
+ $(INSTALL) -D $(STRIP) -m 755 mdadm.static $(DESTDIR)$(BINDIR)/mdadm.static
install-tcc : mdadm.tcc install-man
- $(INSTALL) -D $(STRIP) -m 755 mdadm.tcc $(DESTDIR)$(BINDIR)/mdadm
+ $(INSTALL) -D $(STRIP) -m 755 mdadm.tcc $(DESTDIR)$(BINDIR)/mdadm.tcc
install-uclibc : mdadm.uclibc install-man
- $(INSTALL) -D $(STRIP) -m 755 mdadm.uclibc $(DESTDIR)$(BINDIR)/mdadm
+ $(INSTALL) -D $(STRIP) -m 755 mdadm.uclibc $(DESTDIR)$(BINDIR)/mdadm.uclibc
install-klibc : mdadm.klibc install-man
- $(INSTALL) -D $(STRIP) -m 755 mdadm.klibc $(DESTDIR)$(BINDIR)/mdadm
+ $(INSTALL) -D $(STRIP) -m 755 mdadm.klibc $(DESTDIR)$(BINDIR)/mdadm.klibc
install-man: mdadm.8 md.4 mdadm.conf.5 mdmon.8
$(INSTALL) -D -m 644 mdadm.8 $(DESTDIR)$(MAN8DIR)/mdadm.8
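For reference, the static-build targets this patch touches can be exercised roughly as follows (a sketch; the DESTDIR/BINDIR values are examples only):

    make mdadm.static
    make install-static DESTDIR=/tmp/buildroot BINDIR=/usr/sbin
    # with the patch applied the binary lands in .../usr/sbin/mdadm.static
    # instead of overwriting .../usr/sbin/mdadm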


@@ -1,3 +0,0 @@
# Run system wide raid-check once a week on Sunday at 1am by default
0 1 * * Sun root /usr/sbin/raid-check


@@ -1,60 +0,0 @@
#!/bin/bash
#
# Configuration file for /etc/cron.weekly/raid-check
#
# options:
# ENABLED - must be yes in order for the raid check to proceed
# CHECK - can be either check or repair depending on the type of
# operation the user desires. A check operation will scan
# the drives looking for bad sectors and automatically
# repair only bad sectors. If it finds good sectors that
# contain bad data (meaning that the data in a sector does
# not agree with what the data from another disk indicates
# the data should be, for example the parity block + the other
# data blocks would cause us to think that this data block
# is incorrect), then it does nothing but increment the
# counter in the file /sys/block/$dev/md/mismatch_count.
# This allows the sysadmin to inspect the data in the sector
# and the data that would be produced by rebuilding the
# sector from redundant information and pick the correct
# data to keep. The repair option does the same thing, but
# when it encounters a mismatch in the data, it automatically
# updates the data to be consistent. However, since we really
# don't know whether it's the parity or the data block that's
# correct (or which data block in the case of raid1), it's
# luck of the draw whether or not the user gets the right
# data instead of the bad data. This option is the default
# option for devices not listed in either CHECK_DEVS or
# REPAIR_DEVS.
# CHECK_DEVS - a space delimited list of devs that the user specifically
# wants to run a check operation on.
# REPAIR_DEVS - a space delimited list of devs that the user
# specifically wants to run a repair on.
# SKIP_DEVS - a space delimited list of devs that should be skipped
# NICE - Change the raid check CPU and IO priority in order to make
# the system more responsive during lengthy checks. Valid
# values are high, normal, low, idle.
# MAXCONCURRENT - Limit the number of devices to be checked at a time.
# By default all devices will be checked at the same time.
#
# Note: the raid-check script intentionally runs last in the cron.weekly
# sequence. This is so we can wait for all the resync operations to complete
# and then check the mismatch_count on each array without unduly delaying
# other weekly cron jobs. If any arrays have a non-0 mismatch_count after
# the check completes, we echo a warning to stdout which will then be emailed
# to the admin as long as mails from cron jobs have not been redirected to
# /dev/null. We do not wait for repair operations to complete as the
# md stack will correct any mismatch_cnts automatically.
#
# Note2: you cannot use symbolic names for the raid devices, such as
# /dev/md/root. The names used in this file must match the names seen in
# /proc/mdstat and in /sys/block.
ENABLED=yes
CHECK=check
NICE=low
# To check devs /dev/md0 and /dev/md3, use "md0 md3"
CHECK_DEVS=""
REPAIR_DEVS=""
SKIP_DEVS=""
MAXCONCURRENT=
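A filled-in example of the options documented above (device names are illustrative only):

    # check md0 weekly, repair md3, never touch md2,
    # run at idle priority and scrub only one array at a time
    ENABLED=yes
    CHECK=check
    NICE=idle
    CHECK_DEVS="md0"
    REPAIR_DEVS="md3"
    SKIP_DEVS="md2"
    MAXCONCURRENT=1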


@@ -1 +0,0 @@
d /var/run/mdadm 0710 root root -
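That removed line is a tmpfiles.d(5) entry; systemd applies it at boot, but it can also be applied by hand, assuming the file is installed as /usr/lib/tmpfiles.d/mdadm.conf:

    systemd-tmpfiles --create /usr/lib/tmpfiles.d/mdadm.conf
    ls -ld /var/run/mdadm    # expected: drwx--x--- root root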


@@ -1,67 +0,0 @@
# This file causes mdadm to be run automatically on block devices with
# Linux RAID (mdadm) signatures.
# See udev(8) for syntax
# Don't process any events if anaconda is running as anaconda brings up
# raid devices manually
ENV{ANACONDA}=="?*", GOTO="md_end"
# Also don't process disks that are slated to be a multipath device
ENV{DM_MULTIPATH_DEVICE_PATH}=="1", GOTO="md_end"
# We process add events on block devices (since they are ready as soon as
# they are added to the system), but we must process change events as well
# on any dm devices (like LUKS partitions or LVM logical volumes) and on
# md devices because both of these first get added, then get brought live
# and trigger a change event. The reason we don't process change events
# on bare hard disks is because if you stop all arrays on a disk, then
# run fdisk on the disk to change the partitions, when fdisk exits it
# triggers a change event, and we want to wait until all the fdisks on
# all member disks are done before we do anything. Unfortunately, we have
# no way of knowing that, so we just have to let those arrays be brought
# up manually after fdisk has been run on all of the disks.
# First, process all add events (md and dm devices will not really do
# anything here, just regular disks, and this also won't get any imsm
# array members either)
SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="linux_raid_member", \
IMPORT{program}="/sbin/mdadm -I $env{DEVNAME} --export $devnode --offroot ${DEVLINKS}"
SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="linux_raid_member", \
ENV{MD_STARTED}=="*unsafe*", ENV{MD_FOREIGN}=="no", ENV{SYSTEMD_WANTS}+="mdadm-last-resort@$env{MD_DEVICE}.timer"
SUBSYSTEM=="block", ACTION=="remove", ENV{ID_PATH}=="?*", \
ENV{ID_FS_TYPE}=="linux_raid_member", \
RUN+="/sbin/mdadm -If $name --path $env{ID_PATH}"
SUBSYSTEM=="block", ACTION=="remove", ENV{ID_PATH}!="?*", \
ENV{ID_FS_TYPE}=="linux_raid_member", \
RUN+="/sbin/mdadm -If $name"
# Next, check to make sure the BIOS raid stuff wasn't turned off via cmdline
IMPORT{cmdline}="noiswmd"
IMPORT{cmdline}="nodmraid"
ENV{noiswmd}=="?*", GOTO="md_imsm_inc_end"
ENV{nodmraid}=="?*", GOTO="md_imsm_inc_end"
SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="isw_raid_member", \
RUN+="/sbin/mdadm -I $env{DEVNAME}"
SUBSYSTEM=="block", ACTION=="remove", ENV{ID_PATH}=="?*", \
ENV{ID_FS_TYPE}=="isw_raid_member", \
RUN+="/sbin/mdadm -If $name --path $env{ID_PATH}"
SUBSYSTEM=="block", ACTION=="remove", ENV{ID_PATH}!="?*", \
ENV{ID_FS_TYPE}=="isw_raid_member", \
RUN+="/sbin/mdadm -If $name"
LABEL="md_imsm_inc_end"
# Next make sure that this isn't a dm device we should skip for some reason
ENV{DM_UDEV_RULES_VSN}!="?*", GOTO="dm_change_end"
ENV{DM_UDEV_DISABLE_OTHER_RULES_FLAG}=="1", GOTO="dm_change_end"
ENV{DM_SUSPENDED}=="1", GOTO="dm_change_end"
KERNEL=="dm-*", SUBSYSTEM=="block", ENV{ID_FS_TYPE}=="linux_raid_member", \
ACTION=="change", RUN+="/sbin/mdadm -I $env{DEVNAME}"
LABEL="dm_change_end"
# Finally catch any nested md raid arrays. If we brought up an md raid
# array that's part of another md raid array, it won't be ready to be used
# until the change event that occurs when it becomes live
KERNEL=="md*", SUBSYSTEM=="block", ENV{ID_FS_TYPE}=="linux_raid_member", \
ACTION=="change", RUN+="/sbin/mdadm -I $env{DEVNAME}"
LABEL="md_end"


@@ -1,20 +1,13 @@
Name: mdadm
Version: 4.1
Release: rc2.0.5
Release: rc2.0.6
Summary: User tools for managing software RAID arrays
License: GPLv2+
URL: http://www.kernel.org/pub/linux/utils/raid/mdadm/
Source0: http://www.kernel.org/pub/linux/utils/raid/mdadm/mdadm-%{version}-rc2.tar.xz
Source1: mdadm.conf
Source2: raid-check
Source3: mdadm.rules
Source4: mdadm-raid-check-sysconfig
Source5: mdadm-cron
Source6: mdmonitor.service
Patch0: 0000-use-incremental-assembly-rules.patch
Patch1: 0001-mdadm-2.5.2-static.patch
Source1: mdcheck-cron
Source2: mdmonitor.service
Patch6000: 6000-Assemble-free-resources-in-load_devices.patch
Patch6001: 6001-Assemble-set-devices-to-NULL-when-load_devices-can-t.patch
@@ -56,15 +49,12 @@ This contains man files for the using of mdadm.
%install
make DESTDIR=%{buildroot} MANDIR=%{_mandir} BINDIR=%{_sbindir} SYSTEMD_DIR=%{_unitdir} install install-systemd
install -Dp -m 755 %{SOURCE2} %{buildroot}%{_sbindir}/raid-check
install -Dp -m 644 %{SOURCE3} %{buildroot}%{_udevrulesdir}/65-md-incremental.rules
install -Dp -m 644 %{SOURCE4} %{buildroot}%{_sysconfdir}/sysconfig/raid-check
install -Dp -m 644 %{SOURCE5} %{buildroot}%{_sysconfdir}/cron.d/raid-check
install -Dp -m 755 misc/mdcheck %{buildroot}%{_sbindir}/mdcheck
install -Dp -m 644 %{SOURCE1} %{buildroot}%{_sysconfdir}/cron.d/mdcheck
#install mdmonitor.service from local file
install -D -m 644 %{SOURCE6} %{buildroot}%{_unitdir}
install -D -m 644 %{SOURCE2} %{buildroot}%{_unitdir}
install -D -m 644 %{SOURCE1} %{buildroot}%{_tmpfilesdir}/mdadm.conf
install -d -m 710 %{buildroot}/var/run/mdadm/
%post
@@ -84,15 +74,19 @@ install -d -m 710 %{buildroot}/var/run/mdadm/
%{_sbindir}/*
%{_unitdir}/*
/usr/lib/systemd/system-shutdown/mdadm.shutdown
%config(noreplace) %{_sysconfdir}/cron.d/raid-check
%config(noreplace) %{_sysconfdir}/sysconfig/raid-check
%config(noreplace) %{_sysconfdir}/cron.d/mdcheck
%dir %{_localstatedir}/run/mdadm/
%config(noreplace) %{_tmpfilesdir}/mdadm.conf
%files help
%{_mandir}/man*/*
%changelog
* Sat Jan 11 2020 openEuler Buildteam <buildteam@openeuler.org> - 4.1-rc2.0.6
- Type:enhancement
- ID:NA
- SUG:NA
- DESC:Repackage
* Tue Dec 31 2019 openEuler Buildteam <buildteam@openeuler.org> - 4.1-rc2.0.5
- Type:enhancement
- ID:NA


@@ -1,5 +0,0 @@
# Save /proc/mdstat in case of crash in mdadm/mdmon
EVENT=post-create component=mdadm
cat /proc/mdstat >> mdstat_data
echo "Saved output of /proc/mdstat"

mdcheck-cron Normal file

@@ -0,0 +1,2 @@
# Run the mdcheck script once every Saturday for 5 hours
0 0 * * 6 root /usr/sbin/mdcheck --duration "5 hours"
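The mdcheck script itself (installed from misc/mdcheck in %install above) can also be run manually; a sketch using the upstream-documented flags:

    /usr/sbin/mdcheck --duration "5 hours"              # time-limited check of all arrays
    /usr/sbin/mdcheck --continue --duration "5 hours"   # resume a check cut short by the limit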


@@ -1,118 +0,0 @@
#!/bin/bash
#
# mdmonitor This starts, stops, and reloads the mdadm-based
# software RAID monitoring and management facility
#
# chkconfig: 2345 15 85
# description: software RAID monitoring and management
# config: /etc/mdadm.conf
#
# Copyright 2002 Red Hat, Inc.
#
### BEGIN INIT INFO
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start and stop the MD software RAID monitor
# Description: The mdmonitor service checks the status of all software
# RAID arrays on the system. In the event that any of the arrays
# transition into a degraded state, it notifies the system
# administrator. Other options are available, see the mdadm.conf
# and mdadm man pages for possible ways to configure this service.
### END INIT INFO
PIDPATH=/var/run/mdadm
PIDFILE=/var/run/mdadm/mdadm.pid
PATH=/sbin:/usr/sbin:$PATH
RETVAL=0
OPTIONS="--monitor --scan -f --pid-file=$PIDFILE"
prog=mdmonitor
# Source function library.
. /etc/rc.d/init.d/functions
usage ()
{
echo "Usage: service $prog {start|stop|status|restart|try-restart|force-reload}"
RETVAL=1
}
start ()
{
# (Re)start mdmon to take over monitoring of mdmon started from the initrd
for i in /dev/md/*.pid; do
if [ -r $i ]; then
origprog="$prog"; prog="mdmon"
action $"Starting $prog: " /sbin/mdmon --takeover --all
prog="$origprog"
break
fi
done
# Make sure configuration file exists and has information we can use
# MAILADDR or PROGRAM or both must be set in order to run mdadm --monitor
[ -f /etc/mdadm.conf ] || return 6
grep '^\(MAILADDR\|PROGRAM\) .' /etc/mdadm.conf >/dev/null 2>&1 || return 6
# Create our directory if it isn't there yet
if [ ! -d $PIDPATH ]; then
mkdir -m 0700 $PIDPATH >&/dev/null
RC=$?
[ -x /sbin/restorecon ] && /sbin/restorecon $PIDPATH
if [ $RC -ne 0 ]; then
echo -n "Failed to create /var/run/mdadm"
failure
echo
return 1
fi
fi
if [ -f "$PIDFILE" ]; then
checkpid `cat $PIDFILE` && return 0
fi
echo -n $"Starting $prog: "
cd /
daemon --user=root mdadm ${OPTIONS}
ret=$?
[ $ret -eq "0" ] && touch /var/lock/subsys/$prog
echo
return $ret
}
stop ()
{
[ -f /var/lock/subsys/$prog ] || return 0
echo -n "Killing $prog: "
killproc mdadm
echo
rm -f $PIDFILE
rm -f /var/lock/subsys/$prog
}
restart ()
{
stop
start
}
condrestart ()
{
[ -e /var/lock/subsys/$prog ] && restart || return 0
}
case "$1" in
start|stop|restart|condrestart|try-restart|force-reload)
[ `id -u` != "0" ] && exit 4 ;;
esac
case "$1" in
start) start; RETVAL=$? ;;
stop) stop; RETVAL=$? ;;
status) status -p $PIDFILE $prog ; RETVAL=$? ;;
restart) restart; RETVAL=$? ;;
reload) RETVAL=3 ;;
condrestart|try-restart|force-reload) condrestart; RETVAL=$? ;;
*) usage ; RETVAL=2 ;;
esac
exit $RETVAL
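Note that this (now removed) init script refuses to start the monitor unless /etc/mdadm.conf names a notification target; a minimal sketch of that part of the config (address and handler path are placeholders):

    MAILADDR root@localhost
    PROGRAM /usr/local/sbin/md-event-handler   # hypothetical script run for each event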


@@ -1,12 +1,20 @@
# This file is part of mdadm.
#
# mdadm is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
[Unit]
Description=Software RAID monitoring and management
ConditionPathExists=/etc/mdadm.conf
Description=MD array monitor
DefaultDependencies=no
[Service]
Type=forking
PIDFile=/var/run/mdadm/mdadm.pid
EnvironmentFile=-/etc/sysconfig/mdmonitor
ExecStart=/sbin/mdadm --monitor --scan -f --pid-file=/var/run/mdadm/mdadm.pid
Environment= MDADM_MONITOR_ARGS=--scan
EnvironmentFile=-/etc/sysconfig/mdadm
ExecStart=/sbin/mdadm --monitor $MDADM_MONITOR_ARGS -f --pid-file=/var/run/mdadm/mdadm.pid
[Install]
WantedBy=multi-user.target
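The rewritten unit takes its monitor arguments from an optional environment file; a sketch of /etc/sysconfig/mdadm overriding the default (the --delay value is only an example):

    # /etc/sysconfig/mdadm -- hypothetical override
    MDADM_MONITOR_ARGS="--scan --delay 300"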


@@ -1,135 +0,0 @@
#!/bin/bash
#
# This script reads its configuration from /etc/sysconfig/raid-check
# Please use that file to enable/disable this script or to set the
# type of check you wish performed.
# We might be on a kernel with no raid support at all, exit if so
[ -f /proc/mdstat ] || exit 0
# and exit if we haven't been set up properly
[ -f /etc/sysconfig/raid-check ] || exit 0
. /etc/sysconfig/raid-check
# Wait until no more than arg1 arrays in arg2 list are busy
waitbusy() {
local threshold=$(($1 + 1))
local dev_list="$2"
while true
do
local busy=0
local dev=""
for dev in $dev_list; do
local sync_action=`cat /sys/block/$dev/md/sync_action`
if [ "$sync_action" != "idle" ]; then
let busy++
fi
done
[ $busy -lt $threshold ] && break
sleep 60
done
}
[ "$ENABLED" != "yes" ] && exit 0
case "$CHECK" in
check) ;;
repair) ;;
*) exit 0;;
esac
ionice=""
renice=""
case $NICE in
high)
renice="-n -5"
;;
low)
renice="-n 5"
ionice="-c2 -n7"
;;
idle)
renice="-n 15"
ionice="-c3"
;;
*)
;;
esac
active_list=`grep "^md.*: active" /proc/mdstat | cut -f 1 -d ' '`
[ -z "$active_list" ] && exit 0
declare -A check
dev_list=""
check_list=""
for dev in $active_list; do
echo $SKIP_DEVS | grep -w $dev >&/dev/null && continue
if [ -f /sys/block/$dev/md/sync_action ]; then
# Only perform the checks on idle, healthy arrays, but delay
# actually writing the check field until the next loop so we
# don't switch currently idle arrays to active, which happens
# when two or more arrays are on the same physical disk
array_state=`cat /sys/block/$dev/md/array_state`
if [ "$array_state" != "clean" -a "$array_state" != "active" ]; then
continue
fi
sync_action=`cat /sys/block/$dev/md/sync_action`
if [ "$sync_action" != idle ]; then
continue
fi
ck=""
echo $REPAIR_DEVS | grep -w $dev >&/dev/null && ck="repair"
echo $CHECK_DEVS | grep -w $dev >&/dev/null && ck="check"
[ -z "$ck" ] && ck=$CHECK
dev_list="$dev_list $dev"
check[$dev]=$ck
[ "$ck" = "check" ] && check_list="$check_list $dev"
fi
done
[ -z "$dev_list" ] && exit 0
for dev in $dev_list; do
#Only run $MAXCONCURRENT checks at a time
if [ -n "$MAXCONCURRENT" ]; then
waitbusy $((MAXCONCURRENT - 1)) "$dev_list"
fi
echo "${check[$dev]}" > /sys/block/$dev/md/sync_action
resync_pid=""
wait=10
while [ $wait -gt 0 -a -z "$resync_pid" ]; do
sleep 6
let wait--
resync_pid=$(ps -ef | awk -v mddev=$dev 'BEGIN { pattern = "^\\[" mddev "_resync]$" } $8 ~ pattern { print $2 }')
done
[ -n "$resync_pid" -a -n "$renice" ] &&
renice $renice -p $resync_pid >&/dev/null
[ -n "$resync_pid" -a -n "$ionice" ] &&
ionice $ionice -p $resync_pid >&/dev/null
done
[ -z "$check_list" ] && exit 0
waitbusy 0 "$check_list"
for dev in $check_list; do
mismatch_cnt=`cat /sys/block/$dev/md/mismatch_cnt`
# Due to the fact that raid1/10 writes in the kernel are unbuffered,
# a raid1 array can have non-0 mismatch counts even when the
# array is healthy. These non-0 counts will only exist in
# transient data areas where they don't pose a problem. However,
# since we can't tell the difference between a non-0 count that
# is just in transient data or a non-0 count that signifies a
# real problem, simply don't check the mismatch_cnt on raid1
# devices as it's providing far too many false positives. But by
# leaving the raid1 device in the check list and performing the
# check, we still catch and correct any bad sectors there might
# be in the device.
raid_lvl=`cat /sys/block/$dev/md/level`
if [ "$raid_lvl" = "raid1" -o "$raid_lvl" = "raid10" ]; then
continue
fi
if [ "$mismatch_cnt" -ne 0 ]; then
echo "WARNING: mismatch_cnt is not 0 on /dev/$dev"
fi
done
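Under the hood raid-check simply drives the md sysfs interface; the same thing can be done manually for a single array (md0 is an example):

    echo check > /sys/block/md0/md/sync_action     # start a read-only scrub
    cat /sys/block/md0/md/sync_action              # "check" while running, "idle" when done
    cat /sys/block/md0/md/mismatch_cnt             # non-zero may indicate inconsistent stripes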