Total coverage: 319899 (18%) of 1842124
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 *  Copyright (C) 2005 Mike Isely <isely@pobox.com>
 */

#include "pvrusb2-std.h"
#include "pvrusb2-debug.h"
#include <asm/string.h>
#include <linux/slab.h>

struct std_name {
	const char *name;
	v4l2_std_id id;
};


#define CSTD_PAL \
	(V4L2_STD_PAL_B| \
	 V4L2_STD_PAL_B1| \
	 V4L2_STD_PAL_G| \
	 V4L2_STD_PAL_H| \
	 V4L2_STD_PAL_I| \
	 V4L2_STD_PAL_D| \
	 V4L2_STD_PAL_D1| \
	 V4L2_STD_PAL_K| \
	 V4L2_STD_PAL_M| \
	 V4L2_STD_PAL_N| \
	 V4L2_STD_PAL_Nc| \
	 V4L2_STD_PAL_60)

#define CSTD_NTSC \
	(V4L2_STD_NTSC_M| \
	 V4L2_STD_NTSC_M_JP| \
	 V4L2_STD_NTSC_M_KR| \
	 V4L2_STD_NTSC_443)

#define CSTD_ATSC \
	(V4L2_STD_ATSC_8_VSB| \
	 V4L2_STD_ATSC_16_VSB)

#define CSTD_SECAM \
	(V4L2_STD_SECAM_B| \
	 V4L2_STD_SECAM_D| \
	 V4L2_STD_SECAM_G| \
	 V4L2_STD_SECAM_H| \
	 V4L2_STD_SECAM_K| \
	 V4L2_STD_SECAM_K1| \
	 V4L2_STD_SECAM_L| \
	 V4L2_STD_SECAM_LC)

#define TSTD_B   (V4L2_STD_PAL_B|V4L2_STD_SECAM_B)
#define TSTD_B1  (V4L2_STD_PAL_B1)
#define TSTD_D   (V4L2_STD_PAL_D|V4L2_STD_SECAM_D)
#define TSTD_D1  (V4L2_STD_PAL_D1)
#define TSTD_G   (V4L2_STD_PAL_G|V4L2_STD_SECAM_G)
#define TSTD_H   (V4L2_STD_PAL_H|V4L2_STD_SECAM_H)
#define TSTD_I   (V4L2_STD_PAL_I)
#define TSTD_K   (V4L2_STD_PAL_K|V4L2_STD_SECAM_K)
#define TSTD_K1  (V4L2_STD_SECAM_K1)
#define TSTD_L   (V4L2_STD_SECAM_L)
#define TSTD_M   (V4L2_STD_PAL_M|V4L2_STD_NTSC_M)
#define TSTD_N   (V4L2_STD_PAL_N)
#define TSTD_Nc  (V4L2_STD_PAL_Nc)
#define TSTD_60  (V4L2_STD_PAL_60)

#define CSTD_ALL (CSTD_PAL|CSTD_NTSC|CSTD_ATSC|CSTD_SECAM)

/* Mapping of standard bits to color system */
static const struct std_name std_groups[] = {
	{"PAL",CSTD_PAL},
	{"NTSC",CSTD_NTSC},
	{"SECAM",CSTD_SECAM},
	{"ATSC",CSTD_ATSC},
};

/* Mapping of standard bits to modulation system */
static const struct std_name std_items[] = {
	{"B",TSTD_B},
	{"B1",TSTD_B1},
	{"D",TSTD_D},
	{"D1",TSTD_D1},
	{"G",TSTD_G},
	{"H",TSTD_H},
	{"I",TSTD_I},
	{"K",TSTD_K},
	{"K1",TSTD_K1},
	{"L",TSTD_L},
	{"LC",V4L2_STD_SECAM_LC},
	{"M",TSTD_M},
	{"Mj",V4L2_STD_NTSC_M_JP},
	{"443",V4L2_STD_NTSC_443},
	{"Mk",V4L2_STD_NTSC_M_KR},
	{"N",TSTD_N},
	{"Nc",TSTD_Nc},
	{"60",TSTD_60},
	{"8VSB",V4L2_STD_ATSC_8_VSB},
	{"16VSB",V4L2_STD_ATSC_16_VSB},
};


// Search an array of std_name structures and return a pointer to the
// element with the matching name.
static const struct std_name *find_std_name(const struct std_name *arrPtr,
					    unsigned int arrSize,
					    const char *bufPtr,
					    unsigned int bufSize)
{
	unsigned int idx;
	const struct std_name *p;
	for (idx = 0; idx < arrSize; idx++) {
		p = arrPtr + idx;
		if (strlen(p->name) != bufSize) continue;
		if (!memcmp(bufPtr,p->name,bufSize)) return p;
	}
	return NULL;
}


int pvr2_std_str_to_id(v4l2_std_id *idPtr,const char *bufPtr,
		       unsigned int bufSize)
{
	v4l2_std_id id = 0;
	v4l2_std_id cmsk = 0;
	v4l2_std_id t;
	int mMode = 0;
	unsigned int cnt;
	char ch;
	const struct std_name *sp;

	while (bufSize) {
		if (!mMode) {
			cnt = 0;
			while ((cnt < bufSize) && (bufPtr[cnt] != '-')) cnt++;
			if (cnt >= bufSize) return 0; // No more characters
			sp = find_std_name(std_groups,
					   ARRAY_SIZE(std_groups),
					   bufPtr,cnt);
			if (!sp) return 0; // Illegal color system name
			cnt++;
			bufPtr += cnt;
			bufSize -= cnt;
			mMode = !0;
			cmsk = sp->id;
			continue;
		}
		cnt = 0;
		while (cnt < bufSize) {
			ch = bufPtr[cnt];
			if (ch == ';') {
				mMode = 0;
				break;
			}
			if (ch == '/') break;
			cnt++;
		}
		sp = find_std_name(std_items, ARRAY_SIZE(std_items),
				   bufPtr,cnt);
		if (!sp) return 0; // Illegal modulation system ID
		t = sp->id & cmsk;
		if (!t) return 0; // Specific color + modulation system illegal
		id |= t;
		if (cnt < bufSize) cnt++;
		bufPtr += cnt;
		bufSize -= cnt;
	}

	if (idPtr) *idPtr = id;
	return !0;
}


unsigned int pvr2_std_id_to_str(char *bufPtr, unsigned int bufSize,
				v4l2_std_id id)
{
	unsigned int idx1,idx2;
	const struct std_name *ip,*gp;
	int gfl,cfl;
	unsigned int c1,c2;
	cfl = 0;
	c1 = 0;
	for (idx1 = 0; idx1 < ARRAY_SIZE(std_groups); idx1++) {
		gp = std_groups + idx1;
		gfl = 0;
		for (idx2 = 0; idx2 < ARRAY_SIZE(std_items); idx2++) {
			ip = std_items + idx2;
			if (!(gp->id & ip->id & id)) continue;
			if (!gfl) {
				if (cfl) {
					c2 = scnprintf(bufPtr,bufSize,";");
					c1 += c2;
					bufSize -= c2;
					bufPtr += c2;
				}
				cfl = !0;
				c2 = scnprintf(bufPtr,bufSize,
					       "%s-",gp->name);
				gfl = !0;
			} else {
				c2 = scnprintf(bufPtr,bufSize,"/");
			}
			c1 += c2;
			bufSize -= c2;
			bufPtr += c2;
			c2 = scnprintf(bufPtr,bufSize,
				       ip->name);
			c1 += c2;
			bufSize -= c2;
			bufPtr += c2;
		}
	}
	return c1;
}


// Template data for possible enumerated video standards.  Here we group
// standards which share common frame rates and resolution.
static struct v4l2_standard generic_standards[] = {
	{
		.id = (TSTD_B|TSTD_B1|
		       TSTD_D|TSTD_D1|
		       TSTD_G|
		       TSTD_H|
		       TSTD_I|
		       TSTD_K|TSTD_K1|
		       TSTD_L|
		       V4L2_STD_SECAM_LC |
		       TSTD_N|TSTD_Nc),
		.frameperiod = {
			.numerator = 1,
			.denominator= 25
		},
		.framelines = 625,
		.reserved = {0,0,0,0}
	}, {
		.id = (TSTD_M|
		       V4L2_STD_NTSC_M_JP|
		       V4L2_STD_NTSC_M_KR),
		.frameperiod = {
			.numerator = 1001,
			.denominator= 30000
		},
		.framelines = 525,
		.reserved = {0,0,0,0}
	}, { // This is a total wild guess
		.id = (TSTD_60),
		.frameperiod = {
			.numerator = 1001,
			.denominator= 30000
		},
		.framelines = 525,
		.reserved = {0,0,0,0}
	}, { // This is total wild guess
		.id = V4L2_STD_NTSC_443,
		.frameperiod = {
			.numerator = 1001,
			.denominator= 30000
		},
		.framelines = 525,
		.reserved = {0,0,0,0}
	}
};


static struct v4l2_standard *match_std(v4l2_std_id id)
{
	unsigned int idx;
	for (idx = 0; idx < ARRAY_SIZE(generic_standards); idx++) {
		if (generic_standards[idx].id & id) {
			return generic_standards + idx;
		}
	}
	return NULL;
}

static int pvr2_std_fill(struct v4l2_standard *std,v4l2_std_id id)
{
	struct v4l2_standard *template;
	int idx;
	unsigned int bcnt;

	template = match_std(id);
	if (!template) return 0;

	idx = std->index;
	memcpy(std,template,sizeof(*template));
	std->index = idx;
	std->id = id;
	bcnt = pvr2_std_id_to_str(std->name,sizeof(std->name)-1,id);
	std->name[bcnt] = 0;

	pvr2_trace(PVR2_TRACE_STD,"Set up standard idx=%u name=%s",
		   std->index,std->name);

	return !0;
}

/* These are special cases of combined standards that we should enumerate
   separately if the component pieces are present. */
static v4l2_std_id std_mixes[] = {
	V4L2_STD_PAL_B | V4L2_STD_PAL_G,
	V4L2_STD_PAL_D | V4L2_STD_PAL_K,
	V4L2_STD_SECAM_B | V4L2_STD_SECAM_G,
	V4L2_STD_SECAM_D | V4L2_STD_SECAM_K,
};

struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr,
					   v4l2_std_id id)
{
	unsigned int std_cnt = 0;
	unsigned int idx,bcnt,idx2;
	v4l2_std_id idmsk,cmsk,fmsk;
	struct v4l2_standard *stddefs;

	if (pvrusb2_debug & PVR2_TRACE_STD) {
		char buf[100];
		bcnt = pvr2_std_id_to_str(buf,sizeof(buf),id);
		pvr2_trace(
			PVR2_TRACE_STD,"Mapping standards mask=0x%x (%.*s)",
			(int)id,bcnt,buf);
	}

	*countptr = 0;
	std_cnt = 0;
	fmsk = 0;
	for (idmsk = 1, cmsk = id; cmsk; idmsk <<= 1) {
		if (!(idmsk & cmsk)) continue;
		cmsk &= ~idmsk;
		if (match_std(idmsk)) {
			std_cnt++;
			continue;
		}
		fmsk |= idmsk;
	}

	for (idx2 = 0; idx2 < ARRAY_SIZE(std_mixes); idx2++) {
		if ((id & std_mixes[idx2]) == std_mixes[idx2]) std_cnt++;
	}

	/* Don't complain about ATSC standard values */
	fmsk &= ~CSTD_ATSC;

	if (fmsk) {
		char buf[100];
		bcnt = pvr2_std_id_to_str(buf,sizeof(buf),fmsk);
		pvr2_trace(
			PVR2_TRACE_ERROR_LEGS,
			"***WARNING*** Failed to classify the following standard(s): %.*s",
			bcnt,buf);
	}

	pvr2_trace(PVR2_TRACE_STD,"Setting up %u unique standard(s)",
		   std_cnt);

	if (!std_cnt) return NULL; // paranoia

	stddefs = kcalloc(std_cnt, sizeof(struct v4l2_standard),
			  GFP_KERNEL);
	if (!stddefs)
		return NULL;

	for (idx = 0; idx < std_cnt; idx++)
		stddefs[idx].index = idx;

	idx = 0;

	/* Enumerate potential special cases */
	for (idx2 = 0; (idx2 < ARRAY_SIZE(std_mixes)) && (idx < std_cnt);
	     idx2++) {
		if (!(id & std_mixes[idx2])) continue;
		if (pvr2_std_fill(stddefs+idx,std_mixes[idx2])) idx++;
	}
	/* Now enumerate individual pieces */
	for (idmsk = 1, cmsk = id; cmsk && (idx < std_cnt); idmsk <<= 1) {
		if (!(idmsk & cmsk)) continue;
		cmsk &= ~idmsk;
		if (!pvr2_std_fill(stddefs+idx,idmsk)) continue;
		idx++;
	}

	*countptr = std_cnt;
	return stddefs;
}

v4l2_std_id pvr2_std_get_usable(void)
{
	return CSTD_ALL;
}
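/*
 * Illustrative usage sketch (not part of the original file): how a caller
 * might round-trip a standard mask through the two string helpers defined
 * above.  The example string, the buffer size and the wrapper function name
 * (pvr2_std_example) are made up for illustration only; strlen() is already
 * used elsewhere in this file.
 */
static void __maybe_unused pvr2_std_example(void)
{
	const char *req = "PAL-B/G;NTSC-M";
	v4l2_std_id id = 0;
	char buf[40];
	unsigned int len;

	/* Parse "<color system>-<mod>/<mod>;..." into a v4l2_std_id mask. */
	if (!pvr2_std_str_to_id(&id, req, strlen(req)))
		return;		/* malformed string */

	/* id should now be V4L2_STD_PAL_B|V4L2_STD_PAL_G|V4L2_STD_NTSC_M. */
	len = pvr2_std_id_to_str(buf, sizeof(buf) - 1, id);
	buf[len] = 0;	/* same termination pattern as pvr2_std_fill() */
}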
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/module.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/icmp.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_tcpudp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>

MODULE_DESCRIPTION("Xtables: TCP, UDP and UDP-Lite match");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xt_tcp");
MODULE_ALIAS("xt_udp");
MODULE_ALIAS("ipt_udp");
MODULE_ALIAS("ipt_tcp");
MODULE_ALIAS("ip6t_udp");
MODULE_ALIAS("ip6t_tcp");
MODULE_ALIAS("ipt_icmp");
MODULE_ALIAS("ip6t_icmp6");

/* Returns 1 if the port is matched by the range, 0 otherwise */
static inline bool
port_match(u_int16_t min, u_int16_t max, u_int16_t port, bool invert)
{
	return (port >= min && port <= max) ^ invert;
}

static bool
tcp_find_option(u_int8_t option,
		const struct sk_buff *skb,
		unsigned int protoff,
		unsigned int optlen,
		bool invert,
		bool *hotdrop)
{
	/* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */
	const u_int8_t *op;
	u_int8_t _opt[60 - sizeof(struct tcphdr)];
	unsigned int i;

	pr_debug("finding option\n");

	if (!optlen)
		return invert;

	/* If we don't have the whole header, drop packet. */
	op = skb_header_pointer(skb, protoff + sizeof(struct tcphdr),
				optlen, _opt);
	if (op == NULL) {
		*hotdrop = true;
		return false;
	}

	for (i = 0; i < optlen; ) {
		if (op[i] == option) return !invert;
		if (op[i] < 2)
			i++;
		else
			i += op[i+1]?:1;
	}

	return invert;
}

static bool tcp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct tcphdr *th;
	struct tcphdr _tcph;
	const struct xt_tcp *tcpinfo = par->matchinfo;

	if (par->fragoff != 0) {
		/* To quote Alan:

		   Don't allow a fragment of TCP 8 bytes in. Nobody normal
		   causes this. Its a cracker trying to break in by doing a
		   flag overwrite to pass the direction checks.
		*/
		if (par->fragoff == 1) {
			pr_debug("Dropping evil TCP offset=1 frag.\n");
			par->hotdrop = true;
		}
		/* Must not be a fragment. */
		return false;
	}

	th = skb_header_pointer(skb, par->thoff, sizeof(_tcph), &_tcph);
	if (th == NULL) {
		/* We've been asked to examine this packet, and we
		   can't.  Hence, no choice but to drop.
		*/
		pr_debug("Dropping evil TCP offset=0 tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	if (!port_match(tcpinfo->spts[0], tcpinfo->spts[1],
			ntohs(th->source),
			!!(tcpinfo->invflags & XT_TCP_INV_SRCPT)))
		return false;
	if (!port_match(tcpinfo->dpts[0], tcpinfo->dpts[1],
			ntohs(th->dest),
			!!(tcpinfo->invflags & XT_TCP_INV_DSTPT)))
		return false;
	if (!NF_INVF(tcpinfo, XT_TCP_INV_FLAGS,
		     (((unsigned char *)th)[13] & tcpinfo->flg_mask) ==
		     tcpinfo->flg_cmp))
		return false;
	if (tcpinfo->option) {
		if (th->doff * 4 < sizeof(_tcph)) {
			par->hotdrop = true;
			return false;
		}
		if (!tcp_find_option(tcpinfo->option, skb, par->thoff,
				     th->doff*4 - sizeof(_tcph),
				     tcpinfo->invflags & XT_TCP_INV_OPTION,
				     &par->hotdrop))
			return false;
	}
	return true;
}

static int tcp_mt_check(const struct xt_mtchk_param *par)
{
	const struct xt_tcp *tcpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return (tcpinfo->invflags & ~XT_TCP_INV_MASK) ? -EINVAL : 0;
}

static bool udp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct udphdr *uh;
	struct udphdr _udph;
	const struct xt_udp *udpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	uh = skb_header_pointer(skb, par->thoff, sizeof(_udph), &_udph);
	if (uh == NULL) {
		/* We've been asked to examine this packet, and we
		   can't.  Hence, no choice but to drop. */
		pr_debug("Dropping evil UDP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return port_match(udpinfo->spts[0], udpinfo->spts[1],
			  ntohs(uh->source),
			  !!(udpinfo->invflags & XT_UDP_INV_SRCPT))
		&& port_match(udpinfo->dpts[0], udpinfo->dpts[1],
			      ntohs(uh->dest),
			      !!(udpinfo->invflags & XT_UDP_INV_DSTPT));
}

static int udp_mt_check(const struct xt_mtchk_param *par)
{
	const struct xt_udp *udpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return (udpinfo->invflags & ~XT_UDP_INV_MASK) ? -EINVAL : 0;
}

/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static bool type_code_in_range(u8 test_type, u8 min_code, u8 max_code,
			       u8 type, u8 code)
{
	return type == test_type && code >= min_code && code <= max_code;
}

static bool icmp_type_code_match(u8 test_type, u8 min_code, u8 max_code,
				 u8 type, u8 code, bool invert)
{
	return (test_type == 0xFF ||
		type_code_in_range(test_type, min_code, max_code, type, code))
		^ invert;
}

static bool icmp6_type_code_match(u8 test_type, u8 min_code, u8 max_code,
				  u8 type, u8 code, bool invert)
{
	return type_code_in_range(test_type, min_code, max_code, type, code)
		^ invert;
}

static bool
icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (!ic) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags & IPT_ICMP_INV));
}

static bool
icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmp6hdr *ic;
	struct icmp6hdr _icmph;
	const struct ip6t_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (!ic) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		par->hotdrop = true;
		return false;
	}

	return icmp6_type_code_match(icmpinfo->type,
				     icmpinfo->code[0],
				     icmpinfo->code[1],
				     ic->icmp6_type, ic->icmp6_code,
				     !!(icmpinfo->invflags & IP6T_ICMP_INV));
}

static int icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
}

static int icmp6_checkentry(const struct xt_mtchk_param *par)
{
	const struct ip6t_icmp *icmpinfo = par->matchinfo;

	return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
}

static struct xt_match tcpudp_mt_reg[] __read_mostly = {
	{
		.name = "tcp",
		.family = NFPROTO_IPV4,
		.checkentry = tcp_mt_check,
		.match = tcp_mt,
		.matchsize = sizeof(struct xt_tcp),
		.proto = IPPROTO_TCP,
		.me = THIS_MODULE,
	},
	{
		.name = "tcp",
		.family = NFPROTO_IPV6,
		.checkentry = tcp_mt_check,
		.match = tcp_mt,
		.matchsize = sizeof(struct xt_tcp),
		.proto = IPPROTO_TCP,
		.me = THIS_MODULE,
	},
	{
		.name = "udp",
		.family = NFPROTO_IPV4,
		.checkentry = udp_mt_check,
		.match = udp_mt,
		.matchsize = sizeof(struct xt_udp),
		.proto = IPPROTO_UDP,
		.me = THIS_MODULE,
	},
	{
		.name = "udp",
		.family = NFPROTO_IPV6,
		.checkentry = udp_mt_check,
		.match = udp_mt,
		.matchsize = sizeof(struct xt_udp),
		.proto = IPPROTO_UDP,
		.me = THIS_MODULE,
	},
	{
		.name = "udplite",
		.family = NFPROTO_IPV4,
		.checkentry = udp_mt_check,
		.match = udp_mt,
		.matchsize = sizeof(struct xt_udp),
		.proto = IPPROTO_UDPLITE,
		.me = THIS_MODULE,
	},
	{
		.name = "udplite",
		.family = NFPROTO_IPV6,
		.checkentry = udp_mt_check,
		.match = udp_mt,
		.matchsize = sizeof(struct xt_udp),
		.proto = IPPROTO_UDPLITE,
		.me = THIS_MODULE,
	},
	{
		.name = "icmp",
		.match = icmp_match,
		.matchsize = sizeof(struct ipt_icmp),
		.checkentry = icmp_checkentry,
		.proto = IPPROTO_ICMP,
		.family = NFPROTO_IPV4,
		.me = THIS_MODULE,
	},
	{
		.name = "icmp6",
		.match = icmp6_match,
		.matchsize = sizeof(struct ip6t_icmp),
		.checkentry = icmp6_checkentry,
		.proto = IPPROTO_ICMPV6,
		.family = NFPROTO_IPV6,
		.me = THIS_MODULE,
	},
};

static int __init tcpudp_mt_init(void)
{
	return xt_register_matches(tcpudp_mt_reg, ARRAY_SIZE(tcpudp_mt_reg));
}

static void __exit tcpudp_mt_exit(void)
{
	xt_unregister_matches(tcpudp_mt_reg, ARRAY_SIZE(tcpudp_mt_reg));
}

module_init(tcpudp_mt_init);
module_exit(tcpudp_mt_exit);
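/*
 * Illustrative sketch (not part of the original module): the range/invert
 * semantics implemented by port_match() above, which tcp_mt() and udp_mt()
 * apply to the source and destination ports.  The port numbers and the
 * function name below are arbitrary examples.
 */
static void __maybe_unused tcpudp_port_match_example(void)
{
	/* Range 1000-2000, no inversion: 1500 matches, 80 does not. */
	bool hit  = port_match(1000, 2000, 1500, false);	/* true */
	bool miss = port_match(1000, 2000, 80, false);		/* false */
	/* Same range with the invert flag (e.g. "! --dport 1000:2000"):
	 * everything outside the range now matches. */
	bool inv  = port_match(1000, 2000, 80, true);		/* true */

	(void)hit;
	(void)miss;
	(void)inv;
}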
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/hfsplus/attributes.c
 *
 * Vyacheslav Dubeyko <slava@dubeyko.com>
 *
 * Handling of records in attributes tree
 */

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

static struct kmem_cache *hfsplus_attr_tree_cachep;

int __init hfsplus_create_attr_tree_cache(void)
{
	if (hfsplus_attr_tree_cachep)
		return -EEXIST;

	hfsplus_attr_tree_cachep =
		kmem_cache_create("hfsplus_attr_cache",
			sizeof(hfsplus_attr_entry), 0,
			SLAB_HWCACHE_ALIGN, NULL);
	if (!hfsplus_attr_tree_cachep)
		return -ENOMEM;

	return 0;
}

void hfsplus_destroy_attr_tree_cache(void)
{
	kmem_cache_destroy(hfsplus_attr_tree_cachep);
}

int hfsplus_attr_bin_cmp_key(const hfsplus_btree_key *k1,
			     const hfsplus_btree_key *k2)
{
	__be32 k1_cnid, k2_cnid;

	k1_cnid = k1->attr.cnid;
	k2_cnid = k2->attr.cnid;
	if (k1_cnid != k2_cnid)
		return be32_to_cpu(k1_cnid) < be32_to_cpu(k2_cnid) ? -1 : 1;

	return hfsplus_strcmp(
			(const struct hfsplus_unistr *)&k1->attr.key_name,
			(const struct hfsplus_unistr *)&k2->attr.key_name);
}

int hfsplus_attr_build_key(struct super_block *sb, hfsplus_btree_key *key,
			   u32 cnid, const char *name)
{
	int len;

	memset(key, 0, sizeof(struct hfsplus_attr_key));
	key->attr.cnid = cpu_to_be32(cnid);
	if (name) {
		int res = hfsplus_asc2uni(sb,
				(struct hfsplus_unistr *)&key->attr.key_name,
				HFSPLUS_ATTR_MAX_STRLEN, name, strlen(name));
		if (res)
			return res;
		len = be16_to_cpu(key->attr.key_name.length);
	} else {
		key->attr.key_name.length = 0;
		len = 0;
	}

	/* The length of the key, as stored in key_len field, does not include
	 * the size of the key_len field itself.
	 * So, offsetof(hfsplus_attr_key, key_name) is a trick because
	 * it takes into consideration key_len field (__be16) of
	 * hfsplus_attr_key structure instead of length field (__be16) of
	 * hfsplus_attr_unistr structure.
	 */
	key->key_len =
		cpu_to_be16(offsetof(struct hfsplus_attr_key, key_name) +
				2 * len);

	return 0;
}

hfsplus_attr_entry *hfsplus_alloc_attr_entry(void)
{
	return kmem_cache_alloc(hfsplus_attr_tree_cachep, GFP_KERNEL);
}

void hfsplus_destroy_attr_entry(hfsplus_attr_entry *entry)
{
	if (entry)
		kmem_cache_free(hfsplus_attr_tree_cachep, entry);
}

#define HFSPLUS_INVALID_ATTR_RECORD -1

static int hfsplus_attr_build_record(hfsplus_attr_entry *entry,
				int record_type,
				u32 cnid, const void *value, size_t size)
{
	if (record_type == HFSPLUS_ATTR_FORK_DATA) {
		/*
		 * Mac OS X supports only inline data attributes.
		 * Do nothing
		 */
		memset(entry, 0, sizeof(*entry));
		return sizeof(struct hfsplus_attr_fork_data);
	} else if (record_type == HFSPLUS_ATTR_EXTENTS) {
		/*
		 * Mac OS X supports only inline data attributes.
		 * Do nothing.
		 */
		memset(entry, 0, sizeof(*entry));
		return sizeof(struct hfsplus_attr_extents);
	} else if (record_type == HFSPLUS_ATTR_INLINE_DATA) {
		u16 len;

		memset(entry, 0, sizeof(struct hfsplus_attr_inline_data));
		entry->inline_data.record_type = cpu_to_be32(record_type);
		if (size <= HFSPLUS_MAX_INLINE_DATA_SIZE)
			len = size;
		else
			return HFSPLUS_INVALID_ATTR_RECORD;
		entry->inline_data.length = cpu_to_be16(len);
		memcpy(entry->inline_data.raw_bytes, value, len);
		/*
		 * Align len on two-byte boundary.
		 * It needs to add pad byte if we have odd len.
		 */
		len = round_up(len, 2);
		return offsetof(struct hfsplus_attr_inline_data, raw_bytes) +
					len;
	} else /* invalid input */
		memset(entry, 0, sizeof(*entry));

	return HFSPLUS_INVALID_ATTR_RECORD;
}

int hfsplus_find_attr(struct super_block *sb, u32 cnid,
		      const char *name, struct hfs_find_data *fd)
{
	int err = 0;

	hfs_dbg(ATTR_MOD, "find_attr: %s,%d\n", name ? name : NULL, cnid);

	if (!HFSPLUS_SB(sb)->attr_tree) {
		pr_err("attributes file doesn't exist\n");
		return -EINVAL;
	}

	if (name) {
		err = hfsplus_attr_build_key(sb, fd->search_key, cnid, name);
		if (err)
			goto failed_find_attr;
		err = hfs_brec_find(fd, hfs_find_rec_by_key);
		if (err)
			goto failed_find_attr;
	} else {
		err = hfsplus_attr_build_key(sb, fd->search_key, cnid, NULL);
		if (err)
			goto failed_find_attr;
		err = hfs_brec_find(fd, hfs_find_1st_rec_by_cnid);
		if (err)
			goto failed_find_attr;
	}

failed_find_attr:
	return err;
}

int hfsplus_attr_exists(struct inode *inode, const char *name)
{
	int err = 0;
	struct super_block *sb = inode->i_sb;
	struct hfs_find_data fd;

	if (!HFSPLUS_SB(sb)->attr_tree)
		return 0;

	err = hfs_find_init(HFSPLUS_SB(sb)->attr_tree, &fd);
	if (err)
		return 0;

	err = hfsplus_find_attr(sb, inode->i_ino, name, &fd);
	if (err)
		goto attr_not_found;

	hfs_find_exit(&fd);
	return 1;

attr_not_found:
	hfs_find_exit(&fd);
	return 0;
}

int hfsplus_create_attr(struct inode *inode,
			const char *name,
			const void *value, size_t size)
{
	struct super_block *sb = inode->i_sb;
	struct hfs_find_data fd;
	hfsplus_attr_entry *entry_ptr;
	int entry_size;
	int err;

	hfs_dbg(ATTR_MOD, "create_attr: %s,%ld\n",
		name ? name : NULL, inode->i_ino);

	if (!HFSPLUS_SB(sb)->attr_tree) {
		pr_err("attributes file doesn't exist\n");
		return -EINVAL;
	}

	entry_ptr = hfsplus_alloc_attr_entry();
	if (!entry_ptr)
		return -ENOMEM;

	err = hfs_find_init(HFSPLUS_SB(sb)->attr_tree, &fd);
	if (err)
		goto failed_init_create_attr;

	/* Fail early and avoid ENOSPC during the btree operation */
	err = hfs_bmap_reserve(fd.tree, fd.tree->depth + 1);
	if (err)
		goto failed_create_attr;

	if (name) {
		err = hfsplus_attr_build_key(sb, fd.search_key,
						inode->i_ino, name);
		if (err)
			goto failed_create_attr;
	} else {
		err = -EINVAL;
		goto failed_create_attr;
	}

	/* Mac OS X supports only inline data attributes. */
	entry_size = hfsplus_attr_build_record(entry_ptr,
					HFSPLUS_ATTR_INLINE_DATA,
					inode->i_ino,
					value, size);
	if (entry_size == HFSPLUS_INVALID_ATTR_RECORD) {
		err = -EINVAL;
		goto failed_create_attr;
	}

	err = hfs_brec_find(&fd, hfs_find_rec_by_key);
	if (err != -ENOENT) {
		if (!err)
			err = -EEXIST;
		goto failed_create_attr;
	}

	err = hfs_brec_insert(&fd, entry_ptr, entry_size);
	if (err)
		goto failed_create_attr;

	hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ATTR_DIRTY);

failed_create_attr:
	hfs_find_exit(&fd);

failed_init_create_attr:
	hfsplus_destroy_attr_entry(entry_ptr);
	return err;
}

static int __hfsplus_delete_attr(struct inode *inode, u32 cnid,
				 struct hfs_find_data *fd)
{
	int err = 0;
	__be32 found_cnid, record_type;

	hfs_bnode_read(fd->bnode, &found_cnid,
			fd->keyoffset +
			offsetof(struct hfsplus_attr_key, cnid),
			sizeof(__be32));
	if (cnid != be32_to_cpu(found_cnid))
		return -ENOENT;

	hfs_bnode_read(fd->bnode, &record_type,
			fd->entryoffset, sizeof(record_type));

	switch (be32_to_cpu(record_type)) {
	case HFSPLUS_ATTR_INLINE_DATA:
		/* All is OK. Do nothing. */
		break;
	case HFSPLUS_ATTR_FORK_DATA:
	case HFSPLUS_ATTR_EXTENTS:
		pr_err("only inline data xattr are supported\n");
		return -EOPNOTSUPP;
	default:
		pr_err("invalid extended attribute record\n");
		return -ENOENT;
	}

	/* Avoid btree corruption */
	hfs_bnode_read(fd->bnode, fd->search_key,
			fd->keyoffset, fd->keylength);

	err = hfs_brec_remove(fd);
	if (err)
		return err;

	hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ATTR_DIRTY);
	return err;
}

int hfsplus_delete_attr(struct inode *inode, const char *name)
{
	int err = 0;
	struct super_block *sb = inode->i_sb;
	struct hfs_find_data fd;

	hfs_dbg(ATTR_MOD, "delete_attr: %s,%ld\n",
		name ? name : NULL, inode->i_ino);

	if (!HFSPLUS_SB(sb)->attr_tree) {
		pr_err("attributes file doesn't exist\n");
		return -EINVAL;
	}

	err = hfs_find_init(HFSPLUS_SB(sb)->attr_tree, &fd);
	if (err)
		return err;

	/* Fail early and avoid ENOSPC during the btree operation */
	err = hfs_bmap_reserve(fd.tree, fd.tree->depth);
	if (err)
		goto out;

	if (name) {
		err = hfsplus_attr_build_key(sb, fd.search_key,
						inode->i_ino, name);
		if (err)
			goto out;
	} else {
		pr_err("invalid extended attribute name\n");
		err = -EINVAL;
		goto out;
	}

	err = hfs_brec_find(&fd, hfs_find_rec_by_key);
	if (err)
		goto out;

	err = __hfsplus_delete_attr(inode, inode->i_ino, &fd);
	if (err)
		goto out;

out:
	hfs_find_exit(&fd);
	return err;
}

int hfsplus_delete_all_attrs(struct inode *dir, u32 cnid)
{
	int err = 0;
	struct hfs_find_data fd;

	hfs_dbg(ATTR_MOD, "delete_all_attrs: %d\n", cnid);

	if (!HFSPLUS_SB(dir->i_sb)->attr_tree) {
		pr_err("attributes file doesn't exist\n");
		return -EINVAL;
	}

	err = hfs_find_init(HFSPLUS_SB(dir->i_sb)->attr_tree, &fd);
	if (err)
		return err;

	for (;;) {
		err = hfsplus_find_attr(dir->i_sb, cnid, NULL, &fd);
		if (err) {
			if (err != -ENOENT)
				pr_err("xattr search failed\n");
			goto end_delete_all;
		}

		err = __hfsplus_delete_attr(dir, cnid, &fd);
		if (err)
			goto end_delete_all;
	}

end_delete_all:
	hfs_find_exit(&fd);
	return err;
}
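/*
 * Illustrative sketch (not part of the original file): the call pattern a
 * caller such as the hfsplus xattr layer might use against the helpers
 * above.  The attribute name, value and wrapper function name are
 * placeholders; a real caller would pass an inode that lives on a mounted
 * HFS+ volume with an attributes tree.
 */
static int __maybe_unused hfsplus_attr_usage_example(struct inode *inode)
{
	const char *name = "com.example.attr";	/* hypothetical xattr name */
	const char value[] = "hello";
	int err;

	if (!hfsplus_attr_exists(inode, name)) {
		/* Builds an inline-data record and inserts it into the
		 * attributes btree (see hfsplus_create_attr() above). */
		err = hfsplus_create_attr(inode, name, value, sizeof(value));
		if (err)
			return err;
	}

	/* Looks the record up by (cnid, name) and removes it again. */
	return hfsplus_delete_attr(inode, name);
}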
6614 6615 6616 6617 6618 6619 6620 6621 6622 6623 6624 6625 6626 6627 6628 6629 6630 6631 6632 6633 6634 6635 6636 6637 6638 6639 6640 6641 6642 6643 6644 6645 6646 6647 6648 6649 6650 6651 6652 6653 6654 6655 6656 6657 6658 6659 6660 6661 6662 6663 6664 6665 6666 6667 6668 6669 6670 6671 6672 6673 6674 6675 6676 6677 6678 6679 6680 6681 6682 6683 6684 6685 6686 6687 6688 6689 6690 6691 6692 6693 6694 6695 6696 6697 6698 6699 6700 6701 6702 6703 6704 6705 6706 6707 6708 6709 6710 6711 6712 6713 6714 6715 6716 6717 6718 6719 6720 6721 6722 6723 6724 6725 6726 6727 6728 6729 6730 6731 6732 6733 6734 6735 6736 6737 6738 6739 6740 6741 6742 6743 6744 6745 6746 6747 6748 6749 6750 6751 6752 6753 6754 6755 6756 6757 6758 6759 6760 6761 6762 6763 6764 6765 6766 6767 6768 6769 6770 6771 6772 6773 6774 6775 6776 6777 6778 6779 6780 6781 6782 6783 6784 6785 6786 6787 6788 6789 6790 6791 6792 6793 6794 6795 6796 6797 6798 6799 6800 6801 6802 6803 6804 6805 6806 6807 6808 6809 6810 6811 6812 6813 6814 6815 6816 6817 6818 6819 6820 6821 6822 6823 6824 6825 6826 6827 6828 6829 6830 6831 6832 6833 6834 6835 6836 6837 6838 6839 6840 6841 6842 6843 6844 6845 6846 6847 6848 6849 6850 6851 6852 6853 6854 6855 6856 6857 6858 6859 6860 6861 6862 6863 6864 6865 6866 6867 6868 6869 6870 6871 6872 6873 6874 6875 6876 6877 6878 6879 6880 6881 6882 6883 6884 6885 6886 6887 6888 6889 6890 6891 6892 6893 6894 6895 6896 6897 6898 6899 6900 6901 6902 6903 6904 6905 6906 6907 6908 6909 6910 6911 6912 6913 6914 6915 6916 6917 6918 6919 6920 6921 6922 6923 6924 6925 6926 6927 6928 6929 6930 6931 6932 6933 6934 6935 6936 6937 6938 6939 6940 6941 6942 6943 6944 6945 6946 6947 6948 6949 6950 6951 6952 6953 6954 6955 6956 6957 6958 6959 6960 6961 6962 6963 6964 6965 6966 6967 6968 6969 6970 6971 6972 6973 6974 6975 6976 6977 6978 6979 6980 6981 6982 6983 6984 6985 6986 6987 6988 6989 6990 6991 6992 6993 6994 6995 6996 6997 6998 6999 7000 7001 7002 7003 7004 7005 7006 7007 7008 7009 7010 7011 7012 7013 7014 7015 7016 7017 7018 7019 7020 7021 7022 7023 7024 7025 7026 7027 7028 7029 7030 7031 7032 7033 7034 7035 7036 7037 7038 7039 7040 7041 7042 7043 7044 7045 7046 7047 7048 7049 7050 7051 7052 7053 7054 7055 7056 7057 7058 7059 7060 7061 7062 7063 7064 7065 7066 7067 7068 7069 7070 7071 7072 7073 7074 7075 7076 7077 7078 7079 7080 7081 7082 7083 7084 7085 7086 7087 7088 7089 7090 7091 7092 7093 7094 7095 7096 7097 7098 7099 7100 7101 7102 7103 7104 7105 7106 7107 7108 7109 7110 7111 7112 7113 7114 7115 7116 7117 7118 7119 7120 7121 7122 7123 7124 7125 7126 7127 7128 7129 7130 7131 7132 7133 7134 7135 7136 7137 7138 7139 7140 7141 7142 7143 7144 7145 7146 7147 7148 7149 7150 7151 7152 7153 7154 7155 7156 7157 7158 7159 7160 7161 7162 7163 7164 7165 7166 7167 7168 7169 7170 7171 7172 7173 7174 7175 7176 7177 7178 7179 7180 7181 7182 7183 7184 7185 7186 7187 7188 7189 7190 7191 7192 7193 7194 7195 7196 7197 7198 7199 7200 7201 7202 7203 7204 7205 7206 7207 7208 7209 7210 7211 7212 7213 7214 7215 7216 7217 7218 7219 7220 7221 7222 7223 7224 7225 7226 7227 7228 7229 7230 7231 7232 7233 7234 7235 7236 7237 7238 7239 7240 7241 7242 7243 7244 7245 7246 7247 7248 7249 7250 7251 7252 7253 7254 7255 7256 7257 7258 7259 7260 7261 7262 7263 7264 7265 7266 7267 7268 7269 7270 7271 7272 7273 7274 7275 7276 7277 7278 7279 7280 7281 7282 7283 7284 7285 7286 7287 7288 7289 7290 7291 7292 7293 7294 7295 7296 7297 7298 7299 7300 7301 7302 7303 7304 7305 7306 7307 7308 7309 7310 7311 7312 7313 7314 7315 7316 7317 7318 7319 7320 7321 7322 7323 7324 
7325 7326 7327 7328 7329 7330 7331 7332 7333 7334 7335 7336 7337 7338 7339 7340 7341 7342 7343 7344 7345 7346 7347 7348 7349 7350 7351 7352 7353 7354 7355 7356 7357 7358 7359 7360 7361 7362 7363 7364 7365 7366 7367 7368 7369 7370 7371 7372 7373 7374 7375 7376 7377 7378 7379 7380 7381 7382 7383 7384 7385 7386 7387 7388 7389 7390 7391 7392 7393 7394 7395 7396 7397 7398 7399 7400 7401 7402 7403 7404 7405 7406 7407 7408 7409 7410 7411 7412 7413 7414 7415 7416 7417 7418 7419 7420 7421 7422 7423 7424 7425 7426 7427 7428 7429 7430 7431 7432 7433 7434 7435 7436 7437 7438 7439 7440 7441 7442 7443 7444 7445 7446 7447 7448 7449 7450 7451 7452 7453 7454 7455 7456 7457 7458 7459 7460 7461 7462 7463 7464 7465 7466 7467 7468 7469 7470 7471 7472 7473 7474 7475 7476 7477 7478 7479 7480 7481 7482 7483 7484 7485 7486 7487 7488 7489 7490 7491 7492 7493 7494 7495 7496 7497 7498 7499 7500 7501 7502 7503 7504 7505 7506 7507 7508 7509 7510 7511 7512 7513 7514 7515 7516 7517 7518 7519 7520 7521 7522 7523 7524 7525 7526 7527 7528 7529 7530 7531 7532 7533 7534 7535 7536 7537 7538 7539 7540 7541 7542 7543 7544 7545 7546 7547 7548 7549 7550 7551 7552 7553 7554 7555 7556 7557 7558 7559 7560 7561 7562 7563 7564 7565 7566 7567 7568 7569 7570 7571 7572 7573 7574 7575 7576 7577 7578 7579 7580 7581 7582 7583 7584 7585 7586 7587 7588 7589 7590 7591 7592 7593 7594 7595 7596 7597 7598 7599 7600 7601 7602 7603 7604 7605 7606 7607 7608 7609 7610 7611 7612 7613 7614 7615 7616 7617 7618 7619 7620 7621 7622 7623 7624 7625 7626 7627 7628 7629 7630 7631 7632 7633 7634 7635 7636 7637 7638 7639 7640 7641 7642 7643 7644 7645 7646 7647 7648 7649 7650 7651 7652 7653 7654 7655 7656 7657 7658 7659 7660 7661 7662 7663 7664 7665 7666 7667 7668 7669 7670 7671 7672 7673 7674 7675 7676 7677 7678 7679 7680 7681 7682 7683 7684 7685 7686 7687 7688 7689 7690 7691 7692 7693 7694 7695 7696 7697 7698 7699 7700 7701 7702 7703 7704 7705 7706 7707 7708 7709 7710 7711 7712 7713 7714 7715 7716 7717 7718 7719 7720 7721 7722 7723 7724 7725 7726 7727 7728 7729 7730 7731 7732 7733 7734 7735 7736 7737 7738 7739 7740 7741 7742 7743 7744 7745 7746 7747 7748 7749 7750 7751 7752 7753 7754 7755 7756 7757 7758 7759 7760 7761 7762 7763 7764 7765 7766 7767 7768 7769 7770 7771 7772 7773 7774 7775 7776 7777 7778 7779 7780 7781 7782 7783 7784 7785 7786 7787 7788 7789 7790 7791 7792 7793 7794 7795 7796 7797 7798 7799 7800 7801 7802 7803 7804 7805 7806 7807 7808 7809 7810 7811 7812 7813 7814 7815 7816 7817 7818 7819 7820 7821 7822 7823 7824 7825 7826 7827 7828 7829 7830 7831 7832 7833 7834 7835 7836 7837 7838 7839 7840 7841 7842 7843 7844 7845 7846 7847 7848 7849 7850 7851 7852 7853 7854 7855 7856 7857 7858 7859 7860 7861 7862 7863 7864 7865 7866 7867 7868 7869 7870 7871 7872 7873 7874 7875 7876 7877 7878 7879 7880 7881 7882 7883 7884 7885 7886 7887 7888 7889 7890 7891 7892 7893 7894 7895 7896 7897 7898 7899 7900 7901 7902 7903 7904 7905 7906 7907 7908 7909 7910 7911 7912 7913 7914 7915 7916 7917 7918 7919 7920 7921 7922 7923 7924 7925 7926 7927 7928 7929 7930 7931 7932 7933 7934 7935 7936 7937 7938 7939 7940 7941 7942 7943 7944 7945 7946 7947 7948 7949 7950 7951 7952 7953 7954 7955 7956 7957 7958 7959 7960 7961 7962 7963 7964 7965 7966 7967 7968 7969 7970 7971 7972 7973 7974 7975 7976 7977 7978 7979 7980 7981 7982 7983 7984 7985 7986 7987 7988 7989 7990 7991 7992 7993 7994 7995 7996 7997 7998 7999 8000 8001 8002 8003 8004 8005 8006 8007 8008 8009 8010 8011 8012 8013 8014 8015 8016 8017 8018 8019 8020 8021 8022 8023 8024 8025 8026 8027 8028 8029 8030 8031 8032 8033 8034 8035 
8036 8037 8038 8039 8040 8041 8042 8043 8044 8045 8046 8047 8048 8049 8050 8051 8052 8053 8054 8055 8056 8057 8058 8059 8060 8061 8062 8063 8064 8065 8066 8067 8068 8069 8070 8071 8072 8073 8074 8075 8076 8077 8078 8079 8080 8081 8082 8083 8084 8085 8086 8087 8088 8089 8090 8091 8092 8093 8094 8095 8096 8097 8098 8099 8100 8101 8102 8103 8104 8105 8106 8107 8108 8109 8110 8111 8112 8113 8114 8115 8116 8117 8118 8119 8120 8121 8122 8123 8124 8125 8126 8127 8128 8129 8130 8131 8132 8133 8134 8135 8136 8137 8138 8139 8140 8141 8142 8143 8144 8145 8146 8147 8148 8149 8150 8151 8152 8153 8154 8155 8156 8157 8158 8159 8160 8161 8162 8163 8164 8165 8166 8167 8168 8169 8170 8171 8172 8173 8174 8175 8176 8177 8178 8179 8180 8181 8182 8183 8184 8185 8186 8187 8188 8189 8190 8191 8192 8193 8194 8195 8196 8197 8198 8199 8200 8201 8202 8203 8204 8205 8206 8207 8208 8209 8210 8211 8212 8213 8214 8215 8216 8217 8218 8219 8220 8221 8222 8223 8224 8225 8226 8227 8228 8229 8230 8231 8232 8233 8234 8235 8236 8237 8238 8239 8240 8241 8242 8243 8244 8245 8246 8247 8248 8249 8250 8251 8252 8253 8254 8255 8256 8257 8258 8259 8260 8261 8262 8263 8264 8265 8266 8267 8268 8269 8270 8271 8272 8273 8274 8275 8276 8277 8278 8279 8280 8281 8282 8283 8284 8285 8286 8287 8288 8289 8290 8291 8292 8293 8294 8295 8296 8297 8298 8299 8300 8301 8302 8303 8304 8305 8306 8307 8308 8309 8310 8311 8312 8313 8314 8315 8316 8317 8318 8319 8320 8321 8322 8323 8324 8325 8326 8327 8328 8329 8330 8331 8332 8333 8334 8335 8336 8337 8338 8339 8340 8341 8342 8343 8344 8345 8346 8347 8348 8349 8350 8351 8352 8353 8354 8355 8356 8357 8358 8359 8360 8361 8362 8363 8364 8365 8366 8367 8368 8369 8370 8371 8372 8373 8374 8375 8376 8377 8378 8379 8380 8381 8382 8383 8384 8385 8386 8387 8388 8389 8390 8391 8392 8393 8394 8395 8396 8397 8398 8399 8400 8401 8402 8403 8404 8405 8406 8407 8408 8409 8410 8411 8412 8413 8414 8415 8416 8417 8418 8419 8420 8421 8422 8423 8424 8425 8426 8427 8428 8429 8430 8431 8432 8433 8434 8435 8436 8437 8438 8439 8440 8441 8442 8443 8444 8445 8446 8447 8448 8449 8450 8451 8452 8453 8454 8455 8456 8457 8458 8459 8460 8461 8462 8463 8464 8465 8466 8467 8468 8469 8470 8471 8472 8473 8474 8475 8476 8477 8478 8479 8480 8481 8482 8483 8484 8485 8486 8487 8488 8489 8490 8491 8492 8493 8494 8495 8496 8497 8498 8499 8500 8501 8502 8503 8504 8505 8506 8507 8508 8509 8510 8511 8512 8513 8514 8515 8516 8517 8518 8519 8520 8521 8522 8523 8524 8525 8526 8527 8528 8529 8530 8531 8532 8533 8534 8535 8536 8537 8538 8539 8540 8541 8542 8543 8544 8545 8546 8547 8548 8549 8550 8551 8552 8553 8554 8555 8556 8557 8558 8559 8560 8561 8562 8563 8564 8565 8566 8567 8568 8569 8570 8571 8572 8573 8574 8575 8576 8577 8578 8579 8580 8581 8582 8583 8584 8585 8586 8587 8588 8589 8590 8591 8592 8593 8594 8595 8596 8597 8598 8599 8600 8601 8602 8603 8604 8605 8606 8607 8608 8609 8610 8611 8612 8613 8614 8615 8616 8617 8618 8619 8620 8621 8622 8623 8624 8625 8626 8627 8628 8629 8630 8631 8632 8633 8634 8635 8636 8637 8638 8639 8640 8641 8642 8643 8644 8645 8646 8647 8648 8649 8650 8651 8652 8653 8654 8655 8656 8657 8658 8659 8660 8661 8662 8663 8664 8665 8666 8667 8668 8669 8670 8671 8672 8673 8674 8675 8676 8677 8678 8679 8680 8681 8682 8683 8684 8685 8686 8687 8688 8689 8690 8691 8692 8693 8694 8695 8696 8697 8698 8699 8700 8701 8702 8703 8704 8705 8706 8707 8708 8709 8710 8711 8712 8713 8714 8715 8716 8717 8718 8719 8720 8721 8722 8723 8724 8725 8726 8727 8728 8729 8730 8731 8732 8733 8734 8735 8736 8737 8738 8739 8740 8741 8742 8743 8744 8745 8746 
8747 8748 8749 8750 8751 8752 8753 8754 8755 8756 8757 8758 8759 8760 8761 8762 8763 8764 8765 8766 8767 8768 8769 8770 8771 8772 8773 8774 8775 8776 8777 8778 8779 8780 8781 8782 8783 8784 8785 8786 8787 8788 8789 8790 8791 8792 8793 8794 8795 8796 8797 8798 8799 8800 8801 8802 8803 8804 8805 8806 8807 8808 8809 8810 8811 8812 8813 8814 8815 8816 8817 8818 8819 8820 8821 8822 8823 8824 8825 8826 8827 8828 8829 8830 8831 8832 8833 8834 8835 8836 8837 8838 8839 8840 8841 8842 8843 8844 8845 8846 8847 8848 8849 8850 8851 8852 8853 8854 8855 8856 8857 8858 8859 8860 8861 8862 8863 8864 8865 8866 8867 8868 8869 8870 8871 8872 8873 8874 8875 8876 8877 8878 8879 8880 8881 8882 8883 8884 8885 8886 8887 8888 8889 8890 8891 8892 8893 8894 8895 8896 8897 8898 8899 8900 8901 8902 8903 8904 8905 8906 8907 8908 8909 8910 8911 8912 8913 8914 8915 8916 8917 8918 8919 8920 8921 8922 8923 8924 8925 8926 8927 8928 8929 8930 8931 8932 8933 8934 8935 8936 8937 8938 8939 8940 8941 8942 8943 8944 8945 8946 8947 8948 8949 8950 8951 8952 8953 8954 8955 8956 8957 8958 8959 8960 8961 8962 8963 8964 8965 8966 8967 8968 8969 8970 8971 8972 8973 8974 8975 8976 8977 8978 8979 8980 8981 8982 8983 8984 8985 8986 8987 8988 8989 8990 8991 8992 8993 8994 8995 8996 8997 8998 8999 9000 9001 9002 9003 9004 9005 9006 9007 9008 9009 9010 9011 9012 9013 9014 9015 9016 9017 9018 9019 9020 9021 9022 9023 9024 9025 9026 9027 9028 9029 9030 9031 9032 9033 9034 9035 9036 9037 9038 9039 9040 9041 9042 9043 9044 9045 9046 9047 9048 9049 9050 9051 9052 9053 9054 9055 9056 9057 9058 9059 9060 9061 9062 9063 9064 9065 9066 9067 9068 9069 9070 9071 9072 9073 9074 9075 9076 9077 9078 9079 9080 9081 9082 9083 9084 9085 9086 9087 9088 9089 9090 9091 9092 9093 9094 9095 9096 9097 9098 9099 9100 9101 9102 9103 9104 9105 9106 9107 9108 9109 9110 9111 9112 9113 9114 9115 9116 9117 9118 9119 9120 9121 9122 9123 9124 9125 9126 9127 9128 9129 9130 9131 9132 9133 9134 9135 9136 9137 9138 9139 9140 9141 9142 9143 9144 9145 9146 9147 9148 9149 9150 9151 9152 9153 9154 9155 9156 9157 9158 9159 9160 9161 9162 9163 9164 9165 9166 9167 9168 9169 9170 9171 9172 9173 9174 9175 9176 9177 9178 9179 9180 9181 9182 9183 9184 9185 9186 9187 9188 9189 9190 9191 9192 9193 9194 9195 9196 9197 9198 9199 9200 9201 9202 9203 9204 9205 9206 9207 9208 9209 9210 9211 9212 9213 9214 9215 9216 9217 9218 9219 9220 9221 9222 9223 9224 9225 9226 9227 9228 9229 9230 9231 9232 9233 9234 9235 9236 9237 9238 9239 9240 9241 9242 9243 9244 9245 9246 9247 9248 9249 9250 9251 9252 9253 9254 9255 9256 9257 9258 9259 9260 9261 9262 9263 9264 9265 9266 9267 9268 9269 9270 9271 9272 9273 9274 9275 9276 9277 9278 9279 9280 9281 9282 9283 9284 9285 9286 9287 9288 9289 9290 9291 9292 9293 9294 9295 9296 9297 9298 9299 9300 9301 9302 9303 9304 9305 9306 9307 9308 9309 9310 9311 9312 9313 9314 9315 9316 9317 9318 9319 9320 9321 9322 9323 9324 9325 9326 9327 9328 9329 9330 9331 9332 9333 9334 9335 9336 9337 9338 9339 9340 9341 9342 9343 9344 9345 9346 9347 9348 9349 9350 9351 9352 9353 9354 9355 9356 9357 9358 9359 9360 9361 9362 9363 9364 9365 9366 9367 9368 9369 9370 9371 9372 9373 9374 9375 9376 9377 9378 9379 9380 9381 9382 9383 9384 9385 9386 9387 9388 9389 9390 9391 9392 9393 9394 9395 9396 9397 9398 9399 9400 9401 9402 9403 9404 9405 9406 9407 9408 9409 9410 9411 9412 9413 9414 9415 9416 9417 9418 9419 9420 9421 9422 9423 9424 9425 9426 9427 9428 9429 9430 9431 9432 9433 9434 9435 9436 9437 9438 9439 9440 9441 9442 9443 9444 9445 9446 9447 9448 9449 9450 9451 9452 9453 9454 9455 9456 9457 
9458 9459 9460 9461 9462 9463 9464 9465 9466 9467 9468 9469 9470 9471 9472 9473 9474 9475 9476 9477 9478 9479 9480 9481 9482 9483 9484 9485 9486 9487 9488 9489 9490 9491 9492 9493 9494 9495 9496 9497 9498 9499 9500 9501 9502 9503 9504 9505 9506 9507 9508 9509 9510 9511 9512 9513 9514 9515 9516 9517 9518 9519 9520 9521 9522 9523 9524 9525 9526 9527 9528 9529 9530 9531 9532 9533 9534 9535 9536 9537 9538 9539 9540 9541 9542 9543 9544 9545 9546 9547 9548 9549 9550 9551 9552 9553 9554 9555 9556 9557 9558 9559 9560 9561 9562 9563 9564 9565 9566 9567 9568 9569 9570 9571 9572 9573 9574 9575 9576 9577 9578 9579 9580 9581 9582 9583 9584 9585 9586 9587 9588 9589 9590 9591 9592 9593 9594 9595 9596 9597 9598 9599 9600 9601 9602 9603 9604 9605 9606 9607 9608 9609 9610 9611 9612 9613 9614 9615 9616 9617 9618 9619 9620 9621 9622 9623 9624 9625 9626 9627 9628 9629 9630 9631 9632 9633 9634 9635 9636 9637 9638 9639 9640 9641 9642 9643 9644 9645 9646 9647 9648 9649 9650 9651 9652 9653 9654 9655 9656 9657 9658 9659 9660 9661 9662 9663 9664 9665 9666 9667 9668 9669 9670 9671 9672 9673 9674 9675 9676 9677 9678 9679 9680 9681 9682 9683 9684 9685 9686 9687 9688 9689 9690 9691 9692 9693 9694 9695 9696 9697 9698 9699 9700 9701 9702 9703 9704 9705 9706 9707 9708 9709 9710 9711 9712 9713 9714 9715 9716 9717 9718 9719 9720 9721 9722 9723 9724 9725 9726 9727 9728 9729 9730 9731 9732 9733 9734 9735 9736 9737 9738 9739 9740 9741 9742 9743 9744 9745 9746 9747 9748 9749 9750 9751 9752 9753 9754 9755 9756 9757 9758 9759 9760 9761 9762 9763 9764 9765 9766 9767 9768 9769 9770 9771 9772 9773 9774 9775 9776 9777 9778 9779 9780 9781 9782 9783 9784 9785 9786 9787 9788 9789 9790 9791 9792 9793 9794 9795 9796 9797 9798 9799 9800 9801 9802 9803 9804 9805 9806 9807 9808 9809 9810 9811 9812 9813 9814 9815 9816 9817 9818 9819 9820 9821 9822 9823 9824 9825 9826 9827 9828 9829 9830 9831 9832 9833 9834 9835 9836 9837 9838 9839 9840 9841 9842 9843 9844 9845 9846 9847 9848 9849 9850 9851 9852 9853 9854 9855 9856 9857 9858 9859 9860 9861 9862 9863 9864 9865 9866 9867 9868 9869 9870 9871 9872 9873 9874 9875 9876 9877 9878 9879 9880 9881 9882 9883 9884 9885 9886 9887 9888 9889 9890 9891 9892 9893 9894 9895 9896 9897 9898 9899 9900 9901 9902 9903 9904 9905 9906 9907 9908 9909 9910 9911 9912 9913 9914 9915 9916 9917 9918 9919 9920 9921 9922 9923 9924 9925 9926 9927 9928 9929 9930 9931 9932 9933 9934 9935 9936 9937 9938 9939 9940 9941 9942 9943 9944 9945 9946 9947 9948 9949 9950 9951 9952 9953 9954 9955 9956 9957 9958 9959 9960 9961 9962 9963 9964 9965 9966 9967 9968 9969 9970 9971 9972 9973 9974 9975 9976 9977 9978 9979 9980 9981 9982 9983 9984 9985 9986 9987 9988 9989 9990 9991 9992 9993 9994 9995 9996 9997 9998 9999 10000 10001 10002 10003 10004 10005 10006 10007 10008 10009 10010 10011 10012 10013 10014 10015 10016 10017 10018 10019 10020 10021 10022 10023 10024 10025 10026 10027 10028 10029 10030 10031 10032 10033 10034 10035 10036 10037 10038 10039 10040 10041 10042 10043 10044 10045 10046 10047 10048 10049 10050 10051 10052 10053 10054 10055 10056 10057 10058 10059 10060 10061 10062 10063 10064 10065 10066 10067 10068 10069 10070 10071 10072 10073 10074 10075 10076 10077 10078 10079 10080 10081 10082 10083 10084 10085 10086 10087 10088 10089 10090 10091 10092 10093 10094 10095 10096 10097 10098 10099 10100 10101 10102 10103 10104 10105 10106 10107 10108 10109 10110 10111 10112 10113 10114 10115 10116 10117 10118 10119 10120 10121 10122 10123 10124 10125 10126 10127 10128 10129 10130 10131 10132 10133 10134 10135 10136 10137 10138 10139 
10140 10141 10142 10143 10144 10145 10146 10147 10148 10149 10150 10151 10152 10153 10154 10155 10156 10157 10158 10159 10160 10161 10162 10163 // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2014 Realtek Semiconductor Corp. All rights reserved. */ #include <linux/signal.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/phy.h> #include <linux/usb.h> #include <linux/crc32.h> #include <linux/if_vlan.h> #include <linux/uaccess.h> #include <linux/list.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <net/ip6_checksum.h> #include <uapi/linux/mdio.h> #include <linux/mdio.h> #include <linux/usb/cdc.h> #include <linux/suspend.h> #include <linux/atomic.h> #include <linux/acpi.h> #include <linux/firmware.h> #include <crypto/hash.h> #include <linux/usb/r8152.h> #include <net/gso.h> /* Information for net-next */ #define NETNEXT_VERSION "12" /* Information for net */ #define NET_VERSION "13" #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" #define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" #define MODULENAME "r8152" #define R8152_PHY_ID 32 #define PLA_IDR 0xc000 #define PLA_RCR 0xc010 #define PLA_RCR1 0xc012 #define PLA_RMS 0xc016 #define PLA_RXFIFO_CTRL0 0xc0a0 #define PLA_RXFIFO_FULL 0xc0a2 #define PLA_RXFIFO_CTRL1 0xc0a4 #define PLA_RX_FIFO_FULL 0xc0a6 #define PLA_RXFIFO_CTRL2 0xc0a8 #define PLA_RX_FIFO_EMPTY 0xc0aa #define PLA_DMY_REG0 0xc0b0 #define PLA_FMC 0xc0b4 #define PLA_CFG_WOL 0xc0b6 #define PLA_TEREDO_CFG 0xc0bc #define PLA_TEREDO_WAKE_BASE 0xc0c4 #define PLA_MAR 0xcd00 #define PLA_BACKUP 0xd000 #define PLA_BDC_CR 0xd1a0 #define PLA_TEREDO_TIMER 0xd2cc #define PLA_REALWOW_TIMER 0xd2e8 #define PLA_UPHY_TIMER 0xd388 #define PLA_SUSPEND_FLAG 0xd38a #define PLA_INDICATE_FALG 0xd38c #define PLA_MACDBG_PRE 0xd38c /* RTL_VER_04 only */ #define PLA_MACDBG_POST 0xd38e /* RTL_VER_04 only */ #define PLA_EXTRA_STATUS 0xd398 #define PLA_GPHY_CTRL 0xd3ae #define PLA_POL_GPIO_CTRL 0xdc6a #define PLA_EFUSE_DATA 0xdd00 #define PLA_EFUSE_CMD 0xdd02 #define PLA_LEDSEL 0xdd90 #define PLA_LED_FEATURE 0xdd92 #define PLA_PHYAR 0xde00 #define PLA_BOOT_CTRL 0xe004 #define PLA_LWAKE_CTRL_REG 0xe007 #define PLA_GPHY_INTR_IMR 0xe022 #define PLA_EEE_CR 0xe040 #define PLA_EEE_TXTWSYS 0xe04c #define PLA_EEE_TXTWSYS_2P5G 0xe058 #define PLA_EEEP_CR 0xe080 #define PLA_MAC_PWR_CTRL 0xe0c0 #define PLA_MAC_PWR_CTRL2 0xe0ca #define PLA_MAC_PWR_CTRL3 0xe0cc #define PLA_MAC_PWR_CTRL4 0xe0ce #define PLA_WDT6_CTRL 0xe428 #define PLA_TCR0 0xe610 #define PLA_TCR1 0xe612 #define PLA_MTPS 0xe615 #define PLA_TXFIFO_CTRL 0xe618 #define PLA_TXFIFO_FULL 0xe61a #define PLA_RSTTALLY 0xe800 #define PLA_CR 0xe813 #define PLA_CRWECR 0xe81c #define PLA_CONFIG12 0xe81e /* CONFIG1, CONFIG2 */ #define PLA_CONFIG34 0xe820 /* CONFIG3, CONFIG4 */ #define PLA_CONFIG5 0xe822 #define PLA_PHY_PWR 0xe84c #define PLA_OOB_CTRL 0xe84f #define PLA_CPCR 0xe854 #define PLA_MISC_0 0xe858 #define PLA_MISC_1 0xe85a #define PLA_OCP_GPHY_BASE 0xe86c #define PLA_TALLYCNT 0xe890 #define PLA_SFF_STS_7 0xe8de #define PLA_PHYSTATUS 0xe908 #define PLA_CONFIG6 0xe90a /* CONFIG6 */ #define PLA_USB_CFG 0xe952 #define PLA_BP_BA 0xfc26 #define PLA_BP_0 0xfc28 #define PLA_BP_1 0xfc2a #define PLA_BP_2 0xfc2c #define PLA_BP_3 0xfc2e #define PLA_BP_4 0xfc30 #define PLA_BP_5 0xfc32 #define PLA_BP_6 0xfc34 #define PLA_BP_7 
0xfc36 #define PLA_BP_EN 0xfc38 #define USB_USB2PHY 0xb41e #define USB_SSPHYLINK1 0xb426 #define USB_SSPHYLINK2 0xb428 #define USB_L1_CTRL 0xb45e #define USB_U2P3_CTRL 0xb460 #define USB_CSR_DUMMY1 0xb464 #define USB_CSR_DUMMY2 0xb466 #define USB_DEV_STAT 0xb808 #define USB_CONNECT_TIMER 0xcbf8 #define USB_MSC_TIMER 0xcbfc #define USB_BURST_SIZE 0xcfc0 #define USB_FW_FIX_EN0 0xcfca #define USB_FW_FIX_EN1 0xcfcc #define USB_LPM_CONFIG 0xcfd8 #define USB_ECM_OPTION 0xcfee #define USB_CSTMR 0xcfef /* RTL8153A */ #define USB_MISC_2 0xcfff #define USB_ECM_OP 0xd26b #define USB_GPHY_CTRL 0xd284 #define USB_SPEED_OPTION 0xd32a #define USB_FW_CTRL 0xd334 /* RTL8153B */ #define USB_FC_TIMER 0xd340 #define USB_USB_CTRL 0xd406 #define USB_PHY_CTRL 0xd408 #define USB_TX_AGG 0xd40a #define USB_RX_BUF_TH 0xd40c #define USB_USB_TIMER 0xd428 #define USB_RX_EARLY_TIMEOUT 0xd42c #define USB_RX_EARLY_SIZE 0xd42e #define USB_PM_CTRL_STATUS 0xd432 /* RTL8153A */ #define USB_RX_EXTRA_AGGR_TMR 0xd432 /* RTL8153B */ #define USB_TX_DMA 0xd434 #define USB_UPT_RXDMA_OWN 0xd437 #define USB_UPHY3_MDCMDIO 0xd480 #define USB_TOLERANCE 0xd490 #define USB_LPM_CTRL 0xd41a #define USB_BMU_RESET 0xd4b0 #define USB_BMU_CONFIG 0xd4b4 #define USB_U1U2_TIMER 0xd4da #define USB_FW_TASK 0xd4e8 /* RTL8153B */ #define USB_RX_AGGR_NUM 0xd4ee #define USB_UPS_CTRL 0xd800 #define USB_POWER_CUT 0xd80a #define USB_MISC_0 0xd81a #define USB_MISC_1 0xd81f #define USB_AFE_CTRL2 0xd824 #define USB_UPHY_XTAL 0xd826 #define USB_UPS_CFG 0xd842 #define USB_UPS_FLAGS 0xd848 #define USB_WDT1_CTRL 0xe404 #define USB_WDT11_CTRL 0xe43c #define USB_BP_BA PLA_BP_BA #define USB_BP_0 PLA_BP_0 #define USB_BP_1 PLA_BP_1 #define USB_BP_2 PLA_BP_2 #define USB_BP_3 PLA_BP_3 #define USB_BP_4 PLA_BP_4 #define USB_BP_5 PLA_BP_5 #define USB_BP_6 PLA_BP_6 #define USB_BP_7 PLA_BP_7 #define USB_BP_EN PLA_BP_EN /* RTL8153A */ #define USB_BP_8 0xfc38 /* RTL8153B */ #define USB_BP_9 0xfc3a #define USB_BP_10 0xfc3c #define USB_BP_11 0xfc3e #define USB_BP_12 0xfc40 #define USB_BP_13 0xfc42 #define USB_BP_14 0xfc44 #define USB_BP_15 0xfc46 #define USB_BP2_EN 0xfc48 /* OCP Registers */ #define OCP_ALDPS_CONFIG 0x2010 #define OCP_EEE_CONFIG1 0x2080 #define OCP_EEE_CONFIG2 0x2092 #define OCP_EEE_CONFIG3 0x2094 #define OCP_BASE_MII 0xa400 #define OCP_EEE_AR 0xa41a #define OCP_EEE_DATA 0xa41c #define OCP_PHY_STATUS 0xa420 #define OCP_INTR_EN 0xa424 #define OCP_NCTL_CFG 0xa42c #define OCP_POWER_CFG 0xa430 #define OCP_EEE_CFG 0xa432 #define OCP_SRAM_ADDR 0xa436 #define OCP_SRAM_DATA 0xa438 #define OCP_DOWN_SPEED 0xa442 #define OCP_EEE_ABLE 0xa5c4 #define OCP_EEE_ADV 0xa5d0 #define OCP_EEE_LPABLE 0xa5d2 #define OCP_10GBT_CTRL 0xa5d4 #define OCP_10GBT_STAT 0xa5d6 #define OCP_EEE_ADV2 0xa6d4 #define OCP_PHY_STATE 0xa708 /* nway state for 8153 */ #define OCP_PHY_PATCH_STAT 0xb800 #define OCP_PHY_PATCH_CMD 0xb820 #define OCP_PHY_LOCK 0xb82e #define OCP_ADC_IOFFSET 0xbcfc #define OCP_ADC_CFG 0xbc06 #define OCP_SYSCLK_CFG 0xc416 /* SRAM Register */ #define SRAM_GREEN_CFG 0x8011 #define SRAM_LPF_CFG 0x8012 #define SRAM_GPHY_FW_VER 0x801e #define SRAM_10M_AMP1 0x8080 #define SRAM_10M_AMP2 0x8082 #define SRAM_IMPEDANCE 0x8084 #define SRAM_PHY_LOCK 0xb82e /* PLA_RCR */ #define RCR_AAP 0x00000001 #define RCR_APM 0x00000002 #define RCR_AM 0x00000004 #define RCR_AB 0x00000008 #define RCR_ACPT_ALL (RCR_AAP | RCR_APM | RCR_AM | RCR_AB) #define SLOT_EN BIT(11) /* PLA_RCR1 */ #define OUTER_VLAN BIT(7) #define INNER_VLAN BIT(6) /* PLA_RXFIFO_CTRL0 */ #define RXFIFO_THR1_NORMAL 0x00080002 #define 
RXFIFO_THR1_OOB 0x01800003 /* PLA_RXFIFO_FULL */ #define RXFIFO_FULL_MASK 0xfff /* PLA_RXFIFO_CTRL1 */ #define RXFIFO_THR2_FULL 0x00000060 #define RXFIFO_THR2_HIGH 0x00000038 #define RXFIFO_THR2_OOB 0x0000004a #define RXFIFO_THR2_NORMAL 0x00a0 /* PLA_RXFIFO_CTRL2 */ #define RXFIFO_THR3_FULL 0x00000078 #define RXFIFO_THR3_HIGH 0x00000048 #define RXFIFO_THR3_OOB 0x0000005a #define RXFIFO_THR3_NORMAL 0x0110 /* PLA_TXFIFO_CTRL */ #define TXFIFO_THR_NORMAL 0x00400008 #define TXFIFO_THR_NORMAL2 0x01000008 /* PLA_DMY_REG0 */ #define ECM_ALDPS 0x0002 /* PLA_FMC */ #define FMC_FCR_MCU_EN 0x0001 /* PLA_EEEP_CR */ #define EEEP_CR_EEEP_TX 0x0002 /* PLA_WDT6_CTRL */ #define WDT6_SET_MODE 0x0010 /* PLA_TCR0 */ #define TCR0_TX_EMPTY 0x0800 #define TCR0_AUTO_FIFO 0x0080 /* PLA_TCR1 */ #define VERSION_MASK 0x7cf0 #define IFG_MASK (BIT(3) | BIT(9) | BIT(8)) #define IFG_144NS BIT(9) #define IFG_96NS (BIT(9) | BIT(8)) /* PLA_MTPS */ #define MTPS_JUMBO (12 * 1024 / 64) #define MTPS_DEFAULT (6 * 1024 / 64) /* PLA_RSTTALLY */ #define TALLY_RESET 0x0001 /* PLA_CR */ #define CR_RST 0x10 #define CR_RE 0x08 #define CR_TE 0x04 /* PLA_CRWECR */ #define CRWECR_NORAML 0x00 #define CRWECR_CONFIG 0xc0 /* PLA_OOB_CTRL */ #define NOW_IS_OOB 0x80 #define TXFIFO_EMPTY 0x20 #define RXFIFO_EMPTY 0x10 #define LINK_LIST_READY 0x02 #define DIS_MCU_CLROOB 0x01 #define FIFO_EMPTY (TXFIFO_EMPTY | RXFIFO_EMPTY) /* PLA_MISC_1 */ #define RXDY_GATED_EN 0x0008 /* PLA_SFF_STS_7 */ #define RE_INIT_LL 0x8000 #define MCU_BORW_EN 0x4000 /* PLA_CPCR */ #define FLOW_CTRL_EN BIT(0) #define CPCR_RX_VLAN 0x0040 /* PLA_CFG_WOL */ #define MAGIC_EN 0x0001 /* PLA_TEREDO_CFG */ #define TEREDO_SEL 0x8000 #define TEREDO_WAKE_MASK 0x7f00 #define TEREDO_RS_EVENT_MASK 0x00fe #define OOB_TEREDO_EN 0x0001 /* PLA_BDC_CR */ #define ALDPS_PROXY_MODE 0x0001 /* PLA_EFUSE_CMD */ #define EFUSE_READ_CMD BIT(15) #define EFUSE_DATA_BIT16 BIT(7) /* PLA_CONFIG34 */ #define LINK_ON_WAKE_EN 0x0010 #define LINK_OFF_WAKE_EN 0x0008 /* PLA_CONFIG6 */ #define LANWAKE_CLR_EN BIT(0) /* PLA_USB_CFG */ #define EN_XG_LIP BIT(1) #define EN_G_LIP BIT(2) /* PLA_CONFIG5 */ #define BWF_EN 0x0040 #define MWF_EN 0x0020 #define UWF_EN 0x0010 #define LAN_WAKE_EN 0x0002 /* PLA_LED_FEATURE */ #define LED_MODE_MASK 0x0700 /* PLA_PHY_PWR */ #define TX_10M_IDLE_EN 0x0080 #define PFM_PWM_SWITCH 0x0040 #define TEST_IO_OFF BIT(4) /* PLA_MAC_PWR_CTRL */ #define D3_CLK_GATED_EN 0x00004000 #define MCU_CLK_RATIO 0x07010f07 #define MCU_CLK_RATIO_MASK 0x0f0f0f0f #define ALDPS_SPDWN_RATIO 0x0f87 /* PLA_MAC_PWR_CTRL2 */ #define EEE_SPDWN_RATIO 0x8007 #define MAC_CLK_SPDWN_EN BIT(15) #define EEE_SPDWN_RATIO_MASK 0xff /* PLA_MAC_PWR_CTRL3 */ #define PLA_MCU_SPDWN_EN BIT(14) #define PKT_AVAIL_SPDWN_EN 0x0100 #define SUSPEND_SPDWN_EN 0x0004 #define U1U2_SPDWN_EN 0x0002 #define L1_SPDWN_EN 0x0001 /* PLA_MAC_PWR_CTRL4 */ #define PWRSAVE_SPDWN_EN 0x1000 #define RXDV_SPDWN_EN 0x0800 #define TX10MIDLE_EN 0x0100 #define IDLE_SPDWN_EN BIT(6) #define TP100_SPDWN_EN 0x0020 #define TP500_SPDWN_EN 0x0010 #define TP1000_SPDWN_EN 0x0008 #define EEE_SPDWN_EN 0x0001 /* PLA_GPHY_INTR_IMR */ #define GPHY_STS_MSK 0x0001 #define SPEED_DOWN_MSK 0x0002 #define SPDWN_RXDV_MSK 0x0004 #define SPDWN_LINKCHG_MSK 0x0008 /* PLA_PHYAR */ #define PHYAR_FLAG 0x80000000 /* PLA_EEE_CR */ #define EEE_RX_EN 0x0001 #define EEE_TX_EN 0x0002 /* PLA_BOOT_CTRL */ #define AUTOLOAD_DONE 0x0002 /* PLA_LWAKE_CTRL_REG */ #define LANWAKE_PIN BIT(7) /* PLA_SUSPEND_FLAG */ #define LINK_CHG_EVENT BIT(0) /* PLA_INDICATE_FALG */ #define UPCOMING_RUNTIME_D3 
BIT(0) /* PLA_MACDBG_PRE and PLA_MACDBG_POST */ #define DEBUG_OE BIT(0) #define DEBUG_LTSSM 0x0082 /* PLA_EXTRA_STATUS */ #define CUR_LINK_OK BIT(15) #define U3P3_CHECK_EN BIT(7) /* RTL_VER_05 only */ #define LINK_CHANGE_FLAG BIT(8) #define POLL_LINK_CHG BIT(0) /* PLA_GPHY_CTRL */ #define GPHY_FLASH BIT(1) /* PLA_POL_GPIO_CTRL */ #define DACK_DET_EN BIT(15) #define POL_GPHY_PATCH BIT(4) /* USB_USB2PHY */ #define USB2PHY_SUSPEND 0x0001 #define USB2PHY_L1 0x0002 /* USB_SSPHYLINK1 */ #define DELAY_PHY_PWR_CHG BIT(1) /* USB_SSPHYLINK2 */ #define pwd_dn_scale_mask 0x3ffe #define pwd_dn_scale(x) ((x) << 1) /* USB_CSR_DUMMY1 */ #define DYNAMIC_BURST 0x0001 /* USB_CSR_DUMMY2 */ #define EP4_FULL_FC 0x0001 /* USB_DEV_STAT */ #define STAT_SPEED_MASK 0x0006 #define STAT_SPEED_HIGH 0x0000 #define STAT_SPEED_FULL 0x0002 /* USB_FW_FIX_EN0 */ #define FW_FIX_SUSPEND BIT(14) /* USB_FW_FIX_EN1 */ #define FW_IP_RESET_EN BIT(9) /* USB_LPM_CONFIG */ #define LPM_U1U2_EN BIT(0) /* USB_TX_AGG */ #define TX_AGG_MAX_THRESHOLD 0x03 /* USB_RX_BUF_TH */ #define RX_THR_SUPPER 0x0c350180 #define RX_THR_HIGH 0x7a120180 #define RX_THR_SLOW 0xffff0180 #define RX_THR_B 0x00010001 /* USB_TX_DMA */ #define TEST_MODE_DISABLE 0x00000001 #define TX_SIZE_ADJUST1 0x00000100 /* USB_BMU_RESET */ #define BMU_RESET_EP_IN 0x01 #define BMU_RESET_EP_OUT 0x02 /* USB_BMU_CONFIG */ #define ACT_ODMA BIT(1) /* USB_UPT_RXDMA_OWN */ #define OWN_UPDATE BIT(0) #define OWN_CLEAR BIT(1) /* USB_FW_TASK */ #define FC_PATCH_TASK BIT(1) /* USB_RX_AGGR_NUM */ #define RX_AGGR_NUM_MASK 0x1ff /* USB_UPS_CTRL */ #define POWER_CUT 0x0100 /* USB_PM_CTRL_STATUS */ #define RESUME_INDICATE 0x0001 /* USB_ECM_OPTION */ #define BYPASS_MAC_RESET BIT(5) /* USB_CSTMR */ #define FORCE_SUPER BIT(0) /* USB_MISC_2 */ #define UPS_FORCE_PWR_DOWN BIT(0) /* USB_ECM_OP */ #define EN_ALL_SPEED BIT(0) /* USB_GPHY_CTRL */ #define GPHY_PATCH_DONE BIT(2) #define BYPASS_FLASH BIT(5) #define BACKUP_RESTRORE BIT(6) /* USB_SPEED_OPTION */ #define RG_PWRDN_EN BIT(8) #define ALL_SPEED_OFF BIT(9) /* USB_FW_CTRL */ #define FLOW_CTRL_PATCH_OPT BIT(1) #define AUTO_SPEEDUP BIT(3) #define FLOW_CTRL_PATCH_2 BIT(8) /* USB_FC_TIMER */ #define CTRL_TIMER_EN BIT(15) /* USB_USB_CTRL */ #define CDC_ECM_EN BIT(3) #define RX_AGG_DISABLE 0x0010 #define RX_ZERO_EN 0x0080 /* USB_U2P3_CTRL */ #define U2P3_ENABLE 0x0001 #define RX_DETECT8 BIT(3) /* USB_POWER_CUT */ #define PWR_EN 0x0001 #define PHASE2_EN 0x0008 #define UPS_EN BIT(4) #define USP_PREWAKE BIT(5) /* USB_MISC_0 */ #define PCUT_STATUS 0x0001 /* USB_RX_EARLY_TIMEOUT */ #define COALESCE_SUPER 85000U #define COALESCE_HIGH 250000U #define COALESCE_SLOW 524280U /* USB_WDT1_CTRL */ #define WTD1_EN BIT(0) /* USB_WDT11_CTRL */ #define TIMER11_EN 0x0001 /* USB_LPM_CTRL */ /* bit 4 ~ 5: fifo empty boundary */ #define FIFO_EMPTY_1FB 0x30 /* 0x1fb * 64 = 32448 bytes */ /* bit 2 ~ 3: LMP timer */ #define LPM_TIMER_MASK 0x0c #define LPM_TIMER_500MS 0x04 /* 500 ms */ #define LPM_TIMER_500US 0x0c /* 500 us */ #define ROK_EXIT_LPM 0x02 /* USB_AFE_CTRL2 */ #define SEN_VAL_MASK 0xf800 #define SEN_VAL_NORMAL 0xa000 #define SEL_RXIDLE 0x0100 /* USB_UPHY_XTAL */ #define OOBS_POLLING BIT(8) /* USB_UPS_CFG */ #define SAW_CNT_1MS_MASK 0x0fff #define MID_REVERSE BIT(5) /* RTL8156A */ /* USB_UPS_FLAGS */ #define UPS_FLAGS_R_TUNE BIT(0) #define UPS_FLAGS_EN_10M_CKDIV BIT(1) #define UPS_FLAGS_250M_CKDIV BIT(2) #define UPS_FLAGS_EN_ALDPS BIT(3) #define UPS_FLAGS_CTAP_SHORT_DIS BIT(4) #define UPS_FLAGS_SPEED_MASK (0xf << 16) #define ups_flags_speed(x) ((x) << 16) #define 
UPS_FLAGS_EN_EEE BIT(20) #define UPS_FLAGS_EN_500M_EEE BIT(21) #define UPS_FLAGS_EN_EEE_CKDIV BIT(22) #define UPS_FLAGS_EEE_PLLOFF_100 BIT(23) #define UPS_FLAGS_EEE_PLLOFF_GIGA BIT(24) #define UPS_FLAGS_EEE_CMOD_LV_EN BIT(25) #define UPS_FLAGS_EN_GREEN BIT(26) #define UPS_FLAGS_EN_FLOW_CTR BIT(27) enum spd_duplex { NWAY_10M_HALF, NWAY_10M_FULL, NWAY_100M_HALF, NWAY_100M_FULL, NWAY_1000M_FULL, FORCE_10M_HALF, FORCE_10M_FULL, FORCE_100M_HALF, FORCE_100M_FULL, FORCE_1000M_FULL, NWAY_2500M_FULL, }; /* OCP_ALDPS_CONFIG */ #define ENPWRSAVE 0x8000 #define ENPDNPS 0x0200 #define LINKENA 0x0100 #define DIS_SDSAVE 0x0010 /* OCP_PHY_STATUS */ #define PHY_STAT_MASK 0x0007 #define PHY_STAT_EXT_INIT 2 #define PHY_STAT_LAN_ON 3 #define PHY_STAT_PWRDN 5 /* OCP_INTR_EN */ #define INTR_SPEED_FORCE BIT(3) /* OCP_NCTL_CFG */ #define PGA_RETURN_EN BIT(1) /* OCP_POWER_CFG */ #define EEE_CLKDIV_EN 0x8000 #define EN_ALDPS 0x0004 #define EN_10M_PLLOFF 0x0001 /* OCP_EEE_CONFIG1 */ #define RG_TXLPI_MSK_HFDUP 0x8000 #define RG_MATCLR_EN 0x4000 #define EEE_10_CAP 0x2000 #define EEE_NWAY_EN 0x1000 #define TX_QUIET_EN 0x0200 #define RX_QUIET_EN 0x0100 #define sd_rise_time_mask 0x0070 #define sd_rise_time(x) (min(x, 7) << 4) /* bit 4 ~ 6 */ #define RG_RXLPI_MSK_HFDUP 0x0008 #define SDFALLTIME 0x0007 /* bit 0 ~ 2 */ /* OCP_EEE_CONFIG2 */ #define RG_LPIHYS_NUM 0x7000 /* bit 12 ~ 15 */ #define RG_DACQUIET_EN 0x0400 #define RG_LDVQUIET_EN 0x0200 #define RG_CKRSEL 0x0020 #define RG_EEEPRG_EN 0x0010 /* OCP_EEE_CONFIG3 */ #define fast_snr_mask 0xff80 #define fast_snr(x) (min(x, 0x1ff) << 7) /* bit 7 ~ 15 */ #define RG_LFS_SEL 0x0060 /* bit 6 ~ 5 */ #define MSK_PH 0x0006 /* bit 0 ~ 3 */ /* OCP_EEE_AR */ /* bit[15:14] function */ #define FUN_ADDR 0x0000 #define FUN_DATA 0x4000 /* bit[4:0] device addr */ /* OCP_EEE_CFG */ #define CTAP_SHORT_EN 0x0040 #define EEE10_EN 0x0010 /* OCP_DOWN_SPEED */ #define EN_EEE_CMODE BIT(14) #define EN_EEE_1000 BIT(13) #define EN_EEE_100 BIT(12) #define EN_10M_CLKDIV BIT(11) #define EN_10M_BGOFF 0x0080 /* OCP_10GBT_CTRL */ #define RTL_ADV2_5G_F_R BIT(5) /* Advertise 2.5GBASE-T fast-retrain */ /* OCP_PHY_STATE */ #define TXDIS_STATE 0x01 #define ABD_STATE 0x02 /* OCP_PHY_PATCH_STAT */ #define PATCH_READY BIT(6) /* OCP_PHY_PATCH_CMD */ #define PATCH_REQUEST BIT(4) /* OCP_PHY_LOCK */ #define PATCH_LOCK BIT(0) /* OCP_ADC_CFG */ #define CKADSEL_L 0x0100 #define ADC_EN 0x0080 #define EN_EMI_L 0x0040 /* OCP_SYSCLK_CFG */ #define sysclk_div_expo(x) (min(x, 5) << 8) #define clk_div_expo(x) (min(x, 5) << 4) /* SRAM_GREEN_CFG */ #define GREEN_ETH_EN BIT(15) #define R_TUNE_EN BIT(11) /* SRAM_LPF_CFG */ #define LPF_AUTO_TUNE 0x8000 /* SRAM_10M_AMP1 */ #define GDAC_IB_UPALL 0x0008 /* SRAM_10M_AMP2 */ #define AMP_DN 0x0200 /* SRAM_IMPEDANCE */ #define RX_DRIVING_MASK 0x6000 /* SRAM_PHY_LOCK */ #define PHY_PATCH_LOCK 0x0001 /* MAC PASSTHRU */ #define AD_MASK 0xfee0 #define BND_MASK 0x0004 #define BD_MASK 0x0001 #define EFUSE 0xcfdb #define PASS_THRU_MASK 0x1 #define BP4_SUPER_ONLY 0x1578 /* RTL_VER_04 only */ enum rtl_register_content { _2500bps = BIT(10), _1250bps = BIT(9), _500bps = BIT(8), _tx_flow = BIT(6), _rx_flow = BIT(5), _1000bps = 0x10, _100bps = 0x08, _10bps = 0x04, LINK_STATUS = 0x02, FULL_DUP = 0x01, }; #define is_speed_2500(_speed) (((_speed) & (_2500bps | LINK_STATUS)) == (_2500bps | LINK_STATUS)) #define is_flow_control(_speed) (((_speed) & (_tx_flow | _rx_flow)) == (_tx_flow | _rx_flow)) #define RTL8152_MAX_TX 4 #define RTL8152_MAX_RX 10 #define INTBUFSIZE 2 #define TX_ALIGN 4 #define RX_ALIGN 8 
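/*
 * Illustrative sketch (not part of the driver source): how the raw
 * speed/status word read back from the adapter is typically decoded with
 * the enum rtl_register_content bits and the is_speed_2500() helper
 * defined above. The function name rtl_sketch_speed_str() is hypothetical
 * and exists only for illustration.
 */
static inline const char *rtl_sketch_speed_str(u16 speed)
{
	if (!(speed & LINK_STATUS))	/* speed bits mean nothing without link */
		return "no link";
	if (is_speed_2500(speed))	/* _2500bps and LINK_STATUS both set */
		return "2.5Gbps";
	if (speed & _1000bps)
		return "1Gbps";
	if (speed & _100bps)
		return "100Mbps";
	return "10Mbps";
}

/*
 * Similarly, is_flow_control(speed) above is true only when both _tx_flow
 * and _rx_flow are asserted, i.e. pause was negotiated in both directions.
 */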
#define RTL8152_RX_MAX_PENDING 4096 #define RTL8152_RXFG_HEADSZ 256 #define INTR_LINK 0x0004 #define RTL8152_RMS (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) #define RTL8153_RMS RTL8153_MAX_PACKET #define RTL8152_TX_TIMEOUT (5 * HZ) #define mtu_to_size(m) ((m) + VLAN_ETH_HLEN + ETH_FCS_LEN) #define size_to_mtu(s) ((s) - VLAN_ETH_HLEN - ETH_FCS_LEN) #define rx_reserved_size(x) (mtu_to_size(x) + sizeof(struct rx_desc) + RX_ALIGN) /* rtl8152 flags */ enum rtl8152_flags { RTL8152_INACCESSIBLE = 0, RTL8152_SET_RX_MODE, WORK_ENABLE, RTL8152_LINK_CHG, SELECTIVE_SUSPEND, PHY_RESET, SCHEDULE_TASKLET, GREEN_ETHERNET, RX_EPROTO, IN_PRE_RESET, PROBED_WITH_NO_ERRORS, PROBE_SHOULD_RETRY, }; #define DEVICE_ID_LENOVO_USB_C_TRAVEL_HUB 0x721e #define DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK 0x3054 #define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082 #define DEVICE_ID_THINKPAD_USB_C_DONGLE 0x720c #define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387 #define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3 0x3062 struct tally_counter { __le64 tx_packets; __le64 rx_packets; __le64 tx_errors; __le32 rx_errors; __le16 rx_missed; __le16 align_errors; __le32 tx_one_collision; __le32 tx_multi_collision; __le64 rx_unicast; __le64 rx_broadcast; __le32 rx_multicast; __le16 tx_aborted; __le16 tx_underrun; }; struct rx_desc { __le32 opts1; #define RX_LEN_MASK 0x7fff __le32 opts2; #define RD_UDP_CS BIT(23) #define RD_TCP_CS BIT(22) #define RD_IPV6_CS BIT(20) #define RD_IPV4_CS BIT(19) __le32 opts3; #define IPF BIT(23) /* IP checksum fail */ #define UDPF BIT(22) /* UDP checksum fail */ #define TCPF BIT(21) /* TCP checksum fail */ #define RX_VLAN_TAG BIT(16) __le32 opts4; __le32 opts5; __le32 opts6; }; struct tx_desc { __le32 opts1; #define TX_FS BIT(31) /* First segment of a packet */ #define TX_LS BIT(30) /* Final segment of a packet */ #define GTSENDV4 BIT(28) #define GTSENDV6 BIT(27) #define GTTCPHO_SHIFT 18 #define GTTCPHO_MAX 0x7fU #define TX_LEN_MAX 0x3ffffU __le32 opts2; #define UDP_CS BIT(31) /* Calculate UDP/IP checksum */ #define TCP_CS BIT(30) /* Calculate TCP/IP checksum */ #define IPV4_CS BIT(29) /* Calculate IPv4 checksum */ #define IPV6_CS BIT(28) /* Calculate IPv6 checksum */ #define MSS_SHIFT 17 #define MSS_MAX 0x7ffU #define TCPHO_SHIFT 17 #define TCPHO_MAX 0x7ffU #define TX_VLAN_TAG BIT(16) }; struct r8152; struct rx_agg { struct list_head list, info_list; struct urb *urb; struct r8152 *context; struct page *page; void *buffer; }; struct tx_agg { struct list_head list; struct urb *urb; struct r8152 *context; void *buffer; void *head; u32 skb_num; u32 skb_len; }; struct r8152 { unsigned long flags; struct usb_device *udev; struct napi_struct napi; struct usb_interface *intf; struct net_device *netdev; struct urb *intr_urb; struct tx_agg tx_info[RTL8152_MAX_TX]; struct list_head rx_info, rx_used; struct list_head rx_done, tx_free; struct sk_buff_head tx_queue, rx_queue; spinlock_t rx_lock, tx_lock; struct delayed_work schedule, hw_phy_work; struct mii_if_info mii; struct mutex control; /* use for hw setting */ #ifdef CONFIG_PM_SLEEP struct notifier_block pm_notifier; #endif struct tasklet_struct tx_tl; struct rtl_ops { void (*init)(struct r8152 *tp); int (*enable)(struct r8152 *tp); void (*disable)(struct r8152 *tp); void (*up)(struct r8152 *tp); void (*down)(struct r8152 *tp); void (*unload)(struct r8152 *tp); int (*eee_get)(struct r8152 *tp, struct ethtool_keee *eee); int (*eee_set)(struct r8152 *tp, struct ethtool_keee *eee); bool (*in_nway)(struct r8152 *tp); void (*hw_phy_cfg)(struct r8152 *tp); void (*autosuspend_en)(struct 
r8152 *tp, bool enable); void (*change_mtu)(struct r8152 *tp); } rtl_ops; struct ups_info { u32 r_tune:1; u32 _10m_ckdiv:1; u32 _250m_ckdiv:1; u32 aldps:1; u32 lite_mode:2; u32 speed_duplex:4; u32 eee:1; u32 eee_lite:1; u32 eee_ckdiv:1; u32 eee_plloff_100:1; u32 eee_plloff_giga:1; u32 eee_cmod_lv:1; u32 green:1; u32 flow_control:1; u32 ctap_short_off:1; } ups_info; #define RTL_VER_SIZE 32 struct rtl_fw { const char *fw_name; const struct firmware *fw; char version[RTL_VER_SIZE]; int (*pre_fw)(struct r8152 *tp); int (*post_fw)(struct r8152 *tp); bool retry; } rtl_fw; atomic_t rx_count; bool eee_en; int intr_interval; u32 saved_wolopts; u32 msg_enable; u32 tx_qlen; u32 coalesce; u32 advertising; u32 rx_buf_sz; u32 rx_copybreak; u32 rx_pending; u32 fc_pause_on, fc_pause_off; unsigned int pipe_in, pipe_out, pipe_intr, pipe_ctrl_in, pipe_ctrl_out; u32 support_2500full:1; u32 lenovo_macpassthru:1; u32 dell_tb_rx_agg_bug:1; u16 ocp_base; u16 speed; u16 eee_adv; u8 *intr_buff; u8 version; u8 duplex; u8 autoneg; unsigned int reg_access_reset_count; }; /** * struct fw_block - block type and total length * @type: type of the current block, such as RTL_FW_END, RTL_FW_PLA, * RTL_FW_USB and so on. * @length: total length of the current block. */ struct fw_block { __le32 type; __le32 length; } __packed; /** * struct fw_header - header of the firmware file * @checksum: checksum of sha256 which is calculated from the whole file * except the checksum field of the file. That is, calculate sha256 * from the version field to the end of the file. * @version: version of this firmware. * @blocks: the first firmware block of the file */ struct fw_header { u8 checksum[32]; char version[RTL_VER_SIZE]; struct fw_block blocks[]; } __packed; enum rtl8152_fw_flags { FW_FLAGS_USB = 0, FW_FLAGS_PLA, FW_FLAGS_START, FW_FLAGS_STOP, FW_FLAGS_NC, FW_FLAGS_NC1, FW_FLAGS_NC2, FW_FLAGS_UC2, FW_FLAGS_UC, FW_FLAGS_SPEED_UP, FW_FLAGS_VER, }; enum rtl8152_fw_fixup_cmd { FW_FIXUP_AND = 0, FW_FIXUP_OR, FW_FIXUP_NOT, FW_FIXUP_XOR, }; struct fw_phy_set { __le16 addr; __le16 data; } __packed; struct fw_phy_speed_up { struct fw_block blk_hdr; __le16 fw_offset; __le16 version; __le16 fw_reg; __le16 reserved; char info[]; } __packed; struct fw_phy_ver { struct fw_block blk_hdr; struct fw_phy_set ver; __le32 reserved; } __packed; struct fw_phy_fixup { struct fw_block blk_hdr; struct fw_phy_set setting; __le16 bit_cmd; __le16 reserved; } __packed; struct fw_phy_union { struct fw_block blk_hdr; __le16 fw_offset; __le16 fw_reg; struct fw_phy_set pre_set[2]; struct fw_phy_set bp[8]; struct fw_phy_set bp_en; u8 pre_num; u8 bp_num; char info[]; } __packed; /** * struct fw_mac - a firmware block used by RTL_FW_PLA and RTL_FW_USB. * The layout of the firmware block is: * <struct fw_mac> + <info> + <firmware data>. * @blk_hdr: firmware descriptor (type, length) * @fw_offset: offset of the firmware binary data. The start address of * the data would be the address of struct fw_mac + @fw_offset. * @fw_reg: the register to load the firmware. Depends on chip. * @bp_ba_addr: the register to write break point base address. Depends on * chip. * @bp_ba_value: break point base address. Depends on chip. * @bp_en_addr: the register to write break point enabled mask. Depends * on chip. * @bp_en_value: break point enabled mask. Depends on the firmware. * @bp_start: the start register of break points. Depends on chip. * @bp_num: the break point number which needs to be set for this firmware. * Depends on the firmware. * @bp: break points. Depends on firmware. 
* @reserved: reserved space (unused) * @fw_ver_reg: the register to store the fw version. * @fw_ver_data: the firmware version of the current type. * @info: additional information for debugging, and is followed by the * binary data of firmware. */ struct fw_mac { struct fw_block blk_hdr; __le16 fw_offset; __le16 fw_reg; __le16 bp_ba_addr; __le16 bp_ba_value; __le16 bp_en_addr; __le16 bp_en_value; __le16 bp_start; __le16 bp_num; __le16 bp[16]; /* any value determined by firmware */ __le32 reserved; __le16 fw_ver_reg; u8 fw_ver_data; char info[]; } __packed; /** * struct fw_phy_patch_key - a firmware block used by RTL_FW_PHY_START. * This is used to set patch key when loading the firmware of PHY. * @blk_hdr: firmware descriptor (type, length) * @key_reg: the register to write the patch key. * @key_data: patch key. * @reserved: reserved space (unused) */ struct fw_phy_patch_key { struct fw_block blk_hdr; __le16 key_reg; __le16 key_data; __le32 reserved; } __packed; /** * struct fw_phy_nc - a firmware block used by RTL_FW_PHY_NC. * The layout of the firmware block is: * <struct fw_phy_nc> + <info> + <firmware data>. * @blk_hdr: firmware descriptor (type, length) * @fw_offset: offset of the firmware binary data. The start address of * the data would be the address of struct fw_phy_nc + @fw_offset. * @fw_reg: the register to load the firmware. Depends on chip. * @ba_reg: the register to write the base address. Depends on chip. * @ba_data: base address. Depends on chip. * @patch_en_addr: the register of enabling patch mode. Depends on chip. * @patch_en_value: patch mode enabled mask. Depends on the firmware. * @mode_reg: the regitster of switching the mode. * @mode_pre: the mode needing to be set before loading the firmware. * @mode_post: the mode to be set when finishing to load the firmware. * @reserved: reserved space (unused) * @bp_start: the start register of break points. Depends on chip. * @bp_num: the break point number which needs to be set for this firmware. * Depends on the firmware. * @bp: break points. Depends on firmware. * @info: additional information for debugging, and is followed by the * binary data of firmware. */ struct fw_phy_nc { struct fw_block blk_hdr; __le16 fw_offset; __le16 fw_reg; __le16 ba_reg; __le16 ba_data; __le16 patch_en_addr; __le16 patch_en_value; __le16 mode_reg; __le16 mode_pre; __le16 mode_post; __le16 reserved; __le16 bp_start; __le16 bp_num; __le16 bp[4]; char info[]; } __packed; enum rtl_fw_type { RTL_FW_END = 0, RTL_FW_PLA, RTL_FW_USB, RTL_FW_PHY_START, RTL_FW_PHY_STOP, RTL_FW_PHY_NC, RTL_FW_PHY_FIXUP, RTL_FW_PHY_UNION_NC, RTL_FW_PHY_UNION_NC1, RTL_FW_PHY_UNION_NC2, RTL_FW_PHY_UNION_UC2, RTL_FW_PHY_UNION_UC, RTL_FW_PHY_UNION_MISC, RTL_FW_PHY_SPEED_UP, RTL_FW_PHY_VER, }; enum rtl_version { RTL_VER_UNKNOWN = 0, RTL_VER_01, RTL_VER_02, RTL_VER_03, RTL_VER_04, RTL_VER_05, RTL_VER_06, RTL_VER_07, RTL_VER_08, RTL_VER_09, RTL_TEST_01, RTL_VER_10, RTL_VER_11, RTL_VER_12, RTL_VER_13, RTL_VER_14, RTL_VER_15, RTL_VER_MAX }; enum tx_csum_stat { TX_CSUM_SUCCESS = 0, TX_CSUM_TSO, TX_CSUM_NONE }; #define RTL_ADVERTISED_10_HALF BIT(0) #define RTL_ADVERTISED_10_FULL BIT(1) #define RTL_ADVERTISED_100_HALF BIT(2) #define RTL_ADVERTISED_100_FULL BIT(3) #define RTL_ADVERTISED_1000_HALF BIT(4) #define RTL_ADVERTISED_1000_FULL BIT(5) #define RTL_ADVERTISED_2500_FULL BIT(6) /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). * The RTL chips use a 64 element hash table based on the Ethernet CRC. 
*/ static const int multicast_filter_limit = 32; static unsigned int agg_buf_sz = 16384; #define RTL_LIMITED_TSO_SIZE (size_to_mtu(agg_buf_sz) - sizeof(struct tx_desc)) /* If register access fails then we block access and issue a reset. If this * happens too many times in a row without a successful access then we stop * trying to reset and just leave access blocked. */ #define REGISTER_ACCESS_MAX_RESETS 3 static void rtl_set_inaccessible(struct r8152 *tp) { set_bit(RTL8152_INACCESSIBLE, &tp->flags); smp_mb__after_atomic(); } static void rtl_set_accessible(struct r8152 *tp) { clear_bit(RTL8152_INACCESSIBLE, &tp->flags); smp_mb__after_atomic(); } static int r8152_control_msg(struct r8152 *tp, unsigned int pipe, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *data, __u16 size, const char *msg_tag) { struct usb_device *udev = tp->udev; int ret; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; ret = usb_control_msg(udev, pipe, request, requesttype, value, index, data, size, USB_CTRL_GET_TIMEOUT); /* No need to issue a reset to report an error if the USB device got * unplugged; just return immediately. */ if (ret == -ENODEV) return ret; /* If the write was successful then we're done */ if (ret >= 0) { tp->reg_access_reset_count = 0; return ret; } dev_err(&udev->dev, "Failed to %s %d bytes at %#06x/%#06x (%d)\n", msg_tag, size, value, index, ret); /* Block all future register access until we reset. Much of the code * in the driver doesn't check for errors. Notably, many parts of the * driver do a read/modify/write of a register value without * confirming that the read succeeded. Writing back modified garbage * like this can fully wedge the adapter, requiring a power cycle. */ rtl_set_inaccessible(tp); /* If probe hasn't yet finished, then we'll request a retry of the * whole probe routine if we get any control transfer errors. We * never have to clear this bit since we free/reallocate the whole "tp" * structure if we retry probe. */ if (!test_bit(PROBED_WITH_NO_ERRORS, &tp->flags)) { set_bit(PROBE_SHOULD_RETRY, &tp->flags); return ret; } /* Failing to access registers in pre-reset is not surprising since we * wouldn't be resetting if things were behaving normally. The register * access we do in pre-reset isn't truly mandatory--we're just reusing * the disable() function and trying to be nice by powering the * adapter down before resetting it. Thus, if we're in pre-reset, * we'll return right away and not try to queue up yet another reset. * We know the post-reset is already coming. 
*/ if (test_bit(IN_PRE_RESET, &tp->flags)) return ret; if (tp->reg_access_reset_count < REGISTER_ACCESS_MAX_RESETS) { usb_queue_reset_device(tp->intf); tp->reg_access_reset_count++; } else if (tp->reg_access_reset_count == REGISTER_ACCESS_MAX_RESETS) { dev_err(&udev->dev, "Tried to reset %d times; giving up.\n", REGISTER_ACCESS_MAX_RESETS); } return ret; } static int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) { int ret; void *tmp; tmp = kmalloc(size, GFP_KERNEL); if (!tmp) return -ENOMEM; ret = r8152_control_msg(tp, tp->pipe_ctrl_in, RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, value, index, tmp, size, "read"); if (ret < 0) memset(data, 0xff, size); else memcpy(data, tmp, size); kfree(tmp); return ret; } static int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) { int ret; void *tmp; tmp = kmemdup(data, size, GFP_KERNEL); if (!tmp) return -ENOMEM; ret = r8152_control_msg(tp, tp->pipe_ctrl_out, RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE, value, index, tmp, size, "write"); kfree(tmp); return ret; } static void rtl_set_unplug(struct r8152 *tp) { if (tp->udev->state == USB_STATE_NOTATTACHED) rtl_set_inaccessible(tp); } static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size, void *data, u16 type) { u16 limit = 64; int ret = 0; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; /* both size and indix must be 4 bytes align */ if ((size & 3) || !size || (index & 3) || !data) return -EPERM; if ((u32)index + (u32)size > 0xffff) return -EPERM; while (size) { if (size > limit) { ret = get_registers(tp, index, type, limit, data); if (ret < 0) break; index += limit; data += limit; size -= limit; } else { ret = get_registers(tp, index, type, size, data); if (ret < 0) break; index += size; data += size; size = 0; break; } } if (ret == -ENODEV) rtl_set_unplug(tp); return ret; } static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data, u16 type) { int ret; u16 byteen_start, byteen_end, byen; u16 limit = 512; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; /* both size and indix must be 4 bytes align */ if ((size & 3) || !size || (index & 3) || !data) return -EPERM; if ((u32)index + (u32)size > 0xffff) return -EPERM; byteen_start = byteen & BYTE_EN_START_MASK; byteen_end = byteen & BYTE_EN_END_MASK; byen = byteen_start | (byteen_start << 4); /* Split the first DWORD if the byte_en is not 0xff */ if (byen != BYTE_EN_DWORD) { ret = set_registers(tp, index, type | byen, 4, data); if (ret < 0) goto error1; index += 4; data += 4; size -= 4; } if (size) { byen = byteen_end | (byteen_end >> 4); /* Split the last DWORD if the byte_en is not 0xff */ if (byen != BYTE_EN_DWORD) size -= 4; while (size) { if (size > limit) { ret = set_registers(tp, index, type | BYTE_EN_DWORD, limit, data); if (ret < 0) goto error1; index += limit; data += limit; size -= limit; } else { ret = set_registers(tp, index, type | BYTE_EN_DWORD, size, data); if (ret < 0) goto error1; index += size; data += size; size = 0; break; } } /* Set the last DWORD */ if (byen != BYTE_EN_DWORD) ret = set_registers(tp, index, type | byen, 4, data); } error1: if (ret == -ENODEV) rtl_set_unplug(tp); return ret; } static inline int pla_ocp_read(struct r8152 *tp, u16 index, u16 size, void *data) { return generic_ocp_read(tp, index, size, data, MCU_TYPE_PLA); } static inline int pla_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data) { return generic_ocp_write(tp, index, byteen, size, data, MCU_TYPE_PLA); } static 
inline int usb_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data) { return generic_ocp_write(tp, index, byteen, size, data, MCU_TYPE_USB); } static u32 ocp_read_dword(struct r8152 *tp, u16 type, u16 index) { __le32 data; generic_ocp_read(tp, index, sizeof(data), &data, type); return __le32_to_cpu(data); } static void ocp_write_dword(struct r8152 *tp, u16 type, u16 index, u32 data) { __le32 tmp = __cpu_to_le32(data); generic_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(tmp), &tmp, type); } static u16 ocp_read_word(struct r8152 *tp, u16 type, u16 index) { u32 data; __le32 tmp; u16 byen = BYTE_EN_WORD; u8 shift = index & 2; index &= ~3; byen <<= shift; generic_ocp_read(tp, index, sizeof(tmp), &tmp, type | byen); data = __le32_to_cpu(tmp); data >>= (shift * 8); data &= 0xffff; return (u16)data; } static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data) { u32 mask = 0xffff; __le32 tmp; u16 byen = BYTE_EN_WORD; u8 shift = index & 2; data &= mask; if (index & 2) { byen <<= shift; mask <<= (shift * 8); data <<= (shift * 8); index &= ~3; } tmp = __cpu_to_le32(data); generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type); } static u8 ocp_read_byte(struct r8152 *tp, u16 type, u16 index) { u32 data; __le32 tmp; u8 shift = index & 3; index &= ~3; generic_ocp_read(tp, index, sizeof(tmp), &tmp, type); data = __le32_to_cpu(tmp); data >>= (shift * 8); data &= 0xff; return (u8)data; } static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data) { u32 mask = 0xff; __le32 tmp; u16 byen = BYTE_EN_BYTE; u8 shift = index & 3; data &= mask; if (index & 3) { byen <<= shift; mask <<= (shift * 8); data <<= (shift * 8); index &= ~3; } tmp = __cpu_to_le32(data); generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type); } static u16 ocp_reg_read(struct r8152 *tp, u16 addr) { u16 ocp_base, ocp_index; ocp_base = addr & 0xf000; if (ocp_base != tp->ocp_base) { ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, ocp_base); tp->ocp_base = ocp_base; } ocp_index = (addr & 0x0fff) | 0xb000; return ocp_read_word(tp, MCU_TYPE_PLA, ocp_index); } static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data) { u16 ocp_base, ocp_index; ocp_base = addr & 0xf000; if (ocp_base != tp->ocp_base) { ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, ocp_base); tp->ocp_base = ocp_base; } ocp_index = (addr & 0x0fff) | 0xb000; ocp_write_word(tp, MCU_TYPE_PLA, ocp_index, data); } static inline void r8152_mdio_write(struct r8152 *tp, u32 reg_addr, u32 value) { ocp_reg_write(tp, OCP_BASE_MII + reg_addr * 2, value); } static inline int r8152_mdio_read(struct r8152 *tp, u32 reg_addr) { return ocp_reg_read(tp, OCP_BASE_MII + reg_addr * 2); } static void sram_write(struct r8152 *tp, u16 addr, u16 data) { ocp_reg_write(tp, OCP_SRAM_ADDR, addr); ocp_reg_write(tp, OCP_SRAM_DATA, data); } static u16 sram_read(struct r8152 *tp, u16 addr) { ocp_reg_write(tp, OCP_SRAM_ADDR, addr); return ocp_reg_read(tp, OCP_SRAM_DATA); } static int read_mii_word(struct net_device *netdev, int phy_id, int reg) { struct r8152 *tp = netdev_priv(netdev); int ret; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; if (phy_id != R8152_PHY_ID) return -EINVAL; ret = r8152_mdio_read(tp, reg); return ret; } static void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val) { struct r8152 *tp = netdev_priv(netdev); if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (phy_id != R8152_PHY_ID) return; r8152_mdio_write(tp, reg, val); } static int r8152_submit_rx(struct r8152 *tp, struct 
rx_agg *agg, gfp_t mem_flags); static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex, u32 advertising); static int __rtl8152_set_mac_address(struct net_device *netdev, void *p, bool in_resume) { struct r8152 *tp = netdev_priv(netdev); struct sockaddr *addr = p; int ret = -EADDRNOTAVAIL; if (!is_valid_ether_addr(addr->sa_data)) goto out1; if (!in_resume) { ret = usb_autopm_get_interface(tp->intf); if (ret < 0) goto out1; } mutex_lock(&tp->control); eth_hw_addr_set(netdev, addr->sa_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->sa_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); mutex_unlock(&tp->control); if (!in_resume) usb_autopm_put_interface(tp->intf); out1: return ret; } static int rtl8152_set_mac_address(struct net_device *netdev, void *p) { return __rtl8152_set_mac_address(netdev, p, false); } /* Devices containing proper chips can support a persistent * host system provided MAC address. * Examples of this are Dell TB15 and Dell WD15 docks */ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa) { acpi_status status; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; int ret = -EINVAL; u32 ocp_data; unsigned char buf[6]; char *mac_obj_name; acpi_object_type mac_obj_type; int mac_strlen; if (tp->lenovo_macpassthru) { mac_obj_name = "\\MACA"; mac_obj_type = ACPI_TYPE_STRING; mac_strlen = 0x16; } else { /* test for -AD variant of RTL8153 */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); if ((ocp_data & AD_MASK) == 0x1000) { /* test for MAC address pass-through bit */ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE); if ((ocp_data & PASS_THRU_MASK) != 1) { netif_dbg(tp, probe, tp->netdev, "No efuse for RTL8153-AD MAC pass through\n"); return -ENODEV; } } else { /* test for RTL8153-BND and RTL8153-BD */ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1); if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) { netif_dbg(tp, probe, tp->netdev, "Invalid variant for MAC pass through\n"); return -ENODEV; } } mac_obj_name = "\\_SB.AMAC"; mac_obj_type = ACPI_TYPE_BUFFER; mac_strlen = 0x17; } /* returns _AUXMAC_#AABBCCDDEEFF# */ status = acpi_evaluate_object(NULL, mac_obj_name, NULL, &buffer); obj = (union acpi_object *)buffer.pointer; if (!ACPI_SUCCESS(status)) return -ENODEV; if (obj->type != mac_obj_type || obj->string.length != mac_strlen) { netif_warn(tp, probe, tp->netdev, "Invalid buffer for pass-thru MAC addr: (%d, %d)\n", obj->type, obj->string.length); goto amacout; } if (strncmp(obj->string.pointer, "_AUXMAC_#", 9) != 0 || strncmp(obj->string.pointer + 0x15, "#", 1) != 0) { netif_warn(tp, probe, tp->netdev, "Invalid header when reading pass-thru MAC addr\n"); goto amacout; } ret = hex2bin(buf, obj->string.pointer + 9, 6); if (!(ret == 0 && is_valid_ether_addr(buf))) { netif_warn(tp, probe, tp->netdev, "Invalid MAC for pass-thru MAC addr: %d, %pM\n", ret, buf); ret = -EINVAL; goto amacout; } memcpy(sa->sa_data, buf, 6); tp->netdev->addr_assign_type = NET_ADDR_STOLEN; netif_info(tp, probe, tp->netdev, "Using pass-thru MAC addr %pM\n", sa->sa_data); amacout: kfree(obj); return ret; } static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr *sa) { struct net_device *dev = tp->netdev; int ret; sa->sa_family = dev->type; ret = eth_platform_get_mac_address(&tp->udev->dev, sa->sa_data); if (ret < 0) { if (tp->version == RTL_VER_01) { ret = pla_ocp_read(tp, PLA_IDR, 8, sa->sa_data); } 
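/* On chips other than RTL_VER_01, the else branch below first tries the
 * ACPI-provided pass-through address (vendor_mac_passthru_addr_read() above,
 * which expects a string of the form "_AUXMAC_#AABBCCDDEEFF#") and only then
 * falls back to the PLA_BACKUP registers.  The #if 0 block is a stand-alone
 * user-space sketch of that string format; parse_auxmac() is a hypothetical
 * helper for illustration and is not part of this driver.
 */
#if 0	/* illustration only, never compiled with the driver */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Parse "_AUXMAC_#AABBCCDDEEFF#" into six MAC bytes; returns 0 on success. */
static int parse_auxmac(const char *s, uint8_t mac[6])
{
	int i;

	if (strlen(s) != 0x16 || strncmp(s, "_AUXMAC_#", 9) || s[0x15] != '#')
		return -1;
	for (i = 0; i < 6; i++)
		if (sscanf(&s[9 + i * 2], "%2hhx", &mac[i]) != 1)
			return -1;
	return 0;
}

int main(void)
{
	uint8_t mac[6];

	if (!parse_auxmac("_AUXMAC_#001122334455#", mac))
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
#endif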
else { /* if device doesn't support MAC pass through this will * be expected to be non-zero */ ret = vendor_mac_passthru_addr_read(tp, sa); if (ret < 0) ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa->sa_data); } } if (ret < 0) { netif_err(tp, probe, dev, "Get ether addr fail\n"); } else if (!is_valid_ether_addr(sa->sa_data)) { netif_err(tp, probe, dev, "Invalid ether addr %pM\n", sa->sa_data); eth_hw_addr_random(dev); ether_addr_copy(sa->sa_data, dev->dev_addr); netif_info(tp, probe, dev, "Random ether addr %pM\n", sa->sa_data); return 0; } return ret; } static int set_ethernet_addr(struct r8152 *tp, bool in_resume) { struct net_device *dev = tp->netdev; struct sockaddr sa; int ret; ret = determine_ethernet_addr(tp, &sa); if (ret < 0) return ret; if (tp->version == RTL_VER_01) eth_hw_addr_set(dev, sa.sa_data); else ret = __rtl8152_set_mac_address(dev, &sa, in_resume); return ret; } static void read_bulk_callback(struct urb *urb) { struct net_device *netdev; int status = urb->status; struct rx_agg *agg; struct r8152 *tp; unsigned long flags; agg = urb->context; if (!agg) return; tp = agg->context; if (!tp) return; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (!test_bit(WORK_ENABLE, &tp->flags)) return; netdev = tp->netdev; /* When link down, the driver would cancel all bulks. */ /* This avoid the re-submitting bulk */ if (!netif_carrier_ok(netdev)) return; usb_mark_last_busy(tp->udev); switch (status) { case 0: if (urb->actual_length < ETH_ZLEN) break; spin_lock_irqsave(&tp->rx_lock, flags); list_add_tail(&agg->list, &tp->rx_done); spin_unlock_irqrestore(&tp->rx_lock, flags); napi_schedule(&tp->napi); return; case -ESHUTDOWN: rtl_set_unplug(tp); netif_device_detach(tp->netdev); return; case -EPROTO: urb->actual_length = 0; spin_lock_irqsave(&tp->rx_lock, flags); list_add_tail(&agg->list, &tp->rx_done); spin_unlock_irqrestore(&tp->rx_lock, flags); set_bit(RX_EPROTO, &tp->flags); schedule_delayed_work(&tp->schedule, 1); return; case -ENOENT: return; /* the urb is in unlink state */ case -ETIME: if (net_ratelimit()) netdev_warn(netdev, "maybe reset is needed?\n"); break; default: if (net_ratelimit()) netdev_warn(netdev, "Rx status %d\n", status); break; } r8152_submit_rx(tp, agg, GFP_ATOMIC); } static void write_bulk_callback(struct urb *urb) { struct net_device_stats *stats; struct net_device *netdev; struct tx_agg *agg; struct r8152 *tp; unsigned long flags; int status = urb->status; agg = urb->context; if (!agg) return; tp = agg->context; if (!tp) return; netdev = tp->netdev; stats = &netdev->stats; if (status) { if (net_ratelimit()) netdev_warn(netdev, "Tx status %d\n", status); stats->tx_errors += agg->skb_num; } else { stats->tx_packets += agg->skb_num; stats->tx_bytes += agg->skb_len; } spin_lock_irqsave(&tp->tx_lock, flags); list_add_tail(&agg->list, &tp->tx_free); spin_unlock_irqrestore(&tp->tx_lock, flags); usb_autopm_put_interface_async(tp->intf); if (!netif_carrier_ok(netdev)) return; if (!test_bit(WORK_ENABLE, &tp->flags)) return; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (!skb_queue_empty(&tp->tx_queue)) tasklet_schedule(&tp->tx_tl); } static void intr_callback(struct urb *urb) { struct r8152 *tp; __le16 *d; int status = urb->status; int res; tp = urb->context; if (!tp) return; if (!test_bit(WORK_ENABLE, &tp->flags)) return; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; switch (status) { case 0: /* success */ break; case -ECONNRESET: /* unlink */ case -ESHUTDOWN: netif_device_detach(tp->netdev); fallthrough; case -ENOENT: case -EPROTO: 
netif_info(tp, intr, tp->netdev, "Stop submitting intr, status %d\n", status); return; case -EOVERFLOW: if (net_ratelimit()) netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n"); goto resubmit; /* -EPIPE: should clear the halt */ default: netif_info(tp, intr, tp->netdev, "intr status %d\n", status); goto resubmit; } d = urb->transfer_buffer; if (INTR_LINK & __le16_to_cpu(d[0])) { if (!netif_carrier_ok(tp->netdev)) { set_bit(RTL8152_LINK_CHG, &tp->flags); schedule_delayed_work(&tp->schedule, 0); } } else { if (netif_carrier_ok(tp->netdev)) { netif_stop_queue(tp->netdev); set_bit(RTL8152_LINK_CHG, &tp->flags); schedule_delayed_work(&tp->schedule, 0); } } resubmit: res = usb_submit_urb(urb, GFP_ATOMIC); if (res == -ENODEV) { rtl_set_unplug(tp); netif_device_detach(tp->netdev); } else if (res) { netif_err(tp, intr, tp->netdev, "can't resubmit intr, status %d\n", res); } } static inline void *rx_agg_align(void *data) { return (void *)ALIGN((uintptr_t)data, RX_ALIGN); } static inline void *tx_agg_align(void *data) { return (void *)ALIGN((uintptr_t)data, TX_ALIGN); } static void free_rx_agg(struct r8152 *tp, struct rx_agg *agg) { list_del(&agg->info_list); usb_free_urb(agg->urb); put_page(agg->page); kfree(agg); atomic_dec(&tp->rx_count); } static struct rx_agg *alloc_rx_agg(struct r8152 *tp, gfp_t mflags) { struct net_device *netdev = tp->netdev; int node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1; unsigned int order = get_order(tp->rx_buf_sz); struct rx_agg *rx_agg; unsigned long flags; rx_agg = kmalloc_node(sizeof(*rx_agg), mflags, node); if (!rx_agg) return NULL; rx_agg->page = alloc_pages(mflags | __GFP_COMP | __GFP_NOWARN, order); if (!rx_agg->page) goto free_rx; rx_agg->buffer = page_address(rx_agg->page); rx_agg->urb = usb_alloc_urb(0, mflags); if (!rx_agg->urb) goto free_buf; rx_agg->context = tp; INIT_LIST_HEAD(&rx_agg->list); INIT_LIST_HEAD(&rx_agg->info_list); spin_lock_irqsave(&tp->rx_lock, flags); list_add_tail(&rx_agg->info_list, &tp->rx_info); spin_unlock_irqrestore(&tp->rx_lock, flags); atomic_inc(&tp->rx_count); return rx_agg; free_buf: __free_pages(rx_agg->page, order); free_rx: kfree(rx_agg); return NULL; } static void free_all_mem(struct r8152 *tp) { struct rx_agg *agg, *agg_next; unsigned long flags; int i; spin_lock_irqsave(&tp->rx_lock, flags); list_for_each_entry_safe(agg, agg_next, &tp->rx_info, info_list) free_rx_agg(tp, agg); spin_unlock_irqrestore(&tp->rx_lock, flags); WARN_ON(atomic_read(&tp->rx_count)); for (i = 0; i < RTL8152_MAX_TX; i++) { usb_free_urb(tp->tx_info[i].urb); tp->tx_info[i].urb = NULL; kfree(tp->tx_info[i].buffer); tp->tx_info[i].buffer = NULL; tp->tx_info[i].head = NULL; } usb_free_urb(tp->intr_urb); tp->intr_urb = NULL; kfree(tp->intr_buff); tp->intr_buff = NULL; } static int alloc_all_mem(struct r8152 *tp) { struct net_device *netdev = tp->netdev; struct usb_interface *intf = tp->intf; struct usb_host_interface *alt = intf->cur_altsetting; struct usb_host_endpoint *ep_intr = alt->endpoint + 2; int node, i; node = netdev->dev.parent ? 
dev_to_node(netdev->dev.parent) : -1; spin_lock_init(&tp->rx_lock); spin_lock_init(&tp->tx_lock); INIT_LIST_HEAD(&tp->rx_info); INIT_LIST_HEAD(&tp->tx_free); INIT_LIST_HEAD(&tp->rx_done); skb_queue_head_init(&tp->tx_queue); skb_queue_head_init(&tp->rx_queue); atomic_set(&tp->rx_count, 0); for (i = 0; i < RTL8152_MAX_RX; i++) { if (!alloc_rx_agg(tp, GFP_KERNEL)) goto err1; } for (i = 0; i < RTL8152_MAX_TX; i++) { struct urb *urb; u8 *buf; buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node); if (!buf) goto err1; if (buf != tx_agg_align(buf)) { kfree(buf); buf = kmalloc_node(agg_buf_sz + TX_ALIGN, GFP_KERNEL, node); if (!buf) goto err1; } urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { kfree(buf); goto err1; } INIT_LIST_HEAD(&tp->tx_info[i].list); tp->tx_info[i].context = tp; tp->tx_info[i].urb = urb; tp->tx_info[i].buffer = buf; tp->tx_info[i].head = tx_agg_align(buf); list_add_tail(&tp->tx_info[i].list, &tp->tx_free); } tp->intr_urb = usb_alloc_urb(0, GFP_KERNEL); if (!tp->intr_urb) goto err1; tp->intr_buff = kmalloc(INTBUFSIZE, GFP_KERNEL); if (!tp->intr_buff) goto err1; tp->intr_interval = (int)ep_intr->desc.bInterval; usb_fill_int_urb(tp->intr_urb, tp->udev, tp->pipe_intr, tp->intr_buff, INTBUFSIZE, intr_callback, tp, tp->intr_interval); return 0; err1: free_all_mem(tp); return -ENOMEM; } static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp) { struct tx_agg *agg = NULL; unsigned long flags; if (list_empty(&tp->tx_free)) return NULL; spin_lock_irqsave(&tp->tx_lock, flags); if (!list_empty(&tp->tx_free)) { struct list_head *cursor; cursor = tp->tx_free.next; list_del_init(cursor); agg = list_entry(cursor, struct tx_agg, list); } spin_unlock_irqrestore(&tp->tx_lock, flags); return agg; } /* r8152_csum_workaround() * The hw limits the value of the transport offset. When the offset is out of * range, calculate the checksum by sw. 
*/ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb, struct sk_buff_head *list) { if (skb_shinfo(skb)->gso_size) { netdev_features_t features = tp->netdev->features; struct sk_buff *segs, *seg, *next; struct sk_buff_head seg_list; features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6); segs = skb_gso_segment(skb, features); if (IS_ERR(segs) || !segs) goto drop; __skb_queue_head_init(&seg_list); skb_list_walk_safe(segs, seg, next) { skb_mark_not_on_list(seg); __skb_queue_tail(&seg_list, seg); } skb_queue_splice(&seg_list, list); dev_kfree_skb(skb); } else if (skb->ip_summed == CHECKSUM_PARTIAL) { if (skb_checksum_help(skb) < 0) goto drop; __skb_queue_head(list, skb); } else { struct net_device_stats *stats; drop: stats = &tp->netdev->stats; stats->tx_dropped++; dev_kfree_skb(skb); } } static inline void rtl_tx_vlan_tag(struct tx_desc *desc, struct sk_buff *skb) { if (skb_vlan_tag_present(skb)) { u32 opts2; opts2 = TX_VLAN_TAG | swab16(skb_vlan_tag_get(skb)); desc->opts2 |= cpu_to_le32(opts2); } } static inline void rtl_rx_vlan_tag(struct rx_desc *desc, struct sk_buff *skb) { u32 opts2 = le32_to_cpu(desc->opts2); if (opts2 & RX_VLAN_TAG) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); } static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb, u32 len) { u32 mss = skb_shinfo(skb)->gso_size; u32 opts1, opts2 = 0; int ret = TX_CSUM_SUCCESS; WARN_ON_ONCE(len > TX_LEN_MAX); opts1 = len | TX_FS | TX_LS; if (mss) { u32 transport_offset = (u32)skb_transport_offset(skb); if (transport_offset > GTTCPHO_MAX) { netif_warn(tp, tx_err, tp->netdev, "Invalid transport offset 0x%x for TSO\n", transport_offset); ret = TX_CSUM_TSO; goto unavailable; } switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP): opts1 |= GTSENDV4; break; case htons(ETH_P_IPV6): if (skb_cow_head(skb, 0)) { ret = TX_CSUM_TSO; goto unavailable; } tcp_v6_gso_csum_prep(skb); opts1 |= GTSENDV6; break; default: WARN_ON_ONCE(1); break; } opts1 |= transport_offset << GTTCPHO_SHIFT; opts2 |= min(mss, MSS_MAX) << MSS_SHIFT; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { u32 transport_offset = (u32)skb_transport_offset(skb); u8 ip_protocol; if (transport_offset > TCPHO_MAX) { netif_warn(tp, tx_err, tp->netdev, "Invalid transport offset 0x%x\n", transport_offset); ret = TX_CSUM_NONE; goto unavailable; } switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP): opts2 |= IPV4_CS; ip_protocol = ip_hdr(skb)->protocol; break; case htons(ETH_P_IPV6): opts2 |= IPV6_CS; ip_protocol = ipv6_hdr(skb)->nexthdr; break; default: ip_protocol = IPPROTO_RAW; break; } if (ip_protocol == IPPROTO_TCP) opts2 |= TCP_CS; else if (ip_protocol == IPPROTO_UDP) opts2 |= UDP_CS; else WARN_ON_ONCE(1); opts2 |= transport_offset << TCPHO_SHIFT; } desc->opts2 = cpu_to_le32(opts2); desc->opts1 = cpu_to_le32(opts1); unavailable: return ret; } static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) { struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue; int remain, ret; u8 *tx_data; __skb_queue_head_init(&skb_head); spin_lock(&tx_queue->lock); skb_queue_splice_init(tx_queue, &skb_head); spin_unlock(&tx_queue->lock); tx_data = agg->head; agg->skb_num = 0; agg->skb_len = 0; remain = agg_buf_sz; while (remain >= ETH_ZLEN + sizeof(struct tx_desc)) { struct tx_desc *tx_desc; struct sk_buff *skb; unsigned int len; skb = __skb_dequeue(&skb_head); if (!skb) break; len = skb->len + sizeof(*tx_desc); if (len > remain) { __skb_queue_head(&skb_head, skb); break; } tx_data = tx_agg_align(tx_data); 
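/* At this point tx_data has been rounded up to the TX alignment; a struct
 * tx_desc is written next, the frame is copied right after it, and remain is
 * recomputed from the next aligned offset.  The #if 0 block is a stand-alone
 * model of that packing; AGG_BUF_SZ, ALIGN_TO and DESC_SZ are stand-ins, not
 * the driver's real sizes.
 */
#if 0	/* illustration only, never compiled with the driver */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define AGG_BUF_SZ	256	/* stand-in for agg_buf_sz */
#define ALIGN_TO	4	/* stand-in for TX_ALIGN */
#define DESC_SZ		8	/* stand-in for sizeof(struct tx_desc) */

static uint8_t *align_up(uint8_t *p)
{
	return (uint8_t *)(((uintptr_t)p + ALIGN_TO - 1) &
			   ~(uintptr_t)(ALIGN_TO - 1));
}

int main(void)
{
	uint8_t buf[AGG_BUF_SZ], *pos = buf;
	const unsigned int pkt_len[] = { 60, 61, 75, 100 };
	unsigned int i;

	for (i = 0; i < sizeof(pkt_len) / sizeof(pkt_len[0]); i++) {
		int remain;

		pos = align_up(pos);
		remain = AGG_BUF_SZ - (int)(pos - buf);
		if (remain < (int)(DESC_SZ + pkt_len[i]))
			break;			/* leave the rest for the next URB */
		memset(pos, 0, DESC_SZ);		/* descriptor placeholder */
		memset(pos + DESC_SZ, 0xab, pkt_len[i]);	/* frame payload */
		pos += DESC_SZ + pkt_len[i];
		printf("packed %u bytes, buffer used %ld\n",
		       pkt_len[i], (long)(pos - buf));
	}
	return 0;
}
#endif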
tx_desc = (struct tx_desc *)tx_data; if (r8152_tx_csum(tp, tx_desc, skb, skb->len)) { r8152_csum_workaround(tp, skb, &skb_head); continue; } rtl_tx_vlan_tag(tx_desc, skb); tx_data += sizeof(*tx_desc); len = skb->len; if (skb_copy_bits(skb, 0, tx_data, len) < 0) { struct net_device_stats *stats = &tp->netdev->stats; stats->tx_dropped++; dev_kfree_skb_any(skb); tx_data -= sizeof(*tx_desc); continue; } tx_data += len; agg->skb_len += len; agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1; dev_kfree_skb_any(skb); remain = agg_buf_sz - (int)(tx_agg_align(tx_data) - agg->head); if (tp->dell_tb_rx_agg_bug) break; } if (!skb_queue_empty(&skb_head)) { spin_lock(&tx_queue->lock); skb_queue_splice(&skb_head, tx_queue); spin_unlock(&tx_queue->lock); } netif_tx_lock(tp->netdev); if (netif_queue_stopped(tp->netdev) && skb_queue_len(&tp->tx_queue) < tp->tx_qlen) netif_wake_queue(tp->netdev); netif_tx_unlock(tp->netdev); ret = usb_autopm_get_interface_async(tp->intf); if (ret < 0) goto out_tx_fill; usb_fill_bulk_urb(agg->urb, tp->udev, tp->pipe_out, agg->head, (int)(tx_data - (u8 *)agg->head), (usb_complete_t)write_bulk_callback, agg); ret = usb_submit_urb(agg->urb, GFP_ATOMIC); if (ret < 0) usb_autopm_put_interface_async(tp->intf); out_tx_fill: return ret; } static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc) { u8 checksum = CHECKSUM_NONE; u32 opts2, opts3; if (!(tp->netdev->features & NETIF_F_RXCSUM)) goto return_result; opts2 = le32_to_cpu(rx_desc->opts2); opts3 = le32_to_cpu(rx_desc->opts3); if (opts2 & RD_IPV4_CS) { if (opts3 & IPF) checksum = CHECKSUM_NONE; else if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF)) checksum = CHECKSUM_UNNECESSARY; else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF)) checksum = CHECKSUM_UNNECESSARY; } else if (opts2 & RD_IPV6_CS) { if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF)) checksum = CHECKSUM_UNNECESSARY; else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF)) checksum = CHECKSUM_UNNECESSARY; } return_result: return checksum; } static inline bool rx_count_exceed(struct r8152 *tp) { return atomic_read(&tp->rx_count) > RTL8152_MAX_RX; } static inline int agg_offset(struct rx_agg *agg, void *addr) { return (int)(addr - agg->buffer); } static struct rx_agg *rtl_get_free_rx(struct r8152 *tp, gfp_t mflags) { struct rx_agg *agg, *agg_next, *agg_free = NULL; unsigned long flags; spin_lock_irqsave(&tp->rx_lock, flags); list_for_each_entry_safe(agg, agg_next, &tp->rx_used, list) { if (page_count(agg->page) == 1) { if (!agg_free) { list_del_init(&agg->list); agg_free = agg; continue; } if (rx_count_exceed(tp)) { list_del_init(&agg->list); free_rx_agg(tp, agg); } break; } } spin_unlock_irqrestore(&tp->rx_lock, flags); if (!agg_free && atomic_read(&tp->rx_count) < tp->rx_pending) agg_free = alloc_rx_agg(tp, mflags); return agg_free; } static int rx_bottom(struct r8152 *tp, int budget) { unsigned long flags; struct list_head *cursor, *next, rx_queue; int ret = 0, work_done = 0; struct napi_struct *napi = &tp->napi; if (!skb_queue_empty(&tp->rx_queue)) { while (work_done < budget) { struct sk_buff *skb = __skb_dequeue(&tp->rx_queue); struct net_device *netdev = tp->netdev; struct net_device_stats *stats = &netdev->stats; unsigned int pkt_len; if (!skb) break; pkt_len = skb->len; napi_gro_receive(napi, skb); work_done++; stats->rx_packets++; stats->rx_bytes += pkt_len; } } if (list_empty(&tp->rx_done) || work_done >= budget) goto out1; clear_bit(RX_EPROTO, &tp->flags); INIT_LIST_HEAD(&rx_queue); spin_lock_irqsave(&tp->rx_lock, flags); list_splice_init(&tp->rx_done, &rx_queue); 
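/* The loop below walks each completed aggregate: every frame inside the URB
 * buffer is preceded by a struct rx_desc whose opts1 low bits carry the frame
 * length (including FCS), and the next descriptor starts at the next
 * RX-aligned offset.  The #if 0 block is a simplified user-space model of
 * that walk; demo_rx_desc, RX_ALIGN_TO and the frame lengths are stand-ins,
 * not the real hardware layout.
 */
#if 0	/* illustration only, never compiled with the driver */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define RX_ALIGN_TO	8	/* stand-in for RX_ALIGN */
#define FCS_LEN		4	/* ETH_FCS_LEN */

struct demo_rx_desc {		/* stand-in for struct rx_desc */
	uint32_t opts1;		/* low bits: frame length incl. FCS */
	uint32_t pad[5];
};

static uintptr_t align_up(uintptr_t v)
{
	return (v + RX_ALIGN_TO - 1) & ~(uintptr_t)(RX_ALIGN_TO - 1);
}

int main(void)
{
	uint8_t buf[512];
	unsigned int used = 0, lens[] = { 64, 98, 72 }, i;

	/* build a fake aggregate: [desc][frame incl. FCS][pad] ... */
	for (i = 0; i < 3; i++) {
		struct demo_rx_desc d = { .opts1 = lens[i] };

		memcpy(buf + used, &d, sizeof(d));
		used = (unsigned int)align_up(used + sizeof(d) + lens[i]);
	}

	/* walk it the way rx_bottom() walks agg->buffer */
	for (unsigned int off = 0; off + sizeof(struct demo_rx_desc) < used;) {
		struct demo_rx_desc d;

		memcpy(&d, buf + off, sizeof(d));
		printf("frame: %u payload bytes at offset %u\n",
		       (unsigned int)(d.opts1 - FCS_LEN), off);
		off = (unsigned int)align_up(off + sizeof(d) + d.opts1);
	}
	return 0;
}
#endif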
spin_unlock_irqrestore(&tp->rx_lock, flags); list_for_each_safe(cursor, next, &rx_queue) { struct rx_desc *rx_desc; struct rx_agg *agg, *agg_free; int len_used = 0; struct urb *urb; u8 *rx_data; /* A bulk transfer of USB may contain may packets, so the * total packets may more than the budget. Deal with all * packets in current bulk transfer, and stop to handle the * next bulk transfer until next schedule, if budget is * exhausted. */ if (work_done >= budget) break; list_del_init(cursor); agg = list_entry(cursor, struct rx_agg, list); urb = agg->urb; if (urb->status != 0 || urb->actual_length < ETH_ZLEN) goto submit; agg_free = rtl_get_free_rx(tp, GFP_ATOMIC); rx_desc = agg->buffer; rx_data = agg->buffer; len_used += sizeof(struct rx_desc); while (urb->actual_length > len_used) { struct net_device *netdev = tp->netdev; struct net_device_stats *stats = &netdev->stats; unsigned int pkt_len, rx_frag_head_sz, len; struct sk_buff *skb; bool use_frags; WARN_ON_ONCE(skb_queue_len(&tp->rx_queue) >= 1000); pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK; if (pkt_len < ETH_ZLEN) break; len_used += pkt_len; if (urb->actual_length < len_used) break; pkt_len -= ETH_FCS_LEN; len = pkt_len; rx_data += sizeof(struct rx_desc); if (!agg_free || tp->rx_copybreak > len) use_frags = false; else use_frags = true; if (use_frags) { /* If the budget is exhausted, the packet * would be queued in the driver. That is, * napi_gro_frags() wouldn't be called, so * we couldn't use napi_get_frags(). */ if (work_done >= budget) { rx_frag_head_sz = tp->rx_copybreak; skb = napi_alloc_skb(napi, rx_frag_head_sz); } else { rx_frag_head_sz = 0; skb = napi_get_frags(napi); } } else { rx_frag_head_sz = 0; skb = napi_alloc_skb(napi, len); } if (!skb) { stats->rx_dropped++; goto find_next_rx; } skb->ip_summed = r8152_rx_csum(tp, rx_desc); rtl_rx_vlan_tag(rx_desc, skb); if (use_frags) { if (rx_frag_head_sz) { memcpy(skb->data, rx_data, rx_frag_head_sz); skb_put(skb, rx_frag_head_sz); len -= rx_frag_head_sz; rx_data += rx_frag_head_sz; skb->protocol = eth_type_trans(skb, netdev); } skb_add_rx_frag(skb, 0, agg->page, agg_offset(agg, rx_data), len, SKB_DATA_ALIGN(len)); get_page(agg->page); } else { memcpy(skb->data, rx_data, len); skb_put(skb, len); skb->protocol = eth_type_trans(skb, netdev); } if (work_done < budget) { if (use_frags) napi_gro_frags(napi); else napi_gro_receive(napi, skb); work_done++; stats->rx_packets++; stats->rx_bytes += pkt_len; } else { __skb_queue_tail(&tp->rx_queue, skb); } find_next_rx: rx_data = rx_agg_align(rx_data + len + ETH_FCS_LEN); rx_desc = (struct rx_desc *)rx_data; len_used = agg_offset(agg, rx_data); len_used += sizeof(struct rx_desc); } WARN_ON(!agg_free && page_count(agg->page) > 1); if (agg_free) { spin_lock_irqsave(&tp->rx_lock, flags); if (page_count(agg->page) == 1) { list_add(&agg_free->list, &tp->rx_used); } else { list_add_tail(&agg->list, &tp->rx_used); agg = agg_free; urb = agg->urb; } spin_unlock_irqrestore(&tp->rx_lock, flags); } submit: if (!ret) { ret = r8152_submit_rx(tp, agg, GFP_ATOMIC); } else { urb->actual_length = 0; list_add_tail(&agg->list, next); } } /* Splice the remained list back to rx_done for next schedule */ if (!list_empty(&rx_queue)) { spin_lock_irqsave(&tp->rx_lock, flags); list_splice(&rx_queue, &tp->rx_done); spin_unlock_irqrestore(&tp->rx_lock, flags); } out1: return work_done; } static void tx_bottom(struct r8152 *tp) { int res; do { struct net_device *netdev = tp->netdev; struct tx_agg *agg; if (skb_queue_empty(&tp->tx_queue)) break; agg = 
r8152_get_tx_agg(tp); if (!agg) break; res = r8152_tx_agg_fill(tp, agg); if (!res) continue; if (res == -ENODEV) { rtl_set_unplug(tp); netif_device_detach(netdev); } else { struct net_device_stats *stats = &netdev->stats; unsigned long flags; netif_warn(tp, tx_err, netdev, "failed tx_urb %d\n", res); stats->tx_dropped += agg->skb_num; spin_lock_irqsave(&tp->tx_lock, flags); list_add_tail(&agg->list, &tp->tx_free); spin_unlock_irqrestore(&tp->tx_lock, flags); } } while (res == 0); } static void bottom_half(struct tasklet_struct *t) { struct r8152 *tp = from_tasklet(tp, t, tx_tl); if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (!test_bit(WORK_ENABLE, &tp->flags)) return; /* When link down, the driver would cancel all bulks. */ /* This avoid the re-submitting bulk */ if (!netif_carrier_ok(tp->netdev)) return; clear_bit(SCHEDULE_TASKLET, &tp->flags); tx_bottom(tp); } static int r8152_poll(struct napi_struct *napi, int budget) { struct r8152 *tp = container_of(napi, struct r8152, napi); int work_done; if (!budget) return 0; work_done = rx_bottom(tp, budget); if (work_done < budget) { if (!napi_complete_done(napi, work_done)) goto out; if (!list_empty(&tp->rx_done)) napi_schedule(napi); } out: return work_done; } static int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags) { int ret; /* The rx would be stopped, so skip submitting */ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) || !test_bit(WORK_ENABLE, &tp->flags) || !netif_carrier_ok(tp->netdev)) return 0; usb_fill_bulk_urb(agg->urb, tp->udev, tp->pipe_in, agg->buffer, tp->rx_buf_sz, (usb_complete_t)read_bulk_callback, agg); ret = usb_submit_urb(agg->urb, mem_flags); if (ret == -ENODEV) { rtl_set_unplug(tp); netif_device_detach(tp->netdev); } else if (ret) { struct urb *urb = agg->urb; unsigned long flags; urb->actual_length = 0; spin_lock_irqsave(&tp->rx_lock, flags); list_add_tail(&agg->list, &tp->rx_done); spin_unlock_irqrestore(&tp->rx_lock, flags); netif_err(tp, rx_err, tp->netdev, "Couldn't submit rx[%p], ret = %d\n", agg, ret); napi_schedule(&tp->napi); } return ret; } static void rtl_drop_queued_tx(struct r8152 *tp) { struct net_device_stats *stats = &tp->netdev->stats; struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue; struct sk_buff *skb; if (skb_queue_empty(tx_queue)) return; __skb_queue_head_init(&skb_head); spin_lock_bh(&tx_queue->lock); skb_queue_splice_init(tx_queue, &skb_head); spin_unlock_bh(&tx_queue->lock); while ((skb = __skb_dequeue(&skb_head))) { dev_kfree_skb(skb); stats->tx_dropped++; } } static void rtl8152_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct r8152 *tp = netdev_priv(netdev); netif_warn(tp, tx_err, netdev, "Tx timeout\n"); usb_queue_reset_device(tp->intf); } static void rtl8152_set_rx_mode(struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); if (netif_carrier_ok(netdev)) { set_bit(RTL8152_SET_RX_MODE, &tp->flags); schedule_delayed_work(&tp->schedule, 0); } } static void _rtl8152_set_rx_mode(struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); u32 mc_filter[2]; /* Multicast hash filter */ __le32 tmp[2]; u32 ocp_data; netif_stop_queue(netdev); ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data &= ~RCR_ACPT_ALL; ocp_data |= RCR_AB | RCR_APM; if (netdev->flags & IFF_PROMISC) { /* Unconditionally log net taps. 
*/ netif_notice(tp, link, netdev, "Promiscuous mode enabled\n"); ocp_data |= RCR_AM | RCR_AAP; mc_filter[1] = 0xffffffff; mc_filter[0] = 0xffffffff; } else if ((netdev->flags & IFF_MULTICAST && netdev_mc_count(netdev) > multicast_filter_limit) || (netdev->flags & IFF_ALLMULTI)) { /* Too many to filter perfectly -- accept all multicasts. */ ocp_data |= RCR_AM; mc_filter[1] = 0xffffffff; mc_filter[0] = 0xffffffff; } else { mc_filter[1] = 0; mc_filter[0] = 0; if (netdev->flags & IFF_MULTICAST) { struct netdev_hw_addr *ha; netdev_for_each_mc_addr(ha, netdev) { int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); ocp_data |= RCR_AM; } } } tmp[0] = __cpu_to_le32(swab32(mc_filter[1])); tmp[1] = __cpu_to_le32(swab32(mc_filter[0])); pla_ocp_write(tp, PLA_MAR, BYTE_EN_DWORD, sizeof(tmp), tmp); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); netif_wake_queue(netdev); } static netdev_features_t rtl8152_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { u32 mss = skb_shinfo(skb)->gso_size; int max_offset = mss ? GTTCPHO_MAX : TCPHO_MAX; if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && skb_transport_offset(skb) > max_offset) features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz) features &= ~NETIF_F_GSO_MASK; return features; } static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); skb_tx_timestamp(skb); skb_queue_tail(&tp->tx_queue, skb); if (!list_empty(&tp->tx_free)) { if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { set_bit(SCHEDULE_TASKLET, &tp->flags); schedule_delayed_work(&tp->schedule, 0); } else { usb_mark_last_busy(tp->udev); tasklet_schedule(&tp->tx_tl); } } else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen) { netif_stop_queue(netdev); } return NETDEV_TX_OK; } static void r8152b_reset_packet_filter(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_FMC); ocp_data &= ~FMC_FCR_MCU_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_FMC, ocp_data); ocp_data |= FMC_FCR_MCU_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_FMC, ocp_data); } static void rtl8152_nic_reset(struct r8152 *tp) { u32 ocp_data; int i; switch (tp->version) { case RTL_TEST_01: case RTL_VER_10: case RTL_VER_11: ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR); ocp_data &= ~CR_TE; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_BMU_RESET); ocp_data &= ~BMU_RESET_EP_IN; ocp_write_word(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data |= CDC_ECM_EN; ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR); ocp_data &= ~CR_RE; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_BMU_RESET); ocp_data |= BMU_RESET_EP_IN; ocp_write_word(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data &= ~CDC_ECM_EN; ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); break; default: ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, CR_RST); for (i = 0; i < 1000; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & CR_RST)) break; usleep_range(100, 400); } break; } } static void set_tx_qlen(struct r8152 *tp) { tp->tx_qlen = agg_buf_sz / (mtu_to_size(tp->netdev->mtu) + sizeof(struct tx_desc)); } 
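/* The perfect-filter path in _rtl8152_set_rx_mode() above hashes each
 * multicast address with ether_crc() and uses the top six CRC bits to pick
 * one of the 64 bits in PLA_MAR; the two 32-bit halves are then swapped and
 * byte-swapped (swab32) before being written.  The #if 0 block reproduces
 * that hash in user space; ether_crc32() is a local re-implementation of the
 * usual MSB-first Ethernet CRC, shown only for illustration.
 */
#if 0	/* illustration only, never compiled with the driver */
#include <stdio.h>
#include <stdint.h>

/* Ethernet CRC-32 (MSB-first register, poly 0x04C11DB7, init ~0). */
static uint32_t ether_crc32(const uint8_t *data, int len)
{
	uint32_t crc = 0xffffffff;
	int bit;

	while (len-- > 0) {
		uint8_t byte = *data++;

		for (bit = 0; bit < 8; bit++, byte >>= 1)
			crc = (crc << 1) ^
			      (((crc >> 31) ^ (byte & 1)) ? 0x04c11db7 : 0);
	}
	return crc;
}

int main(void)
{
	/* IPv4 all-hosts multicast MAC as an example address */
	const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t mc_filter[2] = { 0, 0 };
	int bit_nr = ether_crc32(mc, 6) >> 26;	/* top 6 CRC bits pick the slot */

	mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);
	printf("hash bit %d -> mc_filter[%d] bit %d\n",
	       bit_nr, bit_nr >> 5, bit_nr & 31);
	return 0;
}
#endif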
static inline u16 rtl8152_get_speed(struct r8152 *tp) { return ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHYSTATUS); } static void rtl_eee_plus_en(struct r8152 *tp, bool enable) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR); if (enable) ocp_data |= EEEP_CR_EEEP_TX; else ocp_data &= ~EEEP_CR_EEEP_TX; ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data); } static void rtl_set_eee_plus(struct r8152 *tp) { if (rtl8152_get_speed(tp) & _10bps) rtl_eee_plus_en(tp, true); else rtl_eee_plus_en(tp, false); } static void rxdy_gated_en(struct r8152 *tp, bool enable) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1); if (enable) ocp_data |= RXDY_GATED_EN; else ocp_data &= ~RXDY_GATED_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data); } static int rtl_start_rx(struct r8152 *tp) { struct rx_agg *agg, *agg_next; struct list_head tmp_list; unsigned long flags; int ret = 0, i = 0; INIT_LIST_HEAD(&tmp_list); spin_lock_irqsave(&tp->rx_lock, flags); INIT_LIST_HEAD(&tp->rx_done); INIT_LIST_HEAD(&tp->rx_used); list_splice_init(&tp->rx_info, &tmp_list); spin_unlock_irqrestore(&tp->rx_lock, flags); list_for_each_entry_safe(agg, agg_next, &tmp_list, info_list) { INIT_LIST_HEAD(&agg->list); /* Only RTL8152_MAX_RX rx_agg need to be submitted. */ if (++i > RTL8152_MAX_RX) { spin_lock_irqsave(&tp->rx_lock, flags); list_add_tail(&agg->list, &tp->rx_used); spin_unlock_irqrestore(&tp->rx_lock, flags); } else if (unlikely(ret < 0)) { spin_lock_irqsave(&tp->rx_lock, flags); list_add_tail(&agg->list, &tp->rx_done); spin_unlock_irqrestore(&tp->rx_lock, flags); } else { ret = r8152_submit_rx(tp, agg, GFP_KERNEL); } } spin_lock_irqsave(&tp->rx_lock, flags); WARN_ON(!list_empty(&tp->rx_info)); list_splice(&tmp_list, &tp->rx_info); spin_unlock_irqrestore(&tp->rx_lock, flags); return ret; } static int rtl_stop_rx(struct r8152 *tp) { struct rx_agg *agg, *agg_next; struct list_head tmp_list; unsigned long flags; INIT_LIST_HEAD(&tmp_list); /* The usb_kill_urb() couldn't be used in atomic. * Therefore, move the list of rx_info to a tmp one. * Then, list_for_each_entry_safe could be used without * spin lock. */ spin_lock_irqsave(&tp->rx_lock, flags); list_splice_init(&tp->rx_info, &tmp_list); spin_unlock_irqrestore(&tp->rx_lock, flags); list_for_each_entry_safe(agg, agg_next, &tmp_list, info_list) { /* At least RTL8152_MAX_RX rx_agg have the page_count being * equal to 1, so the other ones could be freed safely. 
*/ if (page_count(agg->page) > 1) free_rx_agg(tp, agg); else usb_kill_urb(agg->urb); } /* Move back the list of temp to the rx_info */ spin_lock_irqsave(&tp->rx_lock, flags); WARN_ON(!list_empty(&tp->rx_info)); list_splice(&tmp_list, &tp->rx_info); spin_unlock_irqrestore(&tp->rx_lock, flags); while (!skb_queue_empty(&tp->rx_queue)) dev_kfree_skb(__skb_dequeue(&tp->rx_queue)); return 0; } static void rtl_set_ifg(struct r8152 *tp, u16 speed) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR1); ocp_data &= ~IFG_MASK; if ((speed & (_10bps | _100bps)) && !(speed & FULL_DUP)) { ocp_data |= IFG_144NS; ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR1, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); ocp_data &= ~TX10MIDLE_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); } else { ocp_data |= IFG_96NS; ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR1, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); ocp_data |= TX10MIDLE_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); } } static inline void r8153b_rx_agg_chg_indicate(struct r8152 *tp) { ocp_write_byte(tp, MCU_TYPE_USB, USB_UPT_RXDMA_OWN, OWN_UPDATE | OWN_CLEAR); } static int rtl_enable(struct r8152 *tp) { u32 ocp_data; r8152b_reset_packet_filter(tp); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR); ocp_data |= CR_RE | CR_TE; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data); switch (tp->version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: case RTL_VER_07: break; default: r8153b_rx_agg_chg_indicate(tp); break; } rxdy_gated_en(tp, false); return 0; } static int rtl8152_enable(struct r8152 *tp) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; set_tx_qlen(tp); rtl_set_eee_plus(tp); return rtl_enable(tp); } static void r8153_set_rx_early_timeout(struct r8152 *tp) { u32 ocp_data = tp->coalesce / 8; switch (tp->version) { case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_TIMEOUT, ocp_data); break; case RTL_VER_08: case RTL_VER_09: case RTL_VER_14: /* The RTL8153B uses USB_RX_EXTRA_AGGR_TMR for rx timeout * primarily. For USB_RX_EARLY_TIMEOUT, we fix it to 128ns. 
*/ ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_TIMEOUT, 128 / 8); ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EXTRA_AGGR_TMR, ocp_data); break; case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_TIMEOUT, 640 / 8); ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EXTRA_AGGR_TMR, ocp_data); break; default: break; } } static void r8153_set_rx_early_size(struct r8152 *tp) { u32 ocp_data = tp->rx_buf_sz - rx_reserved_size(tp->netdev->mtu); switch (tp->version) { case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data / 4); break; case RTL_VER_08: case RTL_VER_09: case RTL_VER_14: ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data / 8); break; case RTL_TEST_01: case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data / 8); break; default: WARN_ON_ONCE(1); break; } } static int rtl8153_enable(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; set_tx_qlen(tp); rtl_set_eee_plus(tp); r8153_set_rx_early_timeout(tp); r8153_set_rx_early_size(tp); rtl_set_ifg(tp, rtl8152_get_speed(tp)); switch (tp->version) { case RTL_VER_09: case RTL_VER_14: ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); ocp_data &= ~FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); usleep_range(1000, 2000); ocp_data |= FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); break; default: break; } return rtl_enable(tp); } static void rtl_disable(struct r8152 *tp) { u32 ocp_data; int i; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); return; } ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data &= ~RCR_ACPT_ALL; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); rtl_drop_queued_tx(tp); for (i = 0; i < RTL8152_MAX_TX; i++) usb_kill_urb(tp->tx_info[i].urb); rxdy_gated_en(tp, true); for (i = 0; i < 1000; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if ((ocp_data & FIFO_EMPTY) == FIFO_EMPTY) break; usleep_range(1000, 2000); } for (i = 0; i < 1000; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0) & TCR0_TX_EMPTY) break; usleep_range(1000, 2000); } rtl_stop_rx(tp); rtl8152_nic_reset(tp); } static void r8152_power_cut_en(struct r8152 *tp, bool enable) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL); if (enable) ocp_data |= POWER_CUT; else ocp_data &= ~POWER_CUT; ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS); ocp_data &= ~RESUME_INDICATE; ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data); } static void rtl_rx_vlan_en(struct r8152 *tp, bool enable) { u32 ocp_data; switch (tp->version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: case RTL_VER_07: case RTL_VER_08: case RTL_VER_09: case RTL_VER_14: ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); if (enable) ocp_data |= CPCR_RX_VLAN; else ocp_data &= ~CPCR_RX_VLAN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); break; case RTL_TEST_01: case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: default: ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RCR1); if (enable) ocp_data |= OUTER_VLAN 
| INNER_VLAN; else ocp_data &= ~(OUTER_VLAN | INNER_VLAN); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RCR1, ocp_data); break; } } static int rtl8152_set_features(struct net_device *dev, netdev_features_t features) { netdev_features_t changed = features ^ dev->features; struct r8152 *tp = netdev_priv(dev); int ret; ret = usb_autopm_get_interface(tp->intf); if (ret < 0) goto out; mutex_lock(&tp->control); if (changed & NETIF_F_HW_VLAN_CTAG_RX) { if (features & NETIF_F_HW_VLAN_CTAG_RX) rtl_rx_vlan_en(tp, true); else rtl_rx_vlan_en(tp, false); } mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); out: return ret; } #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) static u32 __rtl_get_wol(struct r8152 *tp) { u32 ocp_data; u32 wolopts = 0; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); if (ocp_data & LINK_ON_WAKE_EN) wolopts |= WAKE_PHY; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5); if (ocp_data & UWF_EN) wolopts |= WAKE_UCAST; if (ocp_data & BWF_EN) wolopts |= WAKE_BCAST; if (ocp_data & MWF_EN) wolopts |= WAKE_MCAST; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL); if (ocp_data & MAGIC_EN) wolopts |= WAKE_MAGIC; return wolopts; } static void __rtl_set_wol(struct r8152 *tp, u32 wolopts) { u32 ocp_data; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); ocp_data &= ~LINK_ON_WAKE_EN; if (wolopts & WAKE_PHY) ocp_data |= LINK_ON_WAKE_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5); ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN); if (wolopts & WAKE_UCAST) ocp_data |= UWF_EN; if (wolopts & WAKE_BCAST) ocp_data |= BWF_EN; if (wolopts & WAKE_MCAST) ocp_data |= MWF_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL); ocp_data &= ~MAGIC_EN; if (wolopts & WAKE_MAGIC) ocp_data |= MAGIC_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data); if (wolopts & WAKE_ANY) device_set_wakeup_enable(&tp->udev->dev, true); else device_set_wakeup_enable(&tp->udev->dev, false); } static void r8153_mac_clk_speed_down(struct r8152 *tp, bool enable) { u32 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); /* MAC clock speed down */ if (enable) ocp_data |= MAC_CLK_SPDWN_EN; else ocp_data &= ~MAC_CLK_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); } static void r8156_mac_clk_spd(struct r8152 *tp, bool enable) { u32 ocp_data; /* MAC clock speed down */ if (enable) { /* aldps_spdwn_ratio, tp10_spdwn_ratio */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0x0403); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); ocp_data &= ~EEE_SPDWN_RATIO_MASK; ocp_data |= MAC_CLK_SPDWN_EN | 0x03; /* eee_spdwn_ratio */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); } else { ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); ocp_data &= ~MAC_CLK_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); } } static void r8153_u1u2en(struct r8152 *tp, bool enable) { u8 u1u2[8]; if (enable) memset(u1u2, 0xff, sizeof(u1u2)); else memset(u1u2, 0x00, sizeof(u1u2)); usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2); } static void r8153b_u1u2en(struct r8152 *tp, bool enable) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_LPM_CONFIG); if (enable) ocp_data |= LPM_U1U2_EN; else ocp_data &= 
~LPM_U1U2_EN; ocp_write_word(tp, MCU_TYPE_USB, USB_LPM_CONFIG, ocp_data); } static void r8153_u2p3en(struct r8152 *tp, bool enable) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL); if (enable) ocp_data |= U2P3_ENABLE; else ocp_data &= ~U2P3_ENABLE; ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data); } static void r8153b_ups_flags(struct r8152 *tp) { u32 ups_flags = 0; if (tp->ups_info.green) ups_flags |= UPS_FLAGS_EN_GREEN; if (tp->ups_info.aldps) ups_flags |= UPS_FLAGS_EN_ALDPS; if (tp->ups_info.eee) ups_flags |= UPS_FLAGS_EN_EEE; if (tp->ups_info.flow_control) ups_flags |= UPS_FLAGS_EN_FLOW_CTR; if (tp->ups_info.eee_ckdiv) ups_flags |= UPS_FLAGS_EN_EEE_CKDIV; if (tp->ups_info.eee_cmod_lv) ups_flags |= UPS_FLAGS_EEE_CMOD_LV_EN; if (tp->ups_info.r_tune) ups_flags |= UPS_FLAGS_R_TUNE; if (tp->ups_info._10m_ckdiv) ups_flags |= UPS_FLAGS_EN_10M_CKDIV; if (tp->ups_info.eee_plloff_100) ups_flags |= UPS_FLAGS_EEE_PLLOFF_100; if (tp->ups_info.eee_plloff_giga) ups_flags |= UPS_FLAGS_EEE_PLLOFF_GIGA; if (tp->ups_info._250m_ckdiv) ups_flags |= UPS_FLAGS_250M_CKDIV; if (tp->ups_info.ctap_short_off) ups_flags |= UPS_FLAGS_CTAP_SHORT_DIS; switch (tp->ups_info.speed_duplex) { case NWAY_10M_HALF: ups_flags |= ups_flags_speed(1); break; case NWAY_10M_FULL: ups_flags |= ups_flags_speed(2); break; case NWAY_100M_HALF: ups_flags |= ups_flags_speed(3); break; case NWAY_100M_FULL: ups_flags |= ups_flags_speed(4); break; case NWAY_1000M_FULL: ups_flags |= ups_flags_speed(5); break; case FORCE_10M_HALF: ups_flags |= ups_flags_speed(6); break; case FORCE_10M_FULL: ups_flags |= ups_flags_speed(7); break; case FORCE_100M_HALF: ups_flags |= ups_flags_speed(8); break; case FORCE_100M_FULL: ups_flags |= ups_flags_speed(9); break; default: break; } ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ups_flags); } static void r8156_ups_flags(struct r8152 *tp) { u32 ups_flags = 0; if (tp->ups_info.green) ups_flags |= UPS_FLAGS_EN_GREEN; if (tp->ups_info.aldps) ups_flags |= UPS_FLAGS_EN_ALDPS; if (tp->ups_info.eee) ups_flags |= UPS_FLAGS_EN_EEE; if (tp->ups_info.flow_control) ups_flags |= UPS_FLAGS_EN_FLOW_CTR; if (tp->ups_info.eee_ckdiv) ups_flags |= UPS_FLAGS_EN_EEE_CKDIV; if (tp->ups_info._10m_ckdiv) ups_flags |= UPS_FLAGS_EN_10M_CKDIV; if (tp->ups_info.eee_plloff_100) ups_flags |= UPS_FLAGS_EEE_PLLOFF_100; if (tp->ups_info.eee_plloff_giga) ups_flags |= UPS_FLAGS_EEE_PLLOFF_GIGA; if (tp->ups_info._250m_ckdiv) ups_flags |= UPS_FLAGS_250M_CKDIV; switch (tp->ups_info.speed_duplex) { case FORCE_10M_HALF: ups_flags |= ups_flags_speed(0); break; case FORCE_10M_FULL: ups_flags |= ups_flags_speed(1); break; case FORCE_100M_HALF: ups_flags |= ups_flags_speed(2); break; case FORCE_100M_FULL: ups_flags |= ups_flags_speed(3); break; case NWAY_10M_HALF: ups_flags |= ups_flags_speed(4); break; case NWAY_10M_FULL: ups_flags |= ups_flags_speed(5); break; case NWAY_100M_HALF: ups_flags |= ups_flags_speed(6); break; case NWAY_100M_FULL: ups_flags |= ups_flags_speed(7); break; case NWAY_1000M_FULL: ups_flags |= ups_flags_speed(8); break; case NWAY_2500M_FULL: ups_flags |= ups_flags_speed(9); break; default: break; } switch (tp->ups_info.lite_mode) { case 1: ups_flags |= 0 << 5; break; case 2: ups_flags |= 2 << 5; break; case 0: default: ups_flags |= 1 << 5; break; } ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ups_flags); } static void rtl_green_en(struct r8152 *tp, bool enable) { u16 data; data = sram_read(tp, SRAM_GREEN_CFG); if (enable) data |= GREEN_ETH_EN; else data &= ~GREEN_ETH_EN; sram_write(tp, 
SRAM_GREEN_CFG, data); tp->ups_info.green = enable; } static void r8153b_green_en(struct r8152 *tp, bool enable) { if (enable) { sram_write(tp, 0x8045, 0); /* 10M abiq&ldvbias */ sram_write(tp, 0x804d, 0x1222); /* 100M short abiq&ldvbias */ sram_write(tp, 0x805d, 0x0022); /* 1000M short abiq&ldvbias */ } else { sram_write(tp, 0x8045, 0x2444); /* 10M abiq&ldvbias */ sram_write(tp, 0x804d, 0x2444); /* 100M short abiq&ldvbias */ sram_write(tp, 0x805d, 0x2444); /* 1000M short abiq&ldvbias */ } rtl_green_en(tp, true); } static u16 r8153_phy_status(struct r8152 *tp, u16 desired) { u16 data; int i; for (i = 0; i < 500; i++) { data = ocp_reg_read(tp, OCP_PHY_STATUS); data &= PHY_STAT_MASK; if (desired) { if (data == desired) break; } else if (data == PHY_STAT_LAN_ON || data == PHY_STAT_PWRDN || data == PHY_STAT_EXT_INIT) { break; } msleep(20); if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; } return data; } static void r8153b_ups_en(struct r8152 *tp, bool enable) { u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_POWER_CUT); if (enable) { r8153b_ups_flags(tp); ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN; ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ocp_data |= UPS_FORCE_PWR_DOWN; ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); } else { ocp_data &= ~(UPS_EN | USP_PREWAKE); ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ocp_data &= ~UPS_FORCE_PWR_DOWN; ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); if (ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0) & PCUT_STATUS) { int i; for (i = 0; i < 500; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; msleep(20); } tp->rtl_ops.hw_phy_cfg(tp); rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex, tp->advertising); } } } static void r8153c_ups_en(struct r8152 *tp, bool enable) { u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_POWER_CUT); if (enable) { r8153b_ups_flags(tp); ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN; ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ocp_data |= UPS_FORCE_PWR_DOWN; ocp_data &= ~BIT(7); ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); } else { ocp_data &= ~(UPS_EN | USP_PREWAKE); ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ocp_data &= ~UPS_FORCE_PWR_DOWN; ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); if (ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0) & PCUT_STATUS) { int i; for (i = 0; i < 500; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; msleep(20); } tp->rtl_ops.hw_phy_cfg(tp); rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex, tp->advertising); } ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); ocp_data |= BIT(8); ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); } } static void r8156_ups_en(struct r8152 *tp, bool enable) { u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_POWER_CUT); if (enable) { r8156_ups_flags(tp); ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN; ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ocp_data |= 
UPS_FORCE_PWR_DOWN; ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); switch (tp->version) { case RTL_VER_13: case RTL_VER_15: ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPHY_XTAL); ocp_data &= ~OOBS_POLLING; ocp_write_byte(tp, MCU_TYPE_USB, USB_UPHY_XTAL, ocp_data); break; default: break; } } else { ocp_data &= ~(UPS_EN | USP_PREWAKE); ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ocp_data &= ~UPS_FORCE_PWR_DOWN; ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); if (ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0) & PCUT_STATUS) { tp->rtl_ops.hw_phy_cfg(tp); rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex, tp->advertising); } } } static void r8153_power_cut_en(struct r8152 *tp, bool enable) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT); if (enable) ocp_data |= PWR_EN | PHASE2_EN; else ocp_data &= ~(PWR_EN | PHASE2_EN); ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); ocp_data &= ~PCUT_STATUS; ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); } static void r8153b_power_cut_en(struct r8152 *tp, bool enable) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT); if (enable) ocp_data |= PWR_EN | PHASE2_EN; else ocp_data &= ~PWR_EN; ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); ocp_data &= ~PCUT_STATUS; ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); } static void r8153_queue_wake(struct r8152 *tp, bool enable) { u32 ocp_data; ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_INDICATE_FALG); if (enable) ocp_data |= UPCOMING_RUNTIME_D3; else ocp_data &= ~UPCOMING_RUNTIME_D3; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_INDICATE_FALG, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_SUSPEND_FLAG); ocp_data &= ~LINK_CHG_EVENT; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_SUSPEND_FLAG, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); ocp_data &= ~LINK_CHANGE_FLAG; ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); } static bool rtl_can_wakeup(struct r8152 *tp) { struct usb_device *udev = tp->udev; return (udev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_WAKEUP); } static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable) { if (enable) { u32 ocp_data; __rtl_set_wol(tp, WAKE_ANY); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); ocp_data |= LINK_OFF_WAKE_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); } else { u32 ocp_data; __rtl_set_wol(tp, tp->saved_wolopts); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); ocp_data &= ~LINK_OFF_WAKE_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); } } static void rtl8153_runtime_enable(struct r8152 *tp, bool enable) { if (enable) { r8153_u1u2en(tp, false); r8153_u2p3en(tp, false); rtl_runtime_suspend_enable(tp, true); } else { rtl_runtime_suspend_enable(tp, false); switch (tp->version) { case RTL_VER_03: case RTL_VER_04: break; case RTL_VER_05: case RTL_VER_06: default: r8153_u2p3en(tp, true); break; } r8153_u1u2en(tp, true); } } static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable) { if (enable) { r8153_queue_wake(tp, true); r8153b_u1u2en(tp, 
false); r8153_u2p3en(tp, false); rtl_runtime_suspend_enable(tp, true); r8153b_ups_en(tp, true); } else { r8153b_ups_en(tp, false); r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); if (tp->udev->speed >= USB_SPEED_SUPER) r8153b_u1u2en(tp, true); } } static void rtl8153c_runtime_enable(struct r8152 *tp, bool enable) { if (enable) { r8153_queue_wake(tp, true); r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); rtl_runtime_suspend_enable(tp, true); r8153c_ups_en(tp, true); } else { r8153c_ups_en(tp, false); r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); r8153b_u1u2en(tp, true); } } static void rtl8156_runtime_enable(struct r8152 *tp, bool enable) { if (enable) { r8153_queue_wake(tp, true); r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); rtl_runtime_suspend_enable(tp, true); } else { r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); r8153_u2p3en(tp, true); if (tp->udev->speed >= USB_SPEED_SUPER) r8153b_u1u2en(tp, true); } } static void r8153_teredo_off(struct r8152 *tp) { u32 ocp_data; switch (tp->version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: case RTL_VER_07: ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG); ocp_data &= ~(TEREDO_SEL | TEREDO_RS_EVENT_MASK | OOB_TEREDO_EN); ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data); break; case RTL_VER_08: case RTL_VER_09: case RTL_TEST_01: case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_14: case RTL_VER_15: default: /* The bit 0 ~ 7 are relative with teredo settings. They are * W1C (write 1 to clear), so set all 1 to disable it. */ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, 0xff); break; } ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE); ocp_write_word(tp, MCU_TYPE_PLA, PLA_REALWOW_TIMER, 0); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0); } static void rtl_reset_bmu(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_BMU_RESET); ocp_data &= ~(BMU_RESET_EP_IN | BMU_RESET_EP_OUT); ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data); ocp_data |= BMU_RESET_EP_IN | BMU_RESET_EP_OUT; ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data); } /* Clear the bp to stop the firmware before loading a new one */ static void rtl_clear_bp(struct r8152 *tp, u16 type) { u16 bp[16] = {0}; u16 bp_num; switch (tp->version) { case RTL_VER_08: case RTL_VER_09: case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: if (type == MCU_TYPE_USB) { ocp_write_word(tp, MCU_TYPE_USB, USB_BP2_EN, 0); bp_num = 16; break; } fallthrough; case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: ocp_write_byte(tp, type, PLA_BP_EN, 0); fallthrough; case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: bp_num = 8; break; case RTL_VER_14: default: ocp_write_word(tp, type, USB_BP2_EN, 0); bp_num = 16; break; } generic_ocp_write(tp, PLA_BP_0, BYTE_EN_DWORD, bp_num << 1, bp, type); /* wait 3 ms to make sure the firmware is stopped */ usleep_range(3000, 6000); ocp_write_word(tp, type, PLA_BP_BA, 0); } static inline void rtl_reset_ocp_base(struct r8152 *tp) { tp->ocp_base = -1; } static int rtl_phy_patch_request(struct r8152 *tp, bool request, bool wait) { u16 data, check; int i; data = ocp_reg_read(tp, OCP_PHY_PATCH_CMD); if (request) { data |= PATCH_REQUEST; check = 0; } else { data &= ~PATCH_REQUEST; check = PATCH_READY; } ocp_reg_write(tp, OCP_PHY_PATCH_CMD, data); for (i = 0; wait && i < 5000; i++) { u32 ocp_data; 
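/* This loop polls OCP_PHY_PATCH_STAT up to 5000 times, sleeping 1-2 ms per
 * pass: when requesting the patch window it waits for PATCH_READY to be set,
 * and when releasing it waits for the bit to clear.  The #if 0 block models
 * that request/ready handshake against a fake device; the register names and
 * bit values there are hypothetical stand-ins, not the PHY's real layout.
 */
#if 0	/* illustration only, never compiled with the driver */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define PATCH_REQ	(1u << 4)	/* hypothetical bit positions */
#define PATCH_RDY	(1u << 6)

static uint16_t fake_cmd, fake_stat;	/* stand-ins for the two PHY registers */

static void fake_hw_tick(void)
{
	/* a real PHY raises/drops READY some time after REQUEST changes */
	fake_stat = (fake_cmd & PATCH_REQ) ? PATCH_RDY : 0;
}

static int patch_request(bool request, int max_polls)
{
	uint16_t want = request ? PATCH_RDY : 0;
	int i;

	if (request)
		fake_cmd |= PATCH_REQ;
	else
		fake_cmd &= ~PATCH_REQ;

	for (i = 0; i < max_polls; i++) {
		fake_hw_tick();			/* stands in for sleep + register read */
		if ((fake_stat & PATCH_RDY) == want)
			return 0;
	}
	return -1;				/* timed out, like -ETIME above */
}

int main(void)
{
	printf("request: %d\n", patch_request(true, 5000));
	printf("release: %d\n", patch_request(false, 5000));
	return 0;
}
#endif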
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; usleep_range(1000, 2000); ocp_data = ocp_reg_read(tp, OCP_PHY_PATCH_STAT); if ((ocp_data & PATCH_READY) ^ check) break; } if (request && wait && !(ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY)) { dev_err(&tp->intf->dev, "PHY patch request fail\n"); rtl_phy_patch_request(tp, false, false); return -ETIME; } else { return 0; } } static void rtl_patch_key_set(struct r8152 *tp, u16 key_addr, u16 patch_key) { if (patch_key && key_addr) { sram_write(tp, key_addr, patch_key); sram_write(tp, SRAM_PHY_LOCK, PHY_PATCH_LOCK); } else if (key_addr) { u16 data; sram_write(tp, 0x0000, 0x0000); data = ocp_reg_read(tp, OCP_PHY_LOCK); data &= ~PATCH_LOCK; ocp_reg_write(tp, OCP_PHY_LOCK, data); sram_write(tp, key_addr, 0x0000); } else { WARN_ON_ONCE(1); } } static int rtl_pre_ram_code(struct r8152 *tp, u16 key_addr, u16 patch_key, bool wait) { if (rtl_phy_patch_request(tp, true, wait)) return -ETIME; rtl_patch_key_set(tp, key_addr, patch_key); return 0; } static int rtl_post_ram_code(struct r8152 *tp, u16 key_addr, bool wait) { rtl_patch_key_set(tp, key_addr, 0); rtl_phy_patch_request(tp, false, wait); return 0; } static bool rtl8152_is_fw_phy_speed_up_ok(struct r8152 *tp, struct fw_phy_speed_up *phy) { u16 fw_offset; u32 length; bool rc = false; switch (tp->version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: case RTL_VER_07: case RTL_VER_08: case RTL_VER_09: case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_14: goto out; case RTL_VER_13: case RTL_VER_15: default: break; } fw_offset = __le16_to_cpu(phy->fw_offset); length = __le32_to_cpu(phy->blk_hdr.length); if (fw_offset < sizeof(*phy) || length <= fw_offset) { dev_err(&tp->intf->dev, "invalid fw_offset\n"); goto out; } length -= fw_offset; if (length & 3) { dev_err(&tp->intf->dev, "invalid block length\n"); goto out; } if (__le16_to_cpu(phy->fw_reg) != 0x9A00) { dev_err(&tp->intf->dev, "invalid register to load firmware\n"); goto out; } rc = true; out: return rc; } static bool rtl8152_is_fw_phy_ver_ok(struct r8152 *tp, struct fw_phy_ver *ver) { bool rc = false; switch (tp->version) { case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: break; default: goto out; } if (__le32_to_cpu(ver->blk_hdr.length) != sizeof(*ver)) { dev_err(&tp->intf->dev, "invalid block length\n"); goto out; } if (__le16_to_cpu(ver->ver.addr) != SRAM_GPHY_FW_VER) { dev_err(&tp->intf->dev, "invalid phy ver addr\n"); goto out; } rc = true; out: return rc; } static bool rtl8152_is_fw_phy_fixup_ok(struct r8152 *tp, struct fw_phy_fixup *fix) { bool rc = false; switch (tp->version) { case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: break; default: goto out; } if (__le32_to_cpu(fix->blk_hdr.length) != sizeof(*fix)) { dev_err(&tp->intf->dev, "invalid block length\n"); goto out; } if (__le16_to_cpu(fix->setting.addr) != OCP_PHY_PATCH_CMD || __le16_to_cpu(fix->setting.data) != BIT(7)) { dev_err(&tp->intf->dev, "invalid phy fixup\n"); goto out; } rc = true; out: return rc; } static bool rtl8152_is_fw_phy_union_ok(struct r8152 *tp, struct fw_phy_union *phy) { u16 fw_offset; u32 length; bool rc = false; switch (tp->version) { case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: break; default: goto out; } fw_offset = __le16_to_cpu(phy->fw_offset); length = __le32_to_cpu(phy->blk_hdr.length); if (fw_offset < sizeof(*phy) || length <= fw_offset) { 
dev_err(&tp->intf->dev, "invalid fw_offset\n"); goto out; } length -= fw_offset; if (length & 1) { dev_err(&tp->intf->dev, "invalid block length\n"); goto out; } if (phy->pre_num > 2) { dev_err(&tp->intf->dev, "invalid pre_num %d\n", phy->pre_num); goto out; } if (phy->bp_num > 8) { dev_err(&tp->intf->dev, "invalid bp_num %d\n", phy->bp_num); goto out; } rc = true; out: return rc; } static bool rtl8152_is_fw_phy_nc_ok(struct r8152 *tp, struct fw_phy_nc *phy) { u32 length; u16 fw_offset, fw_reg, ba_reg, patch_en_addr, mode_reg, bp_start; bool rc = false; switch (tp->version) { case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: fw_reg = 0xa014; ba_reg = 0xa012; patch_en_addr = 0xa01a; mode_reg = 0xb820; bp_start = 0xa000; break; default: goto out; } fw_offset = __le16_to_cpu(phy->fw_offset); if (fw_offset < sizeof(*phy)) { dev_err(&tp->intf->dev, "fw_offset too small\n"); goto out; } length = __le32_to_cpu(phy->blk_hdr.length); if (length < fw_offset) { dev_err(&tp->intf->dev, "invalid fw_offset\n"); goto out; } length -= __le16_to_cpu(phy->fw_offset); if (!length || (length & 1)) { dev_err(&tp->intf->dev, "invalid block length\n"); goto out; } if (__le16_to_cpu(phy->fw_reg) != fw_reg) { dev_err(&tp->intf->dev, "invalid register to load firmware\n"); goto out; } if (__le16_to_cpu(phy->ba_reg) != ba_reg) { dev_err(&tp->intf->dev, "invalid base address register\n"); goto out; } if (__le16_to_cpu(phy->patch_en_addr) != patch_en_addr) { dev_err(&tp->intf->dev, "invalid patch mode enabled register\n"); goto out; } if (__le16_to_cpu(phy->mode_reg) != mode_reg) { dev_err(&tp->intf->dev, "invalid register to switch the mode\n"); goto out; } if (__le16_to_cpu(phy->bp_start) != bp_start) { dev_err(&tp->intf->dev, "invalid start register of break point\n"); goto out; } if (__le16_to_cpu(phy->bp_num) > 4) { dev_err(&tp->intf->dev, "invalid break point number\n"); goto out; } rc = true; out: return rc; } static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac) { u16 fw_reg, bp_ba_addr, bp_en_addr, bp_start, fw_offset; bool rc = false; u32 length, type; int i, max_bp; type = __le32_to_cpu(mac->blk_hdr.type); if (type == RTL_FW_PLA) { switch (tp->version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: fw_reg = 0xf800; bp_ba_addr = PLA_BP_BA; bp_en_addr = 0; bp_start = PLA_BP_0; max_bp = 8; break; case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: case RTL_VER_08: case RTL_VER_09: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: fw_reg = 0xf800; bp_ba_addr = PLA_BP_BA; bp_en_addr = PLA_BP_EN; bp_start = PLA_BP_0; max_bp = 8; break; case RTL_VER_14: fw_reg = 0xf800; bp_ba_addr = PLA_BP_BA; bp_en_addr = USB_BP2_EN; bp_start = PLA_BP_0; max_bp = 16; break; default: goto out; } } else if (type == RTL_FW_USB) { switch (tp->version) { case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: fw_reg = 0xf800; bp_ba_addr = USB_BP_BA; bp_en_addr = USB_BP_EN; bp_start = USB_BP_0; max_bp = 8; break; case RTL_VER_08: case RTL_VER_09: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_14: case RTL_VER_15: fw_reg = 0xe600; bp_ba_addr = USB_BP_BA; bp_en_addr = USB_BP2_EN; bp_start = USB_BP_0; max_bp = 16; break; case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: default: goto out; } } else { goto out; } fw_offset = __le16_to_cpu(mac->fw_offset); if (fw_offset < sizeof(*mac)) { dev_err(&tp->intf->dev, "fw_offset too small\n"); goto out; } length = __le32_to_cpu(mac->blk_hdr.length); if (length < fw_offset) { dev_err(&tp->intf->dev, "invalid 
fw_offset\n"); goto out; } length -= fw_offset; if (length < 4 || (length & 3)) { dev_err(&tp->intf->dev, "invalid block length\n"); goto out; } if (__le16_to_cpu(mac->fw_reg) != fw_reg) { dev_err(&tp->intf->dev, "invalid register to load firmware\n"); goto out; } if (__le16_to_cpu(mac->bp_ba_addr) != bp_ba_addr) { dev_err(&tp->intf->dev, "invalid base address register\n"); goto out; } if (__le16_to_cpu(mac->bp_en_addr) != bp_en_addr) { dev_err(&tp->intf->dev, "invalid enabled mask register\n"); goto out; } if (__le16_to_cpu(mac->bp_start) != bp_start) { dev_err(&tp->intf->dev, "invalid start register of break point\n"); goto out; } if (__le16_to_cpu(mac->bp_num) > max_bp) { dev_err(&tp->intf->dev, "invalid break point number\n"); goto out; } for (i = __le16_to_cpu(mac->bp_num); i < max_bp; i++) { if (mac->bp[i]) { dev_err(&tp->intf->dev, "unused bp%u is not zero\n", i); goto out; } } rc = true; out: return rc; } /* Verify the checksum for the firmware file. It is calculated from the version * field to the end of the file. Compare the result with the checksum field to * make sure the file is correct. */ static long rtl8152_fw_verify_checksum(struct r8152 *tp, struct fw_header *fw_hdr, size_t size) { unsigned char checksum[sizeof(fw_hdr->checksum)]; struct crypto_shash *alg; struct shash_desc *sdesc; size_t len; long rc; alg = crypto_alloc_shash("sha256", 0, 0); if (IS_ERR(alg)) { rc = PTR_ERR(alg); goto out; } if (crypto_shash_digestsize(alg) != sizeof(fw_hdr->checksum)) { rc = -EFAULT; dev_err(&tp->intf->dev, "digestsize incorrect (%u)\n", crypto_shash_digestsize(alg)); goto free_shash; } len = sizeof(*sdesc) + crypto_shash_descsize(alg); sdesc = kmalloc(len, GFP_KERNEL); if (!sdesc) { rc = -ENOMEM; goto free_shash; } sdesc->tfm = alg; len = size - sizeof(fw_hdr->checksum); rc = crypto_shash_digest(sdesc, fw_hdr->version, len, checksum); kfree(sdesc); if (rc) goto free_shash; if (memcmp(fw_hdr->checksum, checksum, sizeof(fw_hdr->checksum))) { dev_err(&tp->intf->dev, "checksum fail\n"); rc = -EFAULT; } free_shash: crypto_free_shash(alg); out: return rc; } static long rtl8152_check_firmware(struct r8152 *tp, struct rtl_fw *rtl_fw) { const struct firmware *fw = rtl_fw->fw; struct fw_header *fw_hdr = (struct fw_header *)fw->data; unsigned long fw_flags = 0; long ret = -EFAULT; int i; if (fw->size < sizeof(*fw_hdr)) { dev_err(&tp->intf->dev, "file too small\n"); goto fail; } ret = rtl8152_fw_verify_checksum(tp, fw_hdr, fw->size); if (ret) goto fail; ret = -EFAULT; for (i = sizeof(*fw_hdr); i < fw->size;) { struct fw_block *block = (struct fw_block *)&fw->data[i]; u32 type; if ((i + sizeof(*block)) > fw->size) goto fail; type = __le32_to_cpu(block->type); switch (type) { case RTL_FW_END: if (__le32_to_cpu(block->length) != sizeof(*block)) goto fail; goto fw_end; case RTL_FW_PLA: if (test_bit(FW_FLAGS_PLA, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PLA firmware encountered"); goto fail; } if (!rtl8152_is_fw_mac_ok(tp, (struct fw_mac *)block)) { dev_err(&tp->intf->dev, "check PLA firmware failed\n"); goto fail; } __set_bit(FW_FLAGS_PLA, &fw_flags); break; case RTL_FW_USB: if (test_bit(FW_FLAGS_USB, &fw_flags)) { dev_err(&tp->intf->dev, "multiple USB firmware encountered"); goto fail; } if (!rtl8152_is_fw_mac_ok(tp, (struct fw_mac *)block)) { dev_err(&tp->intf->dev, "check USB firmware failed\n"); goto fail; } __set_bit(FW_FLAGS_USB, &fw_flags); break; case RTL_FW_PHY_START: if (test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_NC, &fw_flags) || test_bit(FW_FLAGS_NC1, &fw_flags) || 
test_bit(FW_FLAGS_NC2, &fw_flags) || test_bit(FW_FLAGS_UC2, &fw_flags) || test_bit(FW_FLAGS_UC, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "check PHY_START fail\n"); goto fail; } if (__le32_to_cpu(block->length) != sizeof(struct fw_phy_patch_key)) { dev_err(&tp->intf->dev, "Invalid length for PHY_START\n"); goto fail; } __set_bit(FW_FLAGS_START, &fw_flags); break; case RTL_FW_PHY_STOP: if (test_bit(FW_FLAGS_STOP, &fw_flags) || !test_bit(FW_FLAGS_START, &fw_flags)) { dev_err(&tp->intf->dev, "Check PHY_STOP fail\n"); goto fail; } if (__le32_to_cpu(block->length) != sizeof(*block)) { dev_err(&tp->intf->dev, "Invalid length for PHY_STOP\n"); goto fail; } __set_bit(FW_FLAGS_STOP, &fw_flags); break; case RTL_FW_PHY_NC: if (!test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "check PHY_NC fail\n"); goto fail; } if (test_bit(FW_FLAGS_NC, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY NC encountered\n"); goto fail; } if (!rtl8152_is_fw_phy_nc_ok(tp, (struct fw_phy_nc *)block)) { dev_err(&tp->intf->dev, "check PHY NC firmware failed\n"); goto fail; } __set_bit(FW_FLAGS_NC, &fw_flags); break; case RTL_FW_PHY_UNION_NC: if (!test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_NC1, &fw_flags) || test_bit(FW_FLAGS_NC2, &fw_flags) || test_bit(FW_FLAGS_UC2, &fw_flags) || test_bit(FW_FLAGS_UC, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "PHY_UNION_NC out of order\n"); goto fail; } if (test_bit(FW_FLAGS_NC, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY_UNION_NC encountered\n"); goto fail; } if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { dev_err(&tp->intf->dev, "check PHY_UNION_NC failed\n"); goto fail; } __set_bit(FW_FLAGS_NC, &fw_flags); break; case RTL_FW_PHY_UNION_NC1: if (!test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_NC2, &fw_flags) || test_bit(FW_FLAGS_UC2, &fw_flags) || test_bit(FW_FLAGS_UC, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "PHY_UNION_NC1 out of order\n"); goto fail; } if (test_bit(FW_FLAGS_NC1, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY NC1 encountered\n"); goto fail; } if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { dev_err(&tp->intf->dev, "check PHY_UNION_NC1 failed\n"); goto fail; } __set_bit(FW_FLAGS_NC1, &fw_flags); break; case RTL_FW_PHY_UNION_NC2: if (!test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_UC2, &fw_flags) || test_bit(FW_FLAGS_UC, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "PHY_UNION_NC2 out of order\n"); goto fail; } if (test_bit(FW_FLAGS_NC2, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY NC2 encountered\n"); goto fail; } if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { dev_err(&tp->intf->dev, "check PHY_UNION_NC2 failed\n"); goto fail; } __set_bit(FW_FLAGS_NC2, &fw_flags); break; case RTL_FW_PHY_UNION_UC2: if (!test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_UC, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "PHY_UNION_UC2 out of order\n"); goto fail; } if (test_bit(FW_FLAGS_UC2, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY UC2 encountered\n"); goto fail; } if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { dev_err(&tp->intf->dev, "check PHY_UNION_UC2 failed\n"); goto fail; } __set_bit(FW_FLAGS_UC2, &fw_flags); break; case RTL_FW_PHY_UNION_UC: if (!test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) 
{ dev_err(&tp->intf->dev, "PHY_UNION_UC out of order\n"); goto fail; } if (test_bit(FW_FLAGS_UC, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY UC encountered\n"); goto fail; } if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { dev_err(&tp->intf->dev, "check PHY_UNION_UC failed\n"); goto fail; } __set_bit(FW_FLAGS_UC, &fw_flags); break; case RTL_FW_PHY_UNION_MISC: if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { dev_err(&tp->intf->dev, "check RTL_FW_PHY_UNION_MISC failed\n"); goto fail; } break; case RTL_FW_PHY_FIXUP: if (!rtl8152_is_fw_phy_fixup_ok(tp, (struct fw_phy_fixup *)block)) { dev_err(&tp->intf->dev, "check PHY fixup failed\n"); goto fail; } break; case RTL_FW_PHY_SPEED_UP: if (test_bit(FW_FLAGS_SPEED_UP, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY firmware encountered"); goto fail; } if (!rtl8152_is_fw_phy_speed_up_ok(tp, (struct fw_phy_speed_up *)block)) { dev_err(&tp->intf->dev, "check PHY speed up failed\n"); goto fail; } __set_bit(FW_FLAGS_SPEED_UP, &fw_flags); break; case RTL_FW_PHY_VER: if (test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_NC, &fw_flags) || test_bit(FW_FLAGS_NC1, &fw_flags) || test_bit(FW_FLAGS_NC2, &fw_flags) || test_bit(FW_FLAGS_UC2, &fw_flags) || test_bit(FW_FLAGS_UC, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "Invalid order to set PHY version\n"); goto fail; } if (test_bit(FW_FLAGS_VER, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY version encountered"); goto fail; } if (!rtl8152_is_fw_phy_ver_ok(tp, (struct fw_phy_ver *)block)) { dev_err(&tp->intf->dev, "check PHY version failed\n"); goto fail; } __set_bit(FW_FLAGS_VER, &fw_flags); break; default: dev_warn(&tp->intf->dev, "Unknown type %u is found\n", type); break; } /* next block */ i += ALIGN(__le32_to_cpu(block->length), 8); } fw_end: if (test_bit(FW_FLAGS_START, &fw_flags) && !test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "without PHY_STOP\n"); goto fail; } return 0; fail: return ret; } static void rtl_ram_code_speed_up(struct r8152 *tp, struct fw_phy_speed_up *phy, bool wait) { u32 len; u8 *data; rtl_reset_ocp_base(tp); if (sram_read(tp, SRAM_GPHY_FW_VER) >= __le16_to_cpu(phy->version)) { dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n"); return; } len = __le32_to_cpu(phy->blk_hdr.length); len -= __le16_to_cpu(phy->fw_offset); data = (u8 *)phy + __le16_to_cpu(phy->fw_offset); if (rtl_phy_patch_request(tp, true, wait)) return; while (len) { u32 ocp_data, size; int i; if (len < 2048) size = len; else size = 2048; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL); ocp_data |= GPHY_PATCH_DONE | BACKUP_RESTRORE; ocp_write_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL, ocp_data); generic_ocp_write(tp, __le16_to_cpu(phy->fw_reg), 0xff, size, data, MCU_TYPE_USB); data += size; len -= size; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL); ocp_data |= POL_GPHY_PATCH; ocp_write_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL, ocp_data); for (i = 0; i < 1000; i++) { if (!(ocp_read_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL) & POL_GPHY_PATCH)) break; } if (i == 1000) { dev_err(&tp->intf->dev, "ram code speedup mode timeout\n"); break; } } rtl_reset_ocp_base(tp); rtl_phy_patch_request(tp, false, wait); if (sram_read(tp, SRAM_GPHY_FW_VER) == __le16_to_cpu(phy->version)) dev_dbg(&tp->intf->dev, "successfully applied %s\n", phy->info); else dev_err(&tp->intf->dev, "ram code speedup mode fail\n"); } static int rtl8152_fw_phy_ver(struct r8152 *tp, struct fw_phy_ver *phy_ver) { u16 
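/* rtl8152_check_firmware() above treats the file as a sequence of
 * type/length blocks: read the fw_block header at offset i, dispatch on its
 * type, then advance by the block length rounded up to 8 bytes until the
 * RTL_FW_END terminator.  A minimal walk of that container format with the
 * per-type validation elided; fw_walk_blocks() is a hypothetical helper.
 */
static void fw_walk_blocks(const struct firmware *fw)
{
	size_t i;

	for (i = sizeof(struct fw_header); i + sizeof(struct fw_block) <= fw->size;) {
		const struct fw_block *block =
			(const struct fw_block *)&fw->data[i];

		if (__le32_to_cpu(block->type) == RTL_FW_END)
			break;				/* terminator block */
		/* ... per-type checks and bookkeeping go here ... */
		i += ALIGN(__le32_to_cpu(block->length), 8);	/* 8-byte aligned */
	}
}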
ver_addr, ver; ver_addr = __le16_to_cpu(phy_ver->ver.addr); ver = __le16_to_cpu(phy_ver->ver.data); rtl_reset_ocp_base(tp); if (sram_read(tp, ver_addr) >= ver) { dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n"); return 0; } sram_write(tp, ver_addr, ver); dev_dbg(&tp->intf->dev, "PHY firmware version %x\n", ver); return ver; } static void rtl8152_fw_phy_fixup(struct r8152 *tp, struct fw_phy_fixup *fix) { u16 addr, data; rtl_reset_ocp_base(tp); addr = __le16_to_cpu(fix->setting.addr); data = ocp_reg_read(tp, addr); switch (__le16_to_cpu(fix->bit_cmd)) { case FW_FIXUP_AND: data &= __le16_to_cpu(fix->setting.data); break; case FW_FIXUP_OR: data |= __le16_to_cpu(fix->setting.data); break; case FW_FIXUP_NOT: data &= ~__le16_to_cpu(fix->setting.data); break; case FW_FIXUP_XOR: data ^= __le16_to_cpu(fix->setting.data); break; default: return; } ocp_reg_write(tp, addr, data); dev_dbg(&tp->intf->dev, "applied ocp %x %x\n", addr, data); } static void rtl8152_fw_phy_union_apply(struct r8152 *tp, struct fw_phy_union *phy) { __le16 *data; u32 length; int i, num; rtl_reset_ocp_base(tp); num = phy->pre_num; for (i = 0; i < num; i++) sram_write(tp, __le16_to_cpu(phy->pre_set[i].addr), __le16_to_cpu(phy->pre_set[i].data)); length = __le32_to_cpu(phy->blk_hdr.length); length -= __le16_to_cpu(phy->fw_offset); num = length / 2; data = (__le16 *)((u8 *)phy + __le16_to_cpu(phy->fw_offset)); ocp_reg_write(tp, OCP_SRAM_ADDR, __le16_to_cpu(phy->fw_reg)); for (i = 0; i < num; i++) ocp_reg_write(tp, OCP_SRAM_DATA, __le16_to_cpu(data[i])); num = phy->bp_num; for (i = 0; i < num; i++) sram_write(tp, __le16_to_cpu(phy->bp[i].addr), __le16_to_cpu(phy->bp[i].data)); if (phy->bp_num && phy->bp_en.addr) sram_write(tp, __le16_to_cpu(phy->bp_en.addr), __le16_to_cpu(phy->bp_en.data)); dev_dbg(&tp->intf->dev, "successfully applied %s\n", phy->info); } static void rtl8152_fw_phy_nc_apply(struct r8152 *tp, struct fw_phy_nc *phy) { u16 mode_reg, bp_index; u32 length, i, num; __le16 *data; rtl_reset_ocp_base(tp); mode_reg = __le16_to_cpu(phy->mode_reg); sram_write(tp, mode_reg, __le16_to_cpu(phy->mode_pre)); sram_write(tp, __le16_to_cpu(phy->ba_reg), __le16_to_cpu(phy->ba_data)); length = __le32_to_cpu(phy->blk_hdr.length); length -= __le16_to_cpu(phy->fw_offset); num = length / 2; data = (__le16 *)((u8 *)phy + __le16_to_cpu(phy->fw_offset)); ocp_reg_write(tp, OCP_SRAM_ADDR, __le16_to_cpu(phy->fw_reg)); for (i = 0; i < num; i++) ocp_reg_write(tp, OCP_SRAM_DATA, __le16_to_cpu(data[i])); sram_write(tp, __le16_to_cpu(phy->patch_en_addr), __le16_to_cpu(phy->patch_en_value)); bp_index = __le16_to_cpu(phy->bp_start); num = __le16_to_cpu(phy->bp_num); for (i = 0; i < num; i++) { sram_write(tp, bp_index, __le16_to_cpu(phy->bp[i])); bp_index += 2; } sram_write(tp, mode_reg, __le16_to_cpu(phy->mode_post)); dev_dbg(&tp->intf->dev, "successfully applied %s\n", phy->info); } static void rtl8152_fw_mac_apply(struct r8152 *tp, struct fw_mac *mac) { u16 bp_en_addr, type, fw_ver_reg; u32 length; u8 *data; switch (__le32_to_cpu(mac->blk_hdr.type)) { case RTL_FW_PLA: type = MCU_TYPE_PLA; break; case RTL_FW_USB: type = MCU_TYPE_USB; break; default: return; } fw_ver_reg = __le16_to_cpu(mac->fw_ver_reg); if (fw_ver_reg && ocp_read_byte(tp, MCU_TYPE_USB, fw_ver_reg) >= mac->fw_ver_data) { dev_dbg(&tp->intf->dev, "%s firmware has been the newest\n", type ? "PLA" : "USB"); return; } rtl_clear_bp(tp, type); /* Enable backup/restore of MACDBG. This is required after clearing PLA * break points and before applying the PLA firmware. 
*/ if (tp->version == RTL_VER_04 && type == MCU_TYPE_PLA && !(ocp_read_word(tp, MCU_TYPE_PLA, PLA_MACDBG_POST) & DEBUG_OE)) { ocp_write_word(tp, MCU_TYPE_PLA, PLA_MACDBG_PRE, DEBUG_LTSSM); ocp_write_word(tp, MCU_TYPE_PLA, PLA_MACDBG_POST, DEBUG_LTSSM); } length = __le32_to_cpu(mac->blk_hdr.length); length -= __le16_to_cpu(mac->fw_offset); data = (u8 *)mac; data += __le16_to_cpu(mac->fw_offset); if (generic_ocp_write(tp, __le16_to_cpu(mac->fw_reg), 0xff, length, data, type) < 0) { dev_err(&tp->intf->dev, "Write %s fw fail\n", type ? "PLA" : "USB"); return; } ocp_write_word(tp, type, __le16_to_cpu(mac->bp_ba_addr), __le16_to_cpu(mac->bp_ba_value)); if (generic_ocp_write(tp, __le16_to_cpu(mac->bp_start), BYTE_EN_DWORD, ALIGN(__le16_to_cpu(mac->bp_num) << 1, 4), mac->bp, type) < 0) { dev_err(&tp->intf->dev, "Write %s bp fail\n", type ? "PLA" : "USB"); return; } bp_en_addr = __le16_to_cpu(mac->bp_en_addr); if (bp_en_addr) ocp_write_word(tp, type, bp_en_addr, __le16_to_cpu(mac->bp_en_value)); if (fw_ver_reg) ocp_write_byte(tp, MCU_TYPE_USB, fw_ver_reg, mac->fw_ver_data); dev_dbg(&tp->intf->dev, "successfully applied %s\n", mac->info); } static void rtl8152_apply_firmware(struct r8152 *tp, bool power_cut) { struct rtl_fw *rtl_fw = &tp->rtl_fw; const struct firmware *fw; struct fw_header *fw_hdr; struct fw_phy_patch_key *key; u16 key_addr = 0; int i, patch_phy = 1; if (IS_ERR_OR_NULL(rtl_fw->fw)) return; fw = rtl_fw->fw; fw_hdr = (struct fw_header *)fw->data; if (rtl_fw->pre_fw) rtl_fw->pre_fw(tp); for (i = offsetof(struct fw_header, blocks); i < fw->size;) { struct fw_block *block = (struct fw_block *)&fw->data[i]; switch (__le32_to_cpu(block->type)) { case RTL_FW_END: goto post_fw; case RTL_FW_PLA: case RTL_FW_USB: rtl8152_fw_mac_apply(tp, (struct fw_mac *)block); break; case RTL_FW_PHY_START: if (!patch_phy) break; key = (struct fw_phy_patch_key *)block; key_addr = __le16_to_cpu(key->key_reg); rtl_pre_ram_code(tp, key_addr, __le16_to_cpu(key->key_data), !power_cut); break; case RTL_FW_PHY_STOP: if (!patch_phy) break; WARN_ON(!key_addr); rtl_post_ram_code(tp, key_addr, !power_cut); break; case RTL_FW_PHY_NC: rtl8152_fw_phy_nc_apply(tp, (struct fw_phy_nc *)block); break; case RTL_FW_PHY_VER: patch_phy = rtl8152_fw_phy_ver(tp, (struct fw_phy_ver *)block); break; case RTL_FW_PHY_UNION_NC: case RTL_FW_PHY_UNION_NC1: case RTL_FW_PHY_UNION_NC2: case RTL_FW_PHY_UNION_UC2: case RTL_FW_PHY_UNION_UC: case RTL_FW_PHY_UNION_MISC: if (patch_phy) rtl8152_fw_phy_union_apply(tp, (struct fw_phy_union *)block); break; case RTL_FW_PHY_FIXUP: if (patch_phy) rtl8152_fw_phy_fixup(tp, (struct fw_phy_fixup *)block); break; case RTL_FW_PHY_SPEED_UP: rtl_ram_code_speed_up(tp, (struct fw_phy_speed_up *)block, !power_cut); break; default: break; } i += ALIGN(__le32_to_cpu(block->length), 8); } post_fw: if (rtl_fw->post_fw) rtl_fw->post_fw(tp); rtl_reset_ocp_base(tp); strscpy(rtl_fw->version, fw_hdr->version, RTL_VER_SIZE); dev_dbg(&tp->intf->dev, "load %s successfully\n", rtl_fw->version); } static void rtl8152_release_firmware(struct r8152 *tp) { struct rtl_fw *rtl_fw = &tp->rtl_fw; if (!IS_ERR_OR_NULL(rtl_fw->fw)) { release_firmware(rtl_fw->fw); rtl_fw->fw = NULL; } } static int rtl8152_request_firmware(struct r8152 *tp) { struct rtl_fw *rtl_fw = &tp->rtl_fw; long rc; if (rtl_fw->fw || !rtl_fw->fw_name) { dev_info(&tp->intf->dev, "skip request firmware\n"); rc = 0; goto result; } rc = request_firmware(&rtl_fw->fw, rtl_fw->fw_name, &tp->intf->dev); if (rc < 0) goto result; rc = rtl8152_check_firmware(tp, rtl_fw); if (rc < 
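/* rtl8152_fw_mac_apply() above gates the load on a version byte kept in a
 * USB MCU register: if fw_ver_reg is non-zero and already holds a value at
 * least as new as the blob's fw_ver_data, the reload is skipped, and after a
 * successful load the new value is written back so later attempts become
 * no-ops.  A compact restatement of that gate; fw_mac_is_current() is a
 * hypothetical name.
 */
static bool fw_mac_is_current(struct r8152 *tp, u16 fw_ver_reg, u8 blob_ver)
{
	if (!fw_ver_reg)	/* no version register means always (re)load */
		return false;
	return ocp_read_byte(tp, MCU_TYPE_USB, fw_ver_reg) >= blob_ver;
}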
0) release_firmware(rtl_fw->fw); result: if (rc) { rtl_fw->fw = ERR_PTR(rc); dev_warn(&tp->intf->dev, "unable to load firmware patch %s (%ld)\n", rtl_fw->fw_name, rc); } return rc; } static void r8152_aldps_en(struct r8152 *tp, bool enable) { if (enable) { ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS | LINKENA | DIS_SDSAVE); } else { ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | DIS_SDSAVE); msleep(20); } } static inline void r8152_mmd_indirect(struct r8152 *tp, u16 dev, u16 reg) { ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | dev); ocp_reg_write(tp, OCP_EEE_DATA, reg); ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | dev); } static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg) { u16 data; r8152_mmd_indirect(tp, dev, reg); data = ocp_reg_read(tp, OCP_EEE_DATA); ocp_reg_write(tp, OCP_EEE_AR, 0x0000); return data; } static void r8152_mmd_write(struct r8152 *tp, u16 dev, u16 reg, u16 data) { r8152_mmd_indirect(tp, dev, reg); ocp_reg_write(tp, OCP_EEE_DATA, data); ocp_reg_write(tp, OCP_EEE_AR, 0x0000); } static void r8152_eee_en(struct r8152 *tp, bool enable) { u16 config1, config2, config3; u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); config1 = ocp_reg_read(tp, OCP_EEE_CONFIG1) & ~sd_rise_time_mask; config2 = ocp_reg_read(tp, OCP_EEE_CONFIG2); config3 = ocp_reg_read(tp, OCP_EEE_CONFIG3) & ~fast_snr_mask; if (enable) { ocp_data |= EEE_RX_EN | EEE_TX_EN; config1 |= EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN; config1 |= sd_rise_time(1); config2 |= RG_DACQUIET_EN | RG_LDVQUIET_EN; config3 |= fast_snr(42); } else { ocp_data &= ~(EEE_RX_EN | EEE_TX_EN); config1 &= ~(EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN); config1 |= sd_rise_time(7); config2 &= ~(RG_DACQUIET_EN | RG_LDVQUIET_EN); config3 |= fast_snr(511); } ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data); ocp_reg_write(tp, OCP_EEE_CONFIG1, config1); ocp_reg_write(tp, OCP_EEE_CONFIG2, config2); ocp_reg_write(tp, OCP_EEE_CONFIG3, config3); } static void r8153_eee_en(struct r8152 *tp, bool enable) { u32 ocp_data; u16 config; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); config = ocp_reg_read(tp, OCP_EEE_CFG); if (enable) { ocp_data |= EEE_RX_EN | EEE_TX_EN; config |= EEE10_EN; } else { ocp_data &= ~(EEE_RX_EN | EEE_TX_EN); config &= ~EEE10_EN; } ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data); ocp_reg_write(tp, OCP_EEE_CFG, config); tp->ups_info.eee = enable; } static void r8156_eee_en(struct r8152 *tp, bool enable) { u16 config; r8153_eee_en(tp, enable); config = ocp_reg_read(tp, OCP_EEE_ADV2); if (enable) config |= MDIO_EEE_2_5GT; else config &= ~MDIO_EEE_2_5GT; ocp_reg_write(tp, OCP_EEE_ADV2, config); } static void rtl_eee_enable(struct r8152 *tp, bool enable) { switch (tp->version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: if (enable) { r8152_eee_en(tp, true); r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, tp->eee_adv); } else { r8152_eee_en(tp, false); r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0); } break; case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: case RTL_VER_08: case RTL_VER_09: case RTL_VER_14: if (enable) { r8153_eee_en(tp, true); ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv); } else { r8153_eee_en(tp, false); ocp_reg_write(tp, OCP_EEE_ADV, 0); } break; case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: if (enable) { r8156_eee_en(tp, true); ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv); } else { r8156_eee_en(tp, false); ocp_reg_write(tp, OCP_EEE_ADV, 0); } break; default: 
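/* r8152_mmd_read()/r8152_mmd_write() above perform Clause 45 MMD accesses
 * indirectly through two OCP registers: latch the device address with
 * FUN_ADDR, write the register number, then switch the same register to
 * FUN_DATA for the data phase.  A short usage sketch that reads back the
 * advertised EEE modes from the same device/register pair rtl_eee_enable()
 * writes (MDIO_MMD_AN, MDIO_AN_EEE_ADV); r8152_read_eee_adv() is a
 * hypothetical helper.
 */
static u16 r8152_read_eee_adv(struct r8152 *tp)
{
	return r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
}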
break; } } static void r8152b_enable_fc(struct r8152 *tp) { u16 anar; anar = r8152_mdio_read(tp, MII_ADVERTISE); anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; r8152_mdio_write(tp, MII_ADVERTISE, anar); tp->ups_info.flow_control = true; } static void rtl8152_disable(struct r8152 *tp) { r8152_aldps_en(tp, false); rtl_disable(tp); r8152_aldps_en(tp, true); } static void r8152b_hw_phy_cfg(struct r8152 *tp) { rtl8152_apply_firmware(tp, false); rtl_eee_enable(tp, tp->eee_en); r8152_aldps_en(tp, true); r8152b_enable_fc(tp); set_bit(PHY_RESET, &tp->flags); } static void wait_oob_link_list_ready(struct r8152 *tp) { u32 ocp_data; int i; for (i = 0; i < 1000; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if (ocp_data & LINK_LIST_READY) break; usleep_range(1000, 2000); } } static void r8156b_wait_loading_flash(struct r8152 *tp) { if ((ocp_read_word(tp, MCU_TYPE_PLA, PLA_GPHY_CTRL) & GPHY_FLASH) && !(ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL) & BYPASS_FLASH)) { int i; for (i = 0; i < 100; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; if (ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL) & GPHY_PATCH_DONE) break; usleep_range(1000, 2000); } } } static void r8152b_exit_oob(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data &= ~RCR_ACPT_ALL; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); rxdy_gated_en(tp, true); r8153_teredo_off(tp); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0x00); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data &= ~MCU_BORW_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data |= RE_INIT_LL; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); rtl8152_nic_reset(tp); /* rx share fifo credit full threshold */ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL); if (tp->udev->speed == USB_SPEED_FULL || tp->udev->speed == USB_SPEED_LOW) { /* rx share fifo credit near full threshold */ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_FULL); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_FULL); } else { /* rx share fifo credit near full threshold */ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_HIGH); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_HIGH); } /* TX share fifo free credit full threshold */ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL2); ocp_write_byte(tp, MCU_TYPE_USB, USB_TX_AGG, TX_AGG_MAX_THRESHOLD); ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_HIGH); ocp_write_dword(tp, MCU_TYPE_USB, USB_TX_DMA, TEST_MODE_DISABLE | TX_SIZE_ADJUST1); rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0); ocp_data |= TCR0_AUTO_FIFO; ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR0, ocp_data); } static void r8152b_enter_oob(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, 
RXFIFO_THR1_OOB); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB); rtl_disable(tp); wait_oob_link_list_ready(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data |= RE_INIT_LL; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS); rtl_rx_vlan_en(tp, true); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BDC_CR); ocp_data |= ALDPS_PROXY_MODE; ocp_write_word(tp, MCU_TYPE_PLA, PLA_BDC_CR, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); rxdy_gated_en(tp, false); ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data |= RCR_APM | RCR_AM | RCR_AB; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); } static int r8153_pre_firmware_1(struct r8152 *tp) { int i; /* Wait till the WTD timer is ready. It would take at most 104 ms. */ for (i = 0; i < 104; i++) { u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_WDT1_CTRL); if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; if (!(ocp_data & WTD1_EN)) break; usleep_range(1000, 2000); } return 0; } static int r8153_post_firmware_1(struct r8152 *tp) { /* set USB_BP_4 to support USB_SPEED_SUPER only */ if (ocp_read_byte(tp, MCU_TYPE_USB, USB_CSTMR) & FORCE_SUPER) ocp_write_word(tp, MCU_TYPE_USB, USB_BP_4, BP4_SUPER_ONLY); /* reset UPHY timer to 36 ms */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_UPHY_TIMER, 36000 / 16); return 0; } static int r8153_pre_firmware_2(struct r8152 *tp) { u32 ocp_data; r8153_pre_firmware_1(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0); ocp_data &= ~FW_FIX_SUSPEND; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0, ocp_data); return 0; } static int r8153_post_firmware_2(struct r8152 *tp) { u32 ocp_data; /* enable bp0 if support USB_SPEED_SUPER only */ if (ocp_read_byte(tp, MCU_TYPE_USB, USB_CSTMR) & FORCE_SUPER) { ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BP_EN); ocp_data |= BIT(0); ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_EN, ocp_data); } /* reset UPHY timer to 36 ms */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_UPHY_TIMER, 36000 / 16); /* enable U3P3 check, set the counter to 4 */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, U3P3_CHECK_EN | 4); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0); ocp_data |= FW_FIX_SUSPEND; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_USB2PHY); ocp_data |= USB2PHY_L1 | USB2PHY_SUSPEND; ocp_write_byte(tp, MCU_TYPE_USB, USB_USB2PHY, ocp_data); return 0; } static int r8153_post_firmware_3(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_USB2PHY); ocp_data |= USB2PHY_L1 | USB2PHY_SUSPEND; ocp_write_byte(tp, MCU_TYPE_USB, USB_USB2PHY, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1); ocp_data |= FW_IP_RESET_EN; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1, ocp_data); return 0; } static int r8153b_pre_firmware_1(struct r8152 *tp) { /* enable fc timer and set timer to 1 second. 
*/ ocp_write_word(tp, MCU_TYPE_USB, USB_FC_TIMER, CTRL_TIMER_EN | (1000 / 8)); return 0; } static int r8153b_post_firmware_1(struct r8152 *tp) { u32 ocp_data; /* enable bp0 for RTL8153-BND */ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1); if (ocp_data & BND_MASK) { ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BP_EN); ocp_data |= BIT(0); ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_EN, ocp_data); } ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_CTRL); ocp_data |= FLOW_CTRL_PATCH_OPT; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); ocp_data |= FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1); ocp_data |= FW_IP_RESET_EN; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1, ocp_data); return 0; } static int r8153c_post_firmware_1(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_CTRL); ocp_data |= FLOW_CTRL_PATCH_2; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); ocp_data |= FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); return 0; } static int r8156a_post_firmware_1(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1); ocp_data |= FW_IP_RESET_EN; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1, ocp_data); /* Modify U3PHY parameter for compatibility issue */ ocp_write_dword(tp, MCU_TYPE_USB, USB_UPHY3_MDCMDIO, 0x4026840e); ocp_write_dword(tp, MCU_TYPE_USB, USB_UPHY3_MDCMDIO, 0x4001acc9); return 0; } static void r8153_aldps_en(struct r8152 *tp, bool enable) { u16 data; data = ocp_reg_read(tp, OCP_POWER_CFG); if (enable) { data |= EN_ALDPS; ocp_reg_write(tp, OCP_POWER_CFG, data); } else { int i; data &= ~EN_ALDPS; ocp_reg_write(tp, OCP_POWER_CFG, data); for (i = 0; i < 20; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; usleep_range(1000, 2000); if (ocp_read_word(tp, MCU_TYPE_PLA, 0xe000) & 0x0100) break; } } tp->ups_info.aldps = enable; } static void r8153_hw_phy_cfg(struct r8152 *tp) { u32 ocp_data; u16 data; /* disable ALDPS before updating the PHY parameters */ r8153_aldps_en(tp, false); /* disable EEE before updating the PHY parameters */ rtl_eee_enable(tp, false); rtl8152_apply_firmware(tp, false); if (tp->version == RTL_VER_03) { data = ocp_reg_read(tp, OCP_EEE_CFG); data &= ~CTAP_SHORT_EN; ocp_reg_write(tp, OCP_EEE_CFG, data); } data = ocp_reg_read(tp, OCP_POWER_CFG); data |= EEE_CLKDIV_EN; ocp_reg_write(tp, OCP_POWER_CFG, data); data = ocp_reg_read(tp, OCP_DOWN_SPEED); data |= EN_10M_BGOFF; ocp_reg_write(tp, OCP_DOWN_SPEED, data); data = ocp_reg_read(tp, OCP_POWER_CFG); data |= EN_10M_PLLOFF; ocp_reg_write(tp, OCP_POWER_CFG, data); sram_write(tp, SRAM_IMPEDANCE, 0x0b13); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); ocp_data |= PFM_PWM_SWITCH; ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); /* Enable LPF corner auto tune */ sram_write(tp, SRAM_LPF_CFG, 0xf70f); /* Adjust 10M Amplitude */ sram_write(tp, SRAM_10M_AMP1, 0x00af); sram_write(tp, SRAM_10M_AMP2, 0x0208); if (tp->eee_en) rtl_eee_enable(tp, true); r8153_aldps_en(tp, true); r8152b_enable_fc(tp); switch (tp->version) { case RTL_VER_03: case RTL_VER_04: break; case RTL_VER_05: case RTL_VER_06: default: r8153_u2p3en(tp, true); break; } set_bit(PHY_RESET, &tp->flags); } static u32 r8152_efuse_read(struct r8152 *tp, u8 addr) { u32 ocp_data; ocp_write_word(tp, MCU_TYPE_PLA, PLA_EFUSE_CMD, 
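/* The firmware hooks above program timers in raw hardware ticks rather than
 * time units: the UPHY timer comment says "36 ms" while the code writes
 * 36000 / 16, and the flow-control timer comment says "1 second" while the
 * code writes 1000 / 8.  That pairing suggests 16 us and 8 ms per tick
 * respectively; the tick widths are an inference from those divisors, not
 * something the driver states.  Hypothetical conversion helpers:
 */
static inline u16 uphy_timer_ticks(u32 usecs)
{
	return usecs / 16;	/* assumed 16 us per UPHY timer tick */
}

static inline u16 fc_timer_ticks(u32 msecs)
{
	return msecs / 8;	/* assumed 8 ms per flow-control timer tick */
}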
EFUSE_READ_CMD | addr); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EFUSE_CMD); ocp_data = (ocp_data & EFUSE_DATA_BIT16) << 9; /* data of bit16 */ ocp_data |= ocp_read_word(tp, MCU_TYPE_PLA, PLA_EFUSE_DATA); return ocp_data; } static void r8153b_hw_phy_cfg(struct r8152 *tp) { u32 ocp_data; u16 data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); if (ocp_data & PCUT_STATUS) { ocp_data &= ~PCUT_STATUS; ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); } /* disable ALDPS before updating the PHY parameters */ r8153_aldps_en(tp, false); /* disable EEE before updating the PHY parameters */ rtl_eee_enable(tp, false); /* U1/U2/L1 idle timer. 500 us */ ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500); data = r8153_phy_status(tp, 0); switch (data) { case PHY_STAT_PWRDN: case PHY_STAT_EXT_INIT: rtl8152_apply_firmware(tp, true); data = r8152_mdio_read(tp, MII_BMCR); data &= ~BMCR_PDOWN; r8152_mdio_write(tp, MII_BMCR, data); break; case PHY_STAT_LAN_ON: default: rtl8152_apply_firmware(tp, false); break; } r8153b_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags)); data = sram_read(tp, SRAM_GREEN_CFG); data |= R_TUNE_EN; sram_write(tp, SRAM_GREEN_CFG, data); data = ocp_reg_read(tp, OCP_NCTL_CFG); data |= PGA_RETURN_EN; ocp_reg_write(tp, OCP_NCTL_CFG, data); /* ADC Bias Calibration: * read efuse offset 0x7d to get a 17-bit data. Remove the dummy/fake * bit (bit3) to rebuild the real 16-bit data. Write the data to the * ADC ioffset. */ ocp_data = r8152_efuse_read(tp, 0x7d); data = (u16)(((ocp_data & 0x1fff0) >> 1) | (ocp_data & 0x7)); if (data != 0xffff) ocp_reg_write(tp, OCP_ADC_IOFFSET, data); /* ups mode tx-link-pulse timing adjustment: * rg_saw_cnt = OCP reg 0xC426 Bit[13:0] * swr_cnt_1ms_ini = 16000000 / rg_saw_cnt */ ocp_data = ocp_reg_read(tp, 0xc426); ocp_data &= 0x3fff; if (ocp_data) { u32 swr_cnt_1ms_ini; swr_cnt_1ms_ini = (16000000 / ocp_data) & SAW_CNT_1MS_MASK; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CFG); ocp_data = (ocp_data & ~SAW_CNT_1MS_MASK) | swr_cnt_1ms_ini; ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CFG, ocp_data); } ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); ocp_data |= PFM_PWM_SWITCH; ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); /* Advnace EEE */ if (!rtl_phy_patch_request(tp, true, true)) { data = ocp_reg_read(tp, OCP_POWER_CFG); data |= EEE_CLKDIV_EN; ocp_reg_write(tp, OCP_POWER_CFG, data); tp->ups_info.eee_ckdiv = true; data = ocp_reg_read(tp, OCP_DOWN_SPEED); data |= EN_EEE_CMODE | EN_EEE_1000 | EN_10M_CLKDIV; ocp_reg_write(tp, OCP_DOWN_SPEED, data); tp->ups_info.eee_cmod_lv = true; tp->ups_info._10m_ckdiv = true; tp->ups_info.eee_plloff_giga = true; ocp_reg_write(tp, OCP_SYSCLK_CFG, 0); ocp_reg_write(tp, OCP_SYSCLK_CFG, clk_div_expo(5)); tp->ups_info._250m_ckdiv = true; rtl_phy_patch_request(tp, false, true); } if (tp->eee_en) rtl_eee_enable(tp, true); r8153_aldps_en(tp, true); r8152b_enable_fc(tp); set_bit(PHY_RESET, &tp->flags); } static void r8153c_hw_phy_cfg(struct r8152 *tp) { r8153b_hw_phy_cfg(tp); tp->ups_info.r_tune = true; } static void rtl8153_change_mtu(struct r8152 *tp) { ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu)); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO); } static void r8153_first_init(struct r8152 *tp) { u32 ocp_data; rxdy_gated_en(tp, true); r8153_teredo_off(tp); ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data &= ~RCR_ACPT_ALL; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); rtl8152_nic_reset(tp); rtl_reset_bmu(tp); ocp_data = 
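/* The ADC bias calibration above reads a 17-bit efuse word at offset 0x7d
 * and squeezes out the "dummy/fake" bit 3: bits [2:0] stay in place while
 * bits [16:4] shift down by one, yielding the real 16-bit ioffset value.
 * A stand-alone restatement of that bit surgery; squeeze_out_bit3() is a
 * hypothetical name.
 */
static u16 squeeze_out_bit3(u32 raw17)
{
	/* e.g. 0x12345 -> bits [16:4] = 0x1234, bits [2:0] = 0x5 -> 0x91a5 */
	return (u16)(((raw17 & 0x1fff0) >> 1) | (raw17 & 0x7));
}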
ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data &= ~MCU_BORW_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data |= RE_INIT_LL; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); rtl8153_change_mtu(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0); ocp_data |= TCR0_AUTO_FIFO; ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR0, ocp_data); rtl8152_nic_reset(tp); /* rx share fifo credit full threshold */ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_NORMAL); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_NORMAL); /* TX share fifo free credit full threshold */ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL2); } static void r8153_enter_oob(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); /* RX FIFO settings for OOB */ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_OOB); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB); rtl_disable(tp); rtl_reset_bmu(tp); wait_oob_link_list_ready(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data |= RE_INIT_LL; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, 1522); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_DEFAULT); switch (tp->version) { case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG); ocp_data &= ~TEREDO_WAKE_MASK; ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data); break; case RTL_VER_08: case RTL_VER_09: case RTL_VER_14: /* Clear teredo wake event. bit[15:8] is the teredo wakeup * type. Set it to zero. bits[7:0] are the W1C bits about * the events. Set them to all 1 to clear them. */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_WAKE_BASE, 0x00ff); break; default: break; } rtl_rx_vlan_en(tp, true); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BDC_CR); ocp_data |= ALDPS_PROXY_MODE; ocp_write_word(tp, MCU_TYPE_PLA, PLA_BDC_CR, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data |= MCU_BORW_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); rxdy_gated_en(tp, false); ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data |= RCR_APM | RCR_AM | RCR_AB; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); } static void rtl8153_disable(struct r8152 *tp) { r8153_aldps_en(tp, false); rtl_disable(tp); rtl_reset_bmu(tp); r8153_aldps_en(tp, true); } static u32 fc_pause_on_auto(struct r8152 *tp) { return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 6 * 1024); } static u32 fc_pause_off_auto(struct r8152 *tp) { return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 14 * 1024); } static void r8156_fc_parameter(struct r8152 *tp) { u32 pause_on = tp->fc_pause_on ? 
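/* fc_pause_on_auto()/fc_pause_off_auto() above derive the flow-control
 * watermarks from the MTU: round the frame size up to 1 KiB, then add 6 KiB
 * (assert pause) or 14 KiB (release pause).  r8156_fc_parameter(), continued
 * just below, writes both values divided by 16, which implies the registers
 * count 16-byte units (an inference from the divisor).  A sketch with worked
 * numbers; pause_on_bytes() is a hypothetical helper, and the ~1522-byte
 * frame size for a 1500-byte MTU assumes the usual VLAN+FCS overhead.
 */
static u32 pause_on_bytes(u32 frame_size)
{
	/* frame_size 1522 -> ALIGN -> 2048, + 6144 -> 8192 bytes (512 units) */
	return ALIGN(frame_size, 1024) + 6 * 1024;
}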
tp->fc_pause_on : fc_pause_on_auto(tp); u32 pause_off = tp->fc_pause_off ? tp->fc_pause_off : fc_pause_off_auto(tp); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16); } static int rtl8156_enable(struct r8152 *tp) { u32 ocp_data; u16 speed; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; r8156_fc_parameter(tp); set_tx_qlen(tp); rtl_set_eee_plus(tp); r8153_set_rx_early_timeout(tp); r8153_set_rx_early_size(tp); speed = rtl8152_get_speed(tp); rtl_set_ifg(tp, speed); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); if (speed & _2500bps) ocp_data &= ~IDLE_SPDWN_EN; else ocp_data |= IDLE_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); if (speed & _1000bps) ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_TXTWSYS, 0x11); else if (speed & _500bps) ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_TXTWSYS, 0x3d); if (tp->udev->speed == USB_SPEED_HIGH) { /* USB 0xb45e[3:0] l1_nyet_hird */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_L1_CTRL); ocp_data &= ~0xf; if (is_flow_control(speed)) ocp_data |= 0xf; else ocp_data |= 0x1; ocp_write_word(tp, MCU_TYPE_USB, USB_L1_CTRL, ocp_data); } ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); ocp_data &= ~FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); usleep_range(1000, 2000); ocp_data |= FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); return rtl_enable(tp); } static void rtl8156_disable(struct r8152 *tp) { ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, 0); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, 0); rtl8153_disable(tp); } static int rtl8156b_enable(struct r8152 *tp) { u32 ocp_data; u16 speed; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; set_tx_qlen(tp); rtl_set_eee_plus(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_RX_AGGR_NUM); ocp_data &= ~RX_AGGR_NUM_MASK; ocp_write_word(tp, MCU_TYPE_USB, USB_RX_AGGR_NUM, ocp_data); r8153_set_rx_early_timeout(tp); r8153_set_rx_early_size(tp); speed = rtl8152_get_speed(tp); rtl_set_ifg(tp, speed); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); if (speed & _2500bps) ocp_data &= ~IDLE_SPDWN_EN; else ocp_data |= IDLE_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); if (tp->udev->speed == USB_SPEED_HIGH) { ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_L1_CTRL); ocp_data &= ~0xf; if (is_flow_control(speed)) ocp_data |= 0xf; else ocp_data |= 0x1; ocp_write_word(tp, MCU_TYPE_USB, USB_L1_CTRL, ocp_data); } ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); ocp_data &= ~FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); usleep_range(1000, 2000); ocp_data |= FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); return rtl_enable(tp); } static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex, u32 advertising) { u16 bmcr; int ret = 0; if (autoneg == AUTONEG_DISABLE) { if (duplex != DUPLEX_HALF && duplex != DUPLEX_FULL) return -EINVAL; switch (speed) { case SPEED_10: bmcr = BMCR_SPEED10; if (duplex == DUPLEX_FULL) { bmcr |= BMCR_FULLDPLX; tp->ups_info.speed_duplex = FORCE_10M_FULL; } else { tp->ups_info.speed_duplex = FORCE_10M_HALF; } break; case SPEED_100: bmcr = BMCR_SPEED100; if (duplex == DUPLEX_FULL) { bmcr |= BMCR_FULLDPLX; tp->ups_info.speed_duplex = FORCE_100M_FULL; } else { tp->ups_info.speed_duplex = FORCE_100M_HALF; } break; case SPEED_1000: if (tp->mii.supports_gmii) { bmcr = BMCR_SPEED1000 | 
BMCR_FULLDPLX; tp->ups_info.speed_duplex = NWAY_1000M_FULL; break; } fallthrough; default: ret = -EINVAL; goto out; } if (duplex == DUPLEX_FULL) tp->mii.full_duplex = 1; else tp->mii.full_duplex = 0; tp->mii.force_media = 1; } else { u16 orig, new1; u32 support; support = RTL_ADVERTISED_10_HALF | RTL_ADVERTISED_10_FULL | RTL_ADVERTISED_100_HALF | RTL_ADVERTISED_100_FULL; if (tp->mii.supports_gmii) { support |= RTL_ADVERTISED_1000_FULL; if (tp->support_2500full) support |= RTL_ADVERTISED_2500_FULL; } if (!(advertising & support)) return -EINVAL; orig = r8152_mdio_read(tp, MII_ADVERTISE); new1 = orig & ~(ADVERTISE_10HALF | ADVERTISE_10FULL | ADVERTISE_100HALF | ADVERTISE_100FULL); if (advertising & RTL_ADVERTISED_10_HALF) { new1 |= ADVERTISE_10HALF; tp->ups_info.speed_duplex = NWAY_10M_HALF; } if (advertising & RTL_ADVERTISED_10_FULL) { new1 |= ADVERTISE_10FULL; tp->ups_info.speed_duplex = NWAY_10M_FULL; } if (advertising & RTL_ADVERTISED_100_HALF) { new1 |= ADVERTISE_100HALF; tp->ups_info.speed_duplex = NWAY_100M_HALF; } if (advertising & RTL_ADVERTISED_100_FULL) { new1 |= ADVERTISE_100FULL; tp->ups_info.speed_duplex = NWAY_100M_FULL; } if (orig != new1) { r8152_mdio_write(tp, MII_ADVERTISE, new1); tp->mii.advertising = new1; } if (tp->mii.supports_gmii) { orig = r8152_mdio_read(tp, MII_CTRL1000); new1 = orig & ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); if (advertising & RTL_ADVERTISED_1000_FULL) { new1 |= ADVERTISE_1000FULL; tp->ups_info.speed_duplex = NWAY_1000M_FULL; } if (orig != new1) r8152_mdio_write(tp, MII_CTRL1000, new1); } if (tp->support_2500full) { orig = ocp_reg_read(tp, OCP_10GBT_CTRL); new1 = orig & ~MDIO_AN_10GBT_CTRL_ADV2_5G; if (advertising & RTL_ADVERTISED_2500_FULL) { new1 |= MDIO_AN_10GBT_CTRL_ADV2_5G; tp->ups_info.speed_duplex = NWAY_2500M_FULL; } if (orig != new1) ocp_reg_write(tp, OCP_10GBT_CTRL, new1); } bmcr = BMCR_ANENABLE | BMCR_ANRESTART; tp->mii.force_media = 0; } if (test_and_clear_bit(PHY_RESET, &tp->flags)) bmcr |= BMCR_RESET; r8152_mdio_write(tp, MII_BMCR, bmcr); if (bmcr & BMCR_RESET) { int i; for (i = 0; i < 50; i++) { msleep(20); if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0) break; } } out: return ret; } static void rtl8152_up(struct r8152 *tp) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8152_aldps_en(tp, false); r8152b_exit_oob(tp); r8152_aldps_en(tp, true); } static void rtl8152_down(struct r8152 *tp) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); return; } r8152_power_cut_en(tp, false); r8152_aldps_en(tp, false); r8152b_enter_oob(tp); r8152_aldps_en(tp, true); } static void rtl8153_up(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_aldps_en(tp, false); r8153_first_init(tp); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6); ocp_data |= LANWAKE_CLR_EN; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG); ocp_data &= ~LANWAKE_PIN; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1); ocp_data &= ~DELAY_PHY_PWR_CHG; ocp_write_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1, ocp_data); r8153_aldps_en(tp, true); switch (tp->version) { case RTL_VER_03: case RTL_VER_04: break; case RTL_VER_05: case RTL_VER_06: default: r8153_u2p3en(tp, true); break; } r8153_u1u2en(tp, true); } static void rtl8153_down(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, 
&tp->flags)) { rtl_drop_queued_tx(tp); return; } ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6); ocp_data &= ~LANWAKE_CLR_EN; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data); r8153_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_power_cut_en(tp, false); r8153_aldps_en(tp, false); r8153_enter_oob(tp); r8153_aldps_en(tp, true); } static void rtl8153b_up(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_aldps_en(tp, false); r8153_first_init(tp); ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); ocp_data &= ~PLA_MCU_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); r8153_aldps_en(tp, true); if (tp->udev->speed >= USB_SPEED_SUPER) r8153b_u1u2en(tp, true); } static void rtl8153b_down(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); return; } ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); ocp_data |= PLA_MCU_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153b_power_cut_en(tp, false); r8153_aldps_en(tp, false); r8153_enter_oob(tp); r8153_aldps_en(tp, true); } static void rtl8153c_change_mtu(struct r8152 *tp) { ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu)); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, 10 * 1024 / 64); ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, 512 / 64); /* Adjust the tx fifo free credit full threshold, otherwise * the fifo would be too small to send a jumbo frame packet. */ if (tp->netdev->mtu < 8000) ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_FULL, 2048 / 8); else ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_FULL, 900 / 8); } static void rtl8153c_up(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_aldps_en(tp, false); rxdy_gated_en(tp, true); r8153_teredo_off(tp); ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data &= ~RCR_ACPT_ALL; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); rtl8152_nic_reset(tp); rtl_reset_bmu(tp); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data &= ~MCU_BORW_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data |= RE_INIT_LL; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); rtl8153c_change_mtu(tp); rtl8152_nic_reset(tp); /* rx share fifo credit full threshold */ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, 0x02); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, 0x08); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_NORMAL); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_NORMAL); ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); ocp_data |= BIT(8); ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, 
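/* rtl8153c_change_mtu() above also works in divided raw values: PLA_MTPS and
 * PLA_TXFIFO_CTRL are written as byte counts / 64 and PLA_TXFIFO_FULL as
 * byte counts / 8, suggesting 64-byte and 8-byte units (inferred from the
 * divisors).  The TX-FIFO full threshold drops from 2048 to 900 bytes once
 * the MTU reaches 8000 so a jumbo frame still fits in the shared FIFO.
 * A hypothetical helper restating that choice:
 */
static u16 txfifo_full_units(u32 mtu)
{
	return (mtu < 8000 ? 2048 : 900) / 8;	/* assumed 8-byte units */
}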
PLA_MAC_PWR_CTRL3); ocp_data &= ~PLA_MCU_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); r8153_aldps_en(tp, true); r8153b_u1u2en(tp, true); } static void rtl8156_change_mtu(struct r8152 *tp) { u32 rx_max_size = mtu_to_size(tp->netdev->mtu); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, rx_max_size); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO); r8156_fc_parameter(tp); /* TX share fifo free credit full threshold */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, 512 / 64); ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_FULL, ALIGN(rx_max_size + sizeof(struct tx_desc), 1024) / 16); } static void rtl8156_up(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_aldps_en(tp, false); rxdy_gated_en(tp, true); r8153_teredo_off(tp); ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data &= ~RCR_ACPT_ALL; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); rtl8152_nic_reset(tp); rtl_reset_bmu(tp); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data &= ~MCU_BORW_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); rtl8156_change_mtu(tp); switch (tp->version) { case RTL_TEST_01: case RTL_VER_10: case RTL_VER_11: ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_BMU_CONFIG); ocp_data |= ACT_ODMA; ocp_write_word(tp, MCU_TYPE_USB, USB_BMU_CONFIG, ocp_data); break; default: break; } /* share FIFO settings */ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL); ocp_data &= ~RXFIFO_FULL_MASK; ocp_data |= 0x08; ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); ocp_data &= ~PLA_MCU_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION); ocp_data &= ~(RG_PWRDN_EN | ALL_SPEED_OFF); ocp_write_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION, ocp_data); ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, 0x00600400); if (tp->saved_wolopts != __rtl_get_wol(tp)) { netif_warn(tp, ifup, tp->netdev, "wol setting is changed\n"); __rtl_set_wol(tp, tp->saved_wolopts); } r8153_aldps_en(tp, true); r8153_u2p3en(tp, true); if (tp->udev->speed >= USB_SPEED_SUPER) r8153b_u1u2en(tp, true); } static void rtl8156_down(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); return; } ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); ocp_data |= PLA_MCU_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153b_power_cut_en(tp, false); r8153_aldps_en(tp, false); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); /* RX FIFO settings for OOB */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, 64 / 16); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, 1024 / 16); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, 4096 / 16); rtl_disable(tp); rtl_reset_bmu(tp); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, 1522); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_DEFAULT); /* Clear teredo wake event. bit[15:8] is the teredo wakeup * type. Set it to zero. bits[7:0] are the W1C bits about * the events. 
Set them to all 1 to clear them. */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_WAKE_BASE, 0x00ff); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data |= NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data |= MCU_BORW_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); rtl_rx_vlan_en(tp, true); rxdy_gated_en(tp, false); ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data |= RCR_APM | RCR_AM | RCR_AB; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); r8153_aldps_en(tp, true); } static bool rtl8152_in_nway(struct r8152 *tp) { u16 nway_state; ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, 0x2000); tp->ocp_base = 0x2000; ocp_write_byte(tp, MCU_TYPE_PLA, 0xb014, 0x4c); /* phy state */ nway_state = ocp_read_word(tp, MCU_TYPE_PLA, 0xb01a); /* bit 15: TXDIS_STATE, bit 14: ABD_STATE */ if (nway_state & 0xc000) return false; else return true; } static bool rtl8153_in_nway(struct r8152 *tp) { u16 phy_state = ocp_reg_read(tp, OCP_PHY_STATE) & 0xff; if (phy_state == TXDIS_STATE || phy_state == ABD_STATE) return false; else return true; } static void r8156_mdio_force_mode(struct r8152 *tp) { u16 data; /* Select force mode through 0xa5b4 bit 15 * 0: MDIO force mode * 1: MMD force mode */ data = ocp_reg_read(tp, 0xa5b4); if (data & BIT(15)) { data &= ~BIT(15); ocp_reg_write(tp, 0xa5b4, data); } } static void set_carrier(struct r8152 *tp) { struct net_device *netdev = tp->netdev; struct napi_struct *napi = &tp->napi; u16 speed; speed = rtl8152_get_speed(tp); if (speed & LINK_STATUS) { if (!netif_carrier_ok(netdev)) { tp->rtl_ops.enable(tp); netif_stop_queue(netdev); napi_disable(napi); netif_carrier_on(netdev); rtl_start_rx(tp); clear_bit(RTL8152_SET_RX_MODE, &tp->flags); _rtl8152_set_rx_mode(netdev); napi_enable(napi); netif_wake_queue(netdev); netif_info(tp, link, netdev, "carrier on\n"); } else if (netif_queue_stopped(netdev) && skb_queue_len(&tp->tx_queue) < tp->tx_qlen) { netif_wake_queue(netdev); } } else { if (netif_carrier_ok(netdev)) { netif_carrier_off(netdev); tasklet_disable(&tp->tx_tl); napi_disable(napi); tp->rtl_ops.disable(tp); napi_enable(napi); tasklet_enable(&tp->tx_tl); netif_info(tp, link, netdev, "carrier off\n"); } } } static void rtl_work_func_t(struct work_struct *work) { struct r8152 *tp = container_of(work, struct r8152, schedule.work); /* If the device is unplugged or !netif_running(), the workqueue * doesn't need to wake the device, and could return directly. 
*/ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) || !netif_running(tp->netdev)) return; if (usb_autopm_get_interface(tp->intf) < 0) return; if (!test_bit(WORK_ENABLE, &tp->flags)) goto out1; if (!mutex_trylock(&tp->control)) { schedule_delayed_work(&tp->schedule, 0); goto out1; } if (test_and_clear_bit(RTL8152_LINK_CHG, &tp->flags)) set_carrier(tp); if (test_and_clear_bit(RTL8152_SET_RX_MODE, &tp->flags)) _rtl8152_set_rx_mode(tp->netdev); /* don't schedule the tasklet before linking */ if (test_and_clear_bit(SCHEDULE_TASKLET, &tp->flags) && netif_carrier_ok(tp->netdev)) tasklet_schedule(&tp->tx_tl); if (test_and_clear_bit(RX_EPROTO, &tp->flags) && !list_empty(&tp->rx_done)) napi_schedule(&tp->napi); mutex_unlock(&tp->control); out1: usb_autopm_put_interface(tp->intf); } static void rtl_hw_phy_work_func_t(struct work_struct *work) { struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work); if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (usb_autopm_get_interface(tp->intf) < 0) return; mutex_lock(&tp->control); if (rtl8152_request_firmware(tp) == -ENODEV && tp->rtl_fw.retry) { tp->rtl_fw.retry = false; tp->rtl_fw.fw = NULL; /* Delay execution in case request_firmware() is not ready yet. */ queue_delayed_work(system_long_wq, &tp->hw_phy_work, HZ * 10); goto ignore_once; } tp->rtl_ops.hw_phy_cfg(tp); rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex, tp->advertising); ignore_once: mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); } #ifdef CONFIG_PM_SLEEP static int rtl_notifier(struct notifier_block *nb, unsigned long action, void *data) { struct r8152 *tp = container_of(nb, struct r8152, pm_notifier); switch (action) { case PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE: usb_autopm_get_interface(tp->intf); break; case PM_POST_HIBERNATION: case PM_POST_SUSPEND: usb_autopm_put_interface(tp->intf); break; case PM_POST_RESTORE: case PM_RESTORE_PREPARE: default: break; } return NOTIFY_DONE; } #endif static int rtl8152_open(struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); int res = 0; if (work_busy(&tp->hw_phy_work.work) & WORK_BUSY_PENDING) { cancel_delayed_work_sync(&tp->hw_phy_work); rtl_hw_phy_work_func_t(&tp->hw_phy_work.work); } res = alloc_all_mem(tp); if (res) goto out; res = usb_autopm_get_interface(tp->intf); if (res < 0) goto out_free; mutex_lock(&tp->control); tp->rtl_ops.up(tp); netif_carrier_off(netdev); netif_start_queue(netdev); set_bit(WORK_ENABLE, &tp->flags); res = usb_submit_urb(tp->intr_urb, GFP_KERNEL); if (res) { if (res == -ENODEV) netif_device_detach(tp->netdev); netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n", res); goto out_unlock; } napi_enable(&tp->napi); tasklet_enable(&tp->tx_tl); mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); #ifdef CONFIG_PM_SLEEP tp->pm_notifier.notifier_call = rtl_notifier; register_pm_notifier(&tp->pm_notifier); #endif return 0; out_unlock: mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); out_free: free_all_mem(tp); out: return res; } static int rtl8152_close(struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); int res = 0; #ifdef CONFIG_PM_SLEEP unregister_pm_notifier(&tp->pm_notifier); #endif tasklet_disable(&tp->tx_tl); clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); napi_disable(&tp->napi); netif_stop_queue(netdev); res = usb_autopm_get_interface(tp->intf); if (res < 0 || test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); rtl_stop_rx(tp); } else {
mutex_lock(&tp->control); tp->rtl_ops.down(tp); mutex_unlock(&tp->control); } if (!res) usb_autopm_put_interface(tp->intf); free_all_mem(tp); return res; } static void rtl_tally_reset(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RSTTALLY); ocp_data |= TALLY_RESET; ocp_write_word(tp, MCU_TYPE_PLA, PLA_RSTTALLY, ocp_data); } static void r8152b_init(struct r8152 *tp) { u32 ocp_data; u16 data; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; data = r8152_mdio_read(tp, MII_BMCR); if (data & BMCR_PDOWN) { data &= ~BMCR_PDOWN; r8152_mdio_write(tp, MII_BMCR, data); } r8152_aldps_en(tp, false); if (tp->version == RTL_VER_01) { ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE); ocp_data &= ~LED_MODE_MASK; ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data); } r8152_power_cut_en(tp, false); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); ocp_data |= TX_10M_IDLE_EN | PFM_PWM_SWITCH; ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL); ocp_data &= ~MCU_CLK_RATIO_MASK; ocp_data |= MCU_CLK_RATIO | D3_CLK_GATED_EN; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ocp_data); ocp_data = GPHY_STS_MSK | SPEED_DOWN_MSK | SPDWN_RXDV_MSK | SPDWN_LINKCHG_MSK; ocp_write_word(tp, MCU_TYPE_PLA, PLA_GPHY_INTR_IMR, ocp_data); rtl_tally_reset(tp); /* enable rx aggregation */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); } static void r8153_init(struct r8152 *tp) { u32 ocp_data; u16 data; int i; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153_u1u2en(tp, false); for (i = 0; i < 500; i++) { if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; msleep(20); if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; } data = r8153_phy_status(tp, 0); if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 || tp->version == RTL_VER_05) ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L); data = r8152_mdio_read(tp, MII_BMCR); if (data & BMCR_PDOWN) { data &= ~BMCR_PDOWN; r8152_mdio_write(tp, MII_BMCR, data); } data = r8153_phy_status(tp, PHY_STAT_LAN_ON); r8153_u2p3en(tp, false); if (tp->version == RTL_VER_04) { ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SSPHYLINK2); ocp_data &= ~pwd_dn_scale_mask; ocp_data |= pwd_dn_scale(96); ocp_write_word(tp, MCU_TYPE_USB, USB_SSPHYLINK2, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_USB2PHY); ocp_data |= USB2PHY_L1 | USB2PHY_SUSPEND; ocp_write_byte(tp, MCU_TYPE_USB, USB_USB2PHY, ocp_data); } else if (tp->version == RTL_VER_05) { ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_DMY_REG0); ocp_data &= ~ECM_ALDPS; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_DMY_REG0, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1); if (ocp_read_word(tp, MCU_TYPE_USB, USB_BURST_SIZE) == 0) ocp_data &= ~DYNAMIC_BURST; else ocp_data |= DYNAMIC_BURST; ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data); } else if (tp->version == RTL_VER_06) { ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1); if (ocp_read_word(tp, MCU_TYPE_USB, USB_BURST_SIZE) == 0) ocp_data &= ~DYNAMIC_BURST; else ocp_data |= DYNAMIC_BURST; ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data); r8153_queue_wake(tp, false); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); if (rtl8152_get_speed(tp) & LINK_STATUS) ocp_data |= CUR_LINK_OK; else ocp_data &= ~CUR_LINK_OK; ocp_data |= 
POLL_LINK_CHG; ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); } ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2); ocp_data |= EP4_FULL_FC; ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL); ocp_data &= ~TIMER11_EN; ocp_write_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE); ocp_data &= ~LED_MODE_MASK; ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data); ocp_data = FIFO_EMPTY_1FB | ROK_EXIT_LPM; if (tp->version == RTL_VER_04 && tp->udev->speed < USB_SPEED_SUPER) ocp_data |= LPM_TIMER_500MS; else ocp_data |= LPM_TIMER_500US; ocp_write_byte(tp, MCU_TYPE_USB, USB_LPM_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2); ocp_data &= ~SEN_VAL_MASK; ocp_data |= SEN_VAL_NORMAL | SEL_RXIDLE; ocp_write_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2, ocp_data); ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001); r8153_power_cut_en(tp, false); rtl_runtime_suspend_enable(tp, false); r8153_mac_clk_speed_down(tp, false); r8153_u1u2en(tp, true); usb_enable_lpm(tp->udev); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6); ocp_data |= LANWAKE_CLR_EN; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG); ocp_data &= ~LANWAKE_PIN; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data); /* rx aggregation */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); if (tp->dell_tb_rx_agg_bug) ocp_data |= RX_AGG_DISABLE; ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); rtl_tally_reset(tp); switch (tp->udev->speed) { case USB_SPEED_SUPER: case USB_SPEED_SUPER_PLUS: tp->coalesce = COALESCE_SUPER; break; case USB_SPEED_HIGH: tp->coalesce = COALESCE_HIGH; break; default: tp->coalesce = COALESCE_SLOW; break; } } static void r8153b_init(struct r8152 *tp) { u32 ocp_data; u16 data; int i; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_u1u2en(tp, false); for (i = 0; i < 500; i++) { if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; msleep(20); if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; } data = r8153_phy_status(tp, 0); data = r8152_mdio_read(tp, MII_BMCR); if (data & BMCR_PDOWN) { data &= ~BMCR_PDOWN; r8152_mdio_write(tp, MII_BMCR, data); } data = r8153_phy_status(tp, PHY_STAT_LAN_ON); r8153_u2p3en(tp, false); /* MSC timer = 0xfff * 8ms = 32760 ms */ ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff); r8153b_power_cut_en(tp, false); r8153b_ups_en(tp, false); r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); if (rtl8152_get_speed(tp) & LINK_STATUS) ocp_data |= CUR_LINK_OK; else ocp_data &= ~CUR_LINK_OK; ocp_data |= POLL_LINK_CHG; ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); if (tp->udev->speed >= USB_SPEED_SUPER) r8153b_u1u2en(tp, true); usb_enable_lpm(tp->udev); /* MAC clock speed down */ r8153_mac_clk_speed_down(tp, true); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); ocp_data &= ~PLA_MCU_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); if (tp->version == RTL_VER_09) { /* Disable Test IO for 32QFN */ if (ocp_read_byte(tp, MCU_TYPE_PLA, 0xdc00) & BIT(5)) { ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); ocp_data |= TEST_IO_OFF; ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); } } 
set_bit(GREEN_ETHERNET, &tp->flags); /* rx aggregation */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); rtl_tally_reset(tp); tp->coalesce = 15000; /* 15 us */ } static void r8153c_init(struct r8152 *tp) { u32 ocp_data; u16 data; int i; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_u1u2en(tp, false); /* Disable spi_en */ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5); ocp_data &= ~BIT(3); ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, 0xcbf0); ocp_data |= BIT(1); ocp_write_word(tp, MCU_TYPE_USB, 0xcbf0, ocp_data); for (i = 0; i < 500; i++) { if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; msleep(20); if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; } data = r8153_phy_status(tp, 0); data = r8152_mdio_read(tp, MII_BMCR); if (data & BMCR_PDOWN) { data &= ~BMCR_PDOWN; r8152_mdio_write(tp, MII_BMCR, data); } data = r8153_phy_status(tp, PHY_STAT_LAN_ON); r8153_u2p3en(tp, false); /* MSC timer = 0xfff * 8ms = 32760 ms */ ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff); r8153b_power_cut_en(tp, false); r8153c_ups_en(tp, false); r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); if (rtl8152_get_speed(tp) & LINK_STATUS) ocp_data |= CUR_LINK_OK; else ocp_data &= ~CUR_LINK_OK; ocp_data |= POLL_LINK_CHG; ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); r8153b_u1u2en(tp, true); usb_enable_lpm(tp->udev); /* MAC clock speed down */ r8153_mac_clk_speed_down(tp, true); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ocp_data &= ~BIT(7); ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); set_bit(GREEN_ETHERNET, &tp->flags); /* rx aggregation */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); rtl_tally_reset(tp); tp->coalesce = 15000; /* 15 us */ } static void r8156_hw_phy_cfg(struct r8152 *tp) { u32 ocp_data; u16 data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); if (ocp_data & PCUT_STATUS) { ocp_data &= ~PCUT_STATUS; ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); } data = r8153_phy_status(tp, 0); switch (data) { case PHY_STAT_EXT_INIT: rtl8152_apply_firmware(tp, true); data = ocp_reg_read(tp, 0xa468); data &= ~(BIT(3) | BIT(1)); ocp_reg_write(tp, 0xa468, data); break; case PHY_STAT_LAN_ON: case PHY_STAT_PWRDN: default: rtl8152_apply_firmware(tp, false); break; } /* disable ALDPS before updating the PHY parameters */ r8153_aldps_en(tp, false); /* disable EEE before updating the PHY parameters */ rtl_eee_enable(tp, false); data = r8153_phy_status(tp, PHY_STAT_LAN_ON); WARN_ON_ONCE(data != PHY_STAT_LAN_ON); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); ocp_data |= PFM_PWM_SWITCH; ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); switch (tp->version) { case RTL_VER_10: data = ocp_reg_read(tp, 0xad40); data &= ~0x3ff; data |= BIT(7) | BIT(2); ocp_reg_write(tp, 0xad40, data); data = ocp_reg_read(tp, 0xad4e); data |= BIT(4); ocp_reg_write(tp, 0xad4e, data); data = ocp_reg_read(tp, 0xad16); data &= ~0x3ff; data |= 0x6; ocp_reg_write(tp, 0xad16, data); data = ocp_reg_read(tp, 0xad32); data &= ~0x3f; data |= 6; ocp_reg_write(tp, 0xad32, data); data = ocp_reg_read(tp, 0xac08); 
data &= ~(BIT(12) | BIT(8)); ocp_reg_write(tp, 0xac08, data); data = ocp_reg_read(tp, 0xac8a); data |= BIT(12) | BIT(13) | BIT(14); data &= ~BIT(15); ocp_reg_write(tp, 0xac8a, data); data = ocp_reg_read(tp, 0xad18); data |= BIT(10); ocp_reg_write(tp, 0xad18, data); data = ocp_reg_read(tp, 0xad1a); data |= 0x3ff; ocp_reg_write(tp, 0xad1a, data); data = ocp_reg_read(tp, 0xad1c); data |= 0x3ff; ocp_reg_write(tp, 0xad1c, data); data = sram_read(tp, 0x80ea); data &= ~0xff00; data |= 0xc400; sram_write(tp, 0x80ea, data); data = sram_read(tp, 0x80eb); data &= ~0x0700; data |= 0x0300; sram_write(tp, 0x80eb, data); data = sram_read(tp, 0x80f8); data &= ~0xff00; data |= 0x1c00; sram_write(tp, 0x80f8, data); data = sram_read(tp, 0x80f1); data &= ~0xff00; data |= 0x3000; sram_write(tp, 0x80f1, data); data = sram_read(tp, 0x80fe); data &= ~0xff00; data |= 0xa500; sram_write(tp, 0x80fe, data); data = sram_read(tp, 0x8102); data &= ~0xff00; data |= 0x5000; sram_write(tp, 0x8102, data); data = sram_read(tp, 0x8015); data &= ~0xff00; data |= 0x3300; sram_write(tp, 0x8015, data); data = sram_read(tp, 0x8100); data &= ~0xff00; data |= 0x7000; sram_write(tp, 0x8100, data); data = sram_read(tp, 0x8014); data &= ~0xff00; data |= 0xf000; sram_write(tp, 0x8014, data); data = sram_read(tp, 0x8016); data &= ~0xff00; data |= 0x6500; sram_write(tp, 0x8016, data); data = sram_read(tp, 0x80dc); data &= ~0xff00; data |= 0xed00; sram_write(tp, 0x80dc, data); data = sram_read(tp, 0x80df); data |= BIT(8); sram_write(tp, 0x80df, data); data = sram_read(tp, 0x80e1); data &= ~BIT(8); sram_write(tp, 0x80e1, data); data = ocp_reg_read(tp, 0xbf06); data &= ~0x003f; data |= 0x0038; ocp_reg_write(tp, 0xbf06, data); sram_write(tp, 0x819f, 0xddb6); ocp_reg_write(tp, 0xbc34, 0x5555); data = ocp_reg_read(tp, 0xbf0a); data &= ~0x0e00; data |= 0x0a00; ocp_reg_write(tp, 0xbf0a, data); data = ocp_reg_read(tp, 0xbd2c); data &= ~BIT(13); ocp_reg_write(tp, 0xbd2c, data); break; case RTL_VER_11: data = ocp_reg_read(tp, 0xad16); data |= 0x3ff; ocp_reg_write(tp, 0xad16, data); data = ocp_reg_read(tp, 0xad32); data &= ~0x3f; data |= 6; ocp_reg_write(tp, 0xad32, data); data = ocp_reg_read(tp, 0xac08); data &= ~(BIT(12) | BIT(8)); ocp_reg_write(tp, 0xac08, data); data = ocp_reg_read(tp, 0xacc0); data &= ~0x3; data |= BIT(1); ocp_reg_write(tp, 0xacc0, data); data = ocp_reg_read(tp, 0xad40); data &= ~0xe7; data |= BIT(6) | BIT(2); ocp_reg_write(tp, 0xad40, data); data = ocp_reg_read(tp, 0xac14); data &= ~BIT(7); ocp_reg_write(tp, 0xac14, data); data = ocp_reg_read(tp, 0xac80); data &= ~(BIT(8) | BIT(9)); ocp_reg_write(tp, 0xac80, data); data = ocp_reg_read(tp, 0xac5e); data &= ~0x7; data |= BIT(1); ocp_reg_write(tp, 0xac5e, data); ocp_reg_write(tp, 0xad4c, 0x00a8); ocp_reg_write(tp, 0xac5c, 0x01ff); data = ocp_reg_read(tp, 0xac8a); data &= ~0xf0; data |= BIT(4) | BIT(5); ocp_reg_write(tp, 0xac8a, data); ocp_reg_write(tp, 0xb87c, 0x8157); data = ocp_reg_read(tp, 0xb87e); data &= ~0xff00; data |= 0x0500; ocp_reg_write(tp, 0xb87e, data); ocp_reg_write(tp, 0xb87c, 0x8159); data = ocp_reg_read(tp, 0xb87e); data &= ~0xff00; data |= 0x0700; ocp_reg_write(tp, 0xb87e, data); /* AAGC */ ocp_reg_write(tp, 0xb87c, 0x80a2); ocp_reg_write(tp, 0xb87e, 0x0153); ocp_reg_write(tp, 0xb87c, 0x809c); ocp_reg_write(tp, 0xb87e, 0x0153); /* EEE parameter */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_TXTWSYS_2P5G, 0x0056); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_USB_CFG); ocp_data |= EN_XG_LIP | EN_G_LIP; ocp_write_word(tp, MCU_TYPE_PLA, PLA_USB_CFG, ocp_data); 
sram_write(tp, 0x8257, 0x020f); /* XG PLL */ sram_write(tp, 0x80ea, 0x7843); /* GIGA Master */ if (rtl_phy_patch_request(tp, true, true)) return; /* Advance EEE */ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); ocp_data |= EEE_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); data = ocp_reg_read(tp, OCP_DOWN_SPEED); data &= ~(EN_EEE_100 | EN_EEE_1000); data |= EN_10M_CLKDIV; ocp_reg_write(tp, OCP_DOWN_SPEED, data); tp->ups_info._10m_ckdiv = true; tp->ups_info.eee_plloff_100 = false; tp->ups_info.eee_plloff_giga = false; data = ocp_reg_read(tp, OCP_POWER_CFG); data &= ~EEE_CLKDIV_EN; ocp_reg_write(tp, OCP_POWER_CFG, data); tp->ups_info.eee_ckdiv = false; ocp_reg_write(tp, OCP_SYSCLK_CFG, 0); ocp_reg_write(tp, OCP_SYSCLK_CFG, sysclk_div_expo(5)); tp->ups_info._250m_ckdiv = false; rtl_phy_patch_request(tp, false, true); /* enable ADC Ibias Cal */ data = ocp_reg_read(tp, 0xd068); data |= BIT(13); ocp_reg_write(tp, 0xd068, data); /* enable Thermal Sensor */ data = sram_read(tp, 0x81a2); data &= ~BIT(8); sram_write(tp, 0x81a2, data); data = ocp_reg_read(tp, 0xb54c); data &= ~0xff00; data |= 0xdb00; ocp_reg_write(tp, 0xb54c, data); /* Nway 2.5G Lite */ data = ocp_reg_read(tp, 0xa454); data &= ~BIT(0); ocp_reg_write(tp, 0xa454, data); /* CS DSP solution */ data = ocp_reg_read(tp, OCP_10GBT_CTRL); data |= RTL_ADV2_5G_F_R; ocp_reg_write(tp, OCP_10GBT_CTRL, data); data = ocp_reg_read(tp, 0xad4e); data &= ~BIT(4); ocp_reg_write(tp, 0xad4e, data); data = ocp_reg_read(tp, 0xa86a); data &= ~BIT(0); ocp_reg_write(tp, 0xa86a, data); /* MDI SWAP */ if ((ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CFG) & MID_REVERSE) && (ocp_reg_read(tp, 0xd068) & BIT(1))) { u16 swap_a, swap_b; data = ocp_reg_read(tp, 0xd068); data &= ~0x1f; data |= 0x1; /* p0 */ ocp_reg_write(tp, 0xd068, data); swap_a = ocp_reg_read(tp, 0xd06a); data &= ~0x18; data |= 0x18; /* p3 */ ocp_reg_write(tp, 0xd068, data); swap_b = ocp_reg_read(tp, 0xd06a); data &= ~0x18; /* p0 */ ocp_reg_write(tp, 0xd068, data); ocp_reg_write(tp, 0xd06a, (swap_a & ~0x7ff) | (swap_b & 0x7ff)); data |= 0x18; /* p3 */ ocp_reg_write(tp, 0xd068, data); ocp_reg_write(tp, 0xd06a, (swap_b & ~0x7ff) | (swap_a & 0x7ff)); data &= ~0x18; data |= 0x08; /* p1 */ ocp_reg_write(tp, 0xd068, data); swap_a = ocp_reg_read(tp, 0xd06a); data &= ~0x18; data |= 0x10; /* p2 */ ocp_reg_write(tp, 0xd068, data); swap_b = ocp_reg_read(tp, 0xd06a); data &= ~0x18; data |= 0x08; /* p1 */ ocp_reg_write(tp, 0xd068, data); ocp_reg_write(tp, 0xd06a, (swap_a & ~0x7ff) | (swap_b & 0x7ff)); data &= ~0x18; data |= 0x10; /* p2 */ ocp_reg_write(tp, 0xd068, data); ocp_reg_write(tp, 0xd06a, (swap_b & ~0x7ff) | (swap_a & 0x7ff)); swap_a = ocp_reg_read(tp, 0xbd5a); swap_b = ocp_reg_read(tp, 0xbd5c); ocp_reg_write(tp, 0xbd5a, (swap_a & ~0x1f1f) | ((swap_b & 0x1f) << 8) | ((swap_b >> 8) & 0x1f)); ocp_reg_write(tp, 0xbd5c, (swap_b & ~0x1f1f) | ((swap_a & 0x1f) << 8) | ((swap_a >> 8) & 0x1f)); swap_a = ocp_reg_read(tp, 0xbc18); swap_b = ocp_reg_read(tp, 0xbc1a); ocp_reg_write(tp, 0xbc18, (swap_a & ~0x1f1f) | ((swap_b & 0x1f) << 8) | ((swap_b >> 8) & 0x1f)); ocp_reg_write(tp, 0xbc1a, (swap_b & ~0x1f1f) | ((swap_a & 0x1f) << 8) | ((swap_a >> 8) & 0x1f)); } /* Notify the MAC when the speed is changed to force mode. 
*/ data = ocp_reg_read(tp, OCP_INTR_EN); data |= INTR_SPEED_FORCE; ocp_reg_write(tp, OCP_INTR_EN, data); break; default: break; } rtl_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags)); data = ocp_reg_read(tp, 0xa428); data &= ~BIT(9); ocp_reg_write(tp, 0xa428, data); data = ocp_reg_read(tp, 0xa5ea); data &= ~BIT(0); ocp_reg_write(tp, 0xa5ea, data); tp->ups_info.lite_mode = 0; if (tp->eee_en) rtl_eee_enable(tp, true); r8153_aldps_en(tp, true); r8152b_enable_fc(tp); r8153_u2p3en(tp, true); set_bit(PHY_RESET, &tp->flags); } static void r8156b_hw_phy_cfg(struct r8152 *tp) { u32 ocp_data; u16 data; switch (tp->version) { case RTL_VER_12: ocp_reg_write(tp, 0xbf86, 0x9000); data = ocp_reg_read(tp, 0xc402); data |= BIT(10); ocp_reg_write(tp, 0xc402, data); data &= ~BIT(10); ocp_reg_write(tp, 0xc402, data); ocp_reg_write(tp, 0xbd86, 0x1010); ocp_reg_write(tp, 0xbd88, 0x1010); data = ocp_reg_read(tp, 0xbd4e); data &= ~(BIT(10) | BIT(11)); data |= BIT(11); ocp_reg_write(tp, 0xbd4e, data); data = ocp_reg_read(tp, 0xbf46); data &= ~0xf00; data |= 0x700; ocp_reg_write(tp, 0xbf46, data); break; case RTL_VER_13: case RTL_VER_15: r8156b_wait_loading_flash(tp); break; default: break; } ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); if (ocp_data & PCUT_STATUS) { ocp_data &= ~PCUT_STATUS; ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); } data = r8153_phy_status(tp, 0); switch (data) { case PHY_STAT_EXT_INIT: rtl8152_apply_firmware(tp, true); data = ocp_reg_read(tp, 0xa466); data &= ~BIT(0); ocp_reg_write(tp, 0xa466, data); data = ocp_reg_read(tp, 0xa468); data &= ~(BIT(3) | BIT(1)); ocp_reg_write(tp, 0xa468, data); break; case PHY_STAT_LAN_ON: case PHY_STAT_PWRDN: default: rtl8152_apply_firmware(tp, false); break; } data = r8152_mdio_read(tp, MII_BMCR); if (data & BMCR_PDOWN) { data &= ~BMCR_PDOWN; r8152_mdio_write(tp, MII_BMCR, data); } /* disable ALDPS before updating the PHY parameters */ r8153_aldps_en(tp, false); /* disable EEE before updating the PHY parameters */ rtl_eee_enable(tp, false); data = r8153_phy_status(tp, PHY_STAT_LAN_ON); WARN_ON_ONCE(data != PHY_STAT_LAN_ON); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); ocp_data |= PFM_PWM_SWITCH; ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); switch (tp->version) { case RTL_VER_12: data = ocp_reg_read(tp, 0xbc08); data |= BIT(3) | BIT(2); ocp_reg_write(tp, 0xbc08, data); data = sram_read(tp, 0x8fff); data &= ~0xff00; data |= 0x0400; sram_write(tp, 0x8fff, data); data = ocp_reg_read(tp, 0xacda); data |= 0xff00; ocp_reg_write(tp, 0xacda, data); data = ocp_reg_read(tp, 0xacde); data |= 0xf000; ocp_reg_write(tp, 0xacde, data); ocp_reg_write(tp, 0xac8c, 0x0ffc); ocp_reg_write(tp, 0xac46, 0xb7b4); ocp_reg_write(tp, 0xac50, 0x0fbc); ocp_reg_write(tp, 0xac3c, 0x9240); ocp_reg_write(tp, 0xac4e, 0x0db4); ocp_reg_write(tp, 0xacc6, 0x0707); ocp_reg_write(tp, 0xacc8, 0xa0d3); ocp_reg_write(tp, 0xad08, 0x0007); ocp_reg_write(tp, 0xb87c, 0x8560); ocp_reg_write(tp, 0xb87e, 0x19cc); ocp_reg_write(tp, 0xb87c, 0x8562); ocp_reg_write(tp, 0xb87e, 0x19cc); ocp_reg_write(tp, 0xb87c, 0x8564); ocp_reg_write(tp, 0xb87e, 0x19cc); ocp_reg_write(tp, 0xb87c, 0x8566); ocp_reg_write(tp, 0xb87e, 0x147d); ocp_reg_write(tp, 0xb87c, 0x8568); ocp_reg_write(tp, 0xb87e, 0x147d); ocp_reg_write(tp, 0xb87c, 0x856a); ocp_reg_write(tp, 0xb87e, 0x147d); ocp_reg_write(tp, 0xb87c, 0x8ffe); ocp_reg_write(tp, 0xb87e, 0x0907); ocp_reg_write(tp, 0xb87c, 0x80d6); ocp_reg_write(tp, 0xb87e, 0x2801); ocp_reg_write(tp, 0xb87c, 0x80f2); ocp_reg_write(tp, 0xb87e, 0x2801); 
ocp_reg_write(tp, 0xb87c, 0x80f4); ocp_reg_write(tp, 0xb87e, 0x6077); ocp_reg_write(tp, 0xb506, 0x01e7); ocp_reg_write(tp, 0xb87c, 0x8013); ocp_reg_write(tp, 0xb87e, 0x0700); ocp_reg_write(tp, 0xb87c, 0x8fb9); ocp_reg_write(tp, 0xb87e, 0x2801); ocp_reg_write(tp, 0xb87c, 0x8fba); ocp_reg_write(tp, 0xb87e, 0x0100); ocp_reg_write(tp, 0xb87c, 0x8fbc); ocp_reg_write(tp, 0xb87e, 0x1900); ocp_reg_write(tp, 0xb87c, 0x8fbe); ocp_reg_write(tp, 0xb87e, 0xe100); ocp_reg_write(tp, 0xb87c, 0x8fc0); ocp_reg_write(tp, 0xb87e, 0x0800); ocp_reg_write(tp, 0xb87c, 0x8fc2); ocp_reg_write(tp, 0xb87e, 0xe500); ocp_reg_write(tp, 0xb87c, 0x8fc4); ocp_reg_write(tp, 0xb87e, 0x0f00); ocp_reg_write(tp, 0xb87c, 0x8fc6); ocp_reg_write(tp, 0xb87e, 0xf100); ocp_reg_write(tp, 0xb87c, 0x8fc8); ocp_reg_write(tp, 0xb87e, 0x0400); ocp_reg_write(tp, 0xb87c, 0x8fca); ocp_reg_write(tp, 0xb87e, 0xf300); ocp_reg_write(tp, 0xb87c, 0x8fcc); ocp_reg_write(tp, 0xb87e, 0xfd00); ocp_reg_write(tp, 0xb87c, 0x8fce); ocp_reg_write(tp, 0xb87e, 0xff00); ocp_reg_write(tp, 0xb87c, 0x8fd0); ocp_reg_write(tp, 0xb87e, 0xfb00); ocp_reg_write(tp, 0xb87c, 0x8fd2); ocp_reg_write(tp, 0xb87e, 0x0100); ocp_reg_write(tp, 0xb87c, 0x8fd4); ocp_reg_write(tp, 0xb87e, 0xf400); ocp_reg_write(tp, 0xb87c, 0x8fd6); ocp_reg_write(tp, 0xb87e, 0xff00); ocp_reg_write(tp, 0xb87c, 0x8fd8); ocp_reg_write(tp, 0xb87e, 0xf600); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_USB_CFG); ocp_data |= EN_XG_LIP | EN_G_LIP; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_USB_CFG, ocp_data); ocp_reg_write(tp, 0xb87c, 0x813d); ocp_reg_write(tp, 0xb87e, 0x390e); ocp_reg_write(tp, 0xb87c, 0x814f); ocp_reg_write(tp, 0xb87e, 0x790e); ocp_reg_write(tp, 0xb87c, 0x80b0); ocp_reg_write(tp, 0xb87e, 0x0f31); data = ocp_reg_read(tp, 0xbf4c); data |= BIT(1); ocp_reg_write(tp, 0xbf4c, data); data = ocp_reg_read(tp, 0xbcca); data |= BIT(9) | BIT(8); ocp_reg_write(tp, 0xbcca, data); ocp_reg_write(tp, 0xb87c, 0x8141); ocp_reg_write(tp, 0xb87e, 0x320e); ocp_reg_write(tp, 0xb87c, 0x8153); ocp_reg_write(tp, 0xb87e, 0x720e); ocp_reg_write(tp, 0xb87c, 0x8529); ocp_reg_write(tp, 0xb87e, 0x050e); data = ocp_reg_read(tp, OCP_EEE_CFG); data &= ~CTAP_SHORT_EN; ocp_reg_write(tp, OCP_EEE_CFG, data); sram_write(tp, 0x816c, 0xc4a0); sram_write(tp, 0x8170, 0xc4a0); sram_write(tp, 0x8174, 0x04a0); sram_write(tp, 0x8178, 0x04a0); sram_write(tp, 0x817c, 0x0719); sram_write(tp, 0x8ff4, 0x0400); sram_write(tp, 0x8ff1, 0x0404); ocp_reg_write(tp, 0xbf4a, 0x001b); ocp_reg_write(tp, 0xb87c, 0x8033); ocp_reg_write(tp, 0xb87e, 0x7c13); ocp_reg_write(tp, 0xb87c, 0x8037); ocp_reg_write(tp, 0xb87e, 0x7c13); ocp_reg_write(tp, 0xb87c, 0x803b); ocp_reg_write(tp, 0xb87e, 0xfc32); ocp_reg_write(tp, 0xb87c, 0x803f); ocp_reg_write(tp, 0xb87e, 0x7c13); ocp_reg_write(tp, 0xb87c, 0x8043); ocp_reg_write(tp, 0xb87e, 0x7c13); ocp_reg_write(tp, 0xb87c, 0x8047); ocp_reg_write(tp, 0xb87e, 0x7c13); ocp_reg_write(tp, 0xb87c, 0x8145); ocp_reg_write(tp, 0xb87e, 0x370e); ocp_reg_write(tp, 0xb87c, 0x8157); ocp_reg_write(tp, 0xb87e, 0x770e); ocp_reg_write(tp, 0xb87c, 0x8169); ocp_reg_write(tp, 0xb87e, 0x0d0a); ocp_reg_write(tp, 0xb87c, 0x817b); ocp_reg_write(tp, 0xb87e, 0x1d0a); data = sram_read(tp, 0x8217); data &= ~0xff00; data |= 0x5000; sram_write(tp, 0x8217, data); data = sram_read(tp, 0x821a); data &= ~0xff00; data |= 0x5000; sram_write(tp, 0x821a, data); sram_write(tp, 0x80da, 0x0403); data = sram_read(tp, 0x80dc); data &= ~0xff00; data |= 0x1000; sram_write(tp, 0x80dc, data); sram_write(tp, 0x80b3, 0x0384); sram_write(tp, 0x80b7, 0x2007); data = 
sram_read(tp, 0x80ba); data &= ~0xff00; data |= 0x6c00; sram_write(tp, 0x80ba, data); sram_write(tp, 0x80b5, 0xf009); data = sram_read(tp, 0x80bd); data &= ~0xff00; data |= 0x9f00; sram_write(tp, 0x80bd, data); sram_write(tp, 0x80c7, 0xf083); sram_write(tp, 0x80dd, 0x03f0); data = sram_read(tp, 0x80df); data &= ~0xff00; data |= 0x1000; sram_write(tp, 0x80df, data); sram_write(tp, 0x80cb, 0x2007); data = sram_read(tp, 0x80ce); data &= ~0xff00; data |= 0x6c00; sram_write(tp, 0x80ce, data); sram_write(tp, 0x80c9, 0x8009); data = sram_read(tp, 0x80d1); data &= ~0xff00; data |= 0x8000; sram_write(tp, 0x80d1, data); sram_write(tp, 0x80a3, 0x200a); sram_write(tp, 0x80a5, 0xf0ad); sram_write(tp, 0x809f, 0x6073); sram_write(tp, 0x80a1, 0x000b); data = sram_read(tp, 0x80a9); data &= ~0xff00; data |= 0xc000; sram_write(tp, 0x80a9, data); if (rtl_phy_patch_request(tp, true, true)) return; data = ocp_reg_read(tp, 0xb896); data &= ~BIT(0); ocp_reg_write(tp, 0xb896, data); data = ocp_reg_read(tp, 0xb892); data &= ~0xff00; ocp_reg_write(tp, 0xb892, data); ocp_reg_write(tp, 0xb88e, 0xc23e); ocp_reg_write(tp, 0xb890, 0x0000); ocp_reg_write(tp, 0xb88e, 0xc240); ocp_reg_write(tp, 0xb890, 0x0103); ocp_reg_write(tp, 0xb88e, 0xc242); ocp_reg_write(tp, 0xb890, 0x0507); ocp_reg_write(tp, 0xb88e, 0xc244); ocp_reg_write(tp, 0xb890, 0x090b); ocp_reg_write(tp, 0xb88e, 0xc246); ocp_reg_write(tp, 0xb890, 0x0c0e); ocp_reg_write(tp, 0xb88e, 0xc248); ocp_reg_write(tp, 0xb890, 0x1012); ocp_reg_write(tp, 0xb88e, 0xc24a); ocp_reg_write(tp, 0xb890, 0x1416); data = ocp_reg_read(tp, 0xb896); data |= BIT(0); ocp_reg_write(tp, 0xb896, data); rtl_phy_patch_request(tp, false, true); data = ocp_reg_read(tp, 0xa86a); data |= BIT(0); ocp_reg_write(tp, 0xa86a, data); data = ocp_reg_read(tp, 0xa6f0); data |= BIT(0); ocp_reg_write(tp, 0xa6f0, data); ocp_reg_write(tp, 0xbfa0, 0xd70d); ocp_reg_write(tp, 0xbfa2, 0x4100); ocp_reg_write(tp, 0xbfa4, 0xe868); ocp_reg_write(tp, 0xbfa6, 0xdc59); ocp_reg_write(tp, 0xb54c, 0x3c18); data = ocp_reg_read(tp, 0xbfa4); data &= ~BIT(5); ocp_reg_write(tp, 0xbfa4, data); data = sram_read(tp, 0x817d); data |= BIT(12); sram_write(tp, 0x817d, data); break; case RTL_VER_13: /* 2.5G INRX */ data = ocp_reg_read(tp, 0xac46); data &= ~0x00f0; data |= 0x0090; ocp_reg_write(tp, 0xac46, data); data = ocp_reg_read(tp, 0xad30); data &= ~0x0003; data |= 0x0001; ocp_reg_write(tp, 0xad30, data); fallthrough; case RTL_VER_15: /* EEE parameter */ ocp_reg_write(tp, 0xb87c, 0x80f5); ocp_reg_write(tp, 0xb87e, 0x760e); ocp_reg_write(tp, 0xb87c, 0x8107); ocp_reg_write(tp, 0xb87e, 0x360e); ocp_reg_write(tp, 0xb87c, 0x8551); data = ocp_reg_read(tp, 0xb87e); data &= ~0xff00; data |= 0x0800; ocp_reg_write(tp, 0xb87e, data); /* ADC_PGA parameter */ data = ocp_reg_read(tp, 0xbf00); data &= ~0xe000; data |= 0xa000; ocp_reg_write(tp, 0xbf00, data); data = ocp_reg_read(tp, 0xbf46); data &= ~0x0f00; data |= 0x0300; ocp_reg_write(tp, 0xbf46, data); /* Green Table-PGA, 1G full viterbi */ sram_write(tp, 0x8044, 0x2417); sram_write(tp, 0x804a, 0x2417); sram_write(tp, 0x8050, 0x2417); sram_write(tp, 0x8056, 0x2417); sram_write(tp, 0x805c, 0x2417); sram_write(tp, 0x8062, 0x2417); sram_write(tp, 0x8068, 0x2417); sram_write(tp, 0x806e, 0x2417); sram_write(tp, 0x8074, 0x2417); sram_write(tp, 0x807a, 0x2417); /* XG PLL */ data = ocp_reg_read(tp, 0xbf84); data &= ~0xe000; data |= 0xa000; ocp_reg_write(tp, 0xbf84, data); break; default: break; } /* Notify the MAC when the speed is changed to force mode. 
*/ data = ocp_reg_read(tp, OCP_INTR_EN); data |= INTR_SPEED_FORCE; ocp_reg_write(tp, OCP_INTR_EN, data); if (rtl_phy_patch_request(tp, true, true)) return; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); ocp_data |= EEE_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); data = ocp_reg_read(tp, OCP_DOWN_SPEED); data &= ~(EN_EEE_100 | EN_EEE_1000); data |= EN_10M_CLKDIV; ocp_reg_write(tp, OCP_DOWN_SPEED, data); tp->ups_info._10m_ckdiv = true; tp->ups_info.eee_plloff_100 = false; tp->ups_info.eee_plloff_giga = false; data = ocp_reg_read(tp, OCP_POWER_CFG); data &= ~EEE_CLKDIV_EN; ocp_reg_write(tp, OCP_POWER_CFG, data); tp->ups_info.eee_ckdiv = false; rtl_phy_patch_request(tp, false, true); rtl_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags)); data = ocp_reg_read(tp, 0xa428); data &= ~BIT(9); ocp_reg_write(tp, 0xa428, data); data = ocp_reg_read(tp, 0xa5ea); data &= ~BIT(0); ocp_reg_write(tp, 0xa5ea, data); tp->ups_info.lite_mode = 0; if (tp->eee_en) rtl_eee_enable(tp, true); r8153_aldps_en(tp, true); r8152b_enable_fc(tp); r8153_u2p3en(tp, true); set_bit(PHY_RESET, &tp->flags); } static void r8156_init(struct r8152 *tp) { u32 ocp_data; u16 data; int i; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP); ocp_data &= ~EN_ALL_SPEED; ocp_write_byte(tp, MCU_TYPE_USB, USB_ECM_OP, ocp_data); ocp_write_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION, 0); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_ECM_OPTION); ocp_data |= BYPASS_MAC_RESET; ocp_write_word(tp, MCU_TYPE_USB, USB_ECM_OPTION, ocp_data); r8153b_u1u2en(tp, false); for (i = 0; i < 500; i++) { if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; msleep(20); if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; } data = r8153_phy_status(tp, 0); if (data == PHY_STAT_EXT_INIT) { data = ocp_reg_read(tp, 0xa468); data &= ~(BIT(3) | BIT(1)); ocp_reg_write(tp, 0xa468, data); } data = r8152_mdio_read(tp, MII_BMCR); if (data & BMCR_PDOWN) { data &= ~BMCR_PDOWN; r8152_mdio_write(tp, MII_BMCR, data); } data = r8153_phy_status(tp, PHY_STAT_LAN_ON); WARN_ON_ONCE(data != PHY_STAT_LAN_ON); r8153_u2p3en(tp, false); /* MSC timer = 0xfff * 8ms = 32760 ms */ ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff); /* U1/U2/L1 idle timer. 
500 us */ ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500); r8153b_power_cut_en(tp, false); r8156_ups_en(tp, false); r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); if (tp->udev->speed >= USB_SPEED_SUPER) r8153b_u1u2en(tp, true); usb_enable_lpm(tp->udev); r8156_mac_clk_spd(tp, true); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); ocp_data &= ~PLA_MCU_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); if (rtl8152_get_speed(tp) & LINK_STATUS) ocp_data |= CUR_LINK_OK; else ocp_data &= ~CUR_LINK_OK; ocp_data |= POLL_LINK_CHG; ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); set_bit(GREEN_ETHERNET, &tp->flags); /* rx aggregation */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_BMU_CONFIG); ocp_data |= ACT_ODMA; ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_CONFIG, ocp_data); r8156_mdio_force_mode(tp); rtl_tally_reset(tp); tp->coalesce = 15000; /* 15 us */ } static void r8156b_init(struct r8152 *tp) { u32 ocp_data; u16 data; int i; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP); ocp_data &= ~EN_ALL_SPEED; ocp_write_byte(tp, MCU_TYPE_USB, USB_ECM_OP, ocp_data); ocp_write_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION, 0); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_ECM_OPTION); ocp_data |= BYPASS_MAC_RESET; ocp_write_word(tp, MCU_TYPE_USB, USB_ECM_OPTION, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL); ocp_data |= RX_DETECT8; ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data); r8153b_u1u2en(tp, false); switch (tp->version) { case RTL_VER_13: case RTL_VER_15: r8156b_wait_loading_flash(tp); break; default: break; } for (i = 0; i < 500; i++) { if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; msleep(20); if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; } data = r8153_phy_status(tp, 0); if (data == PHY_STAT_EXT_INIT) { data = ocp_reg_read(tp, 0xa468); data &= ~(BIT(3) | BIT(1)); ocp_reg_write(tp, 0xa468, data); data = ocp_reg_read(tp, 0xa466); data &= ~BIT(0); ocp_reg_write(tp, 0xa466, data); } data = r8152_mdio_read(tp, MII_BMCR); if (data & BMCR_PDOWN) { data &= ~BMCR_PDOWN; r8152_mdio_write(tp, MII_BMCR, data); } data = r8153_phy_status(tp, PHY_STAT_LAN_ON); r8153_u2p3en(tp, false); /* MSC timer = 0xfff * 8ms = 32760 ms */ ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff); /* U1/U2/L1 idle timer. 500 us */ ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500); r8153b_power_cut_en(tp, false); r8156_ups_en(tp, false); r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); if (tp->udev->speed >= USB_SPEED_SUPER) r8153b_u1u2en(tp, true); usb_enable_lpm(tp->udev); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data &= ~SLOT_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); ocp_data |= FLOW_CTRL_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); /* enable fc timer and set timer to 600 ms. 
*/ ocp_write_word(tp, MCU_TYPE_USB, USB_FC_TIMER, CTRL_TIMER_EN | (600 / 8)); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_CTRL); if (!(ocp_read_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL) & DACK_DET_EN)) ocp_data |= FLOW_CTRL_PATCH_2; ocp_data &= ~AUTO_SPEEDUP; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); ocp_data |= FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); r8156_mac_clk_spd(tp, true); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); ocp_data &= ~PLA_MCU_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); if (rtl8152_get_speed(tp) & LINK_STATUS) ocp_data |= CUR_LINK_OK; else ocp_data &= ~CUR_LINK_OK; ocp_data |= POLL_LINK_CHG; ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); set_bit(GREEN_ETHERNET, &tp->flags); /* rx aggregation */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); r8156_mdio_force_mode(tp); rtl_tally_reset(tp); tp->coalesce = 15000; /* 15 us */ } static bool rtl_check_vendor_ok(struct usb_interface *intf) { struct usb_host_interface *alt = intf->cur_altsetting; struct usb_endpoint_descriptor *in, *out, *intr; if (usb_find_common_endpoints(alt, &in, &out, &intr, NULL) < 0) { dev_err(&intf->dev, "Expected endpoints are not found\n"); return false; } /* Check Rx endpoint address */ if (usb_endpoint_num(in) != 1) { dev_err(&intf->dev, "Invalid Rx endpoint address\n"); return false; } /* Check Tx endpoint address */ if (usb_endpoint_num(out) != 2) { dev_err(&intf->dev, "Invalid Tx endpoint address\n"); return false; } /* Check interrupt endpoint address */ if (usb_endpoint_num(intr) != 3) { dev_err(&intf->dev, "Invalid interrupt endpoint address\n"); return false; } return true; } static int rtl8152_pre_reset(struct usb_interface *intf) { struct r8152 *tp = usb_get_intfdata(intf); struct net_device *netdev; rtnl_lock(); if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags)) return 0; netdev = tp->netdev; if (!netif_running(netdev)) return 0; netif_stop_queue(netdev); tasklet_disable(&tp->tx_tl); clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); napi_disable(&tp->napi); if (netif_carrier_ok(netdev)) { mutex_lock(&tp->control); set_bit(IN_PRE_RESET, &tp->flags); tp->rtl_ops.disable(tp); clear_bit(IN_PRE_RESET, &tp->flags); mutex_unlock(&tp->control); } return 0; } static int rtl8152_post_reset(struct usb_interface *intf) { struct r8152 *tp = usb_get_intfdata(intf); struct net_device *netdev; struct sockaddr sa; if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags)) goto exit; rtl_set_accessible(tp); /* reset the MAC address in case of policy change */ if (determine_ethernet_addr(tp, &sa) >= 0) dev_set_mac_address (tp->netdev, &sa, NULL); netdev = tp->netdev; if (!netif_running(netdev)) goto exit; set_bit(WORK_ENABLE, &tp->flags); if (netif_carrier_ok(netdev)) { mutex_lock(&tp->control); tp->rtl_ops.enable(tp); rtl_start_rx(tp); _rtl8152_set_rx_mode(netdev); mutex_unlock(&tp->control); } napi_enable(&tp->napi); tasklet_enable(&tp->tx_tl); netif_wake_queue(netdev); usb_submit_urb(tp->intr_urb, GFP_KERNEL); if (!list_empty(&tp->rx_done)) napi_schedule(&tp->napi); exit: rtnl_unlock(); return 0; } static bool delay_autosuspend(struct r8152 *tp) { bool sw_linking = !!netif_carrier_ok(tp->netdev); bool 
hw_linking = !!(rtl8152_get_speed(tp) & LINK_STATUS); /* This means a link change has occurred and the driver has not detected it yet. If the driver has disabled tx/rx while the hardware link is up, the device would not be woken up by receiving any packet. */ if (work_busy(&tp->schedule.work) || sw_linking != hw_linking) return true; /* If the link went down because of nway, the device may miss the link change event and would not wake up when the link comes back on. */ if (!sw_linking && tp->rtl_ops.in_nway(tp)) return true; else if (!skb_queue_empty(&tp->tx_queue)) return true; else return false; } static int rtl8152_runtime_resume(struct r8152 *tp) { struct net_device *netdev = tp->netdev; if (netif_running(netdev) && netdev->flags & IFF_UP) { struct napi_struct *napi = &tp->napi; tp->rtl_ops.autosuspend_en(tp, false); napi_disable(napi); set_bit(WORK_ENABLE, &tp->flags); if (netif_carrier_ok(netdev)) { if (rtl8152_get_speed(tp) & LINK_STATUS) { rtl_start_rx(tp); } else { netif_carrier_off(netdev); tp->rtl_ops.disable(tp); netif_info(tp, link, netdev, "linking down\n"); } } napi_enable(napi); clear_bit(SELECTIVE_SUSPEND, &tp->flags); smp_mb__after_atomic(); if (!list_empty(&tp->rx_done)) napi_schedule(&tp->napi); usb_submit_urb(tp->intr_urb, GFP_NOIO); } else { if (netdev->flags & IFF_UP) tp->rtl_ops.autosuspend_en(tp, false); clear_bit(SELECTIVE_SUSPEND, &tp->flags); } return 0; } static int rtl8152_system_resume(struct r8152 *tp) { struct net_device *netdev = tp->netdev; netif_device_attach(netdev); if (netif_running(netdev) && (netdev->flags & IFF_UP)) { tp->rtl_ops.up(tp); netif_carrier_off(netdev); set_bit(WORK_ENABLE, &tp->flags); usb_submit_urb(tp->intr_urb, GFP_NOIO); } /* If the device is RTL8152_INACCESSIBLE here then we should do a * reset. This is important because the usb_lock_device_for_reset() * that happens as a result of usb_queue_reset_device() will silently * fail if the device was suspended or if too much time passed. * * NOTE: The device is locked here so we can directly do the reset. * We don't need usb_lock_device_for_reset() because that's just a * wrapper over device_lock() and device_resume() (which calls us) * does that for us.
*/ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) usb_reset_device(tp->udev); return 0; } static int rtl8152_runtime_suspend(struct r8152 *tp) { struct net_device *netdev = tp->netdev; int ret = 0; if (!tp->rtl_ops.autosuspend_en) return -EBUSY; set_bit(SELECTIVE_SUSPEND, &tp->flags); smp_mb__after_atomic(); if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) { u32 rcr = 0; if (netif_carrier_ok(netdev)) { u32 ocp_data; rcr = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data = rcr & ~RCR_ACPT_ALL; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); rxdy_gated_en(tp, true); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if (!(ocp_data & RXFIFO_EMPTY)) { rxdy_gated_en(tp, false); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr); clear_bit(SELECTIVE_SUSPEND, &tp->flags); smp_mb__after_atomic(); ret = -EBUSY; goto out1; } } clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); tp->rtl_ops.autosuspend_en(tp, true); if (netif_carrier_ok(netdev)) { struct napi_struct *napi = &tp->napi; napi_disable(napi); rtl_stop_rx(tp); rxdy_gated_en(tp, false); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr); napi_enable(napi); } if (delay_autosuspend(tp)) { rtl8152_runtime_resume(tp); ret = -EBUSY; } } out1: return ret; } static int rtl8152_system_suspend(struct r8152 *tp) { struct net_device *netdev = tp->netdev; netif_device_detach(netdev); if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) { struct napi_struct *napi = &tp->napi; clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); tasklet_disable(&tp->tx_tl); napi_disable(napi); cancel_delayed_work_sync(&tp->schedule); tp->rtl_ops.down(tp); napi_enable(napi); tasklet_enable(&tp->tx_tl); } /* If we're inaccessible here then some of the work that we did to * get the adapter ready for suspend didn't work. Queue up a wakeup * event so we can try again. 
*/ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) pm_wakeup_event(&tp->udev->dev, 0); return 0; } static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) { struct r8152 *tp = usb_get_intfdata(intf); int ret; mutex_lock(&tp->control); if (PMSG_IS_AUTO(message)) ret = rtl8152_runtime_suspend(tp); else ret = rtl8152_system_suspend(tp); mutex_unlock(&tp->control); return ret; } static int rtl8152_resume(struct usb_interface *intf) { struct r8152 *tp = usb_get_intfdata(intf); int ret; mutex_lock(&tp->control); rtl_reset_ocp_base(tp); if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) ret = rtl8152_runtime_resume(tp); else ret = rtl8152_system_resume(tp); mutex_unlock(&tp->control); return ret; } static int rtl8152_reset_resume(struct usb_interface *intf) { struct r8152 *tp = usb_get_intfdata(intf); clear_bit(SELECTIVE_SUSPEND, &tp->flags); rtl_reset_ocp_base(tp); tp->rtl_ops.init(tp); queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); set_ethernet_addr(tp, true); return rtl8152_resume(intf); } static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct r8152 *tp = netdev_priv(dev); if (usb_autopm_get_interface(tp->intf) < 0) return; if (!rtl_can_wakeup(tp)) { wol->supported = 0; wol->wolopts = 0; } else { mutex_lock(&tp->control); wol->supported = WAKE_ANY; wol->wolopts = __rtl_get_wol(tp); mutex_unlock(&tp->control); } usb_autopm_put_interface(tp->intf); } static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct r8152 *tp = netdev_priv(dev); int ret; if (!rtl_can_wakeup(tp)) return -EOPNOTSUPP; if (wol->wolopts & ~WAKE_ANY) return -EINVAL; ret = usb_autopm_get_interface(tp->intf); if (ret < 0) goto out_set_wol; mutex_lock(&tp->control); __rtl_set_wol(tp, wol->wolopts); tp->saved_wolopts = wol->wolopts & WAKE_ANY; mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); out_set_wol: return ret; } static u32 rtl8152_get_msglevel(struct net_device *dev) { struct r8152 *tp = netdev_priv(dev); return tp->msg_enable; } static void rtl8152_set_msglevel(struct net_device *dev, u32 value) { struct r8152 *tp = netdev_priv(dev); tp->msg_enable = value; } static void rtl8152_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { struct r8152 *tp = netdev_priv(netdev); strscpy(info->driver, MODULENAME, sizeof(info->driver)); strscpy(info->version, DRIVER_VERSION, sizeof(info->version)); usb_make_path(tp->udev, info->bus_info, sizeof(info->bus_info)); if (!IS_ERR_OR_NULL(tp->rtl_fw.fw)) strscpy(info->fw_version, tp->rtl_fw.version, sizeof(info->fw_version)); } static int rtl8152_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct r8152 *tp = netdev_priv(netdev); int ret; if (!tp->mii.mdio_read) return -EOPNOTSUPP; ret = usb_autopm_get_interface(tp->intf); if (ret < 0) goto out; mutex_lock(&tp->control); mii_ethtool_get_link_ksettings(&tp->mii, cmd); linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, cmd->link_modes.supported, tp->support_2500full); if (tp->support_2500full) { linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, cmd->link_modes.advertising, ocp_reg_read(tp, OCP_10GBT_CTRL) & MDIO_AN_10GBT_CTRL_ADV2_5G); linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, cmd->link_modes.lp_advertising, ocp_reg_read(tp, OCP_10GBT_STAT) & MDIO_AN_10GBT_STAT_LP2_5G); if (is_speed_2500(rtl8152_get_speed(tp))) cmd->base.speed = SPEED_2500; } mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); out: return ret; } static int 
rtl8152_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct r8152 *tp = netdev_priv(dev); u32 advertising = 0; int ret; ret = usb_autopm_get_interface(tp->intf); if (ret < 0) goto out; if (test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, cmd->link_modes.advertising)) advertising |= RTL_ADVERTISED_10_HALF; if (test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, cmd->link_modes.advertising)) advertising |= RTL_ADVERTISED_10_FULL; if (test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, cmd->link_modes.advertising)) advertising |= RTL_ADVERTISED_100_HALF; if (test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, cmd->link_modes.advertising)) advertising |= RTL_ADVERTISED_100_FULL; if (test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, cmd->link_modes.advertising)) advertising |= RTL_ADVERTISED_1000_HALF; if (test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, cmd->link_modes.advertising)) advertising |= RTL_ADVERTISED_1000_FULL; if (test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, cmd->link_modes.advertising)) advertising |= RTL_ADVERTISED_2500_FULL; mutex_lock(&tp->control); ret = rtl8152_set_speed(tp, cmd->base.autoneg, cmd->base.speed, cmd->base.duplex, advertising); if (!ret) { tp->autoneg = cmd->base.autoneg; tp->speed = cmd->base.speed; tp->duplex = cmd->base.duplex; tp->advertising = advertising; } mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); out: return ret; } static const char rtl8152_gstrings[][ETH_GSTRING_LEN] = { "tx_packets", "rx_packets", "tx_errors", "rx_errors", "rx_missed", "align_errors", "tx_single_collisions", "tx_multi_collisions", "rx_unicast", "rx_broadcast", "rx_multicast", "tx_aborted", "tx_underrun", }; static int rtl8152_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(rtl8152_gstrings); default: return -EOPNOTSUPP; } } static void rtl8152_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct r8152 *tp = netdev_priv(dev); struct tally_counter tally; if (usb_autopm_get_interface(tp->intf) < 0) return; generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), &tally, MCU_TYPE_PLA); usb_autopm_put_interface(tp->intf); data[0] = le64_to_cpu(tally.tx_packets); data[1] = le64_to_cpu(tally.rx_packets); data[2] = le64_to_cpu(tally.tx_errors); data[3] = le32_to_cpu(tally.rx_errors); data[4] = le16_to_cpu(tally.rx_missed); data[5] = le16_to_cpu(tally.align_errors); data[6] = le32_to_cpu(tally.tx_one_collision); data[7] = le32_to_cpu(tally.tx_multi_collision); data[8] = le64_to_cpu(tally.rx_unicast); data[9] = le64_to_cpu(tally.rx_broadcast); data[10] = le32_to_cpu(tally.rx_multicast); data[11] = le16_to_cpu(tally.tx_aborted); data[12] = le16_to_cpu(tally.tx_underrun); } static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data) { switch (stringset) { case ETH_SS_STATS: memcpy(data, rtl8152_gstrings, sizeof(rtl8152_gstrings)); break; } } static int r8152_get_eee(struct r8152 *tp, struct ethtool_keee *eee) { __ETHTOOL_DECLARE_LINK_MODE_MASK(common); u16 val; val = r8152_mmd_read(tp, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); mii_eee_cap1_mod_linkmode_t(eee->supported, val); val = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV); mii_eee_cap1_mod_linkmode_t(eee->advertised, val); val = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE); mii_eee_cap1_mod_linkmode_t(eee->lp_advertised, val); eee->eee_enabled = tp->eee_en; linkmode_and(common, eee->advertised, eee->lp_advertised); eee->eee_active = phy_check_valid(tp->speed, tp->duplex, common); return 0; } 
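/* r8152_set_eee() below is the write-side counterpart of r8152_get_eee() above: it converts the requested advertised link modes into the MII EEE cap-1 register layout with linkmode_to_mii_eee_cap1_t(), caches eee_enabled and the resulting advertisement in the driver state, and re-applies the setting through rtl_eee_enable(). */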
static int r8152_set_eee(struct r8152 *tp, struct ethtool_keee *eee) { u16 val = linkmode_to_mii_eee_cap1_t(eee->advertised); tp->eee_en = eee->eee_enabled; tp->eee_adv = val; rtl_eee_enable(tp, tp->eee_en); return 0; } static int r8153_get_eee(struct r8152 *tp, struct ethtool_keee *eee) { __ETHTOOL_DECLARE_LINK_MODE_MASK(common); u16 val; val = ocp_reg_read(tp, OCP_EEE_ABLE); mii_eee_cap1_mod_linkmode_t(eee->supported, val); val = ocp_reg_read(tp, OCP_EEE_ADV); mii_eee_cap1_mod_linkmode_t(eee->advertised, val); val = ocp_reg_read(tp, OCP_EEE_LPABLE); mii_eee_cap1_mod_linkmode_t(eee->lp_advertised, val); eee->eee_enabled = tp->eee_en; linkmode_and(common, eee->advertised, eee->lp_advertised); eee->eee_active = phy_check_valid(tp->speed, tp->duplex, common); return 0; } static int rtl_ethtool_get_eee(struct net_device *net, struct ethtool_keee *edata) { struct r8152 *tp = netdev_priv(net); int ret; if (!tp->rtl_ops.eee_get) { ret = -EOPNOTSUPP; goto out; } ret = usb_autopm_get_interface(tp->intf); if (ret < 0) goto out; mutex_lock(&tp->control); ret = tp->rtl_ops.eee_get(tp, edata); mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); out: return ret; } static int rtl_ethtool_set_eee(struct net_device *net, struct ethtool_keee *edata) { struct r8152 *tp = netdev_priv(net); int ret; if (!tp->rtl_ops.eee_set) { ret = -EOPNOTSUPP; goto out; } ret = usb_autopm_get_interface(tp->intf); if (ret < 0) goto out; mutex_lock(&tp->control); ret = tp->rtl_ops.eee_set(tp, edata); if (!ret) ret = mii_nway_restart(&tp->mii); mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); out: return ret; } static int rtl8152_nway_reset(struct net_device *dev) { struct r8152 *tp = netdev_priv(dev); int ret; ret = usb_autopm_get_interface(tp->intf); if (ret < 0) goto out; mutex_lock(&tp->control); ret = mii_nway_restart(&tp->mii); mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); out: return ret; } static int rtl8152_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct r8152 *tp = netdev_priv(netdev); switch (tp->version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: return -EOPNOTSUPP; default: break; } coalesce->rx_coalesce_usecs = tp->coalesce; return 0; } static int rtl8152_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct r8152 *tp = netdev_priv(netdev); int ret; switch (tp->version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: return -EOPNOTSUPP; default: break; } if (coalesce->rx_coalesce_usecs > COALESCE_SLOW) return -EINVAL; ret = usb_autopm_get_interface(tp->intf); if (ret < 0) return ret; mutex_lock(&tp->control); if (tp->coalesce != coalesce->rx_coalesce_usecs) { tp->coalesce = coalesce->rx_coalesce_usecs; if (netif_running(netdev) && netif_carrier_ok(netdev)) { netif_stop_queue(netdev); napi_disable(&tp->napi); tp->rtl_ops.disable(tp); tp->rtl_ops.enable(tp); rtl_start_rx(tp); clear_bit(RTL8152_SET_RX_MODE, &tp->flags); _rtl8152_set_rx_mode(netdev); napi_enable(&tp->napi); netif_wake_queue(netdev); } } mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); return ret; } static int rtl8152_get_tunable(struct net_device *netdev, const struct ethtool_tunable *tunable, void *d) { struct r8152 *tp = netdev_priv(netdev); switch (tunable->id) { case ETHTOOL_RX_COPYBREAK: *(u32 *)d = tp->rx_copybreak; break; default: return -EOPNOTSUPP; } 
return 0; } static int rtl8152_set_tunable(struct net_device *netdev, const struct ethtool_tunable *tunable, const void *d) { struct r8152 *tp = netdev_priv(netdev); u32 val; switch (tunable->id) { case ETHTOOL_RX_COPYBREAK: val = *(u32 *)d; if (val < ETH_ZLEN) { netif_err(tp, rx_err, netdev, "Invalid rx copy break value\n"); return -EINVAL; } if (tp->rx_copybreak != val) { if (netdev->flags & IFF_UP) { mutex_lock(&tp->control); napi_disable(&tp->napi); tp->rx_copybreak = val; napi_enable(&tp->napi); mutex_unlock(&tp->control); } else { tp->rx_copybreak = val; } } break; default: return -EOPNOTSUPP; } return 0; } static void rtl8152_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct r8152 *tp = netdev_priv(netdev); ring->rx_max_pending = RTL8152_RX_MAX_PENDING; ring->rx_pending = tp->rx_pending; } static int rtl8152_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct r8152 *tp = netdev_priv(netdev); if (ring->rx_pending < (RTL8152_MAX_RX * 2)) return -EINVAL; if (tp->rx_pending != ring->rx_pending) { if (netdev->flags & IFF_UP) { mutex_lock(&tp->control); napi_disable(&tp->napi); tp->rx_pending = ring->rx_pending; napi_enable(&tp->napi); mutex_unlock(&tp->control); } else { tp->rx_pending = ring->rx_pending; } } return 0; } static void rtl8152_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct r8152 *tp = netdev_priv(netdev); u16 bmcr, lcladv, rmtadv; u8 cap; if (usb_autopm_get_interface(tp->intf) < 0) return; mutex_lock(&tp->control); bmcr = r8152_mdio_read(tp, MII_BMCR); lcladv = r8152_mdio_read(tp, MII_ADVERTISE); rmtadv = r8152_mdio_read(tp, MII_LPA); mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); if (!(bmcr & BMCR_ANENABLE)) { pause->autoneg = 0; pause->rx_pause = 0; pause->tx_pause = 0; return; } pause->autoneg = 1; cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); if (cap & FLOW_CTRL_RX) pause->rx_pause = 1; if (cap & FLOW_CTRL_TX) pause->tx_pause = 1; } static int rtl8152_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct r8152 *tp = netdev_priv(netdev); u16 old, new1; u8 cap = 0; int ret; ret = usb_autopm_get_interface(tp->intf); if (ret < 0) return ret; mutex_lock(&tp->control); if (pause->autoneg && !(r8152_mdio_read(tp, MII_BMCR) & BMCR_ANENABLE)) { ret = -EINVAL; goto out; } if (pause->rx_pause) cap |= FLOW_CTRL_RX; if (pause->tx_pause) cap |= FLOW_CTRL_TX; old = r8152_mdio_read(tp, MII_ADVERTISE); new1 = (old & ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) | mii_advertise_flowctrl(cap); if (old != new1) r8152_mdio_write(tp, MII_ADVERTISE, new1); out: mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); return ret; } static const struct ethtool_ops ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS, .get_drvinfo = rtl8152_get_drvinfo, .get_link = ethtool_op_get_link, .nway_reset = rtl8152_nway_reset, .get_msglevel = rtl8152_get_msglevel, .set_msglevel = rtl8152_set_msglevel, .get_wol = rtl8152_get_wol, .set_wol = rtl8152_set_wol, .get_strings = rtl8152_get_strings, .get_sset_count = rtl8152_get_sset_count, .get_ethtool_stats = rtl8152_get_ethtool_stats, .get_coalesce = rtl8152_get_coalesce, .set_coalesce = rtl8152_set_coalesce, .get_eee = rtl_ethtool_get_eee, .set_eee = rtl_ethtool_set_eee, .get_link_ksettings = rtl8152_get_link_ksettings, .set_link_ksettings = 
rtl8152_set_link_ksettings, .get_tunable = rtl8152_get_tunable, .set_tunable = rtl8152_set_tunable, .get_ringparam = rtl8152_get_ringparam, .set_ringparam = rtl8152_set_ringparam, .get_pauseparam = rtl8152_get_pauseparam, .set_pauseparam = rtl8152_set_pauseparam, }; static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) { struct r8152 *tp = netdev_priv(netdev); struct mii_ioctl_data *data = if_mii(rq); int res; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; res = usb_autopm_get_interface(tp->intf); if (res < 0) goto out; switch (cmd) { case SIOCGMIIPHY: data->phy_id = R8152_PHY_ID; /* Internal PHY */ break; case SIOCGMIIREG: mutex_lock(&tp->control); data->val_out = r8152_mdio_read(tp, data->reg_num); mutex_unlock(&tp->control); break; case SIOCSMIIREG: if (!capable(CAP_NET_ADMIN)) { res = -EPERM; break; } mutex_lock(&tp->control); r8152_mdio_write(tp, data->reg_num, data->val_in); mutex_unlock(&tp->control); break; default: res = -EOPNOTSUPP; } usb_autopm_put_interface(tp->intf); out: return res; } static int rtl8152_change_mtu(struct net_device *dev, int new_mtu) { struct r8152 *tp = netdev_priv(dev); int ret; switch (tp->version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: WRITE_ONCE(dev->mtu, new_mtu); return 0; default: break; } ret = usb_autopm_get_interface(tp->intf); if (ret < 0) return ret; mutex_lock(&tp->control); WRITE_ONCE(dev->mtu, new_mtu); if (netif_running(dev)) { if (tp->rtl_ops.change_mtu) tp->rtl_ops.change_mtu(tp); if (netif_carrier_ok(dev)) { netif_stop_queue(dev); napi_disable(&tp->napi); tasklet_disable(&tp->tx_tl); tp->rtl_ops.disable(tp); tp->rtl_ops.enable(tp); rtl_start_rx(tp); tasklet_enable(&tp->tx_tl); napi_enable(&tp->napi); rtl8152_set_rx_mode(dev); netif_wake_queue(dev); } } mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); return ret; } static const struct net_device_ops rtl8152_netdev_ops = { .ndo_open = rtl8152_open, .ndo_stop = rtl8152_close, .ndo_eth_ioctl = rtl8152_ioctl, .ndo_start_xmit = rtl8152_start_xmit, .ndo_tx_timeout = rtl8152_tx_timeout, .ndo_set_features = rtl8152_set_features, .ndo_set_rx_mode = rtl8152_set_rx_mode, .ndo_set_mac_address = rtl8152_set_mac_address, .ndo_change_mtu = rtl8152_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_features_check = rtl8152_features_check, }; static void rtl8152_unload(struct r8152 *tp) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (tp->version != RTL_VER_01) r8152_power_cut_en(tp, true); } static void rtl8153_unload(struct r8152 *tp) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153_power_cut_en(tp, false); } static void rtl8153b_unload(struct r8152 *tp) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_power_cut_en(tp, false); } static int rtl_ops_init(struct r8152 *tp) { struct rtl_ops *ops = &tp->rtl_ops; int ret = 0; switch (tp->version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: ops->init = r8152b_init; ops->enable = rtl8152_enable; ops->disable = rtl8152_disable; ops->up = rtl8152_up; ops->down = rtl8152_down; ops->unload = rtl8152_unload; ops->eee_get = r8152_get_eee; ops->eee_set = r8152_set_eee; ops->in_nway = rtl8152_in_nway; ops->hw_phy_cfg = r8152b_hw_phy_cfg; ops->autosuspend_en = rtl_runtime_suspend_enable; tp->rx_buf_sz = 16 * 1024; tp->eee_en = true; tp->eee_adv = MDIO_EEE_100TX; break; case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: ops->init = r8153_init; ops->enable = rtl8153_enable; ops->disable = rtl8153_disable; ops->up = rtl8153_up; 
ops->down = rtl8153_down; ops->unload = rtl8153_unload; ops->eee_get = r8153_get_eee; ops->eee_set = r8152_set_eee; ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8153_hw_phy_cfg; ops->autosuspend_en = rtl8153_runtime_enable; ops->change_mtu = rtl8153_change_mtu; if (tp->udev->speed < USB_SPEED_SUPER) tp->rx_buf_sz = 16 * 1024; else tp->rx_buf_sz = 32 * 1024; tp->eee_en = true; tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; break; case RTL_VER_08: case RTL_VER_09: ops->init = r8153b_init; ops->enable = rtl8153_enable; ops->disable = rtl8153_disable; ops->up = rtl8153b_up; ops->down = rtl8153b_down; ops->unload = rtl8153b_unload; ops->eee_get = r8153_get_eee; ops->eee_set = r8152_set_eee; ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8153b_hw_phy_cfg; ops->autosuspend_en = rtl8153b_runtime_enable; ops->change_mtu = rtl8153_change_mtu; tp->rx_buf_sz = 32 * 1024; tp->eee_en = true; tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; break; case RTL_VER_11: tp->eee_en = true; tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; fallthrough; case RTL_VER_10: ops->init = r8156_init; ops->enable = rtl8156_enable; ops->disable = rtl8156_disable; ops->up = rtl8156_up; ops->down = rtl8156_down; ops->unload = rtl8153_unload; ops->eee_get = r8153_get_eee; ops->eee_set = r8152_set_eee; ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8156_hw_phy_cfg; ops->autosuspend_en = rtl8156_runtime_enable; ops->change_mtu = rtl8156_change_mtu; tp->rx_buf_sz = 48 * 1024; tp->support_2500full = 1; break; case RTL_VER_12: case RTL_VER_13: tp->support_2500full = 1; fallthrough; case RTL_VER_15: tp->eee_en = true; tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; ops->init = r8156b_init; ops->enable = rtl8156b_enable; ops->disable = rtl8153_disable; ops->up = rtl8156_up; ops->down = rtl8156_down; ops->unload = rtl8153_unload; ops->eee_get = r8153_get_eee; ops->eee_set = r8152_set_eee; ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8156b_hw_phy_cfg; ops->autosuspend_en = rtl8156_runtime_enable; ops->change_mtu = rtl8156_change_mtu; tp->rx_buf_sz = 48 * 1024; break; case RTL_VER_14: ops->init = r8153c_init; ops->enable = rtl8153_enable; ops->disable = rtl8153_disable; ops->up = rtl8153c_up; ops->down = rtl8153b_down; ops->unload = rtl8153_unload; ops->eee_get = r8153_get_eee; ops->eee_set = r8152_set_eee; ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8153c_hw_phy_cfg; ops->autosuspend_en = rtl8153c_runtime_enable; ops->change_mtu = rtl8153c_change_mtu; tp->rx_buf_sz = 32 * 1024; tp->eee_en = true; tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; break; default: ret = -ENODEV; dev_err(&tp->intf->dev, "Unknown Device\n"); break; } return ret; } #define FIRMWARE_8153A_2 "rtl_nic/rtl8153a-2.fw" #define FIRMWARE_8153A_3 "rtl_nic/rtl8153a-3.fw" #define FIRMWARE_8153A_4 "rtl_nic/rtl8153a-4.fw" #define FIRMWARE_8153B_2 "rtl_nic/rtl8153b-2.fw" #define FIRMWARE_8153C_1 "rtl_nic/rtl8153c-1.fw" #define FIRMWARE_8156A_2 "rtl_nic/rtl8156a-2.fw" #define FIRMWARE_8156B_2 "rtl_nic/rtl8156b-2.fw" MODULE_FIRMWARE(FIRMWARE_8153A_2); MODULE_FIRMWARE(FIRMWARE_8153A_3); MODULE_FIRMWARE(FIRMWARE_8153A_4); MODULE_FIRMWARE(FIRMWARE_8153B_2); MODULE_FIRMWARE(FIRMWARE_8153C_1); MODULE_FIRMWARE(FIRMWARE_8156A_2); MODULE_FIRMWARE(FIRMWARE_8156B_2); static int rtl_fw_init(struct r8152 *tp) { struct rtl_fw *rtl_fw = &tp->rtl_fw; switch (tp->version) { case RTL_VER_04: rtl_fw->fw_name = FIRMWARE_8153A_2; rtl_fw->pre_fw = r8153_pre_firmware_1; rtl_fw->post_fw = r8153_post_firmware_1; break; case RTL_VER_05: rtl_fw->fw_name = FIRMWARE_8153A_3; 
rtl_fw->pre_fw = r8153_pre_firmware_2; rtl_fw->post_fw = r8153_post_firmware_2; break; case RTL_VER_06: rtl_fw->fw_name = FIRMWARE_8153A_4; rtl_fw->post_fw = r8153_post_firmware_3; break; case RTL_VER_09: rtl_fw->fw_name = FIRMWARE_8153B_2; rtl_fw->pre_fw = r8153b_pre_firmware_1; rtl_fw->post_fw = r8153b_post_firmware_1; break; case RTL_VER_11: rtl_fw->fw_name = FIRMWARE_8156A_2; rtl_fw->post_fw = r8156a_post_firmware_1; break; case RTL_VER_13: case RTL_VER_15: rtl_fw->fw_name = FIRMWARE_8156B_2; break; case RTL_VER_14: rtl_fw->fw_name = FIRMWARE_8153C_1; rtl_fw->pre_fw = r8153b_pre_firmware_1; rtl_fw->post_fw = r8153c_post_firmware_1; break; default: break; } return 0; } static u8 __rtl_get_hw_ver(struct usb_device *udev) { u32 ocp_data = 0; __le32 *tmp; u8 version; int ret; int i; tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) return 0; /* Retry up to 3 times in case there is a transitory error. We do this * since retrying a read of the version is always safe and this * function doesn't take advantage of r8152_control_msg(). */ for (i = 0; i < 3; i++) { ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp), USB_CTRL_GET_TIMEOUT); if (ret > 0) { ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK; break; } } if (i != 0 && ret > 0) dev_warn(&udev->dev, "Needed %d retries to read version\n", i); kfree(tmp); switch (ocp_data) { case 0x4c00: version = RTL_VER_01; break; case 0x4c10: version = RTL_VER_02; break; case 0x5c00: version = RTL_VER_03; break; case 0x5c10: version = RTL_VER_04; break; case 0x5c20: version = RTL_VER_05; break; case 0x5c30: version = RTL_VER_06; break; case 0x4800: version = RTL_VER_07; break; case 0x6000: version = RTL_VER_08; break; case 0x6010: version = RTL_VER_09; break; case 0x7010: version = RTL_TEST_01; break; case 0x7020: version = RTL_VER_10; break; case 0x7030: version = RTL_VER_11; break; case 0x7400: version = RTL_VER_12; break; case 0x7410: version = RTL_VER_13; break; case 0x6400: version = RTL_VER_14; break; case 0x7420: version = RTL_VER_15; break; default: version = RTL_VER_UNKNOWN; dev_info(&udev->dev, "Unknown version 0x%04x\n", ocp_data); break; } return version; } u8 rtl8152_get_version(struct usb_interface *intf) { u8 version; version = __rtl_get_hw_ver(interface_to_usbdev(intf)); dev_dbg(&intf->dev, "Detected version 0x%04x\n", version); return version; } EXPORT_SYMBOL_GPL(rtl8152_get_version); static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev) { int parent_vendor_id = le16_to_cpu(udev->parent->descriptor.idVendor); int product_id = le16_to_cpu(udev->descriptor.idProduct); int vendor_id = le16_to_cpu(udev->descriptor.idVendor); if (vendor_id == VENDOR_ID_LENOVO) { switch (product_id) { case DEVICE_ID_LENOVO_USB_C_TRAVEL_HUB: case DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK: case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2: case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2: case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3: case DEVICE_ID_THINKPAD_USB_C_DONGLE: return 1; } } else if (vendor_id == VENDOR_ID_REALTEK && parent_vendor_id == VENDOR_ID_LENOVO) { switch (product_id) { case 0x8153: return 1; } } return 0; } static int rtl8152_probe_once(struct usb_interface *intf, const struct usb_device_id *id, u8 version) { struct usb_device *udev = interface_to_usbdev(intf); struct r8152 *tp; struct net_device *netdev; int ret; usb_reset_device(udev); netdev = alloc_etherdev(sizeof(struct r8152)); if (!netdev) { dev_err(&intf->dev, "Out of memory\n"); return -ENOMEM; } 
SET_NETDEV_DEV(netdev, &intf->dev); tp = netdev_priv(netdev); tp->msg_enable = 0x7FFF; tp->udev = udev; tp->netdev = netdev; tp->intf = intf; tp->version = version; tp->pipe_ctrl_in = usb_rcvctrlpipe(udev, 0); tp->pipe_ctrl_out = usb_sndctrlpipe(udev, 0); tp->pipe_in = usb_rcvbulkpipe(udev, 1); tp->pipe_out = usb_sndbulkpipe(udev, 2); tp->pipe_intr = usb_rcvintpipe(udev, 3); switch (version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: tp->mii.supports_gmii = 0; break; default: tp->mii.supports_gmii = 1; break; } ret = rtl_ops_init(tp); if (ret) goto out; rtl_fw_init(tp); mutex_init(&tp->control); INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t); INIT_DELAYED_WORK(&tp->hw_phy_work, rtl_hw_phy_work_func_t); tasklet_setup(&tp->tx_tl, bottom_half); tasklet_disable(&tp->tx_tl); netdev->netdev_ops = &rtl8152_netdev_ops; netdev->watchdog_timeo = RTL8152_TX_TIMEOUT; netdev->features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_FRAGLIST | NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX; netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_FRAGLIST | NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX; netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | NETIF_F_IPV6_CSUM | NETIF_F_TSO6; if (tp->version == RTL_VER_01) { netdev->features &= ~NETIF_F_RXCSUM; netdev->hw_features &= ~NETIF_F_RXCSUM; } tp->lenovo_macpassthru = rtl8152_supports_lenovo_macpassthru(udev); if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial && (!strcmp(udev->serial, "000001000000") || !strcmp(udev->serial, "000002000000"))) { dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation"); tp->dell_tb_rx_agg_bug = 1; } netdev->ethtool_ops = &ops; netif_set_tso_max_size(netdev, RTL_LIMITED_TSO_SIZE); /* MTU range: 68 - 1500 or 9194 */ netdev->min_mtu = ETH_MIN_MTU; switch (tp->version) { case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: case RTL_VER_08: case RTL_VER_09: case RTL_VER_14: netdev->max_mtu = size_to_mtu(9 * 1024); break; case RTL_VER_10: case RTL_VER_11: netdev->max_mtu = size_to_mtu(15 * 1024); break; case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: netdev->max_mtu = size_to_mtu(16 * 1024); break; case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: default: netdev->max_mtu = ETH_DATA_LEN; break; } tp->mii.dev = netdev; tp->mii.mdio_read = read_mii_word; tp->mii.mdio_write = write_mii_word; tp->mii.phy_id_mask = 0x3f; tp->mii.reg_num_mask = 0x1f; tp->mii.phy_id = R8152_PHY_ID; tp->autoneg = AUTONEG_ENABLE; tp->speed = SPEED_100; tp->advertising = RTL_ADVERTISED_10_HALF | RTL_ADVERTISED_10_FULL | RTL_ADVERTISED_100_HALF | RTL_ADVERTISED_100_FULL; if (tp->mii.supports_gmii) { if (tp->support_2500full && tp->udev->speed >= USB_SPEED_SUPER) { tp->speed = SPEED_2500; tp->advertising |= RTL_ADVERTISED_2500_FULL; } else { tp->speed = SPEED_1000; } tp->advertising |= RTL_ADVERTISED_1000_FULL; } tp->duplex = DUPLEX_FULL; tp->rx_copybreak = RTL8152_RXFG_HEADSZ; tp->rx_pending = 10 * RTL8152_MAX_RX; intf->needs_remote_wakeup = 1; if (!rtl_can_wakeup(tp)) __rtl_set_wol(tp, 0); else tp->saved_wolopts = __rtl_get_wol(tp); tp->rtl_ops.init(tp); #if IS_BUILTIN(CONFIG_USB_RTL8152) /* Retry in case request_firmware() is not ready yet. 
*/ tp->rtl_fw.retry = true; #endif queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); set_ethernet_addr(tp, false); usb_set_intfdata(intf, tp); netif_napi_add(netdev, &tp->napi, r8152_poll); ret = register_netdev(netdev); if (ret != 0) { dev_err(&intf->dev, "couldn't register the device\n"); goto out1; } if (tp->saved_wolopts) device_set_wakeup_enable(&udev->dev, true); else device_set_wakeup_enable(&udev->dev, false); /* If we saw a control transfer error while probing then we may * want to try probe() again. Consider this an error. */ if (test_bit(PROBE_SHOULD_RETRY, &tp->flags)) goto out2; set_bit(PROBED_WITH_NO_ERRORS, &tp->flags); netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION); return 0; out2: unregister_netdev(netdev); out1: tasklet_kill(&tp->tx_tl); cancel_delayed_work_sync(&tp->hw_phy_work); if (tp->rtl_ops.unload) tp->rtl_ops.unload(tp); rtl8152_release_firmware(tp); usb_set_intfdata(intf, NULL); out: if (test_bit(PROBE_SHOULD_RETRY, &tp->flags)) ret = -EAGAIN; free_netdev(netdev); return ret; } #define RTL8152_PROBE_TRIES 3 static int rtl8152_probe(struct usb_interface *intf, const struct usb_device_id *id) { u8 version; int ret; int i; if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC) return -ENODEV; if (!rtl_check_vendor_ok(intf)) return -ENODEV; version = rtl8152_get_version(intf); if (version == RTL_VER_UNKNOWN) return -ENODEV; for (i = 0; i < RTL8152_PROBE_TRIES; i++) { ret = rtl8152_probe_once(intf, id, version); if (ret != -EAGAIN) break; } if (ret == -EAGAIN) { dev_err(&intf->dev, "r8152 failed probe after %d tries; giving up\n", i); return -ENODEV; } return ret; } static void rtl8152_disconnect(struct usb_interface *intf) { struct r8152 *tp = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (tp) { rtl_set_unplug(tp); unregister_netdev(tp->netdev); tasklet_kill(&tp->tx_tl); cancel_delayed_work_sync(&tp->hw_phy_work); if (tp->rtl_ops.unload) tp->rtl_ops.unload(tp); rtl8152_release_firmware(tp); free_netdev(tp->netdev); } } /* table of devices that work with this driver */ static const struct usb_device_id rtl8152_table[] = { /* Realtek */ { USB_DEVICE(VENDOR_ID_REALTEK, 0x8050) }, { USB_DEVICE(VENDOR_ID_REALTEK, 0x8053) }, { USB_DEVICE(VENDOR_ID_REALTEK, 0x8152) }, { USB_DEVICE(VENDOR_ID_REALTEK, 0x8153) }, { USB_DEVICE(VENDOR_ID_REALTEK, 0x8155) }, { USB_DEVICE(VENDOR_ID_REALTEK, 0x8156) }, /* Microsoft */ { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab) }, { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6) }, { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927) }, { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e) }, { USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x304f) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x3054) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x3062) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x3069) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x3082) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x3098) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x7205) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x720c) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x7214) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x721e) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0xa387) }, { USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041) }, { USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) }, { USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) }, { USB_DEVICE(VENDOR_ID_DLINK, 0xb301) }, { USB_DEVICE(VENDOR_ID_DELL, 0xb097) }, { USB_DEVICE(VENDOR_ID_ASUS, 0x1976) }, {} }; MODULE_DEVICE_TABLE(usb, rtl8152_table); static struct usb_driver rtl8152_driver = { .name = MODULENAME, .id_table = rtl8152_table, .probe = rtl8152_probe, .disconnect = rtl8152_disconnect, .suspend = 
rtl8152_suspend, .resume = rtl8152_resume, .reset_resume = rtl8152_reset_resume, .pre_reset = rtl8152_pre_reset, .post_reset = rtl8152_post_reset, .supports_autosuspend = 1, .disable_hub_initiated_lpm = 1, }; static int rtl8152_cfgselector_choose_configuration(struct usb_device *udev) { struct usb_host_config *c; int i, num_configs; /* Switch the device to vendor mode, if and only if the vendor mode * driver supports it. */ if (__rtl_get_hw_ver(udev) == RTL_VER_UNKNOWN) return -ENODEV; /* The vendor mode is not always config #1, so to find it out. */ c = udev->config; num_configs = udev->descriptor.bNumConfigurations; for (i = 0; i < num_configs; (i++, c++)) { struct usb_interface_descriptor *desc = NULL; if (!c->desc.bNumInterfaces) continue; desc = &c->intf_cache[0]->altsetting->desc; if (desc->bInterfaceClass == USB_CLASS_VENDOR_SPEC) break; } if (i == num_configs) return -ENODEV; return c->desc.bConfigurationValue; } static struct usb_device_driver rtl8152_cfgselector_driver = { .name = MODULENAME "-cfgselector", .choose_configuration = rtl8152_cfgselector_choose_configuration, .id_table = rtl8152_table, .generic_subclass = 1, .supports_autosuspend = 1, }; static int __init rtl8152_driver_init(void) { int ret; ret = usb_register_device_driver(&rtl8152_cfgselector_driver, THIS_MODULE); if (ret) return ret; return usb_register(&rtl8152_driver); } static void __exit rtl8152_driver_exit(void) { usb_deregister(&rtl8152_driver); usb_deregister_device_driver(&rtl8152_cfgselector_driver); } module_init(rtl8152_driver_init); module_exit(rtl8152_driver_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_VERSION(DRIVER_VERSION);
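The SIOCGMIIPHY/SIOCGMIIREG branch of rtl8152_ioctl() above is reachable from ordinary userspace through the classic MII ioctl interface. A minimal sketch follows; the interface name "eth0" is an assumption, and it only reads the basic status register of the internal PHY whose id the driver reports as R8152_PHY_ID.

/* Hedged userspace sketch: query the internal PHY via the MII ioctls that
 * rtl8152_ioctl() services.  The interface name "eth0" is an assumption. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed ifname */

	/* SIOCGMIIPHY fills mii->phy_id (R8152_PHY_ID for this driver). */
	if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {
		mii->reg_num = MII_BMSR;	/* basic status register */
		if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
			printf("BMSR = 0x%04x\n", mii->val_out);
	}

	close(fd);
	return 0;
}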
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and described
 * at https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02
 *
 * Copyright (C) 2017 ARM Limited or its affiliates.
 * Copyright (C) 2017 Gilad Ben-Yossef <gilad@benyossef.com>
 * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
 */

#include <linux/module.h>
#include <linux/unaligned.h>
#include <crypto/sm3.h>

static const u32 ____cacheline_aligned K[64] = {
	0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb,
	0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc,
	0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce,
	0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6,
	0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c,
	0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce,
	0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec,
	0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5,
	0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53,
	0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d,
	0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4,
	0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43,
	0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c,
	0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce,
	0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec,
	0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5
};

/*
 * Transform the message X which consists of 16 32-bit-words. See
 * GM/T 004-2012 for details.
*/ #define R(i, a, b, c, d, e, f, g, h, t, w1, w2) \ do { \ ss1 = rol32((rol32((a), 12) + (e) + (t)), 7); \ ss2 = ss1 ^ rol32((a), 12); \ d += FF ## i(a, b, c) + ss2 + ((w1) ^ (w2)); \ h += GG ## i(e, f, g) + ss1 + (w1); \ b = rol32((b), 9); \ f = rol32((f), 19); \ h = P0((h)); \ } while (0) #define R1(a, b, c, d, e, f, g, h, t, w1, w2) \ R(1, a, b, c, d, e, f, g, h, t, w1, w2) #define R2(a, b, c, d, e, f, g, h, t, w1, w2) \ R(2, a, b, c, d, e, f, g, h, t, w1, w2) #define FF1(x, y, z) (x ^ y ^ z) #define FF2(x, y, z) ((x & y) | (x & z) | (y & z)) #define GG1(x, y, z) FF1(x, y, z) #define GG2(x, y, z) ((x & y) | (~x & z)) /* Message expansion */ #define P0(x) ((x) ^ rol32((x), 9) ^ rol32((x), 17)) #define P1(x) ((x) ^ rol32((x), 15) ^ rol32((x), 23)) #define I(i) (W[i] = get_unaligned_be32(data + i * 4)) #define W1(i) (W[i & 0x0f]) #define W2(i) (W[i & 0x0f] = \ P1(W[i & 0x0f] \ ^ W[(i-9) & 0x0f] \ ^ rol32(W[(i-3) & 0x0f], 15)) \ ^ rol32(W[(i-13) & 0x0f], 7) \ ^ W[(i-6) & 0x0f]) static void sm3_transform(struct sm3_state *sctx, u8 const *data, u32 W[16]) { u32 a, b, c, d, e, f, g, h, ss1, ss2; a = sctx->state[0]; b = sctx->state[1]; c = sctx->state[2]; d = sctx->state[3]; e = sctx->state[4]; f = sctx->state[5]; g = sctx->state[6]; h = sctx->state[7]; R1(a, b, c, d, e, f, g, h, K[0], I(0), I(4)); R1(d, a, b, c, h, e, f, g, K[1], I(1), I(5)); R1(c, d, a, b, g, h, e, f, K[2], I(2), I(6)); R1(b, c, d, a, f, g, h, e, K[3], I(3), I(7)); R1(a, b, c, d, e, f, g, h, K[4], W1(4), I(8)); R1(d, a, b, c, h, e, f, g, K[5], W1(5), I(9)); R1(c, d, a, b, g, h, e, f, K[6], W1(6), I(10)); R1(b, c, d, a, f, g, h, e, K[7], W1(7), I(11)); R1(a, b, c, d, e, f, g, h, K[8], W1(8), I(12)); R1(d, a, b, c, h, e, f, g, K[9], W1(9), I(13)); R1(c, d, a, b, g, h, e, f, K[10], W1(10), I(14)); R1(b, c, d, a, f, g, h, e, K[11], W1(11), I(15)); R1(a, b, c, d, e, f, g, h, K[12], W1(12), W2(16)); R1(d, a, b, c, h, e, f, g, K[13], W1(13), W2(17)); R1(c, d, a, b, g, h, e, f, K[14], W1(14), W2(18)); R1(b, c, d, a, f, g, h, e, K[15], W1(15), W2(19)); R2(a, b, c, d, e, f, g, h, K[16], W1(16), W2(20)); R2(d, a, b, c, h, e, f, g, K[17], W1(17), W2(21)); R2(c, d, a, b, g, h, e, f, K[18], W1(18), W2(22)); R2(b, c, d, a, f, g, h, e, K[19], W1(19), W2(23)); R2(a, b, c, d, e, f, g, h, K[20], W1(20), W2(24)); R2(d, a, b, c, h, e, f, g, K[21], W1(21), W2(25)); R2(c, d, a, b, g, h, e, f, K[22], W1(22), W2(26)); R2(b, c, d, a, f, g, h, e, K[23], W1(23), W2(27)); R2(a, b, c, d, e, f, g, h, K[24], W1(24), W2(28)); R2(d, a, b, c, h, e, f, g, K[25], W1(25), W2(29)); R2(c, d, a, b, g, h, e, f, K[26], W1(26), W2(30)); R2(b, c, d, a, f, g, h, e, K[27], W1(27), W2(31)); R2(a, b, c, d, e, f, g, h, K[28], W1(28), W2(32)); R2(d, a, b, c, h, e, f, g, K[29], W1(29), W2(33)); R2(c, d, a, b, g, h, e, f, K[30], W1(30), W2(34)); R2(b, c, d, a, f, g, h, e, K[31], W1(31), W2(35)); R2(a, b, c, d, e, f, g, h, K[32], W1(32), W2(36)); R2(d, a, b, c, h, e, f, g, K[33], W1(33), W2(37)); R2(c, d, a, b, g, h, e, f, K[34], W1(34), W2(38)); R2(b, c, d, a, f, g, h, e, K[35], W1(35), W2(39)); R2(a, b, c, d, e, f, g, h, K[36], W1(36), W2(40)); R2(d, a, b, c, h, e, f, g, K[37], W1(37), W2(41)); R2(c, d, a, b, g, h, e, f, K[38], W1(38), W2(42)); R2(b, c, d, a, f, g, h, e, K[39], W1(39), W2(43)); R2(a, b, c, d, e, f, g, h, K[40], W1(40), W2(44)); R2(d, a, b, c, h, e, f, g, K[41], W1(41), W2(45)); R2(c, d, a, b, g, h, e, f, K[42], W1(42), W2(46)); R2(b, c, d, a, f, g, h, e, K[43], W1(43), W2(47)); R2(a, b, c, d, e, f, g, h, K[44], W1(44), W2(48)); R2(d, a, b, c, h, e, f, g, 
K[45], W1(45), W2(49)); R2(c, d, a, b, g, h, e, f, K[46], W1(46), W2(50)); R2(b, c, d, a, f, g, h, e, K[47], W1(47), W2(51)); R2(a, b, c, d, e, f, g, h, K[48], W1(48), W2(52)); R2(d, a, b, c, h, e, f, g, K[49], W1(49), W2(53)); R2(c, d, a, b, g, h, e, f, K[50], W1(50), W2(54)); R2(b, c, d, a, f, g, h, e, K[51], W1(51), W2(55)); R2(a, b, c, d, e, f, g, h, K[52], W1(52), W2(56)); R2(d, a, b, c, h, e, f, g, K[53], W1(53), W2(57)); R2(c, d, a, b, g, h, e, f, K[54], W1(54), W2(58)); R2(b, c, d, a, f, g, h, e, K[55], W1(55), W2(59)); R2(a, b, c, d, e, f, g, h, K[56], W1(56), W2(60)); R2(d, a, b, c, h, e, f, g, K[57], W1(57), W2(61)); R2(c, d, a, b, g, h, e, f, K[58], W1(58), W2(62)); R2(b, c, d, a, f, g, h, e, K[59], W1(59), W2(63)); R2(a, b, c, d, e, f, g, h, K[60], W1(60), W2(64)); R2(d, a, b, c, h, e, f, g, K[61], W1(61), W2(65)); R2(c, d, a, b, g, h, e, f, K[62], W1(62), W2(66)); R2(b, c, d, a, f, g, h, e, K[63], W1(63), W2(67)); sctx->state[0] ^= a; sctx->state[1] ^= b; sctx->state[2] ^= c; sctx->state[3] ^= d; sctx->state[4] ^= e; sctx->state[5] ^= f; sctx->state[6] ^= g; sctx->state[7] ^= h; } #undef R #undef R1 #undef R2 #undef I #undef W1 #undef W2 static inline void sm3_block(struct sm3_state *sctx, u8 const *data, int blocks, u32 W[16]) { while (blocks--) { sm3_transform(sctx, data, W); data += SM3_BLOCK_SIZE; } } void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len) { unsigned int partial = sctx->count % SM3_BLOCK_SIZE; u32 W[16]; sctx->count += len; if ((partial + len) >= SM3_BLOCK_SIZE) { int blocks; if (partial) { int p = SM3_BLOCK_SIZE - partial; memcpy(sctx->buffer + partial, data, p); data += p; len -= p; sm3_block(sctx, sctx->buffer, 1, W); } blocks = len / SM3_BLOCK_SIZE; len %= SM3_BLOCK_SIZE; if (blocks) { sm3_block(sctx, data, blocks, W); data += blocks * SM3_BLOCK_SIZE; } memzero_explicit(W, sizeof(W)); partial = 0; } if (len) memcpy(sctx->buffer + partial, data, len); } EXPORT_SYMBOL_GPL(sm3_update); void sm3_final(struct sm3_state *sctx, u8 *out) { const int bit_offset = SM3_BLOCK_SIZE - sizeof(u64); __be64 *bits = (__be64 *)(sctx->buffer + bit_offset); __be32 *digest = (__be32 *)out; unsigned int partial = sctx->count % SM3_BLOCK_SIZE; u32 W[16]; int i; sctx->buffer[partial++] = 0x80; if (partial > bit_offset) { memset(sctx->buffer + partial, 0, SM3_BLOCK_SIZE - partial); partial = 0; sm3_block(sctx, sctx->buffer, 1, W); } memset(sctx->buffer + partial, 0, bit_offset - partial); *bits = cpu_to_be64(sctx->count << 3); sm3_block(sctx, sctx->buffer, 1, W); for (i = 0; i < 8; i++) put_unaligned_be32(sctx->state[i], digest++); /* Zeroize sensitive information. */ memzero_explicit(W, sizeof(W)); memzero_explicit(sctx, sizeof(*sctx)); } EXPORT_SYMBOL_GPL(sm3_final); MODULE_DESCRIPTION("Generic SM3 library"); MODULE_LICENSE("GPL v2");
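For callers elsewhere in the kernel, the exported sm3_update()/sm3_final() pair is normally driven as below. This is only a sketch; it assumes the sm3_init() helper and SM3_DIGEST_SIZE from <crypto/sm3.h>, which preload the GM/T 0004-2012 IV and give the 32-byte digest length.

/* Sketch of one-shot hashing with the library interface above; sm3_init()
 * from <crypto/sm3.h> is assumed to set the standard IV. */
#include <crypto/sm3.h>
#include <linux/types.h>

static void sm3_digest_buffer(const u8 *data, unsigned int len,
			      u8 out[SM3_DIGEST_SIZE])
{
	struct sm3_state sctx;

	sm3_init(&sctx);		/* load the standard IV, count = 0 */
	sm3_update(&sctx, data, len);	/* absorb full blocks, buffer the tail */
	sm3_final(&sctx, out);		/* pad, emit digest, zeroize the state */
}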
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) International Business Machines Corp., 2000-2004
 */

/*
 * jfs_umount.c
 *
 * note: file system in transition to aggregate/fileset:
 * (ref. jfs_mount.c)
 *
 * file system unmount is interpreted as mount of the single/only
 * fileset in the aggregate and, if unmount of the last fileset,
 * as unmount of the aggregate;
 */

#include <linux/fs.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_imap.h"
#include "jfs_metapage.h"
#include "jfs_debug.h"

/*
 * NAME:	jfs_umount(vfsp, flags, crp)
 *
 * FUNCTION:	vfs_umount()
 *
 * PARAMETERS:	vfsp	- virtual file system pointer
 *		flags	- unmount for shutdown
 *		crp	- credential
 *
 * RETURN:	EBUSY	- device has open files
 */
int jfs_umount(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct inode *ipbmap = sbi->ipbmap;
	struct inode *ipimap = sbi->ipimap;
	struct inode *ipaimap = sbi->ipaimap;
	struct inode *ipaimap2 = sbi->ipaimap2;
	struct jfs_log *log;
	int rc = 0;

	jfs_info("UnMount JFS: sb:0x%p", sb);

	/*
	 * update superblock and close log
	 *
	 * if mounted read-write and log based recovery was enabled
	 */
	if ((log = sbi->log))
		/*
		 * Wait for outstanding transactions to be written to log:
		 */
		jfs_flush_journal(log, 2);

	/*
	 * close fileset inode allocation map (aka fileset inode)
	 */
	diUnmount(ipimap, 0);
	diFreeSpecial(ipimap);
	sbi->ipimap = NULL;

	/*
	 * close secondary aggregate inode allocation map
	 */
	if (ipaimap2) {
		diUnmount(ipaimap2, 0);
		diFreeSpecial(ipaimap2);
		sbi->ipaimap2 = NULL;
	}

	/*
	 * close aggregate inode allocation map
	 */
	diUnmount(ipaimap, 0);
	diFreeSpecial(ipaimap);
	sbi->ipaimap = NULL;

	/*
	 * close aggregate block allocation map
	 */
	dbUnmount(ipbmap, 0);
	diFreeSpecial(ipbmap);
	sbi->ipbmap = NULL;

	/*
	 * Make sure all metadata makes it to disk before we mark
	 * the superblock as clean
	 */
	filemap_write_and_wait(sbi->direct_inode->i_mapping);

	/*
	 * ensure all file system file pages are propagated to their
	 * home blocks on disk (and their in-memory buffer pages are
	 * invalidated) BEFORE updating file system superblock state
	 * (to signify file system is unmounted cleanly, and thus in
	 * consistent state) and log superblock active file system
	 * list (to signify skip logredo()).
	 */
	if (log) {		/* log = NULL if read-only mount */
		updateSuper(sb, FM_CLEAN);

		/*
		 * close log:
		 *
		 * remove file system from log active file system list.
		 */
		rc = lmLogClose(sb);
	}
	jfs_info("UnMount JFS Complete: rc = %d", rc);
	return rc;
}

int jfs_umount_rw(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;

	if (!log)
		return 0;

	/*
	 * close log:
	 *
	 * remove file system from log active file system list.
	 */
	jfs_flush_journal(log, 2);

	/*
	 * Make sure all metadata makes it to disk
	 */
	dbSync(sbi->ipbmap);
	diSync(sbi->ipimap);

	/*
	 * Note that we have to do this even if sync_blockdev() will
	 * do exactly the same a few instructions later:  We can't
	 * mark the superblock clean before everything is flushed to
	 * disk.
	 */
	filemap_write_and_wait(sbi->direct_inode->i_mapping);

	updateSuper(sb, FM_CLEAN);

	return lmLogClose(sb);
}
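For completeness, the unmount path above is what ultimately runs when userspace detaches a JFS mount; a trivial sketch, with the mount point purely illustrative:

/* Userspace sketch; "/mnt/jfs" is an assumed mount point.  A plain unmount
 * reaches jfs_umount() through the VFS ->put_super() callback. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (umount2("/mnt/jfs", 0) != 0)	/* flags 0: normal unmount */
		perror("umount2");
	return 0;
}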
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock LSM - Ruleset management
 *
 * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
 * Copyright © 2018-2020 ANSSI
 */

#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/compiler_types.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/slab.h> #include <linux/spinlock.h> #include <linux/workqueue.h> #include "access.h" #include "limits.h" #include "object.h" #include "ruleset.h" static struct landlock_ruleset *create_ruleset(const u32 num_layers) { struct landlock_ruleset *new_ruleset; new_ruleset = kzalloc(struct_size(new_ruleset, access_masks, num_layers), GFP_KERNEL_ACCOUNT); if (!new_ruleset) return ERR_PTR(-ENOMEM); refcount_set(&new_ruleset->usage, 1); mutex_init(&new_ruleset->lock); new_ruleset->root_inode = RB_ROOT; #if IS_ENABLED(CONFIG_INET) new_ruleset->root_net_port = RB_ROOT; #endif /* IS_ENABLED(CONFIG_INET) */ new_ruleset->num_layers = num_layers; /* * hierarchy = NULL * num_rules = 0 * access_masks[] = 0 */ return new_ruleset; } struct landlock_ruleset * landlock_create_ruleset(const access_mask_t fs_access_mask, const access_mask_t net_access_mask, const access_mask_t scope_mask) { struct landlock_ruleset *new_ruleset; /* Informs about useless ruleset. */ if (!fs_access_mask && !net_access_mask && !scope_mask) return ERR_PTR(-ENOMSG); new_ruleset = create_ruleset(1); if (IS_ERR(new_ruleset)) return new_ruleset; if (fs_access_mask) landlock_add_fs_access_mask(new_ruleset, fs_access_mask, 0); if (net_access_mask) landlock_add_net_access_mask(new_ruleset, net_access_mask, 0); if (scope_mask) landlock_add_scope_mask(new_ruleset, scope_mask, 0); return new_ruleset; } static void build_check_rule(void) { const struct landlock_rule rule = { .num_layers = ~0, }; BUILD_BUG_ON(rule.num_layers < LANDLOCK_MAX_NUM_LAYERS); } static bool is_object_pointer(const enum landlock_key_type key_type) { switch (key_type) { case LANDLOCK_KEY_INODE: return true; #if IS_ENABLED(CONFIG_INET) case LANDLOCK_KEY_NET_PORT: return false; #endif /* IS_ENABLED(CONFIG_INET) */ default: WARN_ON_ONCE(1); return false; } } static struct landlock_rule * create_rule(const struct landlock_id id, const struct landlock_layer (*const layers)[], const u32 num_layers, const struct landlock_layer *const new_layer) { struct landlock_rule *new_rule; u32 new_num_layers; build_check_rule(); if (new_layer) { /* Should already be checked by landlock_merge_ruleset(). */ if (WARN_ON_ONCE(num_layers >= LANDLOCK_MAX_NUM_LAYERS)) return ERR_PTR(-E2BIG); new_num_layers = num_layers + 1; } else { new_num_layers = num_layers; } new_rule = kzalloc(struct_size(new_rule, layers, new_num_layers), GFP_KERNEL_ACCOUNT); if (!new_rule) return ERR_PTR(-ENOMEM); RB_CLEAR_NODE(&new_rule->node); if (is_object_pointer(id.type)) { /* This should have been caught by insert_rule(). */ WARN_ON_ONCE(!id.key.object); landlock_get_object(id.key.object); } new_rule->key = id.key; new_rule->num_layers = new_num_layers; /* Copies the original layer stack. */ memcpy(new_rule->layers, layers, flex_array_size(new_rule, layers, num_layers)); if (new_layer) /* Adds a copy of @new_layer on the layer stack. 
*/ new_rule->layers[new_rule->num_layers - 1] = *new_layer; return new_rule; } static struct rb_root *get_root(struct landlock_ruleset *const ruleset, const enum landlock_key_type key_type) { switch (key_type) { case LANDLOCK_KEY_INODE: return &ruleset->root_inode; #if IS_ENABLED(CONFIG_INET) case LANDLOCK_KEY_NET_PORT: return &ruleset->root_net_port; #endif /* IS_ENABLED(CONFIG_INET) */ default: WARN_ON_ONCE(1); return ERR_PTR(-EINVAL); } } static void free_rule(struct landlock_rule *const rule, const enum landlock_key_type key_type) { might_sleep(); if (!rule) return; if (is_object_pointer(key_type)) landlock_put_object(rule->key.object); kfree(rule); } static void build_check_ruleset(void) { const struct landlock_ruleset ruleset = { .num_rules = ~0, .num_layers = ~0, }; BUILD_BUG_ON(ruleset.num_rules < LANDLOCK_MAX_NUM_RULES); BUILD_BUG_ON(ruleset.num_layers < LANDLOCK_MAX_NUM_LAYERS); } /** * insert_rule - Create and insert a rule in a ruleset * * @ruleset: The ruleset to be updated. * @id: The ID to build the new rule with. The underlying kernel object, if * any, must be held by the caller. * @layers: One or multiple layers to be copied into the new rule. * @num_layers: The number of @layers entries. * * When user space requests to add a new rule to a ruleset, @layers only * contains one entry and this entry is not assigned to any level. In this * case, the new rule will extend @ruleset, similarly to a boolean OR between * access rights. * * When merging a ruleset in a domain, or copying a domain, @layers will be * added to @ruleset as new constraints, similarly to a boolean AND between * access rights. */ static int insert_rule(struct landlock_ruleset *const ruleset, const struct landlock_id id, const struct landlock_layer (*const layers)[], const size_t num_layers) { struct rb_node **walker_node; struct rb_node *parent_node = NULL; struct landlock_rule *new_rule; struct rb_root *root; might_sleep(); lockdep_assert_held(&ruleset->lock); if (WARN_ON_ONCE(!layers)) return -ENOENT; if (is_object_pointer(id.type) && WARN_ON_ONCE(!id.key.object)) return -ENOENT; root = get_root(ruleset, id.type); if (IS_ERR(root)) return PTR_ERR(root); walker_node = &root->rb_node; while (*walker_node) { struct landlock_rule *const this = rb_entry(*walker_node, struct landlock_rule, node); if (this->key.data != id.key.data) { parent_node = *walker_node; if (this->key.data < id.key.data) walker_node = &((*walker_node)->rb_right); else walker_node = &((*walker_node)->rb_left); continue; } /* Only a single-level layer should match an existing rule. */ if (WARN_ON_ONCE(num_layers != 1)) return -EINVAL; /* If there is a matching rule, updates it. */ if ((*layers)[0].level == 0) { /* * Extends access rights when the request comes from * landlock_add_rule(2), i.e. @ruleset is not a domain. */ if (WARN_ON_ONCE(this->num_layers != 1)) return -EINVAL; if (WARN_ON_ONCE(this->layers[0].level != 0)) return -EINVAL; this->layers[0].access |= (*layers)[0].access; return 0; } if (WARN_ON_ONCE(this->layers[0].level == 0)) return -EINVAL; /* * Intersects access rights when it is a merge between a * ruleset and a domain. */ new_rule = create_rule(id, &this->layers, this->num_layers, &(*layers)[0]); if (IS_ERR(new_rule)) return PTR_ERR(new_rule); rb_replace_node(&this->node, &new_rule->node, root); free_rule(this, id.type); return 0; } /* There is no match for @id. 
*/ build_check_ruleset(); if (ruleset->num_rules >= LANDLOCK_MAX_NUM_RULES) return -E2BIG; new_rule = create_rule(id, layers, num_layers, NULL); if (IS_ERR(new_rule)) return PTR_ERR(new_rule); rb_link_node(&new_rule->node, parent_node, walker_node); rb_insert_color(&new_rule->node, root); ruleset->num_rules++; return 0; } static void build_check_layer(void) { const struct landlock_layer layer = { .level = ~0, .access = ~0, }; BUILD_BUG_ON(layer.level < LANDLOCK_MAX_NUM_LAYERS); BUILD_BUG_ON(layer.access < LANDLOCK_MASK_ACCESS_FS); } /* @ruleset must be locked by the caller. */ int landlock_insert_rule(struct landlock_ruleset *const ruleset, const struct landlock_id id, const access_mask_t access) { struct landlock_layer layers[] = { { .access = access, /* When @level is zero, insert_rule() extends @ruleset. */ .level = 0, } }; build_check_layer(); return insert_rule(ruleset, id, &layers, ARRAY_SIZE(layers)); } static void get_hierarchy(struct landlock_hierarchy *const hierarchy) { if (hierarchy) refcount_inc(&hierarchy->usage); } static void put_hierarchy(struct landlock_hierarchy *hierarchy) { while (hierarchy && refcount_dec_and_test(&hierarchy->usage)) { const struct landlock_hierarchy *const freeme = hierarchy; hierarchy = hierarchy->parent; kfree(freeme); } } static int merge_tree(struct landlock_ruleset *const dst, struct landlock_ruleset *const src, const enum landlock_key_type key_type) { struct landlock_rule *walker_rule, *next_rule; struct rb_root *src_root; int err = 0; might_sleep(); lockdep_assert_held(&dst->lock); lockdep_assert_held(&src->lock); src_root = get_root(src, key_type); if (IS_ERR(src_root)) return PTR_ERR(src_root); /* Merges the @src tree. */ rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, src_root, node) { struct landlock_layer layers[] = { { .level = dst->num_layers, } }; const struct landlock_id id = { .key = walker_rule->key, .type = key_type, }; if (WARN_ON_ONCE(walker_rule->num_layers != 1)) return -EINVAL; if (WARN_ON_ONCE(walker_rule->layers[0].level != 0)) return -EINVAL; layers[0].access = walker_rule->layers[0].access; err = insert_rule(dst, id, &layers, ARRAY_SIZE(layers)); if (err) return err; } return err; } static int merge_ruleset(struct landlock_ruleset *const dst, struct landlock_ruleset *const src) { int err = 0; might_sleep(); /* Should already be checked by landlock_merge_ruleset() */ if (WARN_ON_ONCE(!src)) return 0; /* Only merge into a domain. */ if (WARN_ON_ONCE(!dst || !dst->hierarchy)) return -EINVAL; /* Locks @dst first because we are its only owner. */ mutex_lock(&dst->lock); mutex_lock_nested(&src->lock, SINGLE_DEPTH_NESTING); /* Stacks the new layer. */ if (WARN_ON_ONCE(src->num_layers != 1 || dst->num_layers < 1)) { err = -EINVAL; goto out_unlock; } dst->access_masks[dst->num_layers - 1] = landlock_upgrade_handled_access_masks(src->access_masks[0]); /* Merges the @src inode tree. */ err = merge_tree(dst, src, LANDLOCK_KEY_INODE); if (err) goto out_unlock; #if IS_ENABLED(CONFIG_INET) /* Merges the @src network port tree. 
*/ err = merge_tree(dst, src, LANDLOCK_KEY_NET_PORT); if (err) goto out_unlock; #endif /* IS_ENABLED(CONFIG_INET) */ out_unlock: mutex_unlock(&src->lock); mutex_unlock(&dst->lock); return err; } static int inherit_tree(struct landlock_ruleset *const parent, struct landlock_ruleset *const child, const enum landlock_key_type key_type) { struct landlock_rule *walker_rule, *next_rule; struct rb_root *parent_root; int err = 0; might_sleep(); lockdep_assert_held(&parent->lock); lockdep_assert_held(&child->lock); parent_root = get_root(parent, key_type); if (IS_ERR(parent_root)) return PTR_ERR(parent_root); /* Copies the @parent inode or network tree. */ rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, parent_root, node) { const struct landlock_id id = { .key = walker_rule->key, .type = key_type, }; err = insert_rule(child, id, &walker_rule->layers, walker_rule->num_layers); if (err) return err; } return err; } static int inherit_ruleset(struct landlock_ruleset *const parent, struct landlock_ruleset *const child) { int err = 0; might_sleep(); if (!parent) return 0; /* Locks @child first because we are its only owner. */ mutex_lock(&child->lock); mutex_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING); /* Copies the @parent inode tree. */ err = inherit_tree(parent, child, LANDLOCK_KEY_INODE); if (err) goto out_unlock; #if IS_ENABLED(CONFIG_INET) /* Copies the @parent network port tree. */ err = inherit_tree(parent, child, LANDLOCK_KEY_NET_PORT); if (err) goto out_unlock; #endif /* IS_ENABLED(CONFIG_INET) */ if (WARN_ON_ONCE(child->num_layers <= parent->num_layers)) { err = -EINVAL; goto out_unlock; } /* Copies the parent layer stack and leaves a space for the new layer. */ memcpy(child->access_masks, parent->access_masks, flex_array_size(parent, access_masks, parent->num_layers)); if (WARN_ON_ONCE(!parent->hierarchy)) { err = -EINVAL; goto out_unlock; } get_hierarchy(parent->hierarchy); child->hierarchy->parent = parent->hierarchy; out_unlock: mutex_unlock(&parent->lock); mutex_unlock(&child->lock); return err; } static void free_ruleset(struct landlock_ruleset *const ruleset) { struct landlock_rule *freeme, *next; might_sleep(); rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root_inode, node) free_rule(freeme, LANDLOCK_KEY_INODE); #if IS_ENABLED(CONFIG_INET) rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root_net_port, node) free_rule(freeme, LANDLOCK_KEY_NET_PORT); #endif /* IS_ENABLED(CONFIG_INET) */ put_hierarchy(ruleset->hierarchy); kfree(ruleset); } void landlock_put_ruleset(struct landlock_ruleset *const ruleset) { might_sleep(); if (ruleset && refcount_dec_and_test(&ruleset->usage)) free_ruleset(ruleset); } static void free_ruleset_work(struct work_struct *const work) { struct landlock_ruleset *ruleset; ruleset = container_of(work, struct landlock_ruleset, work_free); free_ruleset(ruleset); } void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset) { if (ruleset && refcount_dec_and_test(&ruleset->usage)) { INIT_WORK(&ruleset->work_free, free_ruleset_work); schedule_work(&ruleset->work_free); } } /** * landlock_merge_ruleset - Merge a ruleset with a domain * * @parent: Parent domain. * @ruleset: New ruleset to be merged. * * Returns the intersection of @parent and @ruleset, or returns @parent if * @ruleset is empty, or returns a duplicate of @ruleset if @parent is empty. 
*/ struct landlock_ruleset * landlock_merge_ruleset(struct landlock_ruleset *const parent, struct landlock_ruleset *const ruleset) { struct landlock_ruleset *new_dom __free(landlock_put_ruleset) = NULL; u32 num_layers; int err; might_sleep(); if (WARN_ON_ONCE(!ruleset || parent == ruleset)) return ERR_PTR(-EINVAL); if (parent) { if (parent->num_layers >= LANDLOCK_MAX_NUM_LAYERS) return ERR_PTR(-E2BIG); num_layers = parent->num_layers + 1; } else { num_layers = 1; } /* Creates a new domain... */ new_dom = create_ruleset(num_layers); if (IS_ERR(new_dom)) return new_dom; new_dom->hierarchy = kzalloc(sizeof(*new_dom->hierarchy), GFP_KERNEL_ACCOUNT); if (!new_dom->hierarchy) return ERR_PTR(-ENOMEM); refcount_set(&new_dom->hierarchy->usage, 1); /* ...as a child of @parent... */ err = inherit_ruleset(parent, new_dom); if (err) return ERR_PTR(err); /* ...and including @ruleset. */ err = merge_ruleset(new_dom, ruleset); if (err) return ERR_PTR(err); return no_free_ptr(new_dom); } /* * The returned access has the same lifetime as @ruleset. */ const struct landlock_rule * landlock_find_rule(const struct landlock_ruleset *const ruleset, const struct landlock_id id) { const struct rb_root *root; const struct rb_node *node; root = get_root((struct landlock_ruleset *)ruleset, id.type); if (IS_ERR(root)) return NULL; node = root->rb_node; while (node) { struct landlock_rule *this = rb_entry(node, struct landlock_rule, node); if (this->key.data == id.key.data) return this; if (this->key.data < id.key.data) node = node->rb_right; else node = node->rb_left; } return NULL; } /* * @layer_masks is read and may be updated according to the access request and * the matching rule. * @masks_array_size must be equal to ARRAY_SIZE(*layer_masks). * * Returns true if the request is allowed (i.e. relevant layer masks for the * request are empty). */ bool landlock_unmask_layers(const struct landlock_rule *const rule, const access_mask_t access_request, layer_mask_t (*const layer_masks)[], const size_t masks_array_size) { size_t layer_level; if (!access_request || !layer_masks) return true; if (!rule) return false; /* * An access is granted if, for each policy layer, at least one rule * encountered on the pathwalk grants the requested access, * regardless of its position in the layer stack. We must then check * the remaining layers for each inode, from the first added layer to * the last one. When there is multiple requested accesses, for each * policy layer, the full set of requested accesses may not be granted * by only one rule, but by the union (binary OR) of multiple rules. * E.g. /a/b <execute> + /a <read> => /a/b <execute + read> */ for (layer_level = 0; layer_level < rule->num_layers; layer_level++) { const struct landlock_layer *const layer = &rule->layers[layer_level]; const layer_mask_t layer_bit = BIT_ULL(layer->level - 1); const unsigned long access_req = access_request; unsigned long access_bit; bool is_empty; /* * Records in @layer_masks which layer grants access to each * requested access. 
*/ is_empty = true; for_each_set_bit(access_bit, &access_req, masks_array_size) { if (layer->access & BIT_ULL(access_bit)) (*layer_masks)[access_bit] &= ~layer_bit; is_empty = is_empty && !(*layer_masks)[access_bit]; } if (is_empty) return true; } return false; } typedef access_mask_t get_access_mask_t(const struct landlock_ruleset *const ruleset, const u16 layer_level); /** * landlock_init_layer_masks - Initialize layer masks from an access request * * Populates @layer_masks such that for each access right in @access_request, * the bits for all the layers are set where this access right is handled. * * @domain: The domain that defines the current restrictions. * @access_request: The requested access rights to check. * @layer_masks: It must contain %LANDLOCK_NUM_ACCESS_FS or * %LANDLOCK_NUM_ACCESS_NET elements according to @key_type. * @key_type: The key type to switch between access masks of different types. * * Returns: An access mask where each access right bit is set which is handled * in any of the active layers in @domain. */ access_mask_t landlock_init_layer_masks(const struct landlock_ruleset *const domain, const access_mask_t access_request, layer_mask_t (*const layer_masks)[], const enum landlock_key_type key_type) { access_mask_t handled_accesses = 0; size_t layer_level, num_access; get_access_mask_t *get_access_mask; switch (key_type) { case LANDLOCK_KEY_INODE: get_access_mask = landlock_get_fs_access_mask; num_access = LANDLOCK_NUM_ACCESS_FS; break; #if IS_ENABLED(CONFIG_INET) case LANDLOCK_KEY_NET_PORT: get_access_mask = landlock_get_net_access_mask; num_access = LANDLOCK_NUM_ACCESS_NET; break; #endif /* IS_ENABLED(CONFIG_INET) */ default: WARN_ON_ONCE(1); return 0; } memset(layer_masks, 0, array_size(sizeof((*layer_masks)[0]), num_access)); /* An empty access request can happen because of O_WRONLY | O_RDWR. */ if (!access_request) return 0; /* Saves all handled accesses per layer. */ for (layer_level = 0; layer_level < domain->num_layers; layer_level++) { const unsigned long access_req = access_request; const access_mask_t access_mask = get_access_mask(domain, layer_level); unsigned long access_bit; for_each_set_bit(access_bit, &access_req, num_access) { if (BIT_ULL(access_bit) & access_mask) { (*layer_masks)[access_bit] |= BIT_ULL(layer_level); handled_accesses |= BIT_ULL(access_bit); } } } return handled_accesses; }
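The ruleset management above backs the documented Landlock UAPI. A minimal userspace sketch follows: it builds a one-layer ruleset, adds a single path rule, then self-restricts. The handled accesses, the "/usr" path, and the availability of the SYS_landlock_* syscall numbers in the installed headers are assumptions made for illustration.

/* Hedged sketch of the syscall interface served by this ruleset code. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/landlock.h>

int main(void)
{
	struct landlock_ruleset_attr ruleset_attr = {
		/* Assumed handled accesses for the example. */
		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE |
				     LANDLOCK_ACCESS_FS_WRITE_FILE,
	};
	struct landlock_path_beneath_attr path_beneath = {
		.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE,
	};
	int ruleset_fd;

	ruleset_fd = syscall(SYS_landlock_create_ruleset, &ruleset_attr,
			     sizeof(ruleset_attr), 0);
	if (ruleset_fd < 0)
		return 1;

	path_beneath.parent_fd = open("/usr", O_PATH | O_CLOEXEC); /* assumed path */
	if (path_beneath.parent_fd < 0)
		return 1;
	if (syscall(SYS_landlock_add_rule, ruleset_fd,
		    LANDLOCK_RULE_PATH_BENEATH, &path_beneath, 0))
		return 1;
	close(path_beneath.parent_fd);

	/* Required so an unprivileged task may restrict itself. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return 1;
	if (syscall(SYS_landlock_restrict_self, ruleset_fd, 0))
		return 1;
	close(ruleset_fd);

	puts("restricted: only read access is allowed under /usr");
	return 0;
}

Each successive landlock_restrict_self() call stacks one more layer, which is what the num_layers accounting and merge_ruleset() above implement.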
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Randomness driver for virtio
 *  Copyright (C) 2007, 2008 Rusty Russell IBM Corporation
 */

#include <asm/barrier.h>
#include <linux/err.h>
#include <linux/hw_random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/virtio_rng.h>
#include <linux/module.h>
#include <linux/slab.h>

static DEFINE_IDA(rng_index_ida);

struct virtrng_info {
	struct hwrng hwrng;
	struct virtqueue *vq;
	char name[25];
	int index;
	bool hwrng_register_done;
	bool hwrng_removed;
	/* data transfer */
	struct completion have_data;
	unsigned int data_avail;
	unsigned int data_idx;
	/* minimal size returned by rng_buffer_size() */
#if SMP_CACHE_BYTES < 32
	u8 data[32];
#else
	u8 data[SMP_CACHE_BYTES];
#endif
};

static void random_recv_done(struct virtqueue *vq)
{
	struct virtrng_info *vi = vq->vdev->priv;
	unsigned int len;

	/* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
	if (!virtqueue_get_buf(vi->vq, &len))
		return;

	smp_store_release(&vi->data_avail, len);
	complete(&vi->have_data);
}

static void request_entropy(struct virtrng_info *vi)
{
	struct scatterlist sg;

	reinit_completion(&vi->have_data);
	vi->data_idx = 0;

	sg_init_one(&sg, vi->data, sizeof(vi->data));

	/* There should always be room for one buffer.
*/ virtqueue_add_inbuf(vi->vq, &sg, 1, vi->data, GFP_KERNEL); virtqueue_kick(vi->vq); } static unsigned int copy_data(struct virtrng_info *vi, void *buf, unsigned int size) { size = min_t(unsigned int, size, vi->data_avail); memcpy(buf, vi->data + vi->data_idx, size); vi->data_idx += size; vi->data_avail -= size; if (vi->data_avail == 0) request_entropy(vi); return size; } static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait) { int ret; struct virtrng_info *vi = (struct virtrng_info *)rng->priv; unsigned int chunk; size_t read; if (vi->hwrng_removed) return -ENODEV; read = 0; /* copy available data */ if (smp_load_acquire(&vi->data_avail)) { chunk = copy_data(vi, buf, size); size -= chunk; read += chunk; } if (!wait) return read; /* We have already copied available entropy, * so either size is 0 or data_avail is 0 */ while (size != 0) { /* data_avail is 0 but a request is pending */ ret = wait_for_completion_killable(&vi->have_data); if (ret < 0) return ret; /* if vi->data_avail is 0, we have been interrupted * by a cleanup, but buffer stays in the queue */ if (vi->data_avail == 0) return read; chunk = copy_data(vi, buf + read, size); size -= chunk; read += chunk; } return read; } static void virtio_cleanup(struct hwrng *rng) { struct virtrng_info *vi = (struct virtrng_info *)rng->priv; complete(&vi->have_data); } static int probe_common(struct virtio_device *vdev) { int err, index; struct virtrng_info *vi = NULL; vi = kzalloc(sizeof(struct virtrng_info), GFP_KERNEL); if (!vi) return -ENOMEM; vi->index = index = ida_alloc(&rng_index_ida, GFP_KERNEL); if (index < 0) { err = index; goto err_ida; } sprintf(vi->name, "virtio_rng.%d", index); init_completion(&vi->have_data); vi->hwrng = (struct hwrng) { .read = virtio_read, .cleanup = virtio_cleanup, .priv = (unsigned long)vi, .name = vi->name, }; vdev->priv = vi; /* We expect a single virtqueue. */ vi->vq = virtio_find_single_vq(vdev, random_recv_done, "input"); if (IS_ERR(vi->vq)) { err = PTR_ERR(vi->vq); goto err_find; } virtio_device_ready(vdev); /* we always have a pending entropy request */ request_entropy(vi); return 0; err_find: ida_free(&rng_index_ida, index); err_ida: kfree(vi); return err; } static void remove_common(struct virtio_device *vdev) { struct virtrng_info *vi = vdev->priv; vi->hwrng_removed = true; vi->data_avail = 0; vi->data_idx = 0; complete(&vi->have_data); if (vi->hwrng_register_done) hwrng_unregister(&vi->hwrng); virtio_reset_device(vdev); vdev->config->del_vqs(vdev); ida_free(&rng_index_ida, vi->index); kfree(vi); } static int virtrng_probe(struct virtio_device *vdev) { return probe_common(vdev); } static void virtrng_remove(struct virtio_device *vdev) { remove_common(vdev); } static void virtrng_scan(struct virtio_device *vdev) { struct virtrng_info *vi = vdev->priv; int err; err = hwrng_register(&vi->hwrng); if (!err) vi->hwrng_register_done = true; } static int virtrng_freeze(struct virtio_device *vdev) { remove_common(vdev); return 0; } static int virtrng_restore(struct virtio_device *vdev) { int err; err = probe_common(vdev); if (!err) { struct virtrng_info *vi = vdev->priv; /* * Set hwrng_removed to ensure that virtio_read() * does not block waiting for data before the * registration is complete. 
*/ vi->hwrng_removed = true; err = hwrng_register(&vi->hwrng); if (!err) { vi->hwrng_register_done = true; vi->hwrng_removed = false; } } return err; } static const struct virtio_device_id id_table[] = { { VIRTIO_ID_RNG, VIRTIO_DEV_ANY_ID }, { 0 }, }; static struct virtio_driver virtio_rng_driver = { .driver.name = KBUILD_MODNAME, .id_table = id_table, .probe = virtrng_probe, .remove = virtrng_remove, .scan = virtrng_scan, .freeze = pm_sleep_ptr(virtrng_freeze), .restore = pm_sleep_ptr(virtrng_restore), }; module_virtio_driver(virtio_rng_driver); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio random number driver"); MODULE_LICENSE("GPL");
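The driver above only moves entropy from the virtqueue into the hw_random core; consumers never talk to the virtqueue directly. A minimal userspace sketch of exercising the registered source, assuming CONFIG_HW_RANDOM exposes /dev/hwrng and that virtio_rng.N is the currently selected source (see /sys/class/misc/hw_random/rng_current):

/*
 * Userspace sketch only, not part of the driver. Reads a few bytes through
 * the hw_random character device that the virtio-rng backend feeds.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t n;
	int fd = open("/dev/hwrng", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/hwrng");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));	/* blocks until the virtqueue returns entropy */
	if (n > 0) {
		for (ssize_t i = 0; i < n; i++)
			printf("%02x", buf[i]);
		putchar('\n');
	}
	close(fd);
	return n > 0 ? 0 : 1;
}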
// SPDX-License-Identifier: GPL-2.0-or-later /* Request a key from userspace * * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com) * * See Documentation/security/keys/request-key.rst */ #include <linux/export.h> #include <linux/sched.h> #include <linux/kmod.h> #include <linux/err.h> #include <linux/keyctl.h> #include <linux/slab.h> #include <net/net_namespace.h> #include "internal.h" #include <keys/request_key_auth-type.h> #define key_negative_timeout 60 /* default timeout on a negative key's existence */ static struct key *check_cached_key(struct keyring_search_context *ctx) { #ifdef CONFIG_KEYS_REQUEST_CACHE struct key *key = current->cached_requested_key; if (key && ctx->match_data.cmp(key, &ctx->match_data) && !(key->flags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED)))) return key_get(key); #endif return NULL; } static void cache_requested_key(struct key *key) { #ifdef CONFIG_KEYS_REQUEST_CACHE struct task_struct *t = current; /* Do not cache key if it is a kernel thread */ if (!(t->flags & PF_KTHREAD)) { key_put(t->cached_requested_key); t->cached_requested_key = key_get(key); set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); } #endif } /** * complete_request_key - Complete the construction of a key. * @authkey: The authorisation key. * @error: The success or failute of the construction. * * Complete the attempt to construct a key. The key will be negated * if an error is indicated. The authorisation key will be revoked * unconditionally. */ void complete_request_key(struct key *authkey, int error) { struct request_key_auth *rka = get_request_key_auth(authkey); struct key *key = rka->target_key; kenter("%d{%d},%d", authkey->serial, key->serial, error); if (error < 0) key_negate_and_link(key, key_negative_timeout, NULL, authkey); else key_revoke(authkey); } EXPORT_SYMBOL(complete_request_key); /* * Initialise a usermode helper that is going to have a specific session * keyring. * * This is called in context of freshly forked kthread before kernel_execve(), * so we can simply install the desired session_keyring at this point. */ static int umh_keys_init(struct subprocess_info *info, struct cred *cred) { struct key *keyring = info->data; return install_session_keyring_to_cred(cred, keyring); } /* * Clean up a usermode helper with session keyring. */ static void umh_keys_cleanup(struct subprocess_info *info) { struct key *keyring = info->data; key_put(keyring); } /* * Call a usermode helper with a specific session keyring. 
*/ static int call_usermodehelper_keys(const char *path, char **argv, char **envp, struct key *session_keyring, int wait) { struct subprocess_info *info; info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL, umh_keys_init, umh_keys_cleanup, session_keyring); if (!info) return -ENOMEM; key_get(session_keyring); return call_usermodehelper_exec(info, wait); } /* * Request userspace finish the construction of a key * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>" */ static int call_sbin_request_key(struct key *authkey, void *aux) { static char const request_key[] = "/sbin/request-key"; struct request_key_auth *rka = get_request_key_auth(authkey); const struct cred *cred = current_cred(); key_serial_t prkey, sskey; struct key *key = rka->target_key, *keyring, *session, *user_session; char *argv[9], *envp[3], uid_str[12], gid_str[12]; char key_str[12], keyring_str[3][12]; char desc[20]; int ret, i; kenter("{%d},{%d},%s", key->serial, authkey->serial, rka->op); ret = look_up_user_keyrings(NULL, &user_session); if (ret < 0) goto error_us; /* allocate a new session keyring */ sprintf(desc, "_req.%u", key->serial); cred = get_current_cred(); keyring = keyring_alloc(desc, cred->fsuid, cred->fsgid, cred, KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ, KEY_ALLOC_QUOTA_OVERRUN, NULL, NULL); put_cred(cred); if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); goto error_alloc; } /* attach the auth key to the session keyring */ ret = key_link(keyring, authkey); if (ret < 0) goto error_link; /* record the UID and GID */ sprintf(uid_str, "%d", from_kuid(&init_user_ns, cred->fsuid)); sprintf(gid_str, "%d", from_kgid(&init_user_ns, cred->fsgid)); /* we say which key is under construction */ sprintf(key_str, "%d", key->serial); /* we specify the process's default keyrings */ sprintf(keyring_str[0], "%d", cred->thread_keyring ? cred->thread_keyring->serial : 0); prkey = 0; if (cred->process_keyring) prkey = cred->process_keyring->serial; sprintf(keyring_str[1], "%d", prkey); session = cred->session_keyring; if (!session) session = user_session; sskey = session->serial; sprintf(keyring_str[2], "%d", sskey); /* set up a minimal environment */ i = 0; envp[i++] = "HOME=/"; envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; envp[i] = NULL; /* set up the argument list */ i = 0; argv[i++] = (char *)request_key; argv[i++] = (char *)rka->op; argv[i++] = key_str; argv[i++] = uid_str; argv[i++] = gid_str; argv[i++] = keyring_str[0]; argv[i++] = keyring_str[1]; argv[i++] = keyring_str[2]; argv[i] = NULL; /* do it */ ret = call_usermodehelper_keys(request_key, argv, envp, keyring, UMH_WAIT_PROC); kdebug("usermode -> 0x%x", ret); if (ret >= 0) { /* ret is the exit/wait code */ if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags) || key_validate(key) < 0) ret = -ENOKEY; else /* ignore any errors from userspace if the key was * instantiated */ ret = 0; } error_link: key_put(keyring); error_alloc: key_put(user_session); error_us: complete_request_key(authkey, ret); kleave(" = %d", ret); return ret; } /* * Call out to userspace for key construction. * * Program failure is ignored in favour of key status. 
*/ static int construct_key(struct key *key, const void *callout_info, size_t callout_len, void *aux, struct key *dest_keyring) { request_key_actor_t actor; struct key *authkey; int ret; kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux); /* allocate an authorisation key */ authkey = request_key_auth_new(key, "create", callout_info, callout_len, dest_keyring); if (IS_ERR(authkey)) return PTR_ERR(authkey); /* Make the call */ actor = call_sbin_request_key; if (key->type->request_key) actor = key->type->request_key; ret = actor(authkey, aux); /* check that the actor called complete_request_key() prior to * returning an error */ WARN_ON(ret < 0 && !test_bit(KEY_FLAG_INVALIDATED, &authkey->flags)); key_put(authkey); kleave(" = %d", ret); return ret; } /* * Get the appropriate destination keyring for the request. * * The keyring selected is returned with an extra reference upon it which the * caller must release. */ static int construct_get_dest_keyring(struct key **_dest_keyring) { struct request_key_auth *rka; const struct cred *cred = current_cred(); struct key *dest_keyring = *_dest_keyring, *authkey; int ret; kenter("%p", dest_keyring); /* find the appropriate keyring */ if (dest_keyring) { /* the caller supplied one */ key_get(dest_keyring); } else { bool do_perm_check = true; /* use a default keyring; falling through the cases until we * find one that we actually have */ switch (cred->jit_keyring) { case KEY_REQKEY_DEFL_DEFAULT: case KEY_REQKEY_DEFL_REQUESTOR_KEYRING: if (cred->request_key_auth) { authkey = cred->request_key_auth; down_read(&authkey->sem); rka = get_request_key_auth(authkey); if (!test_bit(KEY_FLAG_REVOKED, &authkey->flags)) dest_keyring = key_get(rka->dest_keyring); up_read(&authkey->sem); if (dest_keyring) { do_perm_check = false; break; } } fallthrough; case KEY_REQKEY_DEFL_THREAD_KEYRING: dest_keyring = key_get(cred->thread_keyring); if (dest_keyring) break; fallthrough; case KEY_REQKEY_DEFL_PROCESS_KEYRING: dest_keyring = key_get(cred->process_keyring); if (dest_keyring) break; fallthrough; case KEY_REQKEY_DEFL_SESSION_KEYRING: dest_keyring = key_get(cred->session_keyring); if (dest_keyring) break; fallthrough; case KEY_REQKEY_DEFL_USER_SESSION_KEYRING: ret = look_up_user_keyrings(NULL, &dest_keyring); if (ret < 0) return ret; break; case KEY_REQKEY_DEFL_USER_KEYRING: ret = look_up_user_keyrings(&dest_keyring, NULL); if (ret < 0) return ret; break; case KEY_REQKEY_DEFL_GROUP_KEYRING: default: BUG(); } /* * Require Write permission on the keyring. This is essential * because the default keyring may be the session keyring, and * joining a keyring only requires Search permission. * * However, this check is skipped for the "requestor keyring" so * that /sbin/request-key can itself use request_key() to add * keys to the original requestor's destination keyring. */ if (dest_keyring && do_perm_check) { ret = key_permission(make_key_ref(dest_keyring, 1), KEY_NEED_WRITE); if (ret) { key_put(dest_keyring); return ret; } } } *_dest_keyring = dest_keyring; kleave(" [dk %d]", key_serial(dest_keyring)); return 0; } /* * Allocate a new key in under-construction state and attempt to link it in to * the requested keyring. * * May return a key that's already under construction instead if there was a * race between two thread calling request_key(). 
*/ static int construct_alloc_key(struct keyring_search_context *ctx, struct key *dest_keyring, unsigned long flags, struct key_user *user, struct key **_key) { struct assoc_array_edit *edit = NULL; struct key *key; key_perm_t perm; key_ref_t key_ref; int ret; kenter("%s,%s,,,", ctx->index_key.type->name, ctx->index_key.description); *_key = NULL; mutex_lock(&user->cons_lock); perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR; perm |= KEY_USR_VIEW; if (ctx->index_key.type->read) perm |= KEY_POS_READ; if (ctx->index_key.type == &key_type_keyring || ctx->index_key.type->update) perm |= KEY_POS_WRITE; key = key_alloc(ctx->index_key.type, ctx->index_key.description, ctx->cred->fsuid, ctx->cred->fsgid, ctx->cred, perm, flags, NULL); if (IS_ERR(key)) goto alloc_failed; set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags); if (dest_keyring) { ret = __key_link_lock(dest_keyring, &key->index_key); if (ret < 0) goto link_lock_failed; } /* * Attach the key to the destination keyring under lock, but we do need * to do another check just in case someone beat us to it whilst we * waited for locks. * * The caller might specify a comparison function which looks for keys * that do not exactly match but are still equivalent from the caller's * perspective. The __key_link_begin() operation must be done only after * an actual key is determined. */ mutex_lock(&key_construction_mutex); rcu_read_lock(); key_ref = search_process_keyrings_rcu(ctx); rcu_read_unlock(); if (!IS_ERR(key_ref)) goto key_already_present; if (dest_keyring) { ret = __key_link_begin(dest_keyring, &key->index_key, &edit); if (ret < 0) goto link_alloc_failed; __key_link(dest_keyring, key, &edit); } mutex_unlock(&key_construction_mutex); if (dest_keyring) __key_link_end(dest_keyring, &key->index_key, edit); mutex_unlock(&user->cons_lock); *_key = key; kleave(" = 0 [%d]", key_serial(key)); return 0; /* the key is now present - we tell the caller that we found it by * returning -EINPROGRESS */ key_already_present: key_put(key); mutex_unlock(&key_construction_mutex); key = key_ref_to_ptr(key_ref); if (dest_keyring) { ret = __key_link_begin(dest_keyring, &key->index_key, &edit); if (ret < 0) goto link_alloc_failed_unlocked; ret = __key_link_check_live_key(dest_keyring, key); if (ret == 0) __key_link(dest_keyring, key, &edit); __key_link_end(dest_keyring, &key->index_key, edit); if (ret < 0) goto link_check_failed; } mutex_unlock(&user->cons_lock); *_key = key; kleave(" = -EINPROGRESS [%d]", key_serial(key)); return -EINPROGRESS; link_check_failed: mutex_unlock(&user->cons_lock); key_put(key); kleave(" = %d [linkcheck]", ret); return ret; link_alloc_failed: mutex_unlock(&key_construction_mutex); link_alloc_failed_unlocked: __key_link_end(dest_keyring, &key->index_key, edit); link_lock_failed: mutex_unlock(&user->cons_lock); key_put(key); kleave(" = %d [prelink]", ret); return ret; alloc_failed: mutex_unlock(&user->cons_lock); kleave(" = %ld", PTR_ERR(key)); return PTR_ERR(key); } /* * Commence key construction. 
*/ static struct key *construct_key_and_link(struct keyring_search_context *ctx, const char *callout_info, size_t callout_len, void *aux, struct key *dest_keyring, unsigned long flags) { struct key_user *user; struct key *key; int ret; kenter(""); if (ctx->index_key.type == &key_type_keyring) return ERR_PTR(-EPERM); ret = construct_get_dest_keyring(&dest_keyring); if (ret) goto error; user = key_user_lookup(current_fsuid()); if (!user) { ret = -ENOMEM; goto error_put_dest_keyring; } ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key); key_user_put(user); if (ret == 0) { ret = construct_key(key, callout_info, callout_len, aux, dest_keyring); if (ret < 0) { kdebug("cons failed"); goto construction_failed; } } else if (ret == -EINPROGRESS) { ret = 0; } else { goto error_put_dest_keyring; } key_put(dest_keyring); kleave(" = key %d", key_serial(key)); return key; construction_failed: key_negate_and_link(key, key_negative_timeout, NULL, NULL); key_put(key); error_put_dest_keyring: key_put(dest_keyring); error: kleave(" = %d", ret); return ERR_PTR(ret); } /** * request_key_and_link - Request a key and cache it in a keyring. * @type: The type of key we want. * @description: The searchable description of the key. * @domain_tag: The domain in which the key operates. * @callout_info: The data to pass to the instantiation upcall (or NULL). * @callout_len: The length of callout_info. * @aux: Auxiliary data for the upcall. * @dest_keyring: Where to cache the key. * @flags: Flags to key_alloc(). * * A key matching the specified criteria (type, description, domain_tag) is * searched for in the process's keyrings and returned with its usage count * incremented if found. Otherwise, if callout_info is not NULL, a key will be * allocated and some service (probably in userspace) will be asked to * instantiate it. * * If successfully found or created, the key will be linked to the destination * keyring if one is provided. * * Returns a pointer to the key if successful; -EACCES, -ENOKEY, -EKEYREVOKED * or -EKEYEXPIRED if an inaccessible, negative, revoked or expired key was * found; -ENOKEY if no key was found and no @callout_info was given; -EDQUOT * if insufficient key quota was available to create a new key; or -ENOMEM if * insufficient memory was available. * * If the returned key was created, then it may still be under construction, * and wait_for_key_construction() should be used to wait for that to complete. 
*/ struct key *request_key_and_link(struct key_type *type, const char *description, struct key_tag *domain_tag, const void *callout_info, size_t callout_len, void *aux, struct key *dest_keyring, unsigned long flags) { struct keyring_search_context ctx = { .index_key.type = type, .index_key.domain_tag = domain_tag, .index_key.description = description, .index_key.desc_len = strlen(description), .cred = current_cred(), .match_data.cmp = key_default_cmp, .match_data.raw_data = description, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, .flags = (KEYRING_SEARCH_DO_STATE_CHECK | KEYRING_SEARCH_SKIP_EXPIRED | KEYRING_SEARCH_RECURSE), }; struct key *key; key_ref_t key_ref; int ret; kenter("%s,%s,%p,%zu,%p,%p,%lx", ctx.index_key.type->name, ctx.index_key.description, callout_info, callout_len, aux, dest_keyring, flags); if (type->match_preparse) { ret = type->match_preparse(&ctx.match_data); if (ret < 0) { key = ERR_PTR(ret); goto error; } } key = check_cached_key(&ctx); if (key) goto error_free; /* search all the process keyrings for a key */ rcu_read_lock(); key_ref = search_process_keyrings_rcu(&ctx); rcu_read_unlock(); if (!IS_ERR(key_ref)) { if (dest_keyring) { ret = key_task_permission(key_ref, current_cred(), KEY_NEED_LINK); if (ret < 0) { key_ref_put(key_ref); key = ERR_PTR(ret); goto error_free; } } key = key_ref_to_ptr(key_ref); if (dest_keyring) { ret = key_link(dest_keyring, key); if (ret < 0) { key_put(key); key = ERR_PTR(ret); goto error_free; } } /* Only cache the key on immediate success */ cache_requested_key(key); } else if (PTR_ERR(key_ref) != -EAGAIN) { key = ERR_CAST(key_ref); } else { /* the search failed, but the keyrings were searchable, so we * should consult userspace if we can */ key = ERR_PTR(-ENOKEY); if (!callout_info) goto error_free; key = construct_key_and_link(&ctx, callout_info, callout_len, aux, dest_keyring, flags); } error_free: if (type->match_free) type->match_free(&ctx.match_data); error: kleave(" = %p", key); return key; } /** * wait_for_key_construction - Wait for construction of a key to complete * @key: The key being waited for. * @intr: Whether to wait interruptibly. * * Wait for a key to finish being constructed. * * Returns 0 if successful; -ERESTARTSYS if the wait was interrupted; -ENOKEY * if the key was negated; or -EKEYREVOKED or -EKEYEXPIRED if the key was * revoked or expired. */ int wait_for_key_construction(struct key *key, bool intr) { int ret; ret = wait_on_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT, intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); if (ret) return -ERESTARTSYS; ret = key_read_state(key); if (ret < 0) return ret; return key_validate(key); } EXPORT_SYMBOL(wait_for_key_construction); /** * request_key_tag - Request a key and wait for construction * @type: Type of key. * @description: The searchable description of the key. * @domain_tag: The domain in which the key operates. * @callout_info: The data to pass to the instantiation upcall (or NULL). * * As for request_key_and_link() except that it does not add the returned key * to a keyring if found, new keys are always allocated in the user's quota, * the callout_info must be a NUL-terminated string and no auxiliary data can * be passed. * * Furthermore, it then works as wait_for_key_construction() to wait for the * completion of keys undergoing construction with a non-interruptible wait. 
*/ struct key *request_key_tag(struct key_type *type, const char *description, struct key_tag *domain_tag, const char *callout_info) { struct key *key; size_t callout_len = 0; int ret; if (callout_info) callout_len = strlen(callout_info); key = request_key_and_link(type, description, domain_tag, callout_info, callout_len, NULL, NULL, KEY_ALLOC_IN_QUOTA); if (!IS_ERR(key)) { ret = wait_for_key_construction(key, false); if (ret < 0) { key_put(key); return ERR_PTR(ret); } } return key; } EXPORT_SYMBOL(request_key_tag); /** * request_key_with_auxdata - Request a key with auxiliary data for the upcaller * @type: The type of key we want. * @description: The searchable description of the key. * @domain_tag: The domain in which the key operates. * @callout_info: The data to pass to the instantiation upcall (or NULL). * @callout_len: The length of callout_info. * @aux: Auxiliary data for the upcall. * * As for request_key_and_link() except that it does not add the returned key * to a keyring if found and new keys are always allocated in the user's quota. * * Furthermore, it then works as wait_for_key_construction() to wait for the * completion of keys undergoing construction with a non-interruptible wait. */ struct key *request_key_with_auxdata(struct key_type *type, const char *description, struct key_tag *domain_tag, const void *callout_info, size_t callout_len, void *aux) { struct key *key; int ret; key = request_key_and_link(type, description, domain_tag, callout_info, callout_len, aux, NULL, KEY_ALLOC_IN_QUOTA); if (!IS_ERR(key)) { ret = wait_for_key_construction(key, false); if (ret < 0) { key_put(key); return ERR_PTR(ret); } } return key; } EXPORT_SYMBOL(request_key_with_auxdata); /** * request_key_rcu - Request key from RCU-read-locked context * @type: The type of key we want. * @description: The name of the key we want. * @domain_tag: The domain in which the key operates. * * Request a key from a context that we may not sleep in (such as RCU-mode * pathwalk). Keys under construction are ignored. * * Return a pointer to the found key if successful, -ENOKEY if we couldn't find * a key or some other error if the key found was unsuitable or inaccessible. */ struct key *request_key_rcu(struct key_type *type, const char *description, struct key_tag *domain_tag) { struct keyring_search_context ctx = { .index_key.type = type, .index_key.domain_tag = domain_tag, .index_key.description = description, .index_key.desc_len = strlen(description), .cred = current_cred(), .match_data.cmp = key_default_cmp, .match_data.raw_data = description, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, .flags = (KEYRING_SEARCH_DO_STATE_CHECK | KEYRING_SEARCH_SKIP_EXPIRED), }; struct key *key; key_ref_t key_ref; kenter("%s,%s", type->name, description); key = check_cached_key(&ctx); if (key) return key; /* search all the process keyrings for a key */ key_ref = search_process_keyrings_rcu(&ctx); if (IS_ERR(key_ref)) { key = ERR_CAST(key_ref); if (PTR_ERR(key_ref) == -EAGAIN) key = ERR_PTR(-ENOKEY); } else { key = key_ref_to_ptr(key_ref); cache_requested_key(key); } kleave(" = %p", key); return key; } EXPORT_SYMBOL(request_key_rcu);
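End to end, the code above is normally driven by the request_key(2) system call. A small sketch, assuming libkeyutils is installed (link with -lkeyutils) and using the generic "user" key type; the description "example:token" and the callout string are placeholders. If no matching key exists, request_key_and_link() falls into construct_key_and_link(), and the upcall (typically /sbin/request-key with the "create" op) is asked to instantiate the key:

#include <keyutils.h>
#include <stdio.h>

int main(void)
{
	key_serial_t key;

	/* Search the process keyrings; upcall to userspace if not found. */
	key = request_key("user", "example:token", "callout-data",
			  KEY_SPEC_SESSION_KEYRING);
	if (key < 0) {
		perror("request_key");	/* typically ENOKEY if the upcall negated it */
		return 1;
	}
	printf("key serial: %d\n", key);
	return 0;
}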
// SPDX-License-Identifier: GPL-2.0 #include "io_uring.h" #include "napi.h" #ifdef CONFIG_NET_RX_BUSY_POLL /* Timeout for cleanout of stale entries. */ #define NAPI_TIMEOUT (60 * SEC_CONVERSION) struct io_napi_entry { unsigned int napi_id; struct list_head list; unsigned long timeout; struct hlist_node node; struct rcu_head rcu; }; static struct io_napi_entry *io_napi_hash_find(struct hlist_head *hash_list, unsigned int napi_id) { struct io_napi_entry *e; hlist_for_each_entry_rcu(e, hash_list, node) { if (e->napi_id != napi_id) continue; return e; } return NULL; } static inline ktime_t net_to_ktime(unsigned long t) { /* napi approximating usecs, reverse busy_loop_current_time */ return ns_to_ktime(t << 10); } int __io_napi_add_id(struct io_ring_ctx *ctx, unsigned int napi_id) { struct hlist_head *hash_list; struct io_napi_entry *e; /* Non-NAPI IDs can be rejected. */ if (napi_id < MIN_NAPI_ID) return -EINVAL; hash_list = &ctx->napi_ht[hash_min(napi_id, HASH_BITS(ctx->napi_ht))]; scoped_guard(rcu) { e = io_napi_hash_find(hash_list, napi_id); if (e) { WRITE_ONCE(e->timeout, jiffies + NAPI_TIMEOUT); return -EEXIST; } } e = kmalloc(sizeof(*e), GFP_NOWAIT); if (!e) return -ENOMEM; e->napi_id = napi_id; e->timeout = jiffies + NAPI_TIMEOUT; /* * guard(spinlock) is not used to manually unlock it before calling * kfree() */ spin_lock(&ctx->napi_lock); if (unlikely(io_napi_hash_find(hash_list, napi_id))) { spin_unlock(&ctx->napi_lock); kfree(e); return -EEXIST; } hlist_add_tail_rcu(&e->node, hash_list); list_add_tail_rcu(&e->list, &ctx->napi_list); spin_unlock(&ctx->napi_lock); return 0; } static int __io_napi_del_id(struct io_ring_ctx *ctx, unsigned int napi_id) { struct hlist_head *hash_list; struct io_napi_entry *e; /* Non-NAPI IDs can be rejected.
*/ if (napi_id < MIN_NAPI_ID) return -EINVAL; hash_list = &ctx->napi_ht[hash_min(napi_id, HASH_BITS(ctx->napi_ht))]; guard(spinlock)(&ctx->napi_lock); e = io_napi_hash_find(hash_list, napi_id); if (!e) return -ENOENT; list_del_rcu(&e->list); hash_del_rcu(&e->node); kfree_rcu(e, rcu); return 0; } static void __io_napi_remove_stale(struct io_ring_ctx *ctx) { struct io_napi_entry *e; guard(spinlock)(&ctx->napi_lock); /* * list_for_each_entry_safe() is not required as long as: * 1. list_del_rcu() does not reset the deleted node next pointer * 2. kfree_rcu() delays the memory freeing until the next quiescent * state */ list_for_each_entry(e, &ctx->napi_list, list) { if (time_after(jiffies, READ_ONCE(e->timeout))) { list_del_rcu(&e->list); hash_del_rcu(&e->node); kfree_rcu(e, rcu); } } } static inline void io_napi_remove_stale(struct io_ring_ctx *ctx, bool is_stale) { if (is_stale) __io_napi_remove_stale(ctx); } static inline bool io_napi_busy_loop_timeout(ktime_t start_time, ktime_t bp) { if (bp) { ktime_t end_time = ktime_add(start_time, bp); ktime_t now = net_to_ktime(busy_loop_current_time()); return ktime_after(now, end_time); } return true; } static bool io_napi_busy_loop_should_end(void *data, unsigned long start_time) { struct io_wait_queue *iowq = data; if (signal_pending(current)) return true; if (io_should_wake(iowq) || io_has_work(iowq->ctx)) return true; if (io_napi_busy_loop_timeout(net_to_ktime(start_time), iowq->napi_busy_poll_dt)) return true; return false; } /* * never report stale entries */ static bool static_tracking_do_busy_loop(struct io_ring_ctx *ctx, bool (*loop_end)(void *, unsigned long), void *loop_end_arg) { struct io_napi_entry *e; list_for_each_entry_rcu(e, &ctx->napi_list, list) napi_busy_loop_rcu(e->napi_id, loop_end, loop_end_arg, ctx->napi_prefer_busy_poll, BUSY_POLL_BUDGET); return false; } static bool dynamic_tracking_do_busy_loop(struct io_ring_ctx *ctx, bool (*loop_end)(void *, unsigned long), void *loop_end_arg) { struct io_napi_entry *e; bool is_stale = false; list_for_each_entry_rcu(e, &ctx->napi_list, list) { napi_busy_loop_rcu(e->napi_id, loop_end, loop_end_arg, ctx->napi_prefer_busy_poll, BUSY_POLL_BUDGET); if (time_after(jiffies, READ_ONCE(e->timeout))) is_stale = true; } return is_stale; } static inline bool __io_napi_do_busy_loop(struct io_ring_ctx *ctx, bool (*loop_end)(void *, unsigned long), void *loop_end_arg) { if (READ_ONCE(ctx->napi_track_mode) == IO_URING_NAPI_TRACKING_STATIC) return static_tracking_do_busy_loop(ctx, loop_end, loop_end_arg); return dynamic_tracking_do_busy_loop(ctx, loop_end, loop_end_arg); } static void io_napi_blocking_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq) { unsigned long start_time = busy_loop_current_time(); bool (*loop_end)(void *, unsigned long) = NULL; void *loop_end_arg = NULL; bool is_stale = false; /* Singular lists use a different napi loop end check function and are * only executed once. */ if (list_is_singular(&ctx->napi_list)) { loop_end = io_napi_busy_loop_should_end; loop_end_arg = iowq; } scoped_guard(rcu) { do { is_stale = __io_napi_do_busy_loop(ctx, loop_end, loop_end_arg); } while (!io_napi_busy_loop_should_end(iowq, start_time) && !loop_end_arg); } io_napi_remove_stale(ctx, is_stale); } /* * io_napi_init() - Init napi settings * @ctx: pointer to io-uring context structure * * Init napi settings in the io-uring context. 
*/ void io_napi_init(struct io_ring_ctx *ctx) { u64 sys_dt = READ_ONCE(sysctl_net_busy_poll) * NSEC_PER_USEC; INIT_LIST_HEAD(&ctx->napi_list); spin_lock_init(&ctx->napi_lock); ctx->napi_prefer_busy_poll = false; ctx->napi_busy_poll_dt = ns_to_ktime(sys_dt); ctx->napi_track_mode = IO_URING_NAPI_TRACKING_INACTIVE; } /* * io_napi_free() - Deallocate napi * @ctx: pointer to io-uring context structure * * Free the napi list and the hash table in the io-uring context. */ void io_napi_free(struct io_ring_ctx *ctx) { struct io_napi_entry *e; guard(spinlock)(&ctx->napi_lock); list_for_each_entry(e, &ctx->napi_list, list) { hash_del_rcu(&e->node); kfree_rcu(e, rcu); } INIT_LIST_HEAD_RCU(&ctx->napi_list); } static int io_napi_register_napi(struct io_ring_ctx *ctx, struct io_uring_napi *napi) { switch (napi->op_param) { case IO_URING_NAPI_TRACKING_DYNAMIC: case IO_URING_NAPI_TRACKING_STATIC: break; default: return -EINVAL; } /* clean the napi list for new settings */ io_napi_free(ctx); WRITE_ONCE(ctx->napi_track_mode, napi->op_param); WRITE_ONCE(ctx->napi_busy_poll_dt, napi->busy_poll_to * NSEC_PER_USEC); WRITE_ONCE(ctx->napi_prefer_busy_poll, !!napi->prefer_busy_poll); return 0; } /* * io_napi_register() - Register napi with io-uring * @ctx: pointer to io-uring context structure * @arg: pointer to io_uring_napi structure * * Register napi in the io-uring context. */ int io_register_napi(struct io_ring_ctx *ctx, void __user *arg) { const struct io_uring_napi curr = { .busy_poll_to = ktime_to_us(ctx->napi_busy_poll_dt), .prefer_busy_poll = ctx->napi_prefer_busy_poll, .op_param = ctx->napi_track_mode }; struct io_uring_napi napi; if (ctx->flags & IORING_SETUP_IOPOLL) return -EINVAL; if (copy_from_user(&napi, arg, sizeof(napi))) return -EFAULT; if (napi.pad[0] || napi.pad[1] || napi.resv) return -EINVAL; if (copy_to_user(arg, &curr, sizeof(curr))) return -EFAULT; switch (napi.opcode) { case IO_URING_NAPI_REGISTER_OP: return io_napi_register_napi(ctx, &napi); case IO_URING_NAPI_STATIC_ADD_ID: if (curr.op_param != IO_URING_NAPI_TRACKING_STATIC) return -EINVAL; return __io_napi_add_id(ctx, napi.op_param); case IO_URING_NAPI_STATIC_DEL_ID: if (curr.op_param != IO_URING_NAPI_TRACKING_STATIC) return -EINVAL; return __io_napi_del_id(ctx, napi.op_param); default: return -EINVAL; } } /* * io_napi_unregister() - Unregister napi with io-uring * @ctx: pointer to io-uring context structure * @arg: pointer to io_uring_napi structure * * Unregister napi. If arg has been specified copy the busy poll timeout and * prefer busy poll setting to the passed in structure. */ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg) { const struct io_uring_napi curr = { .busy_poll_to = ktime_to_us(ctx->napi_busy_poll_dt), .prefer_busy_poll = ctx->napi_prefer_busy_poll }; if (arg && copy_to_user(arg, &curr, sizeof(curr))) return -EFAULT; WRITE_ONCE(ctx->napi_busy_poll_dt, 0); WRITE_ONCE(ctx->napi_prefer_busy_poll, false); WRITE_ONCE(ctx->napi_track_mode, IO_URING_NAPI_TRACKING_INACTIVE); return 0; } /* * __io_napi_busy_loop() - execute busy poll loop * @ctx: pointer to io-uring context structure * @iowq: pointer to io wait queue * * Execute the busy poll loop and merge the spliced off list. 
*/ void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq) { if (ctx->flags & IORING_SETUP_SQPOLL) return; iowq->napi_busy_poll_dt = READ_ONCE(ctx->napi_busy_poll_dt); if (iowq->timeout != KTIME_MAX) { ktime_t dt = ktime_sub(iowq->timeout, io_get_time(ctx)); iowq->napi_busy_poll_dt = min_t(u64, iowq->napi_busy_poll_dt, dt); } iowq->napi_prefer_busy_poll = READ_ONCE(ctx->napi_prefer_busy_poll); io_napi_blocking_busy_loop(ctx, iowq); } /* * io_napi_sqpoll_busy_poll() - busy poll loop for sqpoll * @ctx: pointer to io-uring context structure * * Splice of the napi list and execute the napi busy poll loop. */ int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx) { bool is_stale = false; if (!READ_ONCE(ctx->napi_busy_poll_dt)) return 0; if (list_empty_careful(&ctx->napi_list)) return 0; scoped_guard(rcu) { is_stale = __io_napi_do_busy_loop(ctx, NULL, NULL); } io_napi_remove_stale(ctx, is_stale); return 1; } #endif
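From userspace, io_register_napi() is reached through io_uring_register(2) with IORING_REGISTER_NAPI; recent liburing wraps this. A sketch under the assumption of liburing >= 2.6 and a kernel exposing the opcode/op_param uAPI handled above (the enum names are taken from io_register_napi() and io_napi_register_napi(); treat the exact struct layout as coming from your <linux/io_uring.h>):

/*
 * Sketch only: enable dynamic NAPI tracking with a 50us busy-poll budget.
 * Assumes io_uring_register_napi()/io_uring_unregister_napi() are available
 * and that the enum values below are defined by the bundled uAPI header.
 */
#include <liburing.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_napi napi;
	int ret;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	memset(&napi, 0, sizeof(napi));		/* pad/resv must be zero */
	napi.busy_poll_to = 50;			/* usecs; becomes ctx->napi_busy_poll_dt */
	napi.prefer_busy_poll = 1;
	napi.opcode = IO_URING_NAPI_REGISTER_OP;
	napi.op_param = IO_URING_NAPI_TRACKING_DYNAMIC;

	ret = io_uring_register_napi(&ring, &napi);
	printf("register napi: %d\n", ret);	/* 0 on success, -errno on failure */

	io_uring_unregister_napi(&ring, &napi);
	io_uring_queue_exit(&ring);
	return ret ? 1 : 0;
}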
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright 2004-2011 Red Hat, Inc. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/fs.h> #include <linux/dlm.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/gfs2_ondisk.h> #include <linux/sched/signal.h> #include "incore.h" #include "glock.h" #include "glops.h" #include "recovery.h" #include "util.h" #include "sys.h" #include "trace_gfs2.h" /** * gfs2_update_stats - Update time based stats * @s: The stats to update (local or global) * @index: The index inside @s * @sample: New data to include */ static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index, s64 sample) { /* * @delta is the difference between the current rtt sample and the * running average srtt.
We add 1/8 of that to the srtt in order to * update the current srtt estimate. The variance estimate is a bit * more complicated. We subtract the current variance estimate from * the abs value of the @delta and add 1/4 of that to the running * total. That's equivalent to 3/4 of the current variance * estimate plus 1/4 of the abs of @delta. * * Note that the index points at the array entry containing the * smoothed mean value, and the variance is always in the following * entry * * Reference: TCP/IP Illustrated, vol 2, p. 831,832 * All times are in units of integer nanoseconds. Unlike the TCP/IP * case, they are not scaled fixed point. */ s64 delta = sample - s->stats[index]; s->stats[index] += (delta >> 3); index++; s->stats[index] += (s64)(abs(delta) - s->stats[index]) >> 2; } /** * gfs2_update_reply_times - Update locking statistics * @gl: The glock to update * * This assumes that gl->gl_dstamp has been set earlier. * * The rtt (lock round trip time) is an estimate of the time * taken to perform a dlm lock request. We update it on each * reply from the dlm. * * The blocking flag is set on the glock for all dlm requests * which may potentially block due to lock requests from other nodes. * DLM requests where the current lock state is exclusive, the * requested state is null (or unlocked) or where the TRY or * TRY_1CB flags are set are classified as non-blocking. All * other DLM requests are counted as (potentially) blocking. */ static inline void gfs2_update_reply_times(struct gfs2_glock *gl) { struct gfs2_pcpu_lkstats *lks; const unsigned gltype = gl->gl_name.ln_type; unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ? GFS2_LKS_SRTTB : GFS2_LKS_SRTT; s64 rtt; preempt_disable(); rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp)); lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats); gfs2_update_stats(&gl->gl_stats, index, rtt); /* Local */ gfs2_update_stats(&lks->lkstats[gltype], index, rtt); /* Global */ preempt_enable(); trace_gfs2_glock_lock_time(gl, rtt); } /** * gfs2_update_request_times - Update locking statistics * @gl: The glock to update * * The irt (lock inter-request times) measures the average time * between requests to the dlm. It is updated immediately before * each dlm call. */ static inline void gfs2_update_request_times(struct gfs2_glock *gl) { struct gfs2_pcpu_lkstats *lks; const unsigned gltype = gl->gl_name.ln_type; ktime_t dstamp; s64 irt; preempt_disable(); dstamp = gl->gl_dstamp; gl->gl_dstamp = ktime_get_real(); irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp)); lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats); gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt); /* Local */ gfs2_update_stats(&lks->lkstats[gltype], GFS2_LKS_SIRT, irt); /* Global */ preempt_enable(); } static void gdlm_ast(void *arg) { struct gfs2_glock *gl = arg; unsigned ret = gl->gl_state; /* If the glock is dead, we only react to a dlm_unlock() reply. 
*/ if (__lockref_is_dead(&gl->gl_lockref) && gl->gl_lksb.sb_status != -DLM_EUNLOCK) return; gfs2_update_reply_times(gl); BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED); if ((gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) && gl->gl_lksb.sb_lvbptr) memset(gl->gl_lksb.sb_lvbptr, 0, GDLM_LVB_SIZE); switch (gl->gl_lksb.sb_status) { case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */ if (gl->gl_ops->go_unlocked) gl->gl_ops->go_unlocked(gl); gfs2_glock_free(gl); return; case -DLM_ECANCEL: /* Cancel while getting lock */ ret |= LM_OUT_CANCELED; goto out; case -EAGAIN: /* Try lock fails */ case -EDEADLK: /* Deadlock detected */ goto out; case -ETIMEDOUT: /* Canceled due to timeout */ ret |= LM_OUT_ERROR; goto out; case 0: /* Success */ break; default: /* Something unexpected */ BUG(); } ret = gl->gl_req; if (gl->gl_lksb.sb_flags & DLM_SBF_ALTMODE) { if (gl->gl_req == LM_ST_SHARED) ret = LM_ST_DEFERRED; else if (gl->gl_req == LM_ST_DEFERRED) ret = LM_ST_SHARED; else BUG(); } /* * The GLF_INITIAL flag is initially set for new glocks. Upon the * first successful new (non-conversion) request, we clear this flag to * indicate that a DLM lock exists and that gl->gl_lksb.sb_lkid is the * identifier to use for identifying it. * * Any failed initial requests do not create a DLM lock, so we ignore * the gl->gl_lksb.sb_lkid values that come with such requests. */ clear_bit(GLF_INITIAL, &gl->gl_flags); gfs2_glock_complete(gl, ret); return; out: if (test_bit(GLF_INITIAL, &gl->gl_flags)) gl->gl_lksb.sb_lkid = 0; gfs2_glock_complete(gl, ret); } static void gdlm_bast(void *arg, int mode) { struct gfs2_glock *gl = arg; if (__lockref_is_dead(&gl->gl_lockref)) return; switch (mode) { case DLM_LOCK_EX: gfs2_glock_cb(gl, LM_ST_UNLOCKED); break; case DLM_LOCK_CW: gfs2_glock_cb(gl, LM_ST_DEFERRED); break; case DLM_LOCK_PR: gfs2_glock_cb(gl, LM_ST_SHARED); break; default: fs_err(gl->gl_name.ln_sbd, "unknown bast mode %d\n", mode); BUG(); } } /* convert gfs lock-state to dlm lock-mode */ static int make_mode(struct gfs2_sbd *sdp, const unsigned int lmstate) { switch (lmstate) { case LM_ST_UNLOCKED: return DLM_LOCK_NL; case LM_ST_EXCLUSIVE: return DLM_LOCK_EX; case LM_ST_DEFERRED: return DLM_LOCK_CW; case LM_ST_SHARED: return DLM_LOCK_PR; } fs_err(sdp, "unknown LM state %d\n", lmstate); BUG(); return -1; } /* Taken from fs/dlm/lock.c. */ static bool middle_conversion(int cur, int req) { return (cur == DLM_LOCK_PR && req == DLM_LOCK_CW) || (cur == DLM_LOCK_CW && req == DLM_LOCK_PR); } static bool down_conversion(int cur, int req) { return !middle_conversion(cur, req) && req < cur; } static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags, const int cur, const int req) { u32 lkf = 0; if (gl->gl_lksb.sb_lvbptr) lkf |= DLM_LKF_VALBLK; if (gfs_flags & LM_FLAG_TRY) lkf |= DLM_LKF_NOQUEUE; if (gfs_flags & LM_FLAG_TRY_1CB) { lkf |= DLM_LKF_NOQUEUE; lkf |= DLM_LKF_NOQUEUEBAST; } if (gfs_flags & LM_FLAG_ANY) { if (req == DLM_LOCK_PR) lkf |= DLM_LKF_ALTCW; else if (req == DLM_LOCK_CW) lkf |= DLM_LKF_ALTPR; else BUG(); } if (!test_bit(GLF_INITIAL, &gl->gl_flags)) { lkf |= DLM_LKF_CONVERT; /* * The DLM_LKF_QUECVT flag needs to be set for "first come, * first served" semantics, but it must only be set for * "upward" lock conversions or else DLM will reject the * request as invalid. 
*/ if (!down_conversion(cur, req)) lkf |= DLM_LKF_QUECVT; } return lkf; } static void gfs2_reverse_hex(char *c, u64 value) { *c = '0'; while (value) { *c-- = hex_asc[value & 0x0f]; value >>= 4; } } static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state, unsigned int flags) { struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; int cur, req; u32 lkf; char strname[GDLM_STRNAME_BYTES] = ""; int error; cur = make_mode(gl->gl_name.ln_sbd, gl->gl_state); req = make_mode(gl->gl_name.ln_sbd, req_state); lkf = make_flags(gl, flags, cur, req); gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT); gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT); if (test_bit(GLF_INITIAL, &gl->gl_flags)) { memset(strname, ' ', GDLM_STRNAME_BYTES - 1); strname[GDLM_STRNAME_BYTES - 1] = '\0'; gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type); gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number); gl->gl_dstamp = ktime_get_real(); } else { gfs2_update_request_times(gl); } /* * Submit the actual lock request. */ again: error = dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname, GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast); if (error == -EBUSY) { msleep(20); goto again; } return error; } static void gdlm_put_lock(struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; struct lm_lockstruct *ls = &sdp->sd_lockstruct; int error; BUG_ON(!__lockref_is_dead(&gl->gl_lockref)); if (test_bit(GLF_INITIAL, &gl->gl_flags)) { gfs2_glock_free(gl); return; } clear_bit(GLF_BLOCKING, &gl->gl_flags); gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT); gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT); gfs2_update_request_times(gl); /* don't want to call dlm if we've unmounted the lock protocol */ if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) { gfs2_glock_free(gl); return; } /* * When the lockspace is released, all remaining glocks will be * unlocked automatically. This is more efficient than unlocking them * individually, but when the lock is held in DLM_LOCK_EX or * DLM_LOCK_PW mode, the lock value block (LVB) will be lost. */ if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) && (!gl->gl_lksb.sb_lvbptr || gl->gl_state != LM_ST_EXCLUSIVE)) { gfs2_glock_free_later(gl); return; } again: error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK, NULL, gl); if (error == -EBUSY) { msleep(20); goto again; } if (error) { fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n", gl->gl_name.ln_type, (unsigned long long)gl->gl_name.ln_number, error); } } static void gdlm_cancel(struct gfs2_glock *gl) { struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl); } /* * dlm/gfs2 recovery coordination using dlm_recover callbacks * * 0. gfs2 checks for another cluster node withdraw, needing journal replay * 1. dlm_controld sees lockspace members change * 2. dlm_controld blocks dlm-kernel locking activity * 3. dlm_controld within dlm-kernel notifies gfs2 (recover_prep) * 4. dlm_controld starts and finishes its own user level recovery * 5. dlm_controld starts dlm-kernel dlm_recoverd to do kernel recovery * 6. dlm_recoverd notifies gfs2 of failed nodes (recover_slot) * 7. dlm_recoverd does its own lock recovery * 8. dlm_recoverd unblocks dlm-kernel locking activity * 9. dlm_recoverd notifies gfs2 when done (recover_done with new generation) * 10. gfs2_control updates control_lock lvb with new generation and jid bits * 11. gfs2_control enqueues journals for gfs2_recover to recover (maybe none) * 12. gfs2_recover dequeues and recovers journals of failed nodes * 13. 
gfs2_recover provides recovery results to gfs2_control (recovery_result) * 14. gfs2_control updates control_lock lvb jid bits for recovered journals * 15. gfs2_control unblocks normal locking when all journals are recovered * * - failures during recovery * * recover_prep() may set BLOCK_LOCKS (step 3) again before gfs2_control * clears BLOCK_LOCKS (step 15), e.g. another node fails while still * recovering for a prior failure. gfs2_control needs a way to detect * this so it can leave BLOCK_LOCKS set in step 15. This is managed using * the recover_block and recover_start values. * * recover_done() provides a new lockspace generation number each time it * is called (step 9). This generation number is saved as recover_start. * When recover_prep() is called, it sets BLOCK_LOCKS and sets * recover_block = recover_start. So, while recover_block is equal to * recover_start, BLOCK_LOCKS should remain set. (recover_spin must * be held around the BLOCK_LOCKS/recover_block/recover_start logic.) * * - more specific gfs2 steps in sequence above * * 3. recover_prep sets BLOCK_LOCKS and sets recover_block = recover_start * 6. recover_slot records any failed jids (maybe none) * 9. recover_done sets recover_start = new generation number * 10. gfs2_control sets control_lock lvb = new gen + bits for failed jids * 12. gfs2_recover does journal recoveries for failed jids identified above * 14. gfs2_control clears control_lock lvb bits for recovered jids * 15. gfs2_control checks if recover_block == recover_start (step 3 occured * again) then do nothing, otherwise if recover_start > recover_block * then clear BLOCK_LOCKS. * * - parallel recovery steps across all nodes * * All nodes attempt to update the control_lock lvb with the new generation * number and jid bits, but only the first to get the control_lock EX will * do so; others will see that it's already done (lvb already contains new * generation number.) * * . All nodes get the same recover_prep/recover_slot/recover_done callbacks * . All nodes attempt to set control_lock lvb gen + bits for the new gen * . One node gets control_lock first and writes the lvb, others see it's done * . All nodes attempt to recover jids for which they see control_lock bits set * . One node succeeds for a jid, and that one clears the jid bit in the lvb * . All nodes will eventually see all lvb bits clear and unblock locks * * - is there a problem with clearing an lvb bit that should be set * and missing a journal recovery? * * 1. jid fails * 2. lvb bit set for step 1 * 3. jid recovered for step 1 * 4. jid taken again (new mount) * 5. jid fails (for step 4) * 6. lvb bit set for step 5 (will already be set) * 7. lvb bit cleared for step 3 * * This is not a problem because the failure in step 5 does not * require recovery, because the mount in step 4 could not have * progressed far enough to unblock locks and access the fs. The * control_mount() function waits for all recoveries to be complete * for the latest lockspace generation before ever unblocking locks * and returning. The mount in step 4 waits until the recovery in * step 1 is done. * * - special case of first mounter: first node to mount the fs * * The first node to mount a gfs2 fs needs to check all the journals * and recover any that need recovery before other nodes are allowed * to mount the fs. (Others may begin mounting, but they must wait * for the first mounter to be done before taking locks on the fs * or accessing the fs.) This has two parts: * * 1. The mounted_lock tells a node it's the first to mount the fs. 
* Each node holds the mounted_lock in PR while it's mounted. * Each node tries to acquire the mounted_lock in EX when it mounts. * If a node is granted the mounted_lock EX it means there are no * other mounted nodes (no PR locks exist), and it is the first mounter. * The mounted_lock is demoted to PR when first recovery is done, so * others will fail to get an EX lock, but will get a PR lock. * * 2. The control_lock blocks others in control_mount() while the first * mounter is doing first mount recovery of all journals. * A mounting node needs to acquire control_lock in EX mode before * it can proceed. The first mounter holds control_lock in EX while doing * the first mount recovery, blocking mounts from other nodes, then demotes * control_lock to NL when it's done (others_may_mount/first_done), * allowing other nodes to continue mounting. * * first mounter: * control_lock EX/NOQUEUE success * mounted_lock EX/NOQUEUE success (no other PR, so no other mounters) * set first=1 * do first mounter recovery * mounted_lock EX->PR * control_lock EX->NL, write lvb generation * * other mounter: * control_lock EX/NOQUEUE success (if fail -EAGAIN, retry) * mounted_lock EX/NOQUEUE fail -EAGAIN (expected due to other mounters PR) * mounted_lock PR/NOQUEUE success * read lvb generation * control_lock EX->NL * set first=0 * * - mount during recovery * * If a node mounts while others are doing recovery (not first mounter), * the mounting node will get its initial recover_done() callback without * having seen any previous failures/callbacks. * * It must wait for all recoveries preceding its mount to be finished * before it unblocks locks. It does this by repeating the "other mounter" * steps above until the lvb generation number is >= its mount generation * number (from initial recover_done) and all lvb bits are clear. * * - control_lock lvb format * * 4 bytes generation number: the latest dlm lockspace generation number * from recover_done callback. Indicates the jid bitmap has been updated * to reflect all slot failures through that generation. * 4 bytes unused. * GDLM_LVB_SIZE-8 bytes of jid bit map. If bit N is set, it indicates * that jid N needs recovery. 
*/ #define JID_BITMAP_OFFSET 8 /* 4 byte generation number + 4 byte unused */ static void control_lvb_read(struct lm_lockstruct *ls, uint32_t *lvb_gen, char *lvb_bits) { __le32 gen; memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE); memcpy(&gen, lvb_bits, sizeof(__le32)); *lvb_gen = le32_to_cpu(gen); } static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen, char *lvb_bits) { __le32 gen; memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE); gen = cpu_to_le32(lvb_gen); memcpy(ls->ls_control_lvb, &gen, sizeof(__le32)); } static int all_jid_bits_clear(char *lvb) { return !memchr_inv(lvb + JID_BITMAP_OFFSET, 0, GDLM_LVB_SIZE - JID_BITMAP_OFFSET); } static void sync_wait_cb(void *arg) { struct lm_lockstruct *ls = arg; complete(&ls->ls_sync_wait); } static int sync_unlock(struct gfs2_sbd *sdp, struct dlm_lksb *lksb, char *name) { struct lm_lockstruct *ls = &sdp->sd_lockstruct; int error; error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls); if (error) { fs_err(sdp, "%s lkid %x error %d\n", name, lksb->sb_lkid, error); return error; } wait_for_completion(&ls->ls_sync_wait); if (lksb->sb_status != -DLM_EUNLOCK) { fs_err(sdp, "%s lkid %x status %d\n", name, lksb->sb_lkid, lksb->sb_status); return -1; } return 0; } static int sync_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags, unsigned int num, struct dlm_lksb *lksb, char *name) { struct lm_lockstruct *ls = &sdp->sd_lockstruct; char strname[GDLM_STRNAME_BYTES]; int error, status; memset(strname, 0, GDLM_STRNAME_BYTES); snprintf(strname, GDLM_STRNAME_BYTES, "%8x%16x", LM_TYPE_NONDISK, num); error = dlm_lock(ls->ls_dlm, mode, lksb, flags, strname, GDLM_STRNAME_BYTES - 1, 0, sync_wait_cb, ls, NULL); if (error) { fs_err(sdp, "%s lkid %x flags %x mode %d error %d\n", name, lksb->sb_lkid, flags, mode, error); return error; } wait_for_completion(&ls->ls_sync_wait); status = lksb->sb_status; if (status && status != -EAGAIN) { fs_err(sdp, "%s lkid %x flags %x mode %d status %d\n", name, lksb->sb_lkid, flags, mode, status); } return status; } static int mounted_unlock(struct gfs2_sbd *sdp) { struct lm_lockstruct *ls = &sdp->sd_lockstruct; return sync_unlock(sdp, &ls->ls_mounted_lksb, "mounted_lock"); } static int mounted_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags) { struct lm_lockstruct *ls = &sdp->sd_lockstruct; return sync_lock(sdp, mode, flags, GFS2_MOUNTED_LOCK, &ls->ls_mounted_lksb, "mounted_lock"); } static int control_unlock(struct gfs2_sbd *sdp) { struct lm_lockstruct *ls = &sdp->sd_lockstruct; return sync_unlock(sdp, &ls->ls_control_lksb, "control_lock"); } static int control_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags) { struct lm_lockstruct *ls = &sdp->sd_lockstruct; return sync_lock(sdp, mode, flags, GFS2_CONTROL_LOCK, &ls->ls_control_lksb, "control_lock"); } /** * remote_withdraw - react to a node withdrawing from the file system * @sdp: The superblock */ static void remote_withdraw(struct gfs2_sbd *sdp) { struct gfs2_jdesc *jd; int ret = 0, count = 0; list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) { if (jd->jd_jid == sdp->sd_lockstruct.ls_jid) continue; ret = gfs2_recover_journal(jd, true); if (ret) break; count++; } /* Now drop the additional reference we acquired */ fs_err(sdp, "Journals checked: %d, ret = %d.\n", count, ret); } static void gfs2_control_func(struct work_struct *work) { struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work); struct lm_lockstruct *ls = &sdp->sd_lockstruct; uint32_t block_gen, start_gen, lvb_gen, flags; int recover_set = 0; int 
write_lvb = 0; int recover_size; int i, error; /* First check for other nodes that may have done a withdraw. */ if (test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags)) { remote_withdraw(sdp); clear_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags); return; } spin_lock(&ls->ls_recover_spin); /* * No MOUNT_DONE means we're still mounting; control_mount() * will set this flag, after which this thread will take over * all further clearing of BLOCK_LOCKS. * * FIRST_MOUNT means this node is doing first mounter recovery, * for which recovery control is handled by * control_mount()/control_first_done(), not this thread. */ if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) || test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { spin_unlock(&ls->ls_recover_spin); return; } block_gen = ls->ls_recover_block; start_gen = ls->ls_recover_start; spin_unlock(&ls->ls_recover_spin); /* * Equal block_gen and start_gen implies we are between * recover_prep and recover_done callbacks, which means * dlm recovery is in progress and dlm locking is blocked. * There's no point trying to do any work until recover_done. */ if (block_gen == start_gen) return; /* * Propagate recover_submit[] and recover_result[] to lvb: * dlm_recoverd adds to recover_submit[] jids needing recovery * gfs2_recover adds to recover_result[] journal recovery results * * set lvb bit for jids in recover_submit[] if the lvb has not * yet been updated for the generation of the failure * * clear lvb bit for jids in recover_result[] if the result of * the journal recovery is SUCCESS */ error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_VALBLK); if (error) { fs_err(sdp, "control lock EX error %d\n", error); return; } control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits); spin_lock(&ls->ls_recover_spin); if (block_gen != ls->ls_recover_block || start_gen != ls->ls_recover_start) { fs_info(sdp, "recover generation %u block1 %u %u\n", start_gen, block_gen, ls->ls_recover_block); spin_unlock(&ls->ls_recover_spin); control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT); return; } recover_size = ls->ls_recover_size; if (lvb_gen <= start_gen) { /* * Clear lvb bits for jids we've successfully recovered. * Because all nodes attempt to recover failed journals, * a journal can be recovered multiple times successfully * in succession. Only the first will really do recovery, * the others find it clean, but still report a successful * recovery. So, another node may have already recovered * the jid and cleared the lvb bit for it. */ for (i = 0; i < recover_size; i++) { if (ls->ls_recover_result[i] != LM_RD_SUCCESS) continue; ls->ls_recover_result[i] = 0; if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) continue; __clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET); write_lvb = 1; } } if (lvb_gen == start_gen) { /* * Failed slots before start_gen are already set in lvb. */ for (i = 0; i < recover_size; i++) { if (!ls->ls_recover_submit[i]) continue; if (ls->ls_recover_submit[i] < lvb_gen) ls->ls_recover_submit[i] = 0; } } else if (lvb_gen < start_gen) { /* * Failed slots before start_gen are not yet set in lvb. 
*/ for (i = 0; i < recover_size; i++) { if (!ls->ls_recover_submit[i]) continue; if (ls->ls_recover_submit[i] < start_gen) { ls->ls_recover_submit[i] = 0; __set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET); } } /* even if there are no bits to set, we need to write the latest generation to the lvb */ write_lvb = 1; } else { /* * we should be getting a recover_done() for lvb_gen soon */ } spin_unlock(&ls->ls_recover_spin); if (write_lvb) { control_lvb_write(ls, start_gen, ls->ls_lvb_bits); flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK; } else { flags = DLM_LKF_CONVERT; } error = control_lock(sdp, DLM_LOCK_NL, flags); if (error) { fs_err(sdp, "control lock NL error %d\n", error); return; } /* * Everyone will see jid bits set in the lvb, run gfs2_recover_set(), * and clear a jid bit in the lvb if the recovery is a success. * Eventually all journals will be recovered, all jid bits will * be cleared in the lvb, and everyone will clear BLOCK_LOCKS. */ for (i = 0; i < recover_size; i++) { if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) { fs_info(sdp, "recover generation %u jid %d\n", start_gen, i); gfs2_recover_set(sdp, i); recover_set++; } } if (recover_set) return; /* * No more jid bits set in lvb, all recovery is done, unblock locks * (unless a new recover_prep callback has occurred blocking locks * again while working above) */ spin_lock(&ls->ls_recover_spin); if (ls->ls_recover_block == block_gen && ls->ls_recover_start == start_gen) { clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); spin_unlock(&ls->ls_recover_spin); fs_info(sdp, "recover generation %u done\n", start_gen); gfs2_glock_thaw(sdp); } else { fs_info(sdp, "recover generation %u block2 %u %u\n", start_gen, block_gen, ls->ls_recover_block); spin_unlock(&ls->ls_recover_spin); } } static int control_mount(struct gfs2_sbd *sdp) { struct lm_lockstruct *ls = &sdp->sd_lockstruct; uint32_t start_gen, block_gen, mount_gen, lvb_gen; int mounted_mode; int retries = 0; int error; memset(&ls->ls_mounted_lksb, 0, sizeof(struct dlm_lksb)); memset(&ls->ls_control_lksb, 0, sizeof(struct dlm_lksb)); memset(&ls->ls_control_lvb, 0, GDLM_LVB_SIZE); ls->ls_control_lksb.sb_lvbptr = ls->ls_control_lvb; init_completion(&ls->ls_sync_wait); set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_VALBLK); if (error) { fs_err(sdp, "control_mount control_lock NL error %d\n", error); return error; } error = mounted_lock(sdp, DLM_LOCK_NL, 0); if (error) { fs_err(sdp, "control_mount mounted_lock NL error %d\n", error); control_unlock(sdp); return error; } mounted_mode = DLM_LOCK_NL; restart: if (retries++ && signal_pending(current)) { error = -EINTR; goto fail; } /* * We always start with both locks in NL. control_lock is * demoted to NL below so we don't need to do it here. */ if (mounted_mode != DLM_LOCK_NL) { error = mounted_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT); if (error) goto fail; mounted_mode = DLM_LOCK_NL; } /* * Other nodes need to do some work in dlm recovery and gfs2_control * before the recover_done and control_lock will be ready for us below. * A delay here is not required but often avoids having to retry. */ msleep_interruptible(500); /* * Acquire control_lock in EX and mounted_lock in either EX or PR. * control_lock lvb keeps track of any pending journal recoveries. * mounted_lock indicates if any other nodes have the fs mounted.
*/ error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE|DLM_LKF_VALBLK); if (error == -EAGAIN) { goto restart; } else if (error) { fs_err(sdp, "control_mount control_lock EX error %d\n", error); goto fail; } /** * If we're a spectator, we don't want to take the lock in EX because * we cannot do the first-mount responsibility it implies: recovery. */ if (sdp->sd_args.ar_spectator) goto locks_done; error = mounted_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE); if (!error) { mounted_mode = DLM_LOCK_EX; goto locks_done; } else if (error != -EAGAIN) { fs_err(sdp, "control_mount mounted_lock EX error %d\n", error); goto fail; } error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE); if (!error) { mounted_mode = DLM_LOCK_PR; goto locks_done; } else { /* not even -EAGAIN should happen here */ fs_err(sdp, "control_mount mounted_lock PR error %d\n", error); goto fail; } locks_done: /* * If we got both locks above in EX, then we're the first mounter. * If not, then we need to wait for the control_lock lvb to be * updated by other mounted nodes to reflect our mount generation. * * In simple first mounter cases, first mounter will see zero lvb_gen, * but in cases where all existing nodes leave/fail before mounting * nodes finish control_mount, then all nodes will be mounting and * lvb_gen will be non-zero. */ control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits); if (lvb_gen == 0xFFFFFFFF) { /* special value to force mount attempts to fail */ fs_err(sdp, "control_mount control_lock disabled\n"); error = -EINVAL; goto fail; } if (mounted_mode == DLM_LOCK_EX) { /* first mounter, keep both EX while doing first recovery */ spin_lock(&ls->ls_recover_spin); clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags); set_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags); spin_unlock(&ls->ls_recover_spin); fs_info(sdp, "first mounter control generation %u\n", lvb_gen); return 0; } error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT); if (error) goto fail; /* * We are not first mounter, now we need to wait for the control_lock * lvb generation to be >= the generation from our first recover_done * and all lvb bits to be clear (no pending journal recoveries.) */ if (!all_jid_bits_clear(ls->ls_lvb_bits)) { /* journals need recovery, wait until all are clear */ fs_info(sdp, "control_mount wait for journal recovery\n"); goto restart; } spin_lock(&ls->ls_recover_spin); block_gen = ls->ls_recover_block; start_gen = ls->ls_recover_start; mount_gen = ls->ls_recover_mount; if (lvb_gen < mount_gen) { /* wait for mounted nodes to update control_lock lvb to our generation, which might include new recovery bits set */ if (sdp->sd_args.ar_spectator) { fs_info(sdp, "Recovery is required. 
Waiting for a " "non-spectator to mount.\n"); msleep_interruptible(1000); } else { fs_info(sdp, "control_mount wait1 block %u start %u " "mount %u lvb %u flags %lx\n", block_gen, start_gen, mount_gen, lvb_gen, ls->ls_recover_flags); } spin_unlock(&ls->ls_recover_spin); goto restart; } if (lvb_gen != start_gen) { /* wait for mounted nodes to update control_lock lvb to the latest recovery generation */ fs_info(sdp, "control_mount wait2 block %u start %u mount %u " "lvb %u flags %lx\n", block_gen, start_gen, mount_gen, lvb_gen, ls->ls_recover_flags); spin_unlock(&ls->ls_recover_spin); goto restart; } if (block_gen == start_gen) { /* dlm recovery in progress, wait for it to finish */ fs_info(sdp, "control_mount wait3 block %u start %u mount %u " "lvb %u flags %lx\n", block_gen, start_gen, mount_gen, lvb_gen, ls->ls_recover_flags); spin_unlock(&ls->ls_recover_spin); goto restart; } clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags); memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t)); memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t)); spin_unlock(&ls->ls_recover_spin); return 0; fail: mounted_unlock(sdp); control_unlock(sdp); return error; } static int control_first_done(struct gfs2_sbd *sdp) { struct lm_lockstruct *ls = &sdp->sd_lockstruct; uint32_t start_gen, block_gen; int error; restart: spin_lock(&ls->ls_recover_spin); start_gen = ls->ls_recover_start; block_gen = ls->ls_recover_block; if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags) || !test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) || !test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { /* sanity check, should not happen */ fs_err(sdp, "control_first_done start %u block %u flags %lx\n", start_gen, block_gen, ls->ls_recover_flags); spin_unlock(&ls->ls_recover_spin); control_unlock(sdp); return -1; } if (start_gen == block_gen) { /* * Wait for the end of a dlm recovery cycle to switch from * first mounter recovery. We can ignore any recover_slot * callbacks between the recover_prep and next recover_done * because we are still the first mounter and any failed nodes * have not fully mounted, so they don't need recovery. */ spin_unlock(&ls->ls_recover_spin); fs_info(sdp, "control_first_done wait gen %u\n", start_gen); wait_on_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY, TASK_UNINTERRUPTIBLE); goto restart; } clear_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags); set_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags); memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t)); memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t)); spin_unlock(&ls->ls_recover_spin); memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE); control_lvb_write(ls, start_gen, ls->ls_lvb_bits); error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT); if (error) fs_err(sdp, "control_first_done mounted PR error %d\n", error); error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT|DLM_LKF_VALBLK); if (error) fs_err(sdp, "control_first_done control NL error %d\n", error); return error; } /* * Expand static jid arrays if necessary (by increments of RECOVER_SIZE_INC) * to accommodate the largest slot number. 
(NB dlm slot numbers start at 1, * gfs2 jids start at 0, so jid = slot - 1) */ #define RECOVER_SIZE_INC 16 static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots, int num_slots) { struct lm_lockstruct *ls = &sdp->sd_lockstruct; uint32_t *submit = NULL; uint32_t *result = NULL; uint32_t old_size, new_size; int i, max_jid; if (!ls->ls_lvb_bits) { ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); if (!ls->ls_lvb_bits) return -ENOMEM; } max_jid = 0; for (i = 0; i < num_slots; i++) { if (max_jid < slots[i].slot - 1) max_jid = slots[i].slot - 1; } old_size = ls->ls_recover_size; new_size = old_size; while (new_size < max_jid + 1) new_size += RECOVER_SIZE_INC; if (new_size == old_size) return 0; submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS); result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS); if (!submit || !result) { kfree(submit); kfree(result); return -ENOMEM; } spin_lock(&ls->ls_recover_spin); memcpy(submit, ls->ls_recover_submit, old_size * sizeof(uint32_t)); memcpy(result, ls->ls_recover_result, old_size * sizeof(uint32_t)); kfree(ls->ls_recover_submit); kfree(ls->ls_recover_result); ls->ls_recover_submit = submit; ls->ls_recover_result = result; ls->ls_recover_size = new_size; spin_unlock(&ls->ls_recover_spin); return 0; } static void free_recover_size(struct lm_lockstruct *ls) { kfree(ls->ls_lvb_bits); kfree(ls->ls_recover_submit); kfree(ls->ls_recover_result); ls->ls_recover_submit = NULL; ls->ls_recover_result = NULL; ls->ls_recover_size = 0; ls->ls_lvb_bits = NULL; } /* dlm calls before it does lock recovery */ static void gdlm_recover_prep(void *arg) { struct gfs2_sbd *sdp = arg; struct lm_lockstruct *ls = &sdp->sd_lockstruct; if (gfs2_withdrawing_or_withdrawn(sdp)) { fs_err(sdp, "recover_prep ignored due to withdraw.\n"); return; } spin_lock(&ls->ls_recover_spin); ls->ls_recover_block = ls->ls_recover_start; set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags); if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) || test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { spin_unlock(&ls->ls_recover_spin); return; } set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); spin_unlock(&ls->ls_recover_spin); } /* dlm calls after recover_prep has been completed on all lockspace members; identifies slot/jid of failed member */ static void gdlm_recover_slot(void *arg, struct dlm_slot *slot) { struct gfs2_sbd *sdp = arg; struct lm_lockstruct *ls = &sdp->sd_lockstruct; int jid = slot->slot - 1; if (gfs2_withdrawing_or_withdrawn(sdp)) { fs_err(sdp, "recover_slot jid %d ignored due to withdraw.\n", jid); return; } spin_lock(&ls->ls_recover_spin); if (ls->ls_recover_size < jid + 1) { fs_err(sdp, "recover_slot jid %d gen %u short size %d\n", jid, ls->ls_recover_block, ls->ls_recover_size); spin_unlock(&ls->ls_recover_spin); return; } if (ls->ls_recover_submit[jid]) { fs_info(sdp, "recover_slot jid %d gen %u prev %u\n", jid, ls->ls_recover_block, ls->ls_recover_submit[jid]); } ls->ls_recover_submit[jid] = ls->ls_recover_block; spin_unlock(&ls->ls_recover_spin); } /* dlm calls after recover_slot and after it completes lock recovery */ static void gdlm_recover_done(void *arg, struct dlm_slot *slots, int num_slots, int our_slot, uint32_t generation) { struct gfs2_sbd *sdp = arg; struct lm_lockstruct *ls = &sdp->sd_lockstruct; if (gfs2_withdrawing_or_withdrawn(sdp)) { fs_err(sdp, "recover_done ignored due to withdraw.\n"); return; } /* ensure the ls jid arrays are large enough */ set_recover_size(sdp, slots, num_slots); spin_lock(&ls->ls_recover_spin); ls->ls_recover_start = 
generation; if (!ls->ls_recover_mount) { ls->ls_recover_mount = generation; ls->ls_jid = our_slot - 1; } if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0); clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags); smp_mb__after_atomic(); wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY); spin_unlock(&ls->ls_recover_spin); } /* gfs2_recover thread has a journal recovery result */ static void gdlm_recovery_result(struct gfs2_sbd *sdp, unsigned int jid, unsigned int result) { struct lm_lockstruct *ls = &sdp->sd_lockstruct; if (gfs2_withdrawing_or_withdrawn(sdp)) { fs_err(sdp, "recovery_result jid %d ignored due to withdraw.\n", jid); return; } if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags)) return; /* don't care about the recovery of own journal during mount */ if (jid == ls->ls_jid) return; spin_lock(&ls->ls_recover_spin); if (test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { spin_unlock(&ls->ls_recover_spin); return; } if (ls->ls_recover_size < jid + 1) { fs_err(sdp, "recovery_result jid %d short size %d\n", jid, ls->ls_recover_size); spin_unlock(&ls->ls_recover_spin); return; } fs_info(sdp, "recover jid %d result %s\n", jid, result == LM_RD_GAVEUP ? "busy" : "success"); ls->ls_recover_result[jid] = result; /* GAVEUP means another node is recovering the journal; delay our next attempt to recover it, to give the other node a chance to finish before trying again */ if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, result == LM_RD_GAVEUP ? HZ : 0); spin_unlock(&ls->ls_recover_spin); } static const struct dlm_lockspace_ops gdlm_lockspace_ops = { .recover_prep = gdlm_recover_prep, .recover_slot = gdlm_recover_slot, .recover_done = gdlm_recover_done, }; static int gdlm_mount(struct gfs2_sbd *sdp, const char *table) { struct lm_lockstruct *ls = &sdp->sd_lockstruct; char cluster[GFS2_LOCKNAME_LEN]; const char *fsname; uint32_t flags; int error, ops_result; /* * initialize everything */ INIT_DELAYED_WORK(&sdp->sd_control_work, gfs2_control_func); spin_lock_init(&ls->ls_recover_spin); ls->ls_recover_flags = 0; ls->ls_recover_mount = 0; ls->ls_recover_start = 0; ls->ls_recover_block = 0; ls->ls_recover_size = 0; ls->ls_recover_submit = NULL; ls->ls_recover_result = NULL; ls->ls_lvb_bits = NULL; error = set_recover_size(sdp, NULL, 0); if (error) goto fail; /* * prepare dlm_new_lockspace args */ fsname = strchr(table, ':'); if (!fsname) { fs_info(sdp, "no fsname found\n"); error = -EINVAL; goto fail_free; } memset(cluster, 0, sizeof(cluster)); memcpy(cluster, table, strlen(table) - strlen(fsname)); fsname++; flags = DLM_LSFL_NEWEXCL; /* * create/join lockspace */ error = dlm_new_lockspace(fsname, cluster, flags, GDLM_LVB_SIZE, &gdlm_lockspace_ops, sdp, &ops_result, &ls->ls_dlm); if (error) { fs_err(sdp, "dlm_new_lockspace error %d\n", error); goto fail_free; } if (ops_result < 0) { /* * dlm does not support ops callbacks, * old dlm_controld/gfs_controld are used, try without ops. */ fs_info(sdp, "dlm lockspace ops not used\n"); free_recover_size(ls); set_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags); return 0; } if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags)) { fs_err(sdp, "dlm lockspace ops disallow jid preset\n"); error = -EINVAL; goto fail_release; } /* * control_mount() uses control_lock to determine first mounter, * and for later mounts, waits for any recoveries to be cleared. 
*/ error = control_mount(sdp); if (error) { fs_err(sdp, "mount control error %d\n", error); goto fail_release; } ls->ls_first = !!test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags); clear_bit(SDF_NOJOURNALID, &sdp->sd_flags); smp_mb__after_atomic(); wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID); return 0; fail_release: dlm_release_lockspace(ls->ls_dlm, 2); fail_free: free_recover_size(ls); fail: return error; } static void gdlm_first_done(struct gfs2_sbd *sdp) { struct lm_lockstruct *ls = &sdp->sd_lockstruct; int error; if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags)) return; error = control_first_done(sdp); if (error) fs_err(sdp, "mount first_done error %d\n", error); } static void gdlm_unmount(struct gfs2_sbd *sdp) { struct lm_lockstruct *ls = &sdp->sd_lockstruct; if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags)) goto release; /* wait for gfs2_control_wq to be done with this mount */ spin_lock(&ls->ls_recover_spin); set_bit(DFL_UNMOUNT, &ls->ls_recover_flags); spin_unlock(&ls->ls_recover_spin); flush_delayed_work(&sdp->sd_control_work); /* mounted_lock and control_lock will be purged in dlm recovery */ release: if (ls->ls_dlm) { dlm_release_lockspace(ls->ls_dlm, 2); ls->ls_dlm = NULL; } free_recover_size(ls); } static const match_table_t dlm_tokens = { { Opt_jid, "jid=%d"}, { Opt_id, "id=%d"}, { Opt_first, "first=%d"}, { Opt_nodir, "nodir=%d"}, { Opt_err, NULL }, }; const struct lm_lockops gfs2_dlm_ops = { .lm_proto_name = "lock_dlm", .lm_mount = gdlm_mount, .lm_first_done = gdlm_first_done, .lm_recovery_result = gdlm_recovery_result, .lm_unmount = gdlm_unmount, .lm_put_lock = gdlm_put_lock, .lm_lock = gdlm_lock, .lm_cancel = gdlm_cancel, .lm_tokens = &dlm_tokens, };
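/*
 * Illustrative, user-space sketch of the control_lock LVB layout described
 * in the comment at the top of this file: a 4-byte little-endian generation
 * number, 4 unused bytes, then a jid bitmap.  LVB_SIZE, JID_BITMAP_OFF and
 * the helper names below are assumptions made for the example only; they
 * are not kernel API.
 */
#include <stdint.h>

#define LVB_SIZE	32	/* assumed lock value block size */
#define JID_BITMAP_OFF	8	/* 4 byte generation + 4 byte unused */

static void lvb_write_gen(unsigned char *lvb, uint32_t gen)
{
	/* store little-endian, as control_lvb_write() does via cpu_to_le32() */
	lvb[0] = gen & 0xff;
	lvb[1] = (gen >> 8) & 0xff;
	lvb[2] = (gen >> 16) & 0xff;
	lvb[3] = (gen >> 24) & 0xff;
}

static uint32_t lvb_read_gen(const unsigned char *lvb)
{
	return (uint32_t)lvb[0] | ((uint32_t)lvb[1] << 8) |
	       ((uint32_t)lvb[2] << 16) | ((uint32_t)lvb[3] << 24);
}

static void lvb_set_jid(unsigned char *lvb, unsigned int jid)
{
	/* bit N of the bitmap set means jid N needs recovery */
	lvb[JID_BITMAP_OFF + (jid / 8)] |= 1u << (jid % 8);
}

static int lvb_all_jids_clear(const unsigned char *lvb)
{
	for (int i = JID_BITMAP_OFF; i < LVB_SIZE; i++)
		if (lvb[i])
			return 0;
	return 1;
}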
// SPDX-License-Identifier: GPL-2.0 /* Sysctl interface for parport devices.
* * Authors: David Campbell * Tim Waugh <tim@cyberelk.demon.co.uk> * Philip Blundell <philb@gnu.org> * Andrea Arcangeli * Riccardo Facchetti <fizban@tin.it> * * based on work by Grant Guenther <grant@torque.net> * and Philip Blundell * * Cleaned up include files - Russell King <linux@arm.uk.linux.org> */ #include <linux/string.h> #include <linux/init.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/parport.h> #include <linux/ctype.h> #include <linux/sysctl.h> #include <linux/device.h> #include <linux/uaccess.h> #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) #define PARPORT_MIN_TIMESLICE_VALUE 1ul #define PARPORT_MAX_TIMESLICE_VALUE ((unsigned long) HZ) #define PARPORT_MIN_SPINTIME_VALUE 1 #define PARPORT_MAX_SPINTIME_VALUE 1000 static int do_active_device(const struct ctl_table *table, int write, void *result, size_t *lenp, loff_t *ppos) { struct parport *port = (struct parport *)table->extra1; char buffer[256]; struct pardevice *dev; int len = 0; if (write) /* can't happen anyway */ return -EACCES; if (*ppos) { *lenp = 0; return 0; } for (dev = port->devices; dev ; dev = dev->next) { if(dev == port->cad) { len += scnprintf(buffer, sizeof(buffer), "%s\n", dev->name); } } if(!len) { len += scnprintf(buffer, sizeof(buffer), "%s\n", "none"); } if (len > *lenp) len = *lenp; else *lenp = len; *ppos += len; memcpy(result, buffer, len); return 0; } #ifdef CONFIG_PARPORT_1284 static int do_autoprobe(const struct ctl_table *table, int write, void *result, size_t *lenp, loff_t *ppos) { struct parport_device_info *info = table->extra2; const char *str; char buffer[256]; int len = 0; if (write) /* permissions stop this */ return -EACCES; if (*ppos) { *lenp = 0; return 0; } if ((str = info->class_name) != NULL) len += scnprintf (buffer + len, sizeof(buffer) - len, "CLASS:%s;\n", str); if ((str = info->model) != NULL) len += scnprintf (buffer + len, sizeof(buffer) - len, "MODEL:%s;\n", str); if ((str = info->mfr) != NULL) len += scnprintf (buffer + len, sizeof(buffer) - len, "MANUFACTURER:%s;\n", str); if ((str = info->description) != NULL) len += scnprintf (buffer + len, sizeof(buffer) - len, "DESCRIPTION:%s;\n", str); if ((str = info->cmdset) != NULL) len += scnprintf (buffer + len, sizeof(buffer) - len, "COMMAND SET:%s;\n", str); if (len > *lenp) len = *lenp; else *lenp = len; *ppos += len; memcpy(result, buffer, len); return 0; } #endif /* IEEE1284.3 support. 
*/ static int do_hardware_base_addr(const struct ctl_table *table, int write, void *result, size_t *lenp, loff_t *ppos) { struct parport *port = (struct parport *)table->extra1; char buffer[64]; int len = 0; if (*ppos) { *lenp = 0; return 0; } if (write) /* permissions prevent this anyway */ return -EACCES; len += scnprintf (buffer, sizeof(buffer), "%lu\t%lu\n", port->base, port->base_hi); if (len > *lenp) len = *lenp; else *lenp = len; *ppos += len; memcpy(result, buffer, len); return 0; } static int do_hardware_irq(const struct ctl_table *table, int write, void *result, size_t *lenp, loff_t *ppos) { struct parport *port = (struct parport *)table->extra1; char buffer[20]; int len = 0; if (*ppos) { *lenp = 0; return 0; } if (write) /* permissions prevent this anyway */ return -EACCES; len += scnprintf (buffer, sizeof(buffer), "%d\n", port->irq); if (len > *lenp) len = *lenp; else *lenp = len; *ppos += len; memcpy(result, buffer, len); return 0; } static int do_hardware_dma(const struct ctl_table *table, int write, void *result, size_t *lenp, loff_t *ppos) { struct parport *port = (struct parport *)table->extra1; char buffer[20]; int len = 0; if (*ppos) { *lenp = 0; return 0; } if (write) /* permissions prevent this anyway */ return -EACCES; len += scnprintf (buffer, sizeof(buffer), "%d\n", port->dma); if (len > *lenp) len = *lenp; else *lenp = len; *ppos += len; memcpy(result, buffer, len); return 0; } static int do_hardware_modes(const struct ctl_table *table, int write, void *result, size_t *lenp, loff_t *ppos) { struct parport *port = (struct parport *)table->extra1; char buffer[40]; int len = 0; if (*ppos) { *lenp = 0; return 0; } if (write) /* permissions prevent this anyway */ return -EACCES; { #define printmode(x) \ do { \ if (port->modes & PARPORT_MODE_##x) \ len += scnprintf(buffer + len, sizeof(buffer) - len, "%s%s", f++ ? 
"," : "", #x); \ } while (0) int f = 0; printmode(PCSPP); printmode(TRISTATE); printmode(COMPAT); printmode(EPP); printmode(ECP); printmode(DMA); #undef printmode } buffer[len++] = '\n'; if (len > *lenp) len = *lenp; else *lenp = len; *ppos += len; memcpy(result, buffer, len); return 0; } static const unsigned long parport_min_timeslice_value = PARPORT_MIN_TIMESLICE_VALUE; static const unsigned long parport_max_timeslice_value = PARPORT_MAX_TIMESLICE_VALUE; static const int parport_min_spintime_value = PARPORT_MIN_SPINTIME_VALUE; static const int parport_max_spintime_value = PARPORT_MAX_SPINTIME_VALUE; struct parport_sysctl_table { struct ctl_table_header *port_header; struct ctl_table_header *devices_header; #ifdef CONFIG_PARPORT_1284 struct ctl_table vars[10]; #else struct ctl_table vars[5]; #endif /* IEEE 1284 support */ struct ctl_table device_dir[1]; }; static const struct parport_sysctl_table parport_sysctl_template = { .port_header = NULL, .devices_header = NULL, { { .procname = "spintime", .data = NULL, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = (void*) &parport_min_spintime_value, .extra2 = (void*) &parport_max_spintime_value }, { .procname = "base-addr", .data = NULL, .maxlen = 0, .mode = 0444, .proc_handler = do_hardware_base_addr }, { .procname = "irq", .data = NULL, .maxlen = 0, .mode = 0444, .proc_handler = do_hardware_irq }, { .procname = "dma", .data = NULL, .maxlen = 0, .mode = 0444, .proc_handler = do_hardware_dma }, { .procname = "modes", .data = NULL, .maxlen = 0, .mode = 0444, .proc_handler = do_hardware_modes }, #ifdef CONFIG_PARPORT_1284 { .procname = "autoprobe", .data = NULL, .maxlen = 0, .mode = 0444, .proc_handler = do_autoprobe }, { .procname = "autoprobe0", .data = NULL, .maxlen = 0, .mode = 0444, .proc_handler = do_autoprobe }, { .procname = "autoprobe1", .data = NULL, .maxlen = 0, .mode = 0444, .proc_handler = do_autoprobe }, { .procname = "autoprobe2", .data = NULL, .maxlen = 0, .mode = 0444, .proc_handler = do_autoprobe }, { .procname = "autoprobe3", .data = NULL, .maxlen = 0, .mode = 0444, .proc_handler = do_autoprobe }, #endif /* IEEE 1284 support */ }, { { .procname = "active", .data = NULL, .maxlen = 0, .mode = 0444, .proc_handler = do_active_device }, }, }; struct parport_device_sysctl_table { struct ctl_table_header *sysctl_header; struct ctl_table vars[1]; struct ctl_table device_dir[1]; }; static const struct parport_device_sysctl_table parport_device_sysctl_template = { .sysctl_header = NULL, { { .procname = "timeslice", .data = NULL, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = proc_doulongvec_ms_jiffies_minmax, .extra1 = (void*) &parport_min_timeslice_value, .extra2 = (void*) &parport_max_timeslice_value }, }, { { .procname = NULL, .data = NULL, .maxlen = 0, .mode = 0555, }, } }; struct parport_default_sysctl_table { struct ctl_table_header *sysctl_header; struct ctl_table vars[2]; }; static struct parport_default_sysctl_table parport_default_sysctl_table = { .sysctl_header = NULL, { { .procname = "timeslice", .data = &parport_default_timeslice, .maxlen = sizeof(parport_default_timeslice), .mode = 0644, .proc_handler = proc_doulongvec_ms_jiffies_minmax, .extra1 = (void*) &parport_min_timeslice_value, .extra2 = (void*) &parport_max_timeslice_value }, { .procname = "spintime", .data = &parport_default_spintime, .maxlen = sizeof(parport_default_spintime), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = (void*) &parport_min_spintime_value, .extra2 = (void*) 
&parport_max_spintime_value }, } }; int parport_proc_register(struct parport *port) { struct parport_sysctl_table *t; char *tmp_dir_path; int i, err = 0; t = kmemdup(&parport_sysctl_template, sizeof(*t), GFP_KERNEL); if (t == NULL) return -ENOMEM; t->device_dir[0].extra1 = port; t->vars[0].data = &port->spintime; for (i = 0; i < 5; i++) { t->vars[i].extra1 = port; #ifdef CONFIG_PARPORT_1284 t->vars[5 + i].extra2 = &port->probe_info[i]; #endif /* IEEE 1284 support */ } tmp_dir_path = kasprintf(GFP_KERNEL, "dev/parport/%s/devices", port->name); if (!tmp_dir_path) { err = -ENOMEM; goto exit_free_t; } t->devices_header = register_sysctl(tmp_dir_path, t->device_dir); if (t->devices_header == NULL) { err = -ENOENT; goto exit_free_tmp_dir_path; } kfree(tmp_dir_path); tmp_dir_path = kasprintf(GFP_KERNEL, "dev/parport/%s", port->name); if (!tmp_dir_path) { err = -ENOMEM; goto unregister_devices_h; } t->port_header = register_sysctl(tmp_dir_path, t->vars); if (t->port_header == NULL) { err = -ENOENT; goto unregister_devices_h; } port->sysctl_table = t; kfree(tmp_dir_path); return 0; unregister_devices_h: unregister_sysctl_table(t->devices_header); exit_free_tmp_dir_path: kfree(tmp_dir_path); exit_free_t: kfree(t); return err; } int parport_proc_unregister(struct parport *port) { if (port->sysctl_table) { struct parport_sysctl_table *t = port->sysctl_table; port->sysctl_table = NULL; unregister_sysctl_table(t->devices_header); unregister_sysctl_table(t->port_header); kfree(t); } return 0; } int parport_device_proc_register(struct pardevice *device) { struct parport_device_sysctl_table *t; struct parport * port = device->port; char *tmp_dir_path; int err = 0; t = kmemdup(&parport_device_sysctl_template, sizeof(*t), GFP_KERNEL); if (t == NULL) return -ENOMEM; /* Allocate a buffer for two paths: dev/parport/PORT/devices/DEVICE. */ tmp_dir_path = kasprintf(GFP_KERNEL, "dev/parport/%s/devices/%s", port->name, device->name); if (!tmp_dir_path) { err = -ENOMEM; goto exit_free_t; } t->vars[0].data = &device->timeslice; t->sysctl_header = register_sysctl(tmp_dir_path, t->vars); if (t->sysctl_header == NULL) { kfree(t); t = NULL; } device->sysctl_table = t; kfree(tmp_dir_path); return 0; exit_free_t: kfree(t); return err; } int parport_device_proc_unregister(struct pardevice *device) { if (device->sysctl_table) { struct parport_device_sysctl_table *t = device->sysctl_table; device->sysctl_table = NULL; unregister_sysctl_table(t->sysctl_header); kfree(t); } return 0; } static int __init parport_default_proc_register(void) { int ret; parport_default_sysctl_table.sysctl_header = register_sysctl("dev/parport/default", parport_default_sysctl_table.vars); if (!parport_default_sysctl_table.sysctl_header) return -ENOMEM; ret = parport_bus_init(); if (ret) { unregister_sysctl_table(parport_default_sysctl_table. sysctl_header); return ret; } return 0; } static void __exit parport_default_proc_unregister(void) { if (parport_default_sysctl_table.sysctl_header) { unregister_sysctl_table(parport_default_sysctl_table. 
sysctl_header); parport_default_sysctl_table.sysctl_header = NULL; } parport_bus_exit(); } #else /* no sysctl or no procfs*/ int parport_proc_register(struct parport *pp) { return 0; } int parport_proc_unregister(struct parport *pp) { return 0; } int parport_device_proc_register(struct pardevice *device) { return 0; } int parport_device_proc_unregister(struct pardevice *device) { return 0; } static int __init parport_default_proc_register (void) { return parport_bus_init(); } static void __exit parport_default_proc_unregister (void) { parport_bus_exit(); } #endif subsys_initcall(parport_default_proc_register) module_exit(parport_default_proc_unregister)
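/*
 * Hedged sketch of the read-only proc_handler pattern used throughout the
 * file above (do_hardware_irq, do_hardware_dma, ...): report EOF on a
 * repeated read, reject writes, format into a bounded buffer, clamp to the
 * caller's length and advance *ppos.  The name do_hardware_example and the
 * choice to report port->portnum are illustrative, not part of the driver.
 */
static int do_hardware_example(const struct ctl_table *table, int write,
			       void *result, size_t *lenp, loff_t *ppos)
{
	struct parport *port = (struct parport *)table->extra1;
	char buffer[20];
	int len = 0;

	if (*ppos) {		/* second read: report EOF */
		*lenp = 0;
		return 0;
	}

	if (write)		/* mode 0444 prevents this anyway */
		return -EACCES;

	len += scnprintf(buffer, sizeof(buffer), "%d\n", port->portnum);

	if (len > *lenp)
		len = *lenp;
	else
		*lenp = len;
	*ppos += len;
	memcpy(result, buffer, len);
	return 0;
}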
// SPDX-License-Identifier: GPL-2.0-or-later /* * net/sched/act_connmark.c netfilter connmark retriever action * skb mark is over-written * * Copyright (c) 2011 Felix Fietkau <nbd@openwrt.org> */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/pkt_cls.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <net/netlink.h> #include <net/pkt_sched.h> #include <net/act_api.h> #include <net/pkt_cls.h> #include <uapi/linux/tc_act/tc_connmark.h> #include <net/tc_act/tc_connmark.h> #include <net/tc_wrapper.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_zones.h> static struct tc_action_ops act_connmark_ops; TC_INDIRECT_SCOPE int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { const struct nf_conntrack_tuple_hash *thash; struct nf_conntrack_tuple tuple; enum ip_conntrack_info ctinfo; struct tcf_connmark_info *ca = to_connmark(a); struct tcf_connmark_parms *parms; struct nf_conntrack_zone zone; struct nf_conn *c; int proto; tcf_lastuse_update(&ca->tcf_tm); tcf_action_update_bstats(&ca->common, skb); parms = rcu_dereference_bh(ca->parms); switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): if (skb->len < sizeof(struct iphdr)) goto out; proto = NFPROTO_IPV4; break; case htons(ETH_P_IPV6): if (skb->len < sizeof(struct ipv6hdr)) goto out; proto = NFPROTO_IPV6; break; default: goto out; } c = nf_ct_get(skb, &ctinfo); if (c) { skb->mark = READ_ONCE(c->mark); goto count; } if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), proto, parms->net, &tuple)) goto out; zone.id = parms->zone; zone.dir = NF_CT_DEFAULT_ZONE_DIR; thash = nf_conntrack_find_get(parms->net, &zone, &tuple); if (!thash) goto out; c = nf_ct_tuplehash_to_ctrack(thash); skb->mark = READ_ONCE(c->mark); nf_ct_put(c); count: /* using overlimits stats to count how many packets marked */ tcf_action_inc_overlimit_qstats(&ca->common); out: return READ_ONCE(ca->tcf_action); } static const struct nla_policy connmark_policy[TCA_CONNMARK_MAX + 1] = { [TCA_CONNMARK_PARMS] = { .len = sizeof(struct tc_connmark) }, }; static int tcf_connmark_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, act_connmark_ops.net_id); struct tcf_connmark_parms *nparms, *oparms;
struct nlattr *tb[TCA_CONNMARK_MAX + 1]; bool bind = flags & TCA_ACT_FLAGS_BIND; struct tcf_chain *goto_ch = NULL; struct tcf_connmark_info *ci; struct tc_connmark *parm; int ret = 0, err; u32 index; if (!nla) return -EINVAL; ret = nla_parse_nested_deprecated(tb, TCA_CONNMARK_MAX, nla, connmark_policy, NULL); if (ret < 0) return ret; if (!tb[TCA_CONNMARK_PARMS]) return -EINVAL; nparms = kzalloc(sizeof(*nparms), GFP_KERNEL); if (!nparms) return -ENOMEM; parm = nla_data(tb[TCA_CONNMARK_PARMS]); index = parm->index; ret = tcf_idr_check_alloc(tn, &index, a, bind); if (!ret) { ret = tcf_idr_create_from_flags(tn, index, est, a, &act_connmark_ops, bind, flags); if (ret) { tcf_idr_cleanup(tn, index); err = ret; goto out_free; } ci = to_connmark(*a); nparms->net = net; nparms->zone = parm->zone; ret = ACT_P_CREATED; } else if (ret > 0) { ci = to_connmark(*a); if (bind) { err = ACT_P_BOUND; goto out_free; } if (!(flags & TCA_ACT_FLAGS_REPLACE)) { err = -EEXIST; goto release_idr; } nparms->net = rtnl_dereference(ci->parms)->net; nparms->zone = parm->zone; ret = 0; } else { err = ret; goto out_free; } err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); if (err < 0) goto release_idr; spin_lock_bh(&ci->tcf_lock); goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); oparms = rcu_replace_pointer(ci->parms, nparms, lockdep_is_held(&ci->tcf_lock)); spin_unlock_bh(&ci->tcf_lock); if (goto_ch) tcf_chain_put_by_act(goto_ch); if (oparms) kfree_rcu(oparms, rcu); return ret; release_idr: tcf_idr_release(*a, bind); out_free: kfree(nparms); return err; } static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { unsigned char *b = skb_tail_pointer(skb); struct tcf_connmark_info *ci = to_connmark(a); struct tc_connmark opt = { .index = ci->tcf_index, .refcnt = refcount_read(&ci->tcf_refcnt) - ref, .bindcnt = atomic_read(&ci->tcf_bindcnt) - bind, }; struct tcf_connmark_parms *parms; struct tcf_t t; spin_lock_bh(&ci->tcf_lock); parms = rcu_dereference_protected(ci->parms, lockdep_is_held(&ci->tcf_lock)); opt.action = ci->tcf_action; opt.zone = parms->zone; if (nla_put(skb, TCA_CONNMARK_PARMS, sizeof(opt), &opt)) goto nla_put_failure; tcf_tm_dump(&t, &ci->tcf_tm); if (nla_put_64bit(skb, TCA_CONNMARK_TM, sizeof(t), &t, TCA_CONNMARK_PAD)) goto nla_put_failure; spin_unlock_bh(&ci->tcf_lock); return skb->len; nla_put_failure: spin_unlock_bh(&ci->tcf_lock); nlmsg_trim(skb, b); return -1; } static void tcf_connmark_cleanup(struct tc_action *a) { struct tcf_connmark_info *ci = to_connmark(a); struct tcf_connmark_parms *parms; parms = rcu_dereference_protected(ci->parms, 1); if (parms) kfree_rcu(parms, rcu); } static struct tc_action_ops act_connmark_ops = { .kind = "connmark", .id = TCA_ID_CONNMARK, .owner = THIS_MODULE, .act = tcf_connmark_act, .dump = tcf_connmark_dump, .init = tcf_connmark_init, .cleanup = tcf_connmark_cleanup, .size = sizeof(struct tcf_connmark_info), }; MODULE_ALIAS_NET_ACT("connmark"); static __net_init int connmark_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, act_connmark_ops.net_id); return tc_action_net_init(net, tn, &act_connmark_ops); } static void __net_exit connmark_exit_net(struct list_head *net_list) { tc_action_net_exit(net_list, act_connmark_ops.net_id); } static struct pernet_operations connmark_net_ops = { .init = connmark_init_net, .exit_batch = connmark_exit_net, .id = &act_connmark_ops.net_id, .size = sizeof(struct tc_action_net), }; static int __init connmark_init_module(void) { return 
tcf_register_action(&act_connmark_ops, &connmark_net_ops); } static void __exit connmark_cleanup_module(void) { tcf_unregister_action(&act_connmark_ops, &connmark_net_ops); } module_init(connmark_init_module); module_exit(connmark_cleanup_module); MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>"); MODULE_DESCRIPTION("Connection tracking mark restoring"); MODULE_LICENSE("GPL");
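/*
 * Schematic illustration (a hypothetical helper, not part of the module
 * above) of the RCU parameter-swap pattern tcf_connmark_init() uses: the
 * datapath reads ci->parms under rcu_dereference_bh() without taking
 * tcf_lock, so an update allocates a fresh tcf_connmark_parms, publishes it
 * under the lock, and frees the old block only after a grace period.
 */
static int connmark_replace_zone_example(struct tcf_connmark_info *ci, u16 zone)
{
	struct tcf_connmark_parms *nparms, *oparms;

	nparms = kzalloc(sizeof(*nparms), GFP_KERNEL);
	if (!nparms)
		return -ENOMEM;

	spin_lock_bh(&ci->tcf_lock);
	oparms = rcu_dereference_protected(ci->parms,
					   lockdep_is_held(&ci->tcf_lock));
	nparms->net = oparms->net;
	nparms->zone = zone;
	rcu_assign_pointer(ci->parms, nparms);
	spin_unlock_bh(&ci->tcf_lock);

	kfree_rcu(oparms, rcu);	/* readers may still hold the old pointer */
	return 0;
}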
/* * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses.
You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/kernel.h> #include <linux/moduleparam.h> #include <linux/gfp.h> #include <net/sock.h> #include <linux/in.h> #include <linux/list.h> #include <linux/ratelimit.h> #include <linux/export.h> #include <linux/sizes.h> #include "rds.h" /* When transmitting messages in rds_send_xmit, we need to emerge from * time to time and briefly release the CPU. Otherwise the softlock watchdog * will kick our shin. * Also, it seems fairer to not let one busy connection stall all the * others. * * send_batch_count is the number of times we'll loop in send_xmit. Setting * it to 0 will restore the old behavior (where we looped until we had * drained the queue). */ static int send_batch_count = SZ_1K; module_param(send_batch_count, int, 0444); MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue"); static void rds_send_remove_from_sock(struct list_head *messages, int status); /* * Reset the send state. Callers must ensure that this doesn't race with * rds_send_xmit(). */ void rds_send_path_reset(struct rds_conn_path *cp) { struct rds_message *rm, *tmp; unsigned long flags; if (cp->cp_xmit_rm) { rm = cp->cp_xmit_rm; cp->cp_xmit_rm = NULL; /* Tell the user the RDMA op is no longer mapped by the * transport. 
This isn't entirely true (it's flushed out * independently) but as the connection is down, there's * no ongoing RDMA to/from that memory */ rds_message_unmapped(rm); rds_message_put(rm); } cp->cp_xmit_sg = 0; cp->cp_xmit_hdr_off = 0; cp->cp_xmit_data_off = 0; cp->cp_xmit_atomic_sent = 0; cp->cp_xmit_rdma_sent = 0; cp->cp_xmit_data_sent = 0; cp->cp_conn->c_map_queued = 0; cp->cp_unacked_packets = rds_sysctl_max_unacked_packets; cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes; /* Mark messages as retransmissions, and move them to the send q */ spin_lock_irqsave(&cp->cp_lock, flags); list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) { set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags); set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags); } list_splice_init(&cp->cp_retrans, &cp->cp_send_queue); spin_unlock_irqrestore(&cp->cp_lock, flags); } EXPORT_SYMBOL_GPL(rds_send_path_reset); static int acquire_in_xmit(struct rds_conn_path *cp) { return test_and_set_bit_lock(RDS_IN_XMIT, &cp->cp_flags) == 0; } static void release_in_xmit(struct rds_conn_path *cp) { clear_bit_unlock(RDS_IN_XMIT, &cp->cp_flags); /* * We don't use wait_on_bit()/wake_up_bit() because our waking is in a * hot path and finding waiters is very rare. We don't want to walk * the system-wide hashed waitqueue buckets in the fast path only to * almost never find waiters. */ if (waitqueue_active(&cp->cp_waitq)) wake_up_all(&cp->cp_waitq); } /* * We're making the conscious trade-off here to only send one message * down the connection at a time. * Pro: * - tx queueing is a simple fifo list * - reassembly is optional and easily done by transports per conn * - no per flow rx lookup at all, straight to the socket * - less per-frag memory and wire overhead * Con: * - queued acks can be delayed behind large messages * Depends: * - small message latency is higher behind queued large messages * - large message latency isn't starved by intervening small sends */ int rds_send_xmit(struct rds_conn_path *cp) { struct rds_connection *conn = cp->cp_conn; struct rds_message *rm; unsigned long flags; unsigned int tmp; struct scatterlist *sg; int ret = 0; LIST_HEAD(to_be_dropped); int batch_count; unsigned long send_gen = 0; int same_rm = 0; restart: batch_count = 0; /* * sendmsg calls here after having queued its message on the send * queue. We only have one task feeding the connection at a time. If * another thread is already feeding the queue then we back off. This * avoids blocking the caller and trading per-connection data between * caches per message. */ if (!acquire_in_xmit(cp)) { rds_stats_inc(s_send_lock_contention); ret = -ENOMEM; goto out; } if (rds_destroy_pending(cp->cp_conn)) { release_in_xmit(cp); ret = -ENETUNREACH; /* dont requeue send work */ goto out; } /* * we record the send generation after doing the xmit acquire. * if someone else manages to jump in and do some work, we'll use * this to avoid a goto restart farther down. * * The acquire_in_xmit() check above ensures that only one * caller can increment c_send_gen at any time. */ send_gen = READ_ONCE(cp->cp_send_gen) + 1; WRITE_ONCE(cp->cp_send_gen, send_gen); /* * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT, * we do the opposite to avoid races. */ if (!rds_conn_path_up(cp)) { release_in_xmit(cp); ret = 0; goto out; } if (conn->c_trans->xmit_path_prepare) conn->c_trans->xmit_path_prepare(cp); /* * spin trying to push headers and data down the connection until * the connection doesn't make forward progress. 
*/ while (1) { rm = cp->cp_xmit_rm; if (!rm) { same_rm = 0; } else { same_rm++; if (same_rm >= 4096) { rds_stats_inc(s_send_stuck_rm); ret = -EAGAIN; break; } } /* * If between sending messages, we can send a pending congestion * map update. */ if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) { rm = rds_cong_update_alloc(conn); if (IS_ERR(rm)) { ret = PTR_ERR(rm); break; } rm->data.op_active = 1; rm->m_inc.i_conn_path = cp; rm->m_inc.i_conn = cp->cp_conn; cp->cp_xmit_rm = rm; } /* * If not already working on one, grab the next message. * * cp_xmit_rm holds a ref while we're sending this message down * the connction. We can use this ref while holding the * send_sem.. rds_send_reset() is serialized with it. */ if (!rm) { unsigned int len; batch_count++; /* we want to process as big a batch as we can, but * we also want to avoid softlockups. If we've been * through a lot of messages, lets back off and see * if anyone else jumps in */ if (batch_count >= send_batch_count) goto over_batch; spin_lock_irqsave(&cp->cp_lock, flags); if (!list_empty(&cp->cp_send_queue)) { rm = list_entry(cp->cp_send_queue.next, struct rds_message, m_conn_item); rds_message_addref(rm); /* * Move the message from the send queue to the retransmit * list right away. */ list_move_tail(&rm->m_conn_item, &cp->cp_retrans); } spin_unlock_irqrestore(&cp->cp_lock, flags); if (!rm) break; /* Unfortunately, the way Infiniband deals with * RDMA to a bad MR key is by moving the entire * queue pair to error state. We could possibly * recover from that, but right now we drop the * connection. * Therefore, we never retransmit messages with RDMA ops. */ if (test_bit(RDS_MSG_FLUSH, &rm->m_flags) || (rm->rdma.op_active && test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))) { spin_lock_irqsave(&cp->cp_lock, flags); if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) list_move(&rm->m_conn_item, &to_be_dropped); spin_unlock_irqrestore(&cp->cp_lock, flags); continue; } /* Require an ACK every once in a while */ len = ntohl(rm->m_inc.i_hdr.h_len); if (cp->cp_unacked_packets == 0 || cp->cp_unacked_bytes < len) { set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags); cp->cp_unacked_packets = rds_sysctl_max_unacked_packets; cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes; rds_stats_inc(s_send_ack_required); } else { cp->cp_unacked_bytes -= len; cp->cp_unacked_packets--; } cp->cp_xmit_rm = rm; } /* The transport either sends the whole rdma or none of it */ if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) { rm->m_final_op = &rm->rdma; /* The transport owns the mapped memory for now. * You can't unmap it while it's on the send queue */ set_bit(RDS_MSG_MAPPED, &rm->m_flags); ret = conn->c_trans->xmit_rdma(conn, &rm->rdma); if (ret) { clear_bit(RDS_MSG_MAPPED, &rm->m_flags); wake_up_interruptible(&rm->m_flush_wait); break; } cp->cp_xmit_rdma_sent = 1; } if (rm->atomic.op_active && !cp->cp_xmit_atomic_sent) { rm->m_final_op = &rm->atomic; /* The transport owns the mapped memory for now. * You can't unmap it while it's on the send queue */ set_bit(RDS_MSG_MAPPED, &rm->m_flags); ret = conn->c_trans->xmit_atomic(conn, &rm->atomic); if (ret) { clear_bit(RDS_MSG_MAPPED, &rm->m_flags); wake_up_interruptible(&rm->m_flush_wait); break; } cp->cp_xmit_atomic_sent = 1; } /* * A number of cases require an RDS header to be sent * even if there is no data. * We permit 0-byte sends; rds-ping depends on this. * However, if there are exclusively attached silent ops, * we skip the hdr/data send, to enable silent operation. 
*/ if (rm->data.op_nents == 0) { int ops_present; int all_ops_are_silent = 1; ops_present = (rm->atomic.op_active || rm->rdma.op_active); if (rm->atomic.op_active && !rm->atomic.op_silent) all_ops_are_silent = 0; if (rm->rdma.op_active && !rm->rdma.op_silent) all_ops_are_silent = 0; if (ops_present && all_ops_are_silent && !rm->m_rdma_cookie) rm->data.op_active = 0; } if (rm->data.op_active && !cp->cp_xmit_data_sent) { rm->m_final_op = &rm->data; ret = conn->c_trans->xmit(conn, rm, cp->cp_xmit_hdr_off, cp->cp_xmit_sg, cp->cp_xmit_data_off); if (ret <= 0) break; if (cp->cp_xmit_hdr_off < sizeof(struct rds_header)) { tmp = min_t(int, ret, sizeof(struct rds_header) - cp->cp_xmit_hdr_off); cp->cp_xmit_hdr_off += tmp; ret -= tmp; } sg = &rm->data.op_sg[cp->cp_xmit_sg]; while (ret) { tmp = min_t(int, ret, sg->length - cp->cp_xmit_data_off); cp->cp_xmit_data_off += tmp; ret -= tmp; if (cp->cp_xmit_data_off == sg->length) { cp->cp_xmit_data_off = 0; sg++; cp->cp_xmit_sg++; BUG_ON(ret != 0 && cp->cp_xmit_sg == rm->data.op_nents); } } if (cp->cp_xmit_hdr_off == sizeof(struct rds_header) && (cp->cp_xmit_sg == rm->data.op_nents)) cp->cp_xmit_data_sent = 1; } /* * A rm will only take multiple times through this loop * if there is a data op. Thus, if the data is sent (or there was * none), then we're done with the rm. */ if (!rm->data.op_active || cp->cp_xmit_data_sent) { cp->cp_xmit_rm = NULL; cp->cp_xmit_sg = 0; cp->cp_xmit_hdr_off = 0; cp->cp_xmit_data_off = 0; cp->cp_xmit_rdma_sent = 0; cp->cp_xmit_atomic_sent = 0; cp->cp_xmit_data_sent = 0; rds_message_put(rm); } } over_batch: if (conn->c_trans->xmit_path_complete) conn->c_trans->xmit_path_complete(cp); release_in_xmit(cp); /* Nuke any messages we decided not to retransmit. */ if (!list_empty(&to_be_dropped)) { /* irqs on here, so we can put(), unlike above */ list_for_each_entry(rm, &to_be_dropped, m_conn_item) rds_message_put(rm); rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED); } /* * Other senders can queue a message after we last test the send queue * but before we clear RDS_IN_XMIT. In that case they'd back off and * not try and send their newly queued message. We need to check the * send queue after having cleared RDS_IN_XMIT so that their message * doesn't get stuck on the send queue. * * If the transport cannot continue (i.e ret != 0), then it must * call us when more room is available, such as from the tx * completion handler. 
* * We have an extra generation check here so that if someone manages * to jump in after our release_in_xmit, we'll see that they have done * some work and we will skip our goto */ if (ret == 0) { bool raced; smp_mb(); raced = send_gen != READ_ONCE(cp->cp_send_gen); if ((test_bit(0, &conn->c_map_queued) || !list_empty(&cp->cp_send_queue)) && !raced) { if (batch_count < send_batch_count) goto restart; rcu_read_lock(); if (rds_destroy_pending(cp->cp_conn)) ret = -ENETUNREACH; else queue_delayed_work(rds_wq, &cp->cp_send_w, 1); rcu_read_unlock(); } else if (raced) { rds_stats_inc(s_send_lock_queue_raced); } } out: return ret; } EXPORT_SYMBOL_GPL(rds_send_xmit); static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm) { u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len); assert_spin_locked(&rs->rs_lock); BUG_ON(rs->rs_snd_bytes < len); rs->rs_snd_bytes -= len; if (rs->rs_snd_bytes == 0) rds_stats_inc(s_send_queue_empty); } static inline int rds_send_is_acked(struct rds_message *rm, u64 ack, is_acked_func is_acked) { if (is_acked) return is_acked(rm, ack); return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack; } /* * This is pretty similar to what happens below in the ACK * handling code - except that we call here as soon as we get * the IB send completion on the RDMA op and the accompanying * message. */ void rds_rdma_send_complete(struct rds_message *rm, int status) { struct rds_sock *rs = NULL; struct rm_rdma_op *ro; struct rds_notifier *notifier; unsigned long flags; spin_lock_irqsave(&rm->m_rs_lock, flags); ro = &rm->rdma; if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) && ro->op_active && ro->op_notify && ro->op_notifier) { notifier = ro->op_notifier; rs = rm->m_rs; sock_hold(rds_rs_to_sk(rs)); notifier->n_status = status; spin_lock(&rs->rs_lock); list_add_tail(&notifier->n_list, &rs->rs_notify_queue); spin_unlock(&rs->rs_lock); ro->op_notifier = NULL; } spin_unlock_irqrestore(&rm->m_rs_lock, flags); if (rs) { rds_wake_sk_sleep(rs); sock_put(rds_rs_to_sk(rs)); } } EXPORT_SYMBOL_GPL(rds_rdma_send_complete); /* * Just like above, except looks at atomic op */ void rds_atomic_send_complete(struct rds_message *rm, int status) { struct rds_sock *rs = NULL; struct rm_atomic_op *ao; struct rds_notifier *notifier; unsigned long flags; spin_lock_irqsave(&rm->m_rs_lock, flags); ao = &rm->atomic; if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) && ao->op_active && ao->op_notify && ao->op_notifier) { notifier = ao->op_notifier; rs = rm->m_rs; sock_hold(rds_rs_to_sk(rs)); notifier->n_status = status; spin_lock(&rs->rs_lock); list_add_tail(&notifier->n_list, &rs->rs_notify_queue); spin_unlock(&rs->rs_lock); ao->op_notifier = NULL; } spin_unlock_irqrestore(&rm->m_rs_lock, flags); if (rs) { rds_wake_sk_sleep(rs); sock_put(rds_rs_to_sk(rs)); } } EXPORT_SYMBOL_GPL(rds_atomic_send_complete); /* * This is the same as rds_rdma_send_complete except we * don't do any locking - we have all the ingredients (message, * socket, socket lock) and can just move the notifier. 
*/ static inline void __rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status) { struct rm_rdma_op *ro; struct rm_atomic_op *ao; ro = &rm->rdma; if (ro->op_active && ro->op_notify && ro->op_notifier) { ro->op_notifier->n_status = status; list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue); ro->op_notifier = NULL; } ao = &rm->atomic; if (ao->op_active && ao->op_notify && ao->op_notifier) { ao->op_notifier->n_status = status; list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue); ao->op_notifier = NULL; } /* No need to wake the app - caller does this */ } /* * This removes messages from the socket's list if they're on it. The list * argument must be private to the caller, we must be able to modify it * without locks. The messages must have a reference held for their * position on the list. This function will drop that reference after * removing the messages from the 'messages' list regardless of if it found * the messages on the socket list or not. */ static void rds_send_remove_from_sock(struct list_head *messages, int status) { unsigned long flags; struct rds_sock *rs = NULL; struct rds_message *rm; while (!list_empty(messages)) { int was_on_sock = 0; rm = list_entry(messages->next, struct rds_message, m_conn_item); list_del_init(&rm->m_conn_item); /* * If we see this flag cleared then we're *sure* that someone * else beat us to removing it from the sock. If we race * with their flag update we'll get the lock and then really * see that the flag has been cleared. * * The message spinlock makes sure nobody clears rm->m_rs * while we're messing with it. It does not prevent the * message from being removed from the socket, though. */ spin_lock_irqsave(&rm->m_rs_lock, flags); if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) goto unlock_and_drop; if (rs != rm->m_rs) { if (rs) { rds_wake_sk_sleep(rs); sock_put(rds_rs_to_sk(rs)); } rs = rm->m_rs; if (rs) sock_hold(rds_rs_to_sk(rs)); } if (!rs) goto unlock_and_drop; spin_lock(&rs->rs_lock); if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) { struct rm_rdma_op *ro = &rm->rdma; struct rds_notifier *notifier; list_del_init(&rm->m_sock_item); rds_send_sndbuf_remove(rs, rm); if (ro->op_active && ro->op_notifier && (ro->op_notify || (ro->op_recverr && status))) { notifier = ro->op_notifier; list_add_tail(&notifier->n_list, &rs->rs_notify_queue); if (!notifier->n_status) notifier->n_status = status; rm->rdma.op_notifier = NULL; } was_on_sock = 1; } spin_unlock(&rs->rs_lock); unlock_and_drop: spin_unlock_irqrestore(&rm->m_rs_lock, flags); rds_message_put(rm); if (was_on_sock) rds_message_put(rm); } if (rs) { rds_wake_sk_sleep(rs); sock_put(rds_rs_to_sk(rs)); } } /* * Transports call here when they've determined that the receiver queued * messages up to, and including, the given sequence number. Messages are * moved to the retrans queue when rds_send_xmit picks them off the send * queue. This means that in the TCP case, the message may not have been * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked * checks the RDS_MSG_HAS_ACK_SEQ bit. 
*/ void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack, is_acked_func is_acked) { struct rds_message *rm, *tmp; unsigned long flags; LIST_HEAD(list); spin_lock_irqsave(&cp->cp_lock, flags); list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) { if (!rds_send_is_acked(rm, ack, is_acked)) break; list_move(&rm->m_conn_item, &list); clear_bit(RDS_MSG_ON_CONN, &rm->m_flags); } /* order flag updates with spin locks */ if (!list_empty(&list)) smp_mb__after_atomic(); spin_unlock_irqrestore(&cp->cp_lock, flags); /* now remove the messages from the sock list as needed */ rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS); } EXPORT_SYMBOL_GPL(rds_send_path_drop_acked); void rds_send_drop_acked(struct rds_connection *conn, u64 ack, is_acked_func is_acked) { WARN_ON(conn->c_trans->t_mp_capable); rds_send_path_drop_acked(&conn->c_path[0], ack, is_acked); } EXPORT_SYMBOL_GPL(rds_send_drop_acked); void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in6 *dest) { struct rds_message *rm, *tmp; struct rds_connection *conn; struct rds_conn_path *cp; unsigned long flags; LIST_HEAD(list); /* get all the messages we're dropping under the rs lock */ spin_lock_irqsave(&rs->rs_lock, flags); list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) { if (dest && (!ipv6_addr_equal(&dest->sin6_addr, &rm->m_daddr) || dest->sin6_port != rm->m_inc.i_hdr.h_dport)) continue; list_move(&rm->m_sock_item, &list); rds_send_sndbuf_remove(rs, rm); clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags); } /* order flag updates with the rs lock */ smp_mb__after_atomic(); spin_unlock_irqrestore(&rs->rs_lock, flags); if (list_empty(&list)) return; /* Remove the messages from the conn */ list_for_each_entry(rm, &list, m_sock_item) { conn = rm->m_inc.i_conn; if (conn->c_trans->t_mp_capable) cp = rm->m_inc.i_conn_path; else cp = &conn->c_path[0]; spin_lock_irqsave(&cp->cp_lock, flags); /* * Maybe someone else beat us to removing rm from the conn. * If we race with their flag update we'll get the lock and * then really see that the flag has been cleared. */ if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) { spin_unlock_irqrestore(&cp->cp_lock, flags); continue; } list_del_init(&rm->m_conn_item); spin_unlock_irqrestore(&cp->cp_lock, flags); /* * Couldn't grab m_rs_lock in top loop (lock ordering), * but we can now. */ spin_lock_irqsave(&rm->m_rs_lock, flags); spin_lock(&rs->rs_lock); __rds_send_complete(rs, rm, RDS_RDMA_CANCELED); spin_unlock(&rs->rs_lock); spin_unlock_irqrestore(&rm->m_rs_lock, flags); rds_message_put(rm); } rds_wake_sk_sleep(rs); while (!list_empty(&list)) { rm = list_entry(list.next, struct rds_message, m_sock_item); list_del_init(&rm->m_sock_item); rds_message_wait(rm); /* just in case the code above skipped this message * because RDS_MSG_ON_CONN wasn't set, run it again here * taking m_rs_lock is the only thing that keeps us * from racing with ack processing. */ spin_lock_irqsave(&rm->m_rs_lock, flags); spin_lock(&rs->rs_lock); __rds_send_complete(rs, rm, RDS_RDMA_CANCELED); spin_unlock(&rs->rs_lock); spin_unlock_irqrestore(&rm->m_rs_lock, flags); rds_message_put(rm); } } /* * we only want this to fire once so we use the callers 'queued'. It's * possible that another thread can race with us and remove the * message from the flow with RDS_CANCEL_SENT_TO. 
*/ static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn, struct rds_conn_path *cp, struct rds_message *rm, __be16 sport, __be16 dport, int *queued) { unsigned long flags; u32 len; if (*queued) goto out; len = be32_to_cpu(rm->m_inc.i_hdr.h_len); /* this is the only place which holds both the socket's rs_lock * and the connection's c_lock */ spin_lock_irqsave(&rs->rs_lock, flags); /* * If there is a little space in sndbuf, we don't queue anything, * and userspace gets -EAGAIN. But poll() indicates there's send * room. This can lead to bad behavior (spinning) if snd_bytes isn't * freed up by incoming acks. So we check the *old* value of * rs_snd_bytes here to allow the last msg to exceed the buffer, * and poll() now knows no more data can be sent. */ if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) { rs->rs_snd_bytes += len; /* let recv side know we are close to send space exhaustion. * This is probably not the optimal way to do it, as this * means we set the flag on *all* messages as soon as our * throughput hits a certain threshold. */ if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2) set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags); list_add_tail(&rm->m_sock_item, &rs->rs_send_queue); set_bit(RDS_MSG_ON_SOCK, &rm->m_flags); rds_message_addref(rm); sock_hold(rds_rs_to_sk(rs)); rm->m_rs = rs; /* The code ordering is a little weird, but we're trying to minimize the time we hold c_lock */ rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0); rm->m_inc.i_conn = conn; rm->m_inc.i_conn_path = cp; rds_message_addref(rm); spin_lock(&cp->cp_lock); rm->m_inc.i_hdr.h_sequence = cpu_to_be64(cp->cp_next_tx_seq++); list_add_tail(&rm->m_conn_item, &cp->cp_send_queue); set_bit(RDS_MSG_ON_CONN, &rm->m_flags); spin_unlock(&cp->cp_lock); rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n", rm, len, rs, rs->rs_snd_bytes, (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence)); *queued = 1; } spin_unlock_irqrestore(&rs->rs_lock, flags); out: return *queued; } /* * rds_message is getting to be quite complicated, and we'd like to allocate * it all in one go. This figures out how big it needs to be up front. 
*/ static int rds_rm_size(struct msghdr *msg, int num_sgs, struct rds_iov_vector_arr *vct) { struct cmsghdr *cmsg; int size = 0; int cmsg_groups = 0; int retval; bool zcopy_cookie = false; struct rds_iov_vector *iov, *tmp_iov; if (num_sgs < 0) return -EINVAL; for_each_cmsghdr(cmsg, msg) { if (!CMSG_OK(msg, cmsg)) return -EINVAL; if (cmsg->cmsg_level != SOL_RDS) continue; switch (cmsg->cmsg_type) { case RDS_CMSG_RDMA_ARGS: if (vct->indx >= vct->len) { vct->len += vct->incr; tmp_iov = krealloc(vct->vec, vct->len * sizeof(struct rds_iov_vector), GFP_KERNEL); if (!tmp_iov) { vct->len -= vct->incr; return -ENOMEM; } vct->vec = tmp_iov; } iov = &vct->vec[vct->indx]; memset(iov, 0, sizeof(struct rds_iov_vector)); vct->indx++; cmsg_groups |= 1; retval = rds_rdma_extra_size(CMSG_DATA(cmsg), iov); if (retval < 0) return retval; size += retval; break; case RDS_CMSG_ZCOPY_COOKIE: zcopy_cookie = true; fallthrough; case RDS_CMSG_RDMA_DEST: case RDS_CMSG_RDMA_MAP: cmsg_groups |= 2; /* these are valid but do no add any size */ break; case RDS_CMSG_ATOMIC_CSWP: case RDS_CMSG_ATOMIC_FADD: case RDS_CMSG_MASKED_ATOMIC_CSWP: case RDS_CMSG_MASKED_ATOMIC_FADD: cmsg_groups |= 1; size += sizeof(struct scatterlist); break; default: return -EINVAL; } } if ((msg->msg_flags & MSG_ZEROCOPY) && !zcopy_cookie) return -EINVAL; size += num_sgs * sizeof(struct scatterlist); /* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */ if (cmsg_groups == 3) return -EINVAL; return size; } static int rds_cmsg_zcopy(struct rds_sock *rs, struct rds_message *rm, struct cmsghdr *cmsg) { u32 *cookie; if (cmsg->cmsg_len < CMSG_LEN(sizeof(*cookie)) || !rm->data.op_mmp_znotifier) return -EINVAL; cookie = CMSG_DATA(cmsg); rm->data.op_mmp_znotifier->z_cookie = *cookie; return 0; } static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm, struct msghdr *msg, int *allocated_mr, struct rds_iov_vector_arr *vct) { struct cmsghdr *cmsg; int ret = 0, ind = 0; for_each_cmsghdr(cmsg, msg) { if (!CMSG_OK(msg, cmsg)) return -EINVAL; if (cmsg->cmsg_level != SOL_RDS) continue; /* As a side effect, RDMA_DEST and RDMA_MAP will set * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr. */ switch (cmsg->cmsg_type) { case RDS_CMSG_RDMA_ARGS: if (ind >= vct->indx) return -ENOMEM; ret = rds_cmsg_rdma_args(rs, rm, cmsg, &vct->vec[ind]); ind++; break; case RDS_CMSG_RDMA_DEST: ret = rds_cmsg_rdma_dest(rs, rm, cmsg); break; case RDS_CMSG_RDMA_MAP: ret = rds_cmsg_rdma_map(rs, rm, cmsg); if (!ret) *allocated_mr = 1; else if (ret == -ENODEV) /* Accommodate the get_mr() case which can fail * if connection isn't established yet. */ ret = -EAGAIN; break; case RDS_CMSG_ATOMIC_CSWP: case RDS_CMSG_ATOMIC_FADD: case RDS_CMSG_MASKED_ATOMIC_CSWP: case RDS_CMSG_MASKED_ATOMIC_FADD: ret = rds_cmsg_atomic(rs, rm, cmsg); break; case RDS_CMSG_ZCOPY_COOKIE: ret = rds_cmsg_zcopy(rs, rm, cmsg); break; default: return -EINVAL; } if (ret) break; } return ret; } static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn, int nonblock) { int hash; if (conn->c_npaths == 0) hash = RDS_MPATH_HASH(rs, RDS_MPATH_WORKERS); else hash = RDS_MPATH_HASH(rs, conn->c_npaths); if (conn->c_npaths == 0 && hash != 0) { rds_send_ping(conn, 0); /* The underlying connection is not up yet. Need to wait * until it is up to be sure that the non-zero c_path can be * used. But if we are interrupted, we have to use the zero * c_path in case the connection ends up being non-MP capable. 
*/ if (conn->c_npaths == 0) { /* Cannot wait for the connection be made, so just use * the base c_path. */ if (nonblock) return 0; if (wait_event_interruptible(conn->c_hs_waitq, conn->c_npaths != 0)) hash = 0; } if (conn->c_npaths == 1) hash = 0; } return hash; } static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes) { struct rds_rdma_args *args; struct cmsghdr *cmsg; for_each_cmsghdr(cmsg, msg) { if (!CMSG_OK(msg, cmsg)) return -EINVAL; if (cmsg->cmsg_level != SOL_RDS) continue; if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) { if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))) return -EINVAL; args = CMSG_DATA(cmsg); *rdma_bytes += args->remote_vec.bytes; } } return 0; } int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) { struct sock *sk = sock->sk; struct rds_sock *rs = rds_sk_to_rs(sk); DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name); __be16 dport; struct rds_message *rm = NULL; struct rds_connection *conn; int ret = 0; int queued = 0, allocated_mr = 0; int nonblock = msg->msg_flags & MSG_DONTWAIT; long timeo = sock_sndtimeo(sk, nonblock); struct rds_conn_path *cpath; struct in6_addr daddr; __u32 scope_id = 0; size_t rdma_payload_len = 0; bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) && sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY)); int num_sgs = DIV_ROUND_UP(payload_len, PAGE_SIZE); int namelen; struct rds_iov_vector_arr vct; int ind; memset(&vct, 0, sizeof(vct)); /* expect 1 RDMA CMSG per rds_sendmsg. can still grow if more needed. */ vct.incr = 1; /* Mirror Linux UDP mirror of BSD error message compatibility */ /* XXX: Perhaps MSG_MORE someday */ if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT | MSG_ZEROCOPY)) { ret = -EOPNOTSUPP; goto out; } namelen = msg->msg_namelen; if (namelen != 0) { if (namelen < sizeof(*usin)) { ret = -EINVAL; goto out; } switch (usin->sin_family) { case AF_INET: if (usin->sin_addr.s_addr == htonl(INADDR_ANY) || usin->sin_addr.s_addr == htonl(INADDR_BROADCAST) || ipv4_is_multicast(usin->sin_addr.s_addr)) { ret = -EINVAL; goto out; } ipv6_addr_set_v4mapped(usin->sin_addr.s_addr, &daddr); dport = usin->sin_port; break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: { int addr_type; if (namelen < sizeof(*sin6)) { ret = -EINVAL; goto out; } addr_type = ipv6_addr_type(&sin6->sin6_addr); if (!(addr_type & IPV6_ADDR_UNICAST)) { __be32 addr4; if (!(addr_type & IPV6_ADDR_MAPPED)) { ret = -EINVAL; goto out; } /* It is a mapped address. Need to do some * sanity checks. */ addr4 = sin6->sin6_addr.s6_addr32[3]; if (addr4 == htonl(INADDR_ANY) || addr4 == htonl(INADDR_BROADCAST) || ipv4_is_multicast(addr4)) { ret = -EINVAL; goto out; } } if (addr_type & IPV6_ADDR_LINKLOCAL) { if (sin6->sin6_scope_id == 0) { ret = -EINVAL; goto out; } scope_id = sin6->sin6_scope_id; } daddr = sin6->sin6_addr; dport = sin6->sin6_port; break; } #endif default: ret = -EINVAL; goto out; } } else { /* We only care about consistency with ->connect() */ lock_sock(sk); daddr = rs->rs_conn_addr; dport = rs->rs_conn_port; scope_id = rs->rs_bound_scope_id; release_sock(sk); } lock_sock(sk); if (ipv6_addr_any(&rs->rs_bound_addr) || ipv6_addr_any(&daddr)) { release_sock(sk); ret = -ENOTCONN; goto out; } else if (namelen != 0) { /* Cannot send to an IPv4 address using an IPv6 source * address and cannot send to an IPv6 address using an * IPv4 source address. 
*/ if (ipv6_addr_v4mapped(&daddr) ^ ipv6_addr_v4mapped(&rs->rs_bound_addr)) { release_sock(sk); ret = -EOPNOTSUPP; goto out; } /* If the socket is already bound to a link local address, * it can only send to peers on the same link. But allow * communicating between link local and non-link local address. */ if (scope_id != rs->rs_bound_scope_id) { if (!scope_id) { scope_id = rs->rs_bound_scope_id; } else if (rs->rs_bound_scope_id) { release_sock(sk); ret = -EINVAL; goto out; } } } release_sock(sk); ret = rds_rdma_bytes(msg, &rdma_payload_len); if (ret) goto out; if (max_t(size_t, payload_len, rdma_payload_len) > RDS_MAX_MSG_SIZE) { ret = -EMSGSIZE; goto out; } if (payload_len > rds_sk_sndbuf(rs)) { ret = -EMSGSIZE; goto out; } if (zcopy) { if (rs->rs_transport->t_type != RDS_TRANS_TCP) { ret = -EOPNOTSUPP; goto out; } num_sgs = iov_iter_npages(&msg->msg_iter, INT_MAX); } /* size of rm including all sgs */ ret = rds_rm_size(msg, num_sgs, &vct); if (ret < 0) goto out; rm = rds_message_alloc(ret, GFP_KERNEL); if (!rm) { ret = -ENOMEM; goto out; } /* Attach data to the rm */ if (payload_len) { rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); if (IS_ERR(rm->data.op_sg)) { ret = PTR_ERR(rm->data.op_sg); goto out; } ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy); if (ret) goto out; } rm->data.op_active = 1; rm->m_daddr = daddr; /* rds_conn_create has a spinlock that runs with IRQ off. * Caching the conn in the socket helps a lot. */ if (rs->rs_conn && ipv6_addr_equal(&rs->rs_conn->c_faddr, &daddr) && rs->rs_tos == rs->rs_conn->c_tos) { conn = rs->rs_conn; } else { conn = rds_conn_create_outgoing(sock_net(sock->sk), &rs->rs_bound_addr, &daddr, rs->rs_transport, rs->rs_tos, sock->sk->sk_allocation, scope_id); if (IS_ERR(conn)) { ret = PTR_ERR(conn); goto out; } rs->rs_conn = conn; } if (conn->c_trans->t_mp_capable) cpath = &conn->c_path[rds_send_mprds_hash(rs, conn, nonblock)]; else cpath = &conn->c_path[0]; rm->m_conn_path = cpath; /* Parse any control messages the user may have included. */ ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct); if (ret) goto out; if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) { printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n", &rm->rdma, conn->c_trans->xmit_rdma); ret = -EOPNOTSUPP; goto out; } if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) { printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n", &rm->atomic, conn->c_trans->xmit_atomic); ret = -EOPNOTSUPP; goto out; } if (rds_destroy_pending(conn)) { ret = -EAGAIN; goto out; } if (rds_conn_path_down(cpath)) rds_check_all_paths(conn); ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); if (ret) { rs->rs_seen_congestion = 1; goto out; } while (!rds_send_queue_rm(rs, conn, cpath, rm, rs->rs_bound_port, dport, &queued)) { rds_stats_inc(s_send_queue_full); if (nonblock) { ret = -EAGAIN; goto out; } timeo = wait_event_interruptible_timeout(*sk_sleep(sk), rds_send_queue_rm(rs, conn, cpath, rm, rs->rs_bound_port, dport, &queued), timeo); rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo); if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT) continue; ret = timeo; if (ret == 0) ret = -ETIMEDOUT; goto out; } /* * By now we've committed to the send. We reuse rds_send_worker() * to retry sends in the rds thread if the transport asks us to. 
*/ rds_stats_inc(s_send_queued); ret = rds_send_xmit(cpath); if (ret == -ENOMEM || ret == -EAGAIN) { ret = 0; rcu_read_lock(); if (rds_destroy_pending(cpath->cp_conn)) ret = -ENETUNREACH; else queue_delayed_work(rds_wq, &cpath->cp_send_w, 1); rcu_read_unlock(); } if (ret) goto out; rds_message_put(rm); for (ind = 0; ind < vct.indx; ind++) kfree(vct.vec[ind].iov); kfree(vct.vec); return payload_len; out: for (ind = 0; ind < vct.indx; ind++) kfree(vct.vec[ind].iov); kfree(vct.vec); /* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly. * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN * or in any other way, we need to destroy the MR again */ if (allocated_mr) rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1); if (rm) rds_message_put(rm); return ret; } /* * send out a probe. Can be shared by rds_send_ping, * rds_send_pong, rds_send_hb. * rds_send_hb should use h_flags * RDS_FLAG_HB_PING|RDS_FLAG_ACK_REQUIRED * or * RDS_FLAG_HB_PONG|RDS_FLAG_ACK_REQUIRED */ static int rds_send_probe(struct rds_conn_path *cp, __be16 sport, __be16 dport, u8 h_flags) { struct rds_message *rm; unsigned long flags; int ret = 0; rm = rds_message_alloc(0, GFP_ATOMIC); if (!rm) { ret = -ENOMEM; goto out; } rm->m_daddr = cp->cp_conn->c_faddr; rm->data.op_active = 1; rds_conn_path_connect_if_down(cp); ret = rds_cong_wait(cp->cp_conn->c_fcong, dport, 1, NULL); if (ret) goto out; spin_lock_irqsave(&cp->cp_lock, flags); list_add_tail(&rm->m_conn_item, &cp->cp_send_queue); set_bit(RDS_MSG_ON_CONN, &rm->m_flags); rds_message_addref(rm); rm->m_inc.i_conn = cp->cp_conn; rm->m_inc.i_conn_path = cp; rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, cp->cp_next_tx_seq); rm->m_inc.i_hdr.h_flags |= h_flags; cp->cp_next_tx_seq++; if (RDS_HS_PROBE(be16_to_cpu(sport), be16_to_cpu(dport)) && cp->cp_conn->c_trans->t_mp_capable) { u16 npaths = cpu_to_be16(RDS_MPATH_WORKERS); u32 my_gen_num = cpu_to_be32(cp->cp_conn->c_my_gen_num); rds_message_add_extension(&rm->m_inc.i_hdr, RDS_EXTHDR_NPATHS, &npaths, sizeof(npaths)); rds_message_add_extension(&rm->m_inc.i_hdr, RDS_EXTHDR_GEN_NUM, &my_gen_num, sizeof(u32)); } spin_unlock_irqrestore(&cp->cp_lock, flags); rds_stats_inc(s_send_queued); rds_stats_inc(s_send_pong); /* schedule the send work on rds_wq */ rcu_read_lock(); if (!rds_destroy_pending(cp->cp_conn)) queue_delayed_work(rds_wq, &cp->cp_send_w, 1); rcu_read_unlock(); rds_message_put(rm); return 0; out: if (rm) rds_message_put(rm); return ret; } int rds_send_pong(struct rds_conn_path *cp, __be16 dport) { return rds_send_probe(cp, 0, dport, 0); } void rds_send_ping(struct rds_connection *conn, int cp_index) { unsigned long flags; struct rds_conn_path *cp = &conn->c_path[cp_index]; spin_lock_irqsave(&cp->cp_lock, flags); if (conn->c_ping_triggered) { spin_unlock_irqrestore(&cp->cp_lock, flags); return; } conn->c_ping_triggered = 1; spin_unlock_irqrestore(&cp->cp_lock, flags); rds_send_probe(cp, cpu_to_be16(RDS_FLAG_PROBE_PORT), 0, 0); } EXPORT_SYMBOL_GPL(rds_send_ping);
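Illustrative userspace sketch (not part of the instrumented source above): the rds_sendmsg() path is reached through an ordinary sendto() on an RDS socket, and the socket must be bound before it may send (otherwise -ENOTCONN, as checked in rds_sendmsg above). Availability of AF_RDS in the libc headers, a loaded RDS transport (TCP or RDMA), and the addresses/ports used here are assumptions.

/* Minimal sketch: send one datagram over an RDS socket. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
	struct sockaddr_in laddr = { .sin_family = AF_INET };
	struct sockaddr_in daddr = { .sin_family = AF_INET };
	const char msg[] = "hello";
	int fd;

	fd = socket(AF_RDS, SOCK_SEQPACKET, 0);
	if (fd < 0) {
		perror("socket(AF_RDS)");	/* RDS transport module not loaded? */
		return 1;
	}

	/* RDS sockets must be bound to a local address/port before sending. */
	laddr.sin_addr.s_addr = inet_addr("192.0.2.1");	/* placeholder address */
	laddr.sin_port = htons(4000);
	if (bind(fd, (struct sockaddr *)&laddr, sizeof(laddr)) < 0) {
		perror("bind");
		return 1;
	}

	daddr.sin_addr.s_addr = inet_addr("192.0.2.2");	/* placeholder peer */
	daddr.sin_port = htons(4000);

	/* Each sendto() ends up in rds_sendmsg(), which queues the message on
	 * the connection path and kicks rds_send_xmit(); with MSG_DONTWAIT a
	 * full send queue would come back as -EAGAIN, as seen in the code above.
	 */
	if (sendto(fd, msg, sizeof(msg), 0, (struct sockaddr *)&daddr, sizeof(daddr)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}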
// SPDX-License-Identifier: GPL-2.0-only /* Network filesystem high-level buffered write support. * * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #include <linux/export.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/pagevec.h> #include "internal.h" static void __netfs_set_group(struct folio *folio, struct netfs_group *netfs_group) { if (netfs_group) folio_attach_private(folio, netfs_get_group(netfs_group)); } static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group) { void *priv = folio_get_private(folio); if (unlikely(priv != netfs_group)) { if (netfs_group && (!priv || priv == NETFS_FOLIO_COPY_TO_CACHE)) folio_attach_private(folio, netfs_get_group(netfs_group)); else if (!netfs_group && priv == NETFS_FOLIO_COPY_TO_CACHE) folio_detach_private(folio); } } /* * Grab a folio for writing and lock it. Attempt to allocate as large a folio * as possible to hold as much of the remaining length as possible in one go.
*/ static struct folio *netfs_grab_folio_for_write(struct address_space *mapping, loff_t pos, size_t part) { pgoff_t index = pos / PAGE_SIZE; fgf_t fgp_flags = FGP_WRITEBEGIN; if (mapping_large_folio_support(mapping)) fgp_flags |= fgf_set_order(pos % PAGE_SIZE + part); return __filemap_get_folio(mapping, index, fgp_flags, mapping_gfp_mask(mapping)); } /* * Update i_size and estimate the update to i_blocks to reflect the additional * data written into the pagecache until we can find out from the server what * the values actually are. */ static void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode, loff_t i_size, loff_t pos, size_t copied) { blkcnt_t add; size_t gap; if (ctx->ops->update_i_size) { ctx->ops->update_i_size(inode, pos); return; } i_size_write(inode, pos); #if IS_ENABLED(CONFIG_FSCACHE) fscache_update_cookie(ctx->cache, NULL, &pos); #endif gap = SECTOR_SIZE - (i_size & (SECTOR_SIZE - 1)); if (copied > gap) { add = DIV_ROUND_UP(copied - gap, SECTOR_SIZE); inode->i_blocks = min_t(blkcnt_t, DIV_ROUND_UP(pos, SECTOR_SIZE), inode->i_blocks + add); } } /** * netfs_perform_write - Copy data into the pagecache. * @iocb: The operation parameters * @iter: The source buffer * @netfs_group: Grouping for dirty folios (eg. ceph snaps). * * Copy data into pagecache folios attached to the inode specified by @iocb. * The caller must hold appropriate inode locks. * * Dirty folios are tagged with a netfs_folio struct if they're not up to date * to indicate the range modified. Dirty folios may also be tagged with a * netfs-specific grouping such that data from an old group gets flushed before * a new one is started. */ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter, struct netfs_group *netfs_group) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); struct address_space *mapping = inode->i_mapping; struct netfs_inode *ctx = netfs_inode(inode); struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE, .for_sync = true, .nr_to_write = LONG_MAX, .range_start = iocb->ki_pos, .range_end = iocb->ki_pos + iter->count, }; struct netfs_io_request *wreq = NULL; struct folio *folio = NULL, *writethrough = NULL; unsigned int bdp_flags = (iocb->ki_flags & IOCB_NOWAIT) ? BDP_ASYNC : 0; ssize_t written = 0, ret, ret2; loff_t i_size, pos = iocb->ki_pos; size_t max_chunk = mapping_max_folio_size(mapping); bool maybe_trouble = false; if (unlikely(test_bit(NETFS_ICTX_WRITETHROUGH, &ctx->flags) || iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) ) { wbc_attach_fdatawrite_inode(&wbc, mapping->host); ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count); if (ret < 0) { wbc_detach_inode(&wbc); goto out; } wreq = netfs_begin_writethrough(iocb, iter->count); if (IS_ERR(wreq)) { wbc_detach_inode(&wbc); ret = PTR_ERR(wreq); wreq = NULL; goto out; } if (!is_sync_kiocb(iocb)) wreq->iocb = iocb; netfs_stat(&netfs_n_wh_writethrough); } else { netfs_stat(&netfs_n_wh_buffered_write); } do { struct netfs_folio *finfo; struct netfs_group *group; unsigned long long fpos; size_t flen; size_t offset; /* Offset into pagecache folio */ size_t part; /* Bytes to write to folio */ size_t copied; /* Bytes copied from user */ offset = pos & (max_chunk - 1); part = min(max_chunk - offset, iov_iter_count(iter)); /* Bring in the user pages that we will copy from _first_ lest * we hit a nasty deadlock on copying from the same page as * we're writing to, without it being marked uptodate. 
* * Not only is this an optimisation, but it is also required to * check that the address is actually valid, when atomic * usercopies are used below. * * We rely on the page being held onto long enough by the LRU * that we can grab it below if this causes it to be read. */ ret = -EFAULT; if (unlikely(fault_in_iov_iter_readable(iter, part) == part)) break; folio = netfs_grab_folio_for_write(mapping, pos, part); if (IS_ERR(folio)) { ret = PTR_ERR(folio); break; } flen = folio_size(folio); fpos = folio_pos(folio); offset = pos - fpos; part = min_t(size_t, flen - offset, part); /* Wait for writeback to complete. The writeback engine owns * the info in folio->private and may change it until it * removes the WB mark. */ if (folio_get_private(folio) && folio_wait_writeback_killable(folio)) { ret = written ? -EINTR : -ERESTARTSYS; goto error_folio_unlock; } if (signal_pending(current)) { ret = written ? -EINTR : -ERESTARTSYS; goto error_folio_unlock; } /* Decide how we should modify a folio. We might be attempting * to do write-streaming, in which case we don't want to a * local RMW cycle if we can avoid it. If we're doing local * caching or content crypto, we award that priority over * avoiding RMW. If the file is open readably, then we also * assume that we may want to read what we wrote. */ finfo = netfs_folio_info(folio); group = netfs_folio_group(folio); if (unlikely(group != netfs_group) && group != NETFS_FOLIO_COPY_TO_CACHE) goto flush_content; if (folio_test_uptodate(folio)) { if (mapping_writably_mapped(mapping)) flush_dcache_folio(folio); copied = copy_folio_from_iter_atomic(folio, offset, part, iter); if (unlikely(copied == 0)) goto copy_failed; netfs_set_group(folio, netfs_group); trace_netfs_folio(folio, netfs_folio_is_uptodate); goto copied; } /* If the page is above the zero-point then we assume that the * server would just return a block of zeros or a short read if * we try to read it. */ if (fpos >= ctx->zero_point) { folio_zero_segment(folio, 0, offset); copied = copy_folio_from_iter_atomic(folio, offset, part, iter); if (unlikely(copied == 0)) goto copy_failed; folio_zero_segment(folio, offset + copied, flen); __netfs_set_group(folio, netfs_group); folio_mark_uptodate(folio); trace_netfs_folio(folio, netfs_modify_and_clear); goto copied; } /* See if we can write a whole folio in one go. */ if (!maybe_trouble && offset == 0 && part >= flen) { copied = copy_folio_from_iter_atomic(folio, offset, part, iter); if (unlikely(copied == 0)) goto copy_failed; if (unlikely(copied < part)) { maybe_trouble = true; iov_iter_revert(iter, copied); copied = 0; folio_unlock(folio); goto retry; } __netfs_set_group(folio, netfs_group); folio_mark_uptodate(folio); trace_netfs_folio(folio, netfs_whole_folio_modify); goto copied; } /* We don't want to do a streaming write on a file that loses * caching service temporarily because the backing store got * culled and we don't really want to get a streaming write on * a file that's open for reading as ->read_folio() then has to * be able to flush it. */ if ((file->f_mode & FMODE_READ) || netfs_is_cache_enabled(ctx)) { if (finfo) { netfs_stat(&netfs_n_wh_wstream_conflict); goto flush_content; } ret = netfs_prefetch_for_write(file, folio, offset, part); if (ret < 0) { _debug("prefetch = %zd", ret); goto error_folio_unlock; } /* Note that copy-to-cache may have been set. 
*/ copied = copy_folio_from_iter_atomic(folio, offset, part, iter); if (unlikely(copied == 0)) goto copy_failed; netfs_set_group(folio, netfs_group); trace_netfs_folio(folio, netfs_just_prefetch); goto copied; } if (!finfo) { ret = -EIO; if (WARN_ON(folio_get_private(folio))) goto error_folio_unlock; copied = copy_folio_from_iter_atomic(folio, offset, part, iter); if (unlikely(copied == 0)) goto copy_failed; if (offset == 0 && copied == flen) { __netfs_set_group(folio, netfs_group); folio_mark_uptodate(folio); trace_netfs_folio(folio, netfs_streaming_filled_page); goto copied; } finfo = kzalloc(sizeof(*finfo), GFP_KERNEL); if (!finfo) { iov_iter_revert(iter, copied); ret = -ENOMEM; goto error_folio_unlock; } finfo->netfs_group = netfs_get_group(netfs_group); finfo->dirty_offset = offset; finfo->dirty_len = copied; folio_attach_private(folio, (void *)((unsigned long)finfo | NETFS_FOLIO_INFO)); trace_netfs_folio(folio, netfs_streaming_write); goto copied; } /* We can continue a streaming write only if it continues on * from the previous. If it overlaps, we must flush lest we * suffer a partial copy and disjoint dirty regions. */ if (offset == finfo->dirty_offset + finfo->dirty_len) { copied = copy_folio_from_iter_atomic(folio, offset, part, iter); if (unlikely(copied == 0)) goto copy_failed; finfo->dirty_len += copied; if (finfo->dirty_offset == 0 && finfo->dirty_len == flen) { if (finfo->netfs_group) folio_change_private(folio, finfo->netfs_group); else folio_detach_private(folio); folio_mark_uptodate(folio); kfree(finfo); trace_netfs_folio(folio, netfs_streaming_cont_filled_page); } else { trace_netfs_folio(folio, netfs_streaming_write_cont); } goto copied; } /* Incompatible write; flush the folio and try again. */ flush_content: trace_netfs_folio(folio, netfs_flush_content); folio_unlock(folio); folio_put(folio); ret = filemap_write_and_wait_range(mapping, fpos, fpos + flen - 1); if (ret < 0) goto error_folio_unlock; continue; copied: flush_dcache_folio(folio); /* Update the inode size if we moved the EOF marker */ pos += copied; i_size = i_size_read(inode); if (pos > i_size) netfs_update_i_size(ctx, inode, i_size, pos, copied); written += copied; if (likely(!wreq)) { folio_mark_dirty(folio); folio_unlock(folio); } else { netfs_advance_writethrough(wreq, &wbc, folio, copied, offset + copied == flen, &writethrough); /* Folio unlocked */ } retry: folio_put(folio); folio = NULL; ret = balance_dirty_pages_ratelimited_flags(mapping, bdp_flags); if (unlikely(ret < 0)) break; cond_resched(); } while (iov_iter_count(iter)); out: if (likely(written)) { /* Set indication that ctime and mtime got updated in case * close is deferred. */ set_bit(NETFS_ICTX_MODIFIED_ATTR, &ctx->flags); if (unlikely(ctx->ops->post_modify)) ctx->ops->post_modify(inode); } if (unlikely(wreq)) { ret2 = netfs_end_writethrough(wreq, &wbc, writethrough); wbc_detach_inode(&wbc); if (ret2 == -EIOCBQUEUED) return ret2; if (ret == 0) ret = ret2; } iocb->ki_pos += written; _leave(" = %zd [%zd]", written, ret); return written ? written : ret; copy_failed: ret = -EFAULT; error_folio_unlock: folio_unlock(folio); folio_put(folio); goto out; } EXPORT_SYMBOL(netfs_perform_write); /** * netfs_buffered_write_iter_locked - write data to a file * @iocb: IO state structure (file, offset, etc.) * @from: iov_iter with data to write * @netfs_group: Grouping for dirty folios (eg. ceph snaps). * * This function does all the work needed for actually writing data to a * file. 
It does all basic checks, removes SUID from the file, updates * modification times and calls proper subroutines depending on whether we * do direct IO or a standard buffered write. * * The caller must hold appropriate locks around this function and have called * generic_write_checks() already. The caller is also responsible for doing * any necessary syncing afterwards. * * This function does *not* take care of syncing data in case of O_SYNC write. * A caller has to handle it. This is mainly due to the fact that we want to * avoid syncing under i_rwsem. * * Return: * * number of bytes written, even for truncated writes * * negative error code if no data has been written at all */ ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from, struct netfs_group *netfs_group) { struct file *file = iocb->ki_filp; ssize_t ret; trace_netfs_write_iter(iocb, from); ret = file_remove_privs(file); if (ret) return ret; ret = file_update_time(file); if (ret) return ret; return netfs_perform_write(iocb, from, netfs_group); } EXPORT_SYMBOL(netfs_buffered_write_iter_locked); /** * netfs_file_write_iter - write data to a file * @iocb: IO state structure * @from: iov_iter with data to write * * Perform a write to a file, writing into the pagecache if possible and doing * an unbuffered write instead if not. * * Return: * * Negative error code if no data has been written at all of * vfs_fsync_range() failed for a synchronous write * * Number of bytes written, even for truncated writes */ ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; struct netfs_inode *ictx = netfs_inode(inode); ssize_t ret; _enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode)); if (!iov_iter_count(from)) return 0; if ((iocb->ki_flags & IOCB_DIRECT) || test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags)) return netfs_unbuffered_write_iter(iocb, from); ret = netfs_start_io_write(inode); if (ret < 0) return ret; ret = generic_write_checks(iocb, from); if (ret > 0) ret = netfs_buffered_write_iter_locked(iocb, from, NULL); netfs_end_io_write(inode); if (ret > 0) ret = generic_write_sync(iocb, ret); return ret; } EXPORT_SYMBOL(netfs_file_write_iter); /* * Notification that a previously read-only page is about to become writable. * The caller indicates the precise page that needs to be written to, but * we only track group on a per-folio basis, so we block more often than * we might otherwise. */ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group) { struct netfs_group *group; struct folio *folio = page_folio(vmf->page); struct file *file = vmf->vma->vm_file; struct address_space *mapping = file->f_mapping; struct inode *inode = file_inode(file); struct netfs_inode *ictx = netfs_inode(inode); vm_fault_t ret = VM_FAULT_NOPAGE; int err; _enter("%lx", folio->index); sb_start_pagefault(inode->i_sb); if (folio_lock_killable(folio) < 0) goto out; if (folio->mapping != mapping) goto unlock; if (folio_wait_writeback_killable(folio) < 0) goto unlock; /* Can we see a streaming write here? 
*/ if (WARN_ON(!folio_test_uptodate(folio))) { ret = VM_FAULT_SIGBUS; goto unlock; } group = netfs_folio_group(folio); if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE) { folio_unlock(folio); err = filemap_fdatawrite_range(mapping, folio_pos(folio), folio_pos(folio) + folio_size(folio)); switch (err) { case 0: ret = VM_FAULT_RETRY; goto out; case -ENOMEM: ret = VM_FAULT_OOM; goto out; default: ret = VM_FAULT_SIGBUS; goto out; } } if (folio_test_dirty(folio)) trace_netfs_folio(folio, netfs_folio_trace_mkwrite_plus); else trace_netfs_folio(folio, netfs_folio_trace_mkwrite); netfs_set_group(folio, netfs_group); file_update_time(file); set_bit(NETFS_ICTX_MODIFIED_ATTR, &ictx->flags); if (ictx->ops->post_modify) ictx->ops->post_modify(inode); ret = VM_FAULT_LOCKED; out: sb_end_pagefault(inode->i_sb); return ret; unlock: folio_unlock(folio); goto out; } EXPORT_SYMBOL(netfs_page_mkwrite);
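A hedged sketch of how a network filesystem might wire the exported write helpers above into its file_operations; "myfs" and its hooks are placeholders, and real users of these helpers (e.g. afs, 9p) also set up their struct netfs_inode and netfs_request_ops, which is omitted here.

/* Sketch only: hook netfs buffered-write helpers into a filesystem. */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/netfs.h>

/* Route mmap'ed writes through netfs_page_mkwrite(); NULL means no
 * netfs-specific dirty-folio grouping is used by this (hypothetical) fs.
 */
static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	return netfs_page_mkwrite(vmf, NULL);
}

static const struct vm_operations_struct myfs_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= myfs_page_mkwrite,
};

static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &myfs_vm_ops;
	return 0;
}

static const struct file_operations myfs_file_ops = {
	.owner		= THIS_MODULE,
	.llseek		= generic_file_llseek,
	.read_iter	= netfs_file_read_iter,
	.write_iter	= netfs_file_write_iter,	/* ends up in netfs_perform_write() */
	.mmap		= myfs_file_mmap,
};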
3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775 
// SPDX-License-Identifier: GPL-2.0+
/* Framework for finding and configuring PHYs.
 * Also contains generic PHY driver
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phylib_stubs.h>
#include <linux/phy_led_triggers.h>
#include <linux/phy_link_topology.h>
#include <linux/pse-pd/pse.h>
#include <linux/property.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/rtnetlink.h>
#include <linux/sfp.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/unistd.h>

MODULE_DESCRIPTION("PHY library");
MODULE_AUTHOR("Andy Fleming");
MODULE_LICENSE("GPL");

__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_basic_features);

__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_basic_t1_features);

__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1s_p2mp_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_basic_t1s_p2mp_features);

__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_gbit_features);

__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_gbit_fibre_features);

__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_10gbit_features);

const int phy_basic_ports_array[3] = {
	ETHTOOL_LINK_MODE_Autoneg_BIT,
	ETHTOOL_LINK_MODE_TP_BIT,
	ETHTOOL_LINK_MODE_MII_BIT,
};
EXPORT_SYMBOL_GPL(phy_basic_ports_array);

static const int phy_all_ports_features_array[7] = {
	ETHTOOL_LINK_MODE_Autoneg_BIT,
	ETHTOOL_LINK_MODE_TP_BIT,
	ETHTOOL_LINK_MODE_MII_BIT,
	ETHTOOL_LINK_MODE_FIBRE_BIT,
	ETHTOOL_LINK_MODE_AUI_BIT,
	ETHTOOL_LINK_MODE_BNC_BIT,
	ETHTOOL_LINK_MODE_Backplane_BIT,
};

const int phy_10_100_features_array[4] = {
	ETHTOOL_LINK_MODE_10baseT_Half_BIT,
	ETHTOOL_LINK_MODE_10baseT_Full_BIT,
	ETHTOOL_LINK_MODE_100baseT_Half_BIT,
	ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};
EXPORT_SYMBOL_GPL(phy_10_100_features_array);

const int phy_basic_t1_features_array[3] = {
	ETHTOOL_LINK_MODE_TP_BIT,
	ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
	ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
};
EXPORT_SYMBOL_GPL(phy_basic_t1_features_array);

const int phy_basic_t1s_p2mp_features_array[2] = {
	ETHTOOL_LINK_MODE_TP_BIT,
	ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT,
};
EXPORT_SYMBOL_GPL(phy_basic_t1s_p2mp_features_array);

const int phy_gbit_features_array[2] = {
	ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
};
EXPORT_SYMBOL_GPL(phy_gbit_features_array);

const int phy_10gbit_features_array[1] = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
};
EXPORT_SYMBOL_GPL(phy_10gbit_features_array);

static const int phy_eee_cap1_features_array[] = {
	ETHTOOL_LINK_MODE_100baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
ETHTOOL_LINK_MODE_10000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, }; __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap1_features) __ro_after_init; EXPORT_SYMBOL_GPL(phy_eee_cap1_features); static const int phy_eee_cap2_features_array[] = { ETHTOOL_LINK_MODE_2500baseT_Full_BIT, ETHTOOL_LINK_MODE_5000baseT_Full_BIT, }; __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap2_features) __ro_after_init; EXPORT_SYMBOL_GPL(phy_eee_cap2_features); static void features_init(void) { /* 10/100 half/full*/ linkmode_set_bit_array(phy_basic_ports_array, ARRAY_SIZE(phy_basic_ports_array), phy_basic_features); linkmode_set_bit_array(phy_10_100_features_array, ARRAY_SIZE(phy_10_100_features_array), phy_basic_features); /* 100 full, TP */ linkmode_set_bit_array(phy_basic_t1_features_array, ARRAY_SIZE(phy_basic_t1_features_array), phy_basic_t1_features); /* 10 half, P2MP, TP */ linkmode_set_bit_array(phy_basic_t1s_p2mp_features_array, ARRAY_SIZE(phy_basic_t1s_p2mp_features_array), phy_basic_t1s_p2mp_features); /* 10/100 half/full + 1000 half/full */ linkmode_set_bit_array(phy_basic_ports_array, ARRAY_SIZE(phy_basic_ports_array), phy_gbit_features); linkmode_set_bit_array(phy_10_100_features_array, ARRAY_SIZE(phy_10_100_features_array), phy_gbit_features); linkmode_set_bit_array(phy_gbit_features_array, ARRAY_SIZE(phy_gbit_features_array), phy_gbit_features); /* 10/100 half/full + 1000 half/full + fibre*/ linkmode_set_bit_array(phy_basic_ports_array, ARRAY_SIZE(phy_basic_ports_array), phy_gbit_fibre_features); linkmode_set_bit_array(phy_10_100_features_array, ARRAY_SIZE(phy_10_100_features_array), phy_gbit_fibre_features); linkmode_set_bit_array(phy_gbit_features_array, ARRAY_SIZE(phy_gbit_features_array), phy_gbit_fibre_features); linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phy_gbit_fibre_features); /* 10/100 half/full + 1000 half/full + 10G full*/ linkmode_set_bit_array(phy_all_ports_features_array, ARRAY_SIZE(phy_all_ports_features_array), phy_10gbit_features); linkmode_set_bit_array(phy_10_100_features_array, ARRAY_SIZE(phy_10_100_features_array), phy_10gbit_features); linkmode_set_bit_array(phy_gbit_features_array, ARRAY_SIZE(phy_gbit_features_array), phy_10gbit_features); linkmode_set_bit_array(phy_10gbit_features_array, ARRAY_SIZE(phy_10gbit_features_array), phy_10gbit_features); linkmode_set_bit_array(phy_eee_cap1_features_array, ARRAY_SIZE(phy_eee_cap1_features_array), phy_eee_cap1_features); linkmode_set_bit_array(phy_eee_cap2_features_array, ARRAY_SIZE(phy_eee_cap2_features_array), phy_eee_cap2_features); } void phy_device_free(struct phy_device *phydev) { put_device(&phydev->mdio.dev); } EXPORT_SYMBOL(phy_device_free); static void phy_mdio_device_free(struct mdio_device *mdiodev) { struct phy_device *phydev; phydev = container_of(mdiodev, struct phy_device, mdio); phy_device_free(phydev); } static void phy_device_release(struct device *dev) { fwnode_handle_put(dev->fwnode); kfree(to_phy_device(dev)); } static void phy_mdio_device_remove(struct mdio_device *mdiodev) { struct phy_device *phydev; phydev = container_of(mdiodev, struct phy_device, mdio); phy_device_remove(phydev); } static struct phy_driver genphy_driver; static LIST_HEAD(phy_fixup_list); static DEFINE_MUTEX(phy_fixup_lock); static bool phy_drv_wol_enabled(struct phy_device *phydev) { struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; phy_ethtool_get_wol(phydev, &wol); return wol.wolopts != 0; } static bool mdio_bus_phy_may_suspend(struct phy_device 
*phydev) { struct device_driver *drv = phydev->mdio.dev.driver; struct phy_driver *phydrv = to_phy_driver(drv); struct net_device *netdev = phydev->attached_dev; if (!drv || !phydrv->suspend) return false; /* If the PHY on the mido bus is not attached but has WOL enabled * we cannot suspend the PHY. */ if (!netdev && phy_drv_wol_enabled(phydev)) return false; /* PHY not attached? May suspend if the PHY has not already been * suspended as part of a prior call to phy_disconnect() -> * phy_detach() -> phy_suspend() because the parent netdev might be the * MDIO bus driver and clock gated at this point. */ if (!netdev) goto out; if (netdev->ethtool->wol_enabled) return false; /* As long as not all affected network drivers support the * wol_enabled flag, let's check for hints that WoL is enabled. * Don't suspend PHY if the attached netdev parent may wake up. * The parent may point to a PCI device, as in tg3 driver. */ if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent)) return false; /* Also don't suspend PHY if the netdev itself may wakeup. This * is the case for devices w/o underlaying pwr. mgmt. aware bus, * e.g. SoC devices. */ if (device_may_wakeup(&netdev->dev)) return false; out: return !phydev->suspended; } static __maybe_unused int mdio_bus_phy_suspend(struct device *dev) { struct phy_device *phydev = to_phy_device(dev); if (phydev->mac_managed_pm) return 0; /* Wakeup interrupts may occur during the system sleep transition when * the PHY is inaccessible. Set flag to postpone handling until the PHY * has resumed. Wait for concurrent interrupt handler to complete. */ if (phy_interrupt_is_valid(phydev)) { phydev->irq_suspended = 1; synchronize_irq(phydev->irq); } /* We must stop the state machine manually, otherwise it stops out of * control, possibly with the phydev->lock held. Upon resume, netdev * may call phy routines that try to grab the same lock, and that may * lead to a deadlock. */ if (phydev->attached_dev && phydev->adjust_link) phy_stop_machine(phydev); if (!mdio_bus_phy_may_suspend(phydev)) return 0; phydev->suspended_by_mdio_bus = 1; return phy_suspend(phydev); } static __maybe_unused int mdio_bus_phy_resume(struct device *dev) { struct phy_device *phydev = to_phy_device(dev); int ret; if (phydev->mac_managed_pm) return 0; if (!phydev->suspended_by_mdio_bus) goto no_resume; phydev->suspended_by_mdio_bus = 0; /* If we managed to get here with the PHY state machine in a state * neither PHY_HALTED, PHY_READY nor PHY_UP, this is an indication * that something went wrong and we should most likely be using * MAC managed PM, but we are not. */ WARN_ON(phydev->state != PHY_HALTED && phydev->state != PHY_READY && phydev->state != PHY_UP); ret = phy_init_hw(phydev); if (ret < 0) return ret; ret = phy_resume(phydev); if (ret < 0) return ret; no_resume: if (phy_interrupt_is_valid(phydev)) { phydev->irq_suspended = 0; synchronize_irq(phydev->irq); /* Rerun interrupts which were postponed by phy_interrupt() * because they occurred during the system sleep transition. 
*/ if (phydev->irq_rerun) { phydev->irq_rerun = 0; enable_irq(phydev->irq); irq_wake_thread(phydev->irq, phydev); } } if (phydev->attached_dev && phydev->adjust_link) phy_start_machine(phydev); return 0; } static SIMPLE_DEV_PM_OPS(mdio_bus_phy_pm_ops, mdio_bus_phy_suspend, mdio_bus_phy_resume); /** * phy_register_fixup - creates a new phy_fixup and adds it to the list * @bus_id: A string which matches phydev->mdio.dev.bus_id (or PHY_ANY_ID) * @phy_uid: Used to match against phydev->phy_id (the UID of the PHY) * It can also be PHY_ANY_UID * @phy_uid_mask: Applied to phydev->phy_id and fixup->phy_uid before * comparison * @run: The actual code to be run when a matching PHY is found */ int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask, int (*run)(struct phy_device *)) { struct phy_fixup *fixup = kzalloc(sizeof(*fixup), GFP_KERNEL); if (!fixup) return -ENOMEM; strscpy(fixup->bus_id, bus_id, sizeof(fixup->bus_id)); fixup->phy_uid = phy_uid; fixup->phy_uid_mask = phy_uid_mask; fixup->run = run; mutex_lock(&phy_fixup_lock); list_add_tail(&fixup->list, &phy_fixup_list); mutex_unlock(&phy_fixup_lock); return 0; } EXPORT_SYMBOL(phy_register_fixup); /* Registers a fixup to be run on any PHY with the UID in phy_uid */ int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask, int (*run)(struct phy_device *)) { return phy_register_fixup(PHY_ANY_ID, phy_uid, phy_uid_mask, run); } EXPORT_SYMBOL(phy_register_fixup_for_uid); /* Registers a fixup to be run on the PHY with id string bus_id */ int phy_register_fixup_for_id(const char *bus_id, int (*run)(struct phy_device *)) { return phy_register_fixup(bus_id, PHY_ANY_UID, 0xffffffff, run); } EXPORT_SYMBOL(phy_register_fixup_for_id); /** * phy_unregister_fixup - remove a phy_fixup from the list * @bus_id: A string matches fixup->bus_id (or PHY_ANY_ID) in phy_fixup_list * @phy_uid: A phy id matches fixup->phy_id (or PHY_ANY_UID) in phy_fixup_list * @phy_uid_mask: Applied to phy_uid and fixup->phy_uid before comparison */ int phy_unregister_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask) { struct list_head *pos, *n; struct phy_fixup *fixup; int ret; ret = -ENODEV; mutex_lock(&phy_fixup_lock); list_for_each_safe(pos, n, &phy_fixup_list) { fixup = list_entry(pos, struct phy_fixup, list); if ((!strcmp(fixup->bus_id, bus_id)) && phy_id_compare(fixup->phy_uid, phy_uid, phy_uid_mask)) { list_del(&fixup->list); kfree(fixup); ret = 0; break; } } mutex_unlock(&phy_fixup_lock); return ret; } EXPORT_SYMBOL(phy_unregister_fixup); /* Unregisters a fixup of any PHY with the UID in phy_uid */ int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask) { return phy_unregister_fixup(PHY_ANY_ID, phy_uid, phy_uid_mask); } EXPORT_SYMBOL(phy_unregister_fixup_for_uid); /* Unregisters a fixup of the PHY with id string bus_id */ int phy_unregister_fixup_for_id(const char *bus_id) { return phy_unregister_fixup(bus_id, PHY_ANY_UID, 0xffffffff); } EXPORT_SYMBOL(phy_unregister_fixup_for_id); /* Returns 1 if fixup matches phydev in bus_id and phy_uid. * Fixups can be set to match any in one or more fields. 
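 *
 * Illustrative sketch (editor's addition, not part of phy_device.c): a board
 * or MAC driver typically hooks into this machinery with
 * phy_register_fixup_for_uid(); the fixup is then run by phy_scan_fixups()
 * when the PHY is registered or re-initialised.  The PHY ID, mask and
 * function below are hypothetical placeholders:
 *
 *	static int example_board_phy_fixup(struct phy_device *phydev)
 *	{
 *		// e.g. tweak a vendor-specific register via phy_write()
 *		return 0;
 *	}
 *
 *	// early in board or driver init code:
 *	err = phy_register_fixup_for_uid(0x00221611, 0xfffffff0,
 *					 example_board_phy_fixup);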
*/ static int phy_needs_fixup(struct phy_device *phydev, struct phy_fixup *fixup) { if (strcmp(fixup->bus_id, phydev_name(phydev)) != 0) if (strcmp(fixup->bus_id, PHY_ANY_ID) != 0) return 0; if (!phy_id_compare(phydev->phy_id, fixup->phy_uid, fixup->phy_uid_mask)) if (fixup->phy_uid != PHY_ANY_UID) return 0; return 1; } /* Runs any matching fixups for this phydev */ static int phy_scan_fixups(struct phy_device *phydev) { struct phy_fixup *fixup; mutex_lock(&phy_fixup_lock); list_for_each_entry(fixup, &phy_fixup_list, list) { if (phy_needs_fixup(phydev, fixup)) { int err = fixup->run(phydev); if (err < 0) { mutex_unlock(&phy_fixup_lock); return err; } phydev->has_fixups = true; } } mutex_unlock(&phy_fixup_lock); return 0; } static int phy_bus_match(struct device *dev, const struct device_driver *drv) { struct phy_device *phydev = to_phy_device(dev); const struct phy_driver *phydrv = to_phy_driver(drv); const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids); int i; if (!(phydrv->mdiodrv.flags & MDIO_DEVICE_IS_PHY)) return 0; if (phydrv->match_phy_device) return phydrv->match_phy_device(phydev); if (phydev->is_c45) { for (i = 1; i < num_ids; i++) { if (phydev->c45_ids.device_ids[i] == 0xffffffff) continue; if (phy_id_compare(phydev->c45_ids.device_ids[i], phydrv->phy_id, phydrv->phy_id_mask)) return 1; } return 0; } else { return phy_id_compare(phydev->phy_id, phydrv->phy_id, phydrv->phy_id_mask); } } static ssize_t phy_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct phy_device *phydev = to_phy_device(dev); return sysfs_emit(buf, "0x%.8lx\n", (unsigned long)phydev->phy_id); } static DEVICE_ATTR_RO(phy_id); static ssize_t phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf) { struct phy_device *phydev = to_phy_device(dev); const char *mode = NULL; if (phy_is_internal(phydev)) mode = "internal"; else mode = phy_modes(phydev->interface); return sysfs_emit(buf, "%s\n", mode); } static DEVICE_ATTR_RO(phy_interface); static ssize_t phy_has_fixups_show(struct device *dev, struct device_attribute *attr, char *buf) { struct phy_device *phydev = to_phy_device(dev); return sysfs_emit(buf, "%d\n", phydev->has_fixups); } static DEVICE_ATTR_RO(phy_has_fixups); static ssize_t phy_dev_flags_show(struct device *dev, struct device_attribute *attr, char *buf) { struct phy_device *phydev = to_phy_device(dev); return sysfs_emit(buf, "0x%08x\n", phydev->dev_flags); } static DEVICE_ATTR_RO(phy_dev_flags); static struct attribute *phy_dev_attrs[] = { &dev_attr_phy_id.attr, &dev_attr_phy_interface.attr, &dev_attr_phy_has_fixups.attr, &dev_attr_phy_dev_flags.attr, NULL, }; ATTRIBUTE_GROUPS(phy_dev); static const struct device_type mdio_bus_phy_type = { .name = "PHY", .groups = phy_dev_groups, .release = phy_device_release, .pm = pm_ptr(&mdio_bus_phy_pm_ops), }; static int phy_request_driver_module(struct phy_device *dev, u32 phy_id) { int ret; ret = request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id)); /* We only check for failures in executing the usermode binary, * not whether a PHY driver module exists for the PHY ID. * Accept -ENOENT because this may occur in case no initramfs exists, * then modprobe isn't available. 
*/ if (IS_ENABLED(CONFIG_MODULES) && ret < 0 && ret != -ENOENT) { phydev_err(dev, "error %d loading PHY driver module for ID 0x%08lx\n", ret, (unsigned long)phy_id); return ret; } return 0; } struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids) { struct phy_device *dev; struct mdio_device *mdiodev; int ret = 0; /* We allocate the device, and initialize the default values */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return ERR_PTR(-ENOMEM); mdiodev = &dev->mdio; mdiodev->dev.parent = &bus->dev; mdiodev->dev.bus = &mdio_bus_type; mdiodev->dev.type = &mdio_bus_phy_type; mdiodev->bus = bus; mdiodev->bus_match = phy_bus_match; mdiodev->addr = addr; mdiodev->flags = MDIO_DEVICE_FLAG_PHY; mdiodev->device_free = phy_mdio_device_free; mdiodev->device_remove = phy_mdio_device_remove; mdiodev->reset_state = -1; dev->speed = SPEED_UNKNOWN; dev->duplex = DUPLEX_UNKNOWN; dev->pause = 0; dev->asym_pause = 0; dev->link = 0; dev->port = PORT_TP; dev->interface = PHY_INTERFACE_MODE_GMII; dev->autoneg = AUTONEG_ENABLE; dev->pma_extable = -ENODATA; dev->is_c45 = is_c45; dev->phy_id = phy_id; if (c45_ids) dev->c45_ids = *c45_ids; dev->irq = bus->irq[addr]; dev_set_name(&mdiodev->dev, PHY_ID_FMT, bus->id, addr); device_initialize(&mdiodev->dev); dev->state = PHY_DOWN; INIT_LIST_HEAD(&dev->leds); mutex_init(&dev->lock); INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine); /* Request the appropriate module unconditionally; don't * bother trying to do so only if it isn't already loaded, * because that gets complicated. A hotplug event would have * done an unconditional modprobe anyway. * We don't do normal hotplug because it won't work for MDIO * -- because it relies on the device staying around for long * enough for the driver to get loaded. With MDIO, the NIC * driver will get bored and give up as soon as it finds that * there's no driver _already_ loaded. */ if (is_c45 && c45_ids) { const int num_ids = ARRAY_SIZE(c45_ids->device_ids); int i; for (i = 1; i < num_ids; i++) { if (c45_ids->device_ids[i] == 0xffffffff) continue; ret = phy_request_driver_module(dev, c45_ids->device_ids[i]); if (ret) break; } } else { ret = phy_request_driver_module(dev, phy_id); } if (ret) { put_device(&mdiodev->dev); dev = ERR_PTR(ret); } return dev; } EXPORT_SYMBOL(phy_device_create); /* phy_c45_probe_present - checks to see if a MMD is present in the package * @bus: the target MII bus * @prtad: PHY package address on the MII bus * @devad: PHY device (MMD) address * * Read the MDIO_STAT2 register, and check whether a device is responding * at this address. * * Returns: negative error number on bus access error, zero if no device * is responding, or positive if a device is present. */ static int phy_c45_probe_present(struct mii_bus *bus, int prtad, int devad) { int stat2; stat2 = mdiobus_c45_read(bus, prtad, devad, MDIO_STAT2); if (stat2 < 0) return stat2; return (stat2 & MDIO_STAT2_DEVPRST) == MDIO_STAT2_DEVPRST_VAL; } /* get_phy_c45_devs_in_pkg - reads a MMD's devices in package registers. * @bus: the target MII bus * @addr: PHY address on the MII bus * @dev_addr: MMD address in the PHY. * @devices_in_package: where to store the devices in package information. * * Description: reads devices in package registers of a MMD at @dev_addr * from PHY at @addr on @bus. * * Returns: 0 on success, -EIO on failure. 
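 *
 * Worked example (editor's addition): MDIO_DEVS2 supplies the upper and
 * MDIO_DEVS1 the lower 16 bits, so DEVS2 = 0x0000 and DEVS1 = 0x009a yield
 * *devices_in_package = 0x0000009a, i.e. bits 1, 3, 4 and 7 set, which per
 * IEEE 802.3 Clause 45 means the PMA/PMD, PCS, PHY XS and Auto-Negotiation
 * MMDs are present in the package.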
*/ static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr, u32 *devices_in_package) { int phy_reg; phy_reg = mdiobus_c45_read(bus, addr, dev_addr, MDIO_DEVS2); if (phy_reg < 0) return -EIO; *devices_in_package = phy_reg << 16; phy_reg = mdiobus_c45_read(bus, addr, dev_addr, MDIO_DEVS1); if (phy_reg < 0) return -EIO; *devices_in_package |= phy_reg; return 0; } /** * get_phy_c45_ids - reads the specified addr for its 802.3-c45 IDs. * @bus: the target MII bus * @addr: PHY address on the MII bus * @c45_ids: where to store the c45 ID information. * * Read the PHY "devices in package". If this appears to be valid, read * the PHY identifiers for each device. Return the "devices in package" * and identifiers in @c45_ids. * * Returns zero on success, %-EIO on bus access error, or %-ENODEV if * the "devices in package" is invalid or no device responds. */ static int get_phy_c45_ids(struct mii_bus *bus, int addr, struct phy_c45_device_ids *c45_ids) { const int num_ids = ARRAY_SIZE(c45_ids->device_ids); u32 devs_in_pkg = 0; int i, ret, phy_reg; /* Find first non-zero Devices In package. Device zero is reserved * for 802.3 c45 complied PHYs, so don't probe it at first. */ for (i = 1; i < MDIO_MMD_NUM && (devs_in_pkg == 0 || (devs_in_pkg & 0x1fffffff) == 0x1fffffff); i++) { if (i == MDIO_MMD_VEND1 || i == MDIO_MMD_VEND2) { /* Check that there is a device present at this * address before reading the devices-in-package * register to avoid reading garbage from the PHY. * Some PHYs (88x3310) vendor space is not IEEE802.3 * compliant. */ ret = phy_c45_probe_present(bus, addr, i); if (ret < 0) /* returning -ENODEV doesn't stop bus * scanning */ return (phy_reg == -EIO || phy_reg == -ENODEV) ? -ENODEV : -EIO; if (!ret) continue; } phy_reg = get_phy_c45_devs_in_pkg(bus, addr, i, &devs_in_pkg); if (phy_reg < 0) return -EIO; } if ((devs_in_pkg & 0x1fffffff) == 0x1fffffff) { /* If mostly Fs, there is no device there, then let's probe * MMD 0, as some 10G PHYs have zero Devices In package, * e.g. Cortina CS4315/CS4340 PHY. */ phy_reg = get_phy_c45_devs_in_pkg(bus, addr, 0, &devs_in_pkg); if (phy_reg < 0) return -EIO; /* no device there, let's get out of here */ if ((devs_in_pkg & 0x1fffffff) == 0x1fffffff) return -ENODEV; } /* Now probe Device Identifiers for each device present. */ for (i = 1; i < num_ids; i++) { if (!(devs_in_pkg & (1 << i))) continue; if (i == MDIO_MMD_VEND1 || i == MDIO_MMD_VEND2) { /* Probe the "Device Present" bits for the vendor MMDs * to ignore these if they do not contain IEEE 802.3 * registers. */ ret = phy_c45_probe_present(bus, addr, i); if (ret < 0) return ret; if (!ret) continue; } phy_reg = mdiobus_c45_read(bus, addr, i, MII_PHYSID1); if (phy_reg < 0) return -EIO; c45_ids->device_ids[i] = phy_reg << 16; phy_reg = mdiobus_c45_read(bus, addr, i, MII_PHYSID2); if (phy_reg < 0) return -EIO; c45_ids->device_ids[i] |= phy_reg; } c45_ids->devices_in_package = devs_in_pkg; /* Bit 0 doesn't represent a device, it indicates c22 regs presence */ c45_ids->mmds_present = devs_in_pkg & ~BIT(0); return 0; } /** * get_phy_c22_id - reads the specified addr for its clause 22 ID. * @bus: the target MII bus * @addr: PHY address on the MII bus * @phy_id: where to store the ID retrieved. * * Read the 802.3 clause 22 PHY ID from the PHY at @addr on the @bus, * placing it in @phy_id. Return zero on successful read and the ID is * valid, %-EIO on bus access error, or %-ENODEV if no device responds * or invalid ID. 
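 *
 * Worked example (editor's addition, register values hypothetical): with
 * MII_PHYSID1 = 0x0022 and MII_PHYSID2 = 0x1611 the function reports
 * *phy_id = (0x0022 << 16) | 0x1611 = 0x00221611, which phy_bus_match()
 * later compares against phy_driver::phy_id under phy_driver::phy_id_mask.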
*/ static int get_phy_c22_id(struct mii_bus *bus, int addr, u32 *phy_id) { int phy_reg; /* Grab the bits from PHYIR1, and put them in the upper half */ phy_reg = mdiobus_read(bus, addr, MII_PHYSID1); if (phy_reg < 0) { /* returning -ENODEV doesn't stop bus scanning */ return (phy_reg == -EIO || phy_reg == -ENODEV) ? -ENODEV : -EIO; } *phy_id = phy_reg << 16; /* Grab the bits from PHYIR2, and put them in the lower half */ phy_reg = mdiobus_read(bus, addr, MII_PHYSID2); if (phy_reg < 0) { /* returning -ENODEV doesn't stop bus scanning */ return (phy_reg == -EIO || phy_reg == -ENODEV) ? -ENODEV : -EIO; } *phy_id |= phy_reg; /* If the phy_id is mostly Fs, there is no device there */ if ((*phy_id & 0x1fffffff) == 0x1fffffff) return -ENODEV; return 0; } /* Extract the phy ID from the compatible string of the form * ethernet-phy-idAAAA.BBBB. */ int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id) { unsigned int upper, lower; const char *cp; int ret; ret = fwnode_property_read_string(fwnode, "compatible", &cp); if (ret) return ret; if (sscanf(cp, "ethernet-phy-id%4x.%4x", &upper, &lower) != 2) return -EINVAL; *phy_id = ((upper & GENMASK(15, 0)) << 16) | (lower & GENMASK(15, 0)); return 0; } EXPORT_SYMBOL(fwnode_get_phy_id); /** * get_phy_device - reads the specified PHY device and returns its @phy_device * struct * @bus: the target MII bus * @addr: PHY address on the MII bus * @is_c45: If true the PHY uses the 802.3 clause 45 protocol * * Probe for a PHY at @addr on @bus. * * When probing for a clause 22 PHY, then read the ID registers. If we find * a valid ID, allocate and return a &struct phy_device. * * When probing for a clause 45 PHY, read the "devices in package" registers. * If the "devices in package" appears valid, read the ID registers for each * MMD, allocate and return a &struct phy_device. * * Returns an allocated &struct phy_device on success, %-ENODEV if there is * no PHY present, or %-EIO on bus access error. */ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45) { struct phy_c45_device_ids c45_ids; u32 phy_id = 0; int r; c45_ids.devices_in_package = 0; c45_ids.mmds_present = 0; memset(c45_ids.device_ids, 0xff, sizeof(c45_ids.device_ids)); if (is_c45) r = get_phy_c45_ids(bus, addr, &c45_ids); else r = get_phy_c22_id(bus, addr, &phy_id); if (r) return ERR_PTR(r); /* PHY device such as the Marvell Alaska 88E2110 will return a PHY ID * of 0 when probed using get_phy_c22_id() with no error. Proceed to * probe with C45 to see if we're able to get a valid PHY ID in the C45 * space, if successful, create the C45 PHY device. 
*/ if (!is_c45 && phy_id == 0 && bus->read_c45) { r = get_phy_c45_ids(bus, addr, &c45_ids); if (!r) return phy_device_create(bus, addr, phy_id, true, &c45_ids); } return phy_device_create(bus, addr, phy_id, is_c45, &c45_ids); } EXPORT_SYMBOL(get_phy_device); /** * phy_device_register - Register the phy device on the MDIO bus * @phydev: phy_device structure to be added to the MDIO bus */ int phy_device_register(struct phy_device *phydev) { int err; err = mdiobus_register_device(&phydev->mdio); if (err) return err; /* Deassert the reset signal */ phy_device_reset(phydev, 0); /* Run all of the fixups for this PHY */ err = phy_scan_fixups(phydev); if (err) { phydev_err(phydev, "failed to initialize\n"); goto out; } err = device_add(&phydev->mdio.dev); if (err) { phydev_err(phydev, "failed to add\n"); goto out; } return 0; out: /* Assert the reset signal */ phy_device_reset(phydev, 1); mdiobus_unregister_device(&phydev->mdio); return err; } EXPORT_SYMBOL(phy_device_register); /** * phy_device_remove - Remove a previously registered phy device from the MDIO bus * @phydev: phy_device structure to remove * * This doesn't free the phy_device itself, it merely reverses the effects * of phy_device_register(). Use phy_device_free() to free the device * after calling this function. */ void phy_device_remove(struct phy_device *phydev) { unregister_mii_timestamper(phydev->mii_ts); pse_control_put(phydev->psec); device_del(&phydev->mdio.dev); /* Assert the reset signal */ phy_device_reset(phydev, 1); mdiobus_unregister_device(&phydev->mdio); } EXPORT_SYMBOL(phy_device_remove); /** * phy_get_c45_ids - Read 802.3-c45 IDs for phy device. * @phydev: phy_device structure to read 802.3-c45 IDs * * Returns zero on success, %-EIO on bus access error, or %-ENODEV if * the "devices in package" is invalid. */ int phy_get_c45_ids(struct phy_device *phydev) { return get_phy_c45_ids(phydev->mdio.bus, phydev->mdio.addr, &phydev->c45_ids); } EXPORT_SYMBOL(phy_get_c45_ids); /** * phy_find_first - finds the first PHY device on the bus * @bus: the target MII bus */ struct phy_device *phy_find_first(struct mii_bus *bus) { struct phy_device *phydev; int addr; for (addr = 0; addr < PHY_MAX_ADDR; addr++) { phydev = mdiobus_get_phy(bus, addr); if (phydev) return phydev; } return NULL; } EXPORT_SYMBOL(phy_find_first); static void phy_link_change(struct phy_device *phydev, bool up) { struct net_device *netdev = phydev->attached_dev; if (up) netif_carrier_on(netdev); else netif_carrier_off(netdev); phydev->adjust_link(netdev); if (phydev->mii_ts && phydev->mii_ts->link_state) phydev->mii_ts->link_state(phydev->mii_ts, phydev); } /** * phy_prepare_link - prepares the PHY layer to monitor link status * @phydev: target phy_device struct * @handler: callback function for link status change notifications * * Description: Tells the PHY infrastructure to handle the * gory details on monitoring link status (whether through * polling or an interrupt), and to call back to the * connected device driver when the link status changes. * If you want to monitor your own link state, don't call * this function. 
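 *
 * Minimal sketch of such a handler (editor's addition, names hypothetical);
 * phylib updates phydev->link, phydev->speed and phydev->duplex before the
 * handler is invoked through phy_link_change():
 *
 *	static void example_adjust_link(struct net_device *ndev)
 *	{
 *		struct phy_device *phydev = ndev->phydev;
 *
 *		if (phydev->link)
 *			netdev_info(ndev, "link up, %d Mbps, %s duplex\n",
 *				    phydev->speed,
 *				    phydev->duplex == DUPLEX_FULL ?
 *				    "full" : "half");
 *		else
 *			netdev_info(ndev, "link down\n");
 *	}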
*/ static void phy_prepare_link(struct phy_device *phydev, void (*handler)(struct net_device *)) { phydev->adjust_link = handler; } /** * phy_connect_direct - connect an ethernet device to a specific phy_device * @dev: the network device to connect * @phydev: the pointer to the phy device * @handler: callback function for state change notifications * @interface: PHY device's interface */ int phy_connect_direct(struct net_device *dev, struct phy_device *phydev, void (*handler)(struct net_device *), phy_interface_t interface) { int rc; if (!dev) return -EINVAL; rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface); if (rc) return rc; phy_prepare_link(phydev, handler); if (phy_interrupt_is_valid(phydev)) phy_request_interrupt(phydev); return 0; } EXPORT_SYMBOL(phy_connect_direct); /** * phy_connect - connect an ethernet device to a PHY device * @dev: the network device to connect * @bus_id: the id string of the PHY device to connect * @handler: callback function for state change notifications * @interface: PHY device's interface * * Description: Convenience function for connecting ethernet * devices to PHY devices. The default behavior is for * the PHY infrastructure to handle everything, and only notify * the connected driver when the link status changes. If you * don't want, or can't use the provided functionality, you may * choose to call only the subset of functions which provide * the desired functionality. */ struct phy_device *phy_connect(struct net_device *dev, const char *bus_id, void (*handler)(struct net_device *), phy_interface_t interface) { struct phy_device *phydev; struct device *d; int rc; /* Search the list of PHY devices on the mdio bus for the * PHY with the requested name */ d = bus_find_device_by_name(&mdio_bus_type, NULL, bus_id); if (!d) { pr_err("PHY %s not found\n", bus_id); return ERR_PTR(-ENODEV); } phydev = to_phy_device(d); rc = phy_connect_direct(dev, phydev, handler, interface); put_device(d); if (rc) return ERR_PTR(rc); return phydev; } EXPORT_SYMBOL(phy_connect); /** * phy_disconnect - disable interrupts, stop state machine, and detach a PHY * device * @phydev: target phy_device struct */ void phy_disconnect(struct phy_device *phydev) { if (phy_is_started(phydev)) phy_stop(phydev); if (phy_interrupt_is_valid(phydev)) phy_free_interrupt(phydev); phydev->adjust_link = NULL; phy_detach(phydev); } EXPORT_SYMBOL(phy_disconnect); /** * phy_poll_reset - Safely wait until a PHY reset has properly completed * @phydev: The PHY device to poll * * Description: According to IEEE 802.3, Section 2, Subsection 22.2.4.1.1, as * published in 2008, a PHY reset may take up to 0.5 seconds. The MII BMCR * register must be polled until the BMCR_RESET bit clears. * * Furthermore, any attempts to write to PHY registers may have no effect * or even generate MDIO bus errors until this is complete. * * Some PHYs (such as the Marvell 88E1111) don't entirely conform to the * standard and do not fully reset after the BMCR_RESET bit is set, and may * even *REQUIRE* a soft-reset to properly restart autonegotiation. In an * effort to support such broken PHYs, this function is separate from the * standard phy_init_hw() which will zero all the other bits in the BMCR * and reapply all driver-specific and board-specific fixups. 
*/ static int phy_poll_reset(struct phy_device *phydev) { /* Poll until the reset bit clears (50ms per retry == 0.6 sec) */ int ret, val; ret = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET), 50000, 600000, true); if (ret) return ret; /* Some chips (smsc911x) may still need up to another 1ms after the * BMCR_RESET bit is cleared before they are usable. */ msleep(1); return 0; } int phy_init_hw(struct phy_device *phydev) { int ret = 0; /* Deassert the reset signal */ phy_device_reset(phydev, 0); if (!phydev->drv) return 0; if (phydev->drv->soft_reset) { ret = phydev->drv->soft_reset(phydev); if (ret < 0) return ret; /* see comment in genphy_soft_reset for an explanation */ phydev->suspended = 0; } ret = phy_scan_fixups(phydev); if (ret < 0) return ret; phy_interface_zero(phydev->possible_interfaces); if (phydev->drv->config_init) { ret = phydev->drv->config_init(phydev); if (ret < 0) return ret; } if (phydev->drv->config_intr) { ret = phydev->drv->config_intr(phydev); if (ret < 0) return ret; } return 0; } EXPORT_SYMBOL(phy_init_hw); void phy_attached_info(struct phy_device *phydev) { phy_attached_print(phydev, NULL); } EXPORT_SYMBOL(phy_attached_info); #define ATTACHED_FMT "attached PHY driver %s(mii_bus:phy_addr=%s, irq=%s)" char *phy_attached_info_irq(struct phy_device *phydev) { char *irq_str; char irq_num[8]; switch(phydev->irq) { case PHY_POLL: irq_str = "POLL"; break; case PHY_MAC_INTERRUPT: irq_str = "MAC"; break; default: snprintf(irq_num, sizeof(irq_num), "%d", phydev->irq); irq_str = irq_num; break; } return kasprintf(GFP_KERNEL, "%s", irq_str); } EXPORT_SYMBOL(phy_attached_info_irq); void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) { const char *unbound = phydev->drv ? "" : "[unbound] "; char *irq_str = phy_attached_info_irq(phydev); if (!fmt) { phydev_info(phydev, ATTACHED_FMT "\n", unbound, phydev_name(phydev), irq_str); } else { va_list ap; phydev_info(phydev, ATTACHED_FMT, unbound, phydev_name(phydev), irq_str); va_start(ap, fmt); vprintk(fmt, ap); va_end(ap); } kfree(irq_str); } EXPORT_SYMBOL(phy_attached_print); static void phy_sysfs_create_links(struct phy_device *phydev) { struct net_device *dev = phydev->attached_dev; int err; if (!dev) return; err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj, "attached_dev"); if (err) return; err = sysfs_create_link_nowarn(&dev->dev.kobj, &phydev->mdio.dev.kobj, "phydev"); if (err) { dev_err(&dev->dev, "could not add device link to %s err %d\n", kobject_name(&phydev->mdio.dev.kobj), err); /* non-fatal - some net drivers can use one netdevice * with more then one phy */ } phydev->sysfs_links = true; } static ssize_t phy_standalone_show(struct device *dev, struct device_attribute *attr, char *buf) { struct phy_device *phydev = to_phy_device(dev); return sysfs_emit(buf, "%d\n", !phydev->attached_dev); } static DEVICE_ATTR_RO(phy_standalone); /** * phy_sfp_connect_phy - Connect the SFP module's PHY to the upstream PHY * @upstream: pointer to the upstream phy device * @phy: pointer to the SFP module's phy device * * This helper allows keeping track of PHY devices on the link. It adds the * SFP module's phy to the phy namespace of the upstream phy * * Return: 0 on success, otherwise a negative error code. 
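 *
 * Sketch of how a PHY driver might wire the SFP helpers below together
 * (editor's addition; the sfp_upstream_ops field names are assumed from the
 * helper descriptions that follow, so treat this as illustrative only):
 *
 *	static const struct sfp_upstream_ops example_sfp_ops = {
 *		.attach		= phy_sfp_attach,
 *		.detach		= phy_sfp_detach,
 *		.connect_phy	= phy_sfp_connect_phy,
 *		.disconnect_phy	= phy_sfp_disconnect_phy,
 *	};
 *
 *	// in the PHY driver's .probe():
 *	return phy_sfp_probe(phydev, &example_sfp_ops);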
*/ int phy_sfp_connect_phy(void *upstream, struct phy_device *phy) { struct phy_device *phydev = upstream; struct net_device *dev = phydev->attached_dev; if (dev) return phy_link_topo_add_phy(dev, phy, PHY_UPSTREAM_PHY, phydev); return 0; } EXPORT_SYMBOL(phy_sfp_connect_phy); /** * phy_sfp_disconnect_phy - Disconnect the SFP module's PHY from the upstream PHY * @upstream: pointer to the upstream phy device * @phy: pointer to the SFP module's phy device * * This helper allows keeping track of PHY devices on the link. It removes the * SFP module's phy to the phy namespace of the upstream phy. As the module phy * will be destroyed, re-inserting the same module will add a new phy with a * new index. */ void phy_sfp_disconnect_phy(void *upstream, struct phy_device *phy) { struct phy_device *phydev = upstream; struct net_device *dev = phydev->attached_dev; if (dev) phy_link_topo_del_phy(dev, phy); } EXPORT_SYMBOL(phy_sfp_disconnect_phy); /** * phy_sfp_attach - attach the SFP bus to the PHY upstream network device * @upstream: pointer to the phy device * @bus: sfp bus representing cage being attached * * This is used to fill in the sfp_upstream_ops .attach member. */ void phy_sfp_attach(void *upstream, struct sfp_bus *bus) { struct phy_device *phydev = upstream; if (phydev->attached_dev) phydev->attached_dev->sfp_bus = bus; phydev->sfp_bus_attached = true; } EXPORT_SYMBOL(phy_sfp_attach); /** * phy_sfp_detach - detach the SFP bus from the PHY upstream network device * @upstream: pointer to the phy device * @bus: sfp bus representing cage being attached * * This is used to fill in the sfp_upstream_ops .detach member. */ void phy_sfp_detach(void *upstream, struct sfp_bus *bus) { struct phy_device *phydev = upstream; if (phydev->attached_dev) phydev->attached_dev->sfp_bus = NULL; phydev->sfp_bus_attached = false; } EXPORT_SYMBOL(phy_sfp_detach); /** * phy_sfp_probe - probe for a SFP cage attached to this PHY device * @phydev: Pointer to phy_device * @ops: SFP's upstream operations */ int phy_sfp_probe(struct phy_device *phydev, const struct sfp_upstream_ops *ops) { struct sfp_bus *bus; int ret = 0; if (phydev->mdio.dev.fwnode) { bus = sfp_bus_find_fwnode(phydev->mdio.dev.fwnode); if (IS_ERR(bus)) return PTR_ERR(bus); phydev->sfp_bus = bus; ret = sfp_bus_add_upstream(bus, phydev, ops); sfp_bus_put(bus); } return ret; } EXPORT_SYMBOL(phy_sfp_probe); static bool phy_drv_supports_irq(const struct phy_driver *phydrv) { return phydrv->config_intr && phydrv->handle_interrupt; } /** * phy_attach_direct - attach a network device to a given PHY device pointer * @dev: network device to attach * @phydev: Pointer to phy_device to attach * @flags: PHY device's dev_flags * @interface: PHY device's interface * * Description: Called by drivers to attach to a particular PHY * device. The phy_device is found, and properly hooked up * to the phy_driver. If no driver is attached, then a * generic driver is used. The phy_device is given a ptr to * the attaching device, and given a callback for link status * change. The phy_device is returned to the attaching driver. * This function takes a reference on the phy device. 
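 *
 * Rough calling sequence from a MAC driver's point of view (editor's sketch;
 * error handling is trimmed, the bus_id lookup is elided, and
 * example_adjust_link refers to the hypothetical handler sketched earlier):
 *
 *	// in ndo_open(), via the phy_connect() wrapper defined above:
 *	phydev = phy_connect(ndev, bus_id, example_adjust_link,
 *			     PHY_INTERFACE_MODE_RGMII);
 *	if (IS_ERR(phydev))
 *		return PTR_ERR(phydev);
 *	phy_start(phydev);
 *
 *	// in ndo_stop():
 *	phy_stop(phydev);
 *	phy_disconnect(phydev);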
*/ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, u32 flags, phy_interface_t interface) { struct mii_bus *bus = phydev->mdio.bus; struct device *d = &phydev->mdio.dev; struct module *ndev_owner = NULL; bool using_genphy = false; int err; /* For Ethernet device drivers that register their own MDIO bus, we * will have bus->owner match ndev_mod, so we do not want to increment * our own module->refcnt here, otherwise we would not be able to * unload later on. */ if (dev) ndev_owner = dev->dev.parent->driver->owner; if (ndev_owner != bus->owner && !try_module_get(bus->owner)) { phydev_err(phydev, "failed to get the bus module\n"); return -EIO; } get_device(d); /* Assume that if there is no driver, that it doesn't * exist, and we should use the genphy driver. */ if (!d->driver) { if (phydev->is_c45) d->driver = &genphy_c45_driver.mdiodrv.driver; else d->driver = &genphy_driver.mdiodrv.driver; using_genphy = true; } if (!try_module_get(d->driver->owner)) { phydev_err(phydev, "failed to get the device driver module\n"); err = -EIO; goto error_put_device; } if (using_genphy) { err = d->driver->probe(d); if (err >= 0) err = device_bind_driver(d); if (err) goto error_module_put; } if (phydev->attached_dev) { dev_err(&dev->dev, "PHY already attached\n"); err = -EBUSY; goto error; } phydev->phy_link_change = phy_link_change; if (dev) { phydev->attached_dev = dev; dev->phydev = phydev; if (phydev->sfp_bus_attached) dev->sfp_bus = phydev->sfp_bus; err = phy_link_topo_add_phy(dev, phydev, PHY_UPSTREAM_MAC, dev); if (err) goto error; } /* Some Ethernet drivers try to connect to a PHY device before * calling register_netdevice() -> netdev_register_kobject() and * does the dev->dev.kobj initialization. Here we only check for * success which indicates that the network device kobject is * ready. Once we do that we still need to keep track of whether * links were successfully set up or not for phy_detach() to * remove them accordingly. */ phydev->sysfs_links = false; phy_sysfs_create_links(phydev); if (!phydev->attached_dev) { err = sysfs_create_file(&phydev->mdio.dev.kobj, &dev_attr_phy_standalone.attr); if (err) phydev_err(phydev, "error creating 'phy_standalone' sysfs entry\n"); } phydev->dev_flags |= flags; phydev->interface = interface; phydev->state = PHY_READY; phydev->interrupts = PHY_INTERRUPT_DISABLED; /* PHYs can request to use poll mode even though they have an * associated interrupt line. This could be the case if they * detect a broken interrupt handling. */ if (phydev->dev_flags & PHY_F_NO_IRQ) phydev->irq = PHY_POLL; if (!phy_drv_supports_irq(phydev->drv) && phy_interrupt_is_valid(phydev)) phydev->irq = PHY_POLL; /* Port is set to PORT_TP by default and the actual PHY driver will set * it to different value depending on the PHY configuration. If we have * the generic PHY driver we can't figure it out, thus set the old * legacy PORT_MII value. */ if (using_genphy) phydev->port = PORT_MII; /* Initial carrier state is off as the phy is about to be * (re)initialized. */ if (dev) netif_carrier_off(phydev->attached_dev); /* Do initial configuration here, now that * we have certain key parameters * (dev_flags and interface) */ err = phy_init_hw(phydev); if (err) goto error; phy_resume(phydev); if (!phydev->is_on_sfp_module) phy_led_triggers_register(phydev); /** * If the external phy used by current mac interface is managed by * another mac interface, so we should create a device link between * phy dev and mac dev. 
*/ if (dev && phydev->mdio.bus->parent && dev->dev.parent != phydev->mdio.bus->parent) phydev->devlink = device_link_add(dev->dev.parent, &phydev->mdio.dev, DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS); return err; error: /* phy_detach() does all of the cleanup below */ phy_detach(phydev); return err; error_module_put: module_put(d->driver->owner); d->driver = NULL; error_put_device: put_device(d); if (ndev_owner != bus->owner) module_put(bus->owner); return err; } EXPORT_SYMBOL(phy_attach_direct); /** * phy_attach - attach a network device to a particular PHY device * @dev: network device to attach * @bus_id: Bus ID of PHY device to attach * @interface: PHY device's interface * * Description: Same as phy_attach_direct() except that a PHY bus_id * string is passed instead of a pointer to a struct phy_device. */ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id, phy_interface_t interface) { struct phy_device *phydev; struct device *d; int rc; if (!dev) return ERR_PTR(-EINVAL); /* Search the list of PHY devices on the mdio bus for the * PHY with the requested name */ d = bus_find_device_by_name(&mdio_bus_type, NULL, bus_id); if (!d) { pr_err("PHY %s not found\n", bus_id); return ERR_PTR(-ENODEV); } phydev = to_phy_device(d); rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface); put_device(d); if (rc) return ERR_PTR(rc); return phydev; } EXPORT_SYMBOL(phy_attach); static bool phy_driver_is_genphy_kind(struct phy_device *phydev, struct device_driver *driver) { struct device *d = &phydev->mdio.dev; bool ret = false; if (!phydev->drv) return ret; get_device(d); ret = d->driver == driver; put_device(d); return ret; } bool phy_driver_is_genphy(struct phy_device *phydev) { return phy_driver_is_genphy_kind(phydev, &genphy_driver.mdiodrv.driver); } EXPORT_SYMBOL_GPL(phy_driver_is_genphy); bool phy_driver_is_genphy_10g(struct phy_device *phydev) { return phy_driver_is_genphy_kind(phydev, &genphy_c45_driver.mdiodrv.driver); } EXPORT_SYMBOL_GPL(phy_driver_is_genphy_10g); /** * phy_package_join - join a common PHY group * @phydev: target phy_device struct * @base_addr: cookie and base PHY address of PHY package for offset * calculation of global register access * @priv_size: if non-zero allocate this amount of bytes for private data * * This joins a PHY group and provides a shared storage for all phydevs in * this group. This is intended to be used for packages which contain * more than one PHY, for example a quad PHY transceiver. * * The base_addr parameter serves as cookie which has to have the same values * for all members of one group and as the base PHY address of the PHY package * for offset calculation to access generic registers of a PHY package. * Usually, one of the PHY addresses of the different PHYs in the package * provides access to these global registers. * The address which is given here, will be used in the phy_package_read() * and phy_package_write() convenience functions as base and added to the * passed offset in those functions. * * This will set the shared pointer of the phydev to the shared storage. * If this is the first call for a this cookie the shared storage will be * allocated. If priv_size is non-zero, the given amount of bytes are * allocated for the priv member. * * Returns < 1 on error, 0 on success. Esp. calling phy_package_join() * with the same cookie but a different priv_size is an error. 
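 *
 * Sketch (editor's addition) of how a quad-PHY driver probe might use the
 * managed variant declared later in this file; the base address cookie and
 * private struct are hypothetical:
 *
 *	struct example_quad_priv { int shared_state; };
 *
 *	static int example_quad_probe(struct phy_device *phydev)
 *	{
 *		// base_addr 0 is the cookie shared by all PHYs in the package
 *		return devm_phy_package_join(&phydev->mdio.dev, phydev, 0,
 *					     sizeof(struct example_quad_priv));
 *	}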
*/ int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size) { struct mii_bus *bus = phydev->mdio.bus; struct phy_package_shared *shared; int ret; if (base_addr < 0 || base_addr >= PHY_MAX_ADDR) return -EINVAL; mutex_lock(&bus->shared_lock); shared = bus->shared[base_addr]; if (!shared) { ret = -ENOMEM; shared = kzalloc(sizeof(*shared), GFP_KERNEL); if (!shared) goto err_unlock; if (priv_size) { shared->priv = kzalloc(priv_size, GFP_KERNEL); if (!shared->priv) goto err_free; shared->priv_size = priv_size; } shared->base_addr = base_addr; shared->np = NULL; refcount_set(&shared->refcnt, 1); bus->shared[base_addr] = shared; } else { ret = -EINVAL; if (priv_size && priv_size != shared->priv_size) goto err_unlock; refcount_inc(&shared->refcnt); } mutex_unlock(&bus->shared_lock); phydev->shared = shared; return 0; err_free: kfree(shared); err_unlock: mutex_unlock(&bus->shared_lock); return ret; } EXPORT_SYMBOL_GPL(phy_package_join); /** * of_phy_package_join - join a common PHY group in PHY package * @phydev: target phy_device struct * @priv_size: if non-zero allocate this amount of bytes for private data * * This is a variant of phy_package_join for PHY package defined in DT. * * The parent node of the @phydev is checked as a valid PHY package node * structure (by matching the node name "ethernet-phy-package") and the * base_addr for the PHY package is passed to phy_package_join. * * With this configuration the shared struct will also have the np value * filled to use additional DT defined properties in PHY specific * probe_once and config_init_once PHY package OPs. * * Returns < 0 on error, 0 on success. Esp. calling phy_package_join() * with the same cookie but a different priv_size is an error. Or a parent * node is not detected or is not valid or doesn't match the expected node * name for PHY package. */ int of_phy_package_join(struct phy_device *phydev, size_t priv_size) { struct device_node *node = phydev->mdio.dev.of_node; struct device_node *package_node; u32 base_addr; int ret; if (!node) return -EINVAL; package_node = of_get_parent(node); if (!package_node) return -EINVAL; if (!of_node_name_eq(package_node, "ethernet-phy-package")) { ret = -EINVAL; goto exit; } if (of_property_read_u32(package_node, "reg", &base_addr)) { ret = -EINVAL; goto exit; } ret = phy_package_join(phydev, base_addr, priv_size); if (ret) goto exit; phydev->shared->np = package_node; return 0; exit: of_node_put(package_node); return ret; } EXPORT_SYMBOL_GPL(of_phy_package_join); /** * phy_package_leave - leave a common PHY group * @phydev: target phy_device struct * * This leaves a PHY group created by phy_package_join(). If this phydev * was the last user of the shared data between the group, this data is * freed. Resets the phydev->shared pointer to NULL. 
*/ void phy_package_leave(struct phy_device *phydev) { struct phy_package_shared *shared = phydev->shared; struct mii_bus *bus = phydev->mdio.bus; if (!shared) return; /* Decrease the node refcount on leave if present */ if (shared->np) of_node_put(shared->np); if (refcount_dec_and_mutex_lock(&shared->refcnt, &bus->shared_lock)) { bus->shared[shared->base_addr] = NULL; mutex_unlock(&bus->shared_lock); kfree(shared->priv); kfree(shared); } phydev->shared = NULL; } EXPORT_SYMBOL_GPL(phy_package_leave); static void devm_phy_package_leave(struct device *dev, void *res) { phy_package_leave(*(struct phy_device **)res); } /** * devm_phy_package_join - resource managed phy_package_join() * @dev: device that is registering this PHY package * @phydev: target phy_device struct * @base_addr: cookie and base PHY address of PHY package for offset * calculation of global register access * @priv_size: if non-zero allocate this amount of bytes for private data * * Managed phy_package_join(). Shared storage fetched by this function, * phy_package_leave() is automatically called on driver detach. See * phy_package_join() for more information. */ int devm_phy_package_join(struct device *dev, struct phy_device *phydev, int base_addr, size_t priv_size) { struct phy_device **ptr; int ret; ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr), GFP_KERNEL); if (!ptr) return -ENOMEM; ret = phy_package_join(phydev, base_addr, priv_size); if (!ret) { *ptr = phydev; devres_add(dev, ptr); } else { devres_free(ptr); } return ret; } EXPORT_SYMBOL_GPL(devm_phy_package_join); /** * devm_of_phy_package_join - resource managed of_phy_package_join() * @dev: device that is registering this PHY package * @phydev: target phy_device struct * @priv_size: if non-zero allocate this amount of bytes for private data * * Managed of_phy_package_join(). Shared storage fetched by this function, * phy_package_leave() is automatically called on driver detach. See * of_phy_package_join() for more information. */ int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev, size_t priv_size) { struct phy_device **ptr; int ret; ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr), GFP_KERNEL); if (!ptr) return -ENOMEM; ret = of_phy_package_join(phydev, priv_size); if (!ret) { *ptr = phydev; devres_add(dev, ptr); } else { devres_free(ptr); } return ret; } EXPORT_SYMBOL_GPL(devm_of_phy_package_join); /** * phy_detach - detach a PHY device from its network device * @phydev: target phy_device struct * * This detaches the phy device from its network device and the phy * driver, and drops the reference count taken in phy_attach_direct(). 
*/ void phy_detach(struct phy_device *phydev) { struct net_device *dev = phydev->attached_dev; struct module *ndev_owner = NULL; struct mii_bus *bus; if (phydev->devlink) device_link_del(phydev->devlink); if (phydev->sysfs_links) { if (dev) sysfs_remove_link(&dev->dev.kobj, "phydev"); sysfs_remove_link(&phydev->mdio.dev.kobj, "attached_dev"); } if (!phydev->attached_dev) sysfs_remove_file(&phydev->mdio.dev.kobj, &dev_attr_phy_standalone.attr); phy_suspend(phydev); if (dev) { struct hwtstamp_provider *hwprov; hwprov = rtnl_dereference(dev->hwprov); /* Disable timestamp if it is the one selected */ if (hwprov && hwprov->phydev == phydev) { rcu_assign_pointer(dev->hwprov, NULL); kfree_rcu(hwprov, rcu_head); } phydev->attached_dev->phydev = NULL; phydev->attached_dev = NULL; phy_link_topo_del_phy(dev, phydev); } phydev->phylink = NULL; if (!phydev->is_on_sfp_module) phy_led_triggers_unregister(phydev); if (phydev->mdio.dev.driver) module_put(phydev->mdio.dev.driver->owner); /* If the device had no specific driver before (i.e. - it * was using the generic driver), we unbind the device * from the generic driver so that there's a chance a * real driver could be loaded */ if (phy_driver_is_genphy(phydev) || phy_driver_is_genphy_10g(phydev)) device_release_driver(&phydev->mdio.dev); /* Assert the reset signal */ phy_device_reset(phydev, 1); /* * The phydev might go away on the put_device() below, so avoid * a use-after-free bug by reading the underlying bus first. */ bus = phydev->mdio.bus; put_device(&phydev->mdio.dev); if (dev) ndev_owner = dev->dev.parent->driver->owner; if (ndev_owner != bus->owner) module_put(bus->owner); } EXPORT_SYMBOL(phy_detach); int phy_suspend(struct phy_device *phydev) { struct net_device *netdev = phydev->attached_dev; const struct phy_driver *phydrv = phydev->drv; int ret; if (phydev->suspended || !phydrv) return 0; phydev->wol_enabled = phy_drv_wol_enabled(phydev) || (netdev && netdev->ethtool->wol_enabled); /* If the device has WOL enabled, we cannot suspend the PHY */ if (phydev->wol_enabled && !(phydrv->flags & PHY_ALWAYS_CALL_SUSPEND)) return -EBUSY; if (!phydrv->suspend) return 0; ret = phydrv->suspend(phydev); if (!ret) phydev->suspended = true; return ret; } EXPORT_SYMBOL(phy_suspend); int __phy_resume(struct phy_device *phydev) { const struct phy_driver *phydrv = phydev->drv; int ret; lockdep_assert_held(&phydev->lock); if (!phydrv || !phydrv->resume) return 0; ret = phydrv->resume(phydev); if (!ret) phydev->suspended = false; return ret; } EXPORT_SYMBOL(__phy_resume); int phy_resume(struct phy_device *phydev) { int ret; mutex_lock(&phydev->lock); ret = __phy_resume(phydev); mutex_unlock(&phydev->lock); return ret; } EXPORT_SYMBOL(phy_resume); int phy_loopback(struct phy_device *phydev, bool enable) { int ret = 0; if (!phydev->drv) return -EIO; mutex_lock(&phydev->lock); if (enable && phydev->loopback_enabled) { ret = -EBUSY; goto out; } if (!enable && !phydev->loopback_enabled) { ret = -EINVAL; goto out; } if (phydev->drv->set_loopback) ret = phydev->drv->set_loopback(phydev, enable); else ret = genphy_loopback(phydev, enable); if (ret) goto out; phydev->loopback_enabled = enable; out: mutex_unlock(&phydev->lock); return ret; } EXPORT_SYMBOL(phy_loopback); /** * phy_reset_after_clk_enable - perform a PHY reset if needed * @phydev: target phy_device struct * * Description: Some PHYs are known to need a reset after their refclk was * enabled. This function evaluates the flags and perform the reset if it's * needed. 
Returns < 0 on error, 0 if the phy wasn't reset and 1 if the phy * was reset. */ int phy_reset_after_clk_enable(struct phy_device *phydev) { if (!phydev || !phydev->drv) return -ENODEV; if (phydev->drv->flags & PHY_RST_AFTER_CLK_EN) { phy_device_reset(phydev, 1); phy_device_reset(phydev, 0); return 1; } return 0; } EXPORT_SYMBOL(phy_reset_after_clk_enable); /* Generic PHY support and helper functions */ /** * genphy_config_advert - sanitize and advertise auto-negotiation parameters * @phydev: target phy_device struct * @advert: auto-negotiation parameters to advertise * * Description: Writes MII_ADVERTISE with the appropriate values, * after sanitizing the values to make sure we only advertise * what is supported. Returns < 0 on error, 0 if the PHY's advertisement * hasn't changed, and > 0 if it has changed. */ static int genphy_config_advert(struct phy_device *phydev, const unsigned long *advert) { int err, bmsr, changed = 0; u32 adv; adv = linkmode_adv_to_mii_adv_t(advert); /* Setup standard advertisement */ err = phy_modify_changed(phydev, MII_ADVERTISE, ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM, adv); if (err < 0) return err; if (err > 0) changed = 1; bmsr = phy_read(phydev, MII_BMSR); if (bmsr < 0) return bmsr; /* Per 802.3-2008, Section 22.2.4.2.16 Extended status all * 1000Mbits/sec capable PHYs shall have the BMSR_ESTATEN bit set to a * logical 1. */ if (!(bmsr & BMSR_ESTATEN)) return changed; adv = linkmode_adv_to_mii_ctrl1000_t(advert); err = phy_modify_changed(phydev, MII_CTRL1000, ADVERTISE_1000FULL | ADVERTISE_1000HALF, adv); if (err < 0) return err; if (err > 0) changed = 1; return changed; } /** * genphy_c37_config_advert - sanitize and advertise auto-negotiation parameters * @phydev: target phy_device struct * * Description: Writes MII_ADVERTISE with the appropriate values, * after sanitizing the values to make sure we only advertise * what is supported. Returns < 0 on error, 0 if the PHY's advertisement * hasn't changed, and > 0 if it has changed. This function is intended * for Clause 37 1000Base-X mode. */ static int genphy_c37_config_advert(struct phy_device *phydev) { u16 adv = 0; /* Only allow advertising what this PHY supports */ linkmode_and(phydev->advertising, phydev->advertising, phydev->supported); if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, phydev->advertising)) adv |= ADVERTISE_1000XFULL; if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising)) adv |= ADVERTISE_1000XPAUSE; if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising)) adv |= ADVERTISE_1000XPSE_ASYM; return phy_modify_changed(phydev, MII_ADVERTISE, ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | ADVERTISE_1000XHALF | ADVERTISE_1000XPSE_ASYM, adv); } /** * genphy_setup_forced - configures/forces speed/duplex from @phydev * @phydev: target phy_device struct * * Description: Configures MII_BMCR to force speed/duplex * to the values in phydev. Assumes that the values are valid. * Please see phy_sanitize_settings(). 
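 *
 * Illustrative use when autonegotiation is disabled (the chosen
 * speed/duplex values below are only an example):
 *
 *	phydev->speed = SPEED_100;
 *	phydev->duplex = DUPLEX_FULL;
 *	err = genphy_setup_forced(phydev);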
*/ int genphy_setup_forced(struct phy_device *phydev) { u16 ctl; phydev->pause = 0; phydev->asym_pause = 0; ctl = mii_bmcr_encode_fixed(phydev->speed, phydev->duplex); return phy_modify(phydev, MII_BMCR, ~(BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN), ctl); } EXPORT_SYMBOL(genphy_setup_forced); static int genphy_setup_master_slave(struct phy_device *phydev) { u16 ctl = 0; if (!phydev->is_gigabit_capable) return 0; switch (phydev->master_slave_set) { case MASTER_SLAVE_CFG_MASTER_PREFERRED: ctl |= CTL1000_PREFER_MASTER; break; case MASTER_SLAVE_CFG_SLAVE_PREFERRED: break; case MASTER_SLAVE_CFG_MASTER_FORCE: ctl |= CTL1000_AS_MASTER; fallthrough; case MASTER_SLAVE_CFG_SLAVE_FORCE: ctl |= CTL1000_ENABLE_MASTER; break; case MASTER_SLAVE_CFG_UNKNOWN: case MASTER_SLAVE_CFG_UNSUPPORTED: return 0; default: phydev_warn(phydev, "Unsupported Master/Slave mode\n"); return -EOPNOTSUPP; } return phy_modify_changed(phydev, MII_CTRL1000, (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER | CTL1000_PREFER_MASTER), ctl); } int genphy_read_master_slave(struct phy_device *phydev) { int cfg, state; int val; phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN; phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN; val = phy_read(phydev, MII_CTRL1000); if (val < 0) return val; if (val & CTL1000_ENABLE_MASTER) { if (val & CTL1000_AS_MASTER) cfg = MASTER_SLAVE_CFG_MASTER_FORCE; else cfg = MASTER_SLAVE_CFG_SLAVE_FORCE; } else { if (val & CTL1000_PREFER_MASTER) cfg = MASTER_SLAVE_CFG_MASTER_PREFERRED; else cfg = MASTER_SLAVE_CFG_SLAVE_PREFERRED; } val = phy_read(phydev, MII_STAT1000); if (val < 0) return val; if (val & LPA_1000MSFAIL) { state = MASTER_SLAVE_STATE_ERR; } else if (phydev->link) { /* this bits are valid only for active link */ if (val & LPA_1000MSRES) state = MASTER_SLAVE_STATE_MASTER; else state = MASTER_SLAVE_STATE_SLAVE; } else { state = MASTER_SLAVE_STATE_UNKNOWN; } phydev->master_slave_get = cfg; phydev->master_slave_state = state; return 0; } EXPORT_SYMBOL(genphy_read_master_slave); /** * genphy_restart_aneg - Enable and Restart Autonegotiation * @phydev: target phy_device struct */ int genphy_restart_aneg(struct phy_device *phydev) { /* Don't isolate the PHY if we're negotiating */ return phy_modify(phydev, MII_BMCR, BMCR_ISOLATE, BMCR_ANENABLE | BMCR_ANRESTART); } EXPORT_SYMBOL(genphy_restart_aneg); /** * genphy_check_and_restart_aneg - Enable and restart auto-negotiation * @phydev: target phy_device struct * @restart: whether aneg restart is requested * * Check, and restart auto-negotiation if needed. */ int genphy_check_and_restart_aneg(struct phy_device *phydev, bool restart) { int ret; if (!restart) { /* Advertisement hasn't changed, but maybe aneg was never on to * begin with? Or maybe phy was isolated? */ ret = phy_read(phydev, MII_BMCR); if (ret < 0) return ret; if (!(ret & BMCR_ANENABLE) || (ret & BMCR_ISOLATE)) restart = true; } if (restart) return genphy_restart_aneg(phydev); return 0; } EXPORT_SYMBOL(genphy_check_and_restart_aneg); /** * __genphy_config_aneg - restart auto-negotiation or write BMCR * @phydev: target phy_device struct * @changed: whether autoneg is requested * * Description: If auto-negotiation is enabled, we configure the * advertising, and then restart auto-negotiation. If it is not * enabled, then we write the BMCR. 
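 *
 * Sketch of a vendor .config_aneg() layered on this helper; the vendor
 * register (0x10) and bit below are hypothetical:
 *
 *	static int example_config_aneg(struct phy_device *phydev)
 *	{
 *		int ret = phy_modify_changed(phydev, 0x10, BIT(2), BIT(2));
 *
 *		if (ret < 0)
 *			return ret;
 *
 *		return __genphy_config_aneg(phydev, ret > 0);
 *	}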
*/ int __genphy_config_aneg(struct phy_device *phydev, bool changed) { __ETHTOOL_DECLARE_LINK_MODE_MASK(fixed_advert); const struct phy_setting *set; unsigned long *advert; int err; err = genphy_c45_an_config_eee_aneg(phydev); if (err < 0) return err; else if (err) changed = true; err = genphy_setup_master_slave(phydev); if (err < 0) return err; else if (err) changed = true; if (phydev->autoneg == AUTONEG_ENABLE) { /* Only allow advertising what this PHY supports */ linkmode_and(phydev->advertising, phydev->advertising, phydev->supported); advert = phydev->advertising; } else if (phydev->speed < SPEED_1000) { return genphy_setup_forced(phydev); } else { linkmode_zero(fixed_advert); set = phy_lookup_setting(phydev->speed, phydev->duplex, phydev->supported, true); if (set) linkmode_set_bit(set->bit, fixed_advert); advert = fixed_advert; } err = genphy_config_advert(phydev, advert); if (err < 0) /* error */ return err; else if (err) changed = true; return genphy_check_and_restart_aneg(phydev, changed); } EXPORT_SYMBOL(__genphy_config_aneg); /** * genphy_c37_config_aneg - restart auto-negotiation or write BMCR * @phydev: target phy_device struct * * Description: If auto-negotiation is enabled, we configure the * advertising, and then restart auto-negotiation. If it is not * enabled, then we write the BMCR. This function is intended * for use with Clause 37 1000Base-X mode. */ int genphy_c37_config_aneg(struct phy_device *phydev) { int err, changed; if (phydev->autoneg != AUTONEG_ENABLE) return genphy_setup_forced(phydev); err = phy_modify(phydev, MII_BMCR, BMCR_SPEED1000 | BMCR_SPEED100, BMCR_SPEED1000); if (err) return err; changed = genphy_c37_config_advert(phydev); if (changed < 0) /* error */ return changed; if (!changed) { /* Advertisement hasn't changed, but maybe aneg was never on to * begin with? Or maybe phy was isolated? */ int ctl = phy_read(phydev, MII_BMCR); if (ctl < 0) return ctl; if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE)) changed = 1; /* do restart aneg */ } /* Only restart aneg if we are advertising something different * than we were before. */ if (changed > 0) return genphy_restart_aneg(phydev); return 0; } EXPORT_SYMBOL(genphy_c37_config_aneg); /** * genphy_aneg_done - return auto-negotiation status * @phydev: target phy_device struct * * Description: Reads the status register and returns 0 either if * auto-negotiation is incomplete, or if there was an error. * Returns BMSR_ANEGCOMPLETE if auto-negotiation is done. */ int genphy_aneg_done(struct phy_device *phydev) { int retval = phy_read(phydev, MII_BMSR); return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE); } EXPORT_SYMBOL(genphy_aneg_done); /** * genphy_update_link - update link status in @phydev * @phydev: target phy_device struct * * Description: Update the value in phydev->link to reflect the * current link value. In order to do this, we need to read * the status register twice, keeping the second value. */ int genphy_update_link(struct phy_device *phydev) { int status = 0, bmcr; bmcr = phy_read(phydev, MII_BMCR); if (bmcr < 0) return bmcr; /* Autoneg is being started, therefore disregard BMSR value and * report link as down. */ if (bmcr & BMCR_ANRESTART) goto done; /* The link state is latched low so that momentary link * drops can be detected. Do not double-read the status * in polling mode to detect such short link drops except * the link was already down. 
*/ if (!phy_polling_mode(phydev) || !phydev->link) { status = phy_read(phydev, MII_BMSR); if (status < 0) return status; else if (status & BMSR_LSTATUS) goto done; } /* Read link and autonegotiation status */ status = phy_read(phydev, MII_BMSR); if (status < 0) return status; done: phydev->link = status & BMSR_LSTATUS ? 1 : 0; phydev->autoneg_complete = status & BMSR_ANEGCOMPLETE ? 1 : 0; /* Consider the case that autoneg was started and "aneg complete" * bit has been reset, but "link up" bit not yet. */ if (phydev->autoneg == AUTONEG_ENABLE && !phydev->autoneg_complete) phydev->link = 0; return 0; } EXPORT_SYMBOL(genphy_update_link); int genphy_read_lpa(struct phy_device *phydev) { int lpa, lpagb; if (phydev->autoneg == AUTONEG_ENABLE) { if (!phydev->autoneg_complete) { mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, 0); mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, 0); return 0; } if (phydev->is_gigabit_capable) { lpagb = phy_read(phydev, MII_STAT1000); if (lpagb < 0) return lpagb; if (lpagb & LPA_1000MSFAIL) { int adv = phy_read(phydev, MII_CTRL1000); if (adv < 0) return adv; if (adv & CTL1000_ENABLE_MASTER) phydev_err(phydev, "Master/Slave resolution failed, maybe conflicting manual settings?\n"); else phydev_err(phydev, "Master/Slave resolution failed\n"); return -ENOLINK; } mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, lpagb); } lpa = phy_read(phydev, MII_LPA); if (lpa < 0) return lpa; mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa); } else { linkmode_zero(phydev->lp_advertising); } return 0; } EXPORT_SYMBOL(genphy_read_lpa); /** * genphy_read_status_fixed - read the link parameters for !aneg mode * @phydev: target phy_device struct * * Read the current duplex and speed state for a PHY operating with * autonegotiation disabled. */ int genphy_read_status_fixed(struct phy_device *phydev) { int bmcr = phy_read(phydev, MII_BMCR); if (bmcr < 0) return bmcr; if (bmcr & BMCR_FULLDPLX) phydev->duplex = DUPLEX_FULL; else phydev->duplex = DUPLEX_HALF; if (bmcr & BMCR_SPEED1000) phydev->speed = SPEED_1000; else if (bmcr & BMCR_SPEED100) phydev->speed = SPEED_100; else phydev->speed = SPEED_10; return 0; } EXPORT_SYMBOL(genphy_read_status_fixed); /** * genphy_read_status - check the link status and update current link state * @phydev: target phy_device struct * * Description: Check the link, then figure out the current state * by comparing what we advertise with what the link partner * advertises. Start by checking the gigabit possibilities, * then move on to 10/100. 
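 *
 * Drivers that need extra, vendor-specific state typically wrap this
 * helper; a sketch (the refinement step is only indicated by a comment):
 *
 *	static int example_read_status(struct phy_device *phydev)
 *	{
 *		int err = genphy_read_status(phydev);
 *
 *		if (err < 0)
 *			return err;
 *
 *		// hypothetical: read a vendor register to refine phydev->speed
 *		return 0;
 *	}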
*/ int genphy_read_status(struct phy_device *phydev) { int err, old_link = phydev->link; /* Update the link, but return if there was an error */ err = genphy_update_link(phydev); if (err) return err; /* why bother the PHY if nothing can have changed */ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link) return 0; phydev->master_slave_get = MASTER_SLAVE_CFG_UNSUPPORTED; phydev->master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED; phydev->speed = SPEED_UNKNOWN; phydev->duplex = DUPLEX_UNKNOWN; phydev->pause = 0; phydev->asym_pause = 0; if (phydev->is_gigabit_capable) { err = genphy_read_master_slave(phydev); if (err < 0) return err; } err = genphy_read_lpa(phydev); if (err < 0) return err; if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) { phy_resolve_aneg_linkmode(phydev); } else if (phydev->autoneg == AUTONEG_DISABLE) { err = genphy_read_status_fixed(phydev); if (err < 0) return err; } return 0; } EXPORT_SYMBOL(genphy_read_status); /** * genphy_c37_read_status - check the link status and update current link state * @phydev: target phy_device struct * @changed: pointer where to store if link changed * * Description: Check the link, then figure out the current state * by comparing what we advertise with what the link partner * advertises. This function is for Clause 37 1000Base-X mode. * * If link has changed, @changed is set to true, false otherwise. */ int genphy_c37_read_status(struct phy_device *phydev, bool *changed) { int lpa, err, old_link = phydev->link; /* Update the link, but return if there was an error */ err = genphy_update_link(phydev); if (err) return err; /* why bother the PHY if nothing can have changed */ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link) { *changed = false; return 0; } /* Signal link has changed */ *changed = true; phydev->duplex = DUPLEX_UNKNOWN; phydev->pause = 0; phydev->asym_pause = 0; if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) { lpa = phy_read(phydev, MII_LPA); if (lpa < 0) return lpa; linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->lp_advertising, lpa & LPA_LPACK); linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, phydev->lp_advertising, lpa & LPA_1000XFULL); linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->lp_advertising, lpa & LPA_1000XPAUSE); linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->lp_advertising, lpa & LPA_1000XPAUSE_ASYM); phy_resolve_aneg_linkmode(phydev); } else if (phydev->autoneg == AUTONEG_DISABLE) { int bmcr = phy_read(phydev, MII_BMCR); if (bmcr < 0) return bmcr; if (bmcr & BMCR_FULLDPLX) phydev->duplex = DUPLEX_FULL; else phydev->duplex = DUPLEX_HALF; } return 0; } EXPORT_SYMBOL(genphy_c37_read_status); /** * genphy_soft_reset - software reset the PHY via BMCR_RESET bit * @phydev: target phy_device struct * * Description: Perform a software PHY reset using the standard * BMCR_RESET bit and poll for the reset bit to be cleared. * * Returns: 0 on success, < 0 on failure */ int genphy_soft_reset(struct phy_device *phydev) { u16 res = BMCR_RESET; int ret; if (phydev->autoneg == AUTONEG_ENABLE) res |= BMCR_ANRESTART; ret = phy_modify(phydev, MII_BMCR, BMCR_ISOLATE, res); if (ret < 0) return ret; /* Clause 22 states that setting bit BMCR_RESET sets control registers * to their default value. Therefore the POWER DOWN bit is supposed to * be cleared after soft reset. 
*/ phydev->suspended = 0; ret = phy_poll_reset(phydev); if (ret) return ret; /* BMCR may be reset to defaults */ if (phydev->autoneg == AUTONEG_DISABLE) ret = genphy_setup_forced(phydev); return ret; } EXPORT_SYMBOL(genphy_soft_reset); irqreturn_t genphy_handle_interrupt_no_ack(struct phy_device *phydev) { /* It seems there are cases where the interrupts are handled by another * entity (ie an IRQ controller embedded inside the PHY) and do not * need any other interraction from phylib. In this case, just trigger * the state machine directly. */ phy_trigger_machine(phydev); return 0; } EXPORT_SYMBOL(genphy_handle_interrupt_no_ack); /** * genphy_read_abilities - read PHY abilities from Clause 22 registers * @phydev: target phy_device struct * * Description: Reads the PHY's abilities and populates * phydev->supported accordingly. * * Returns: 0 on success, < 0 on failure */ int genphy_read_abilities(struct phy_device *phydev) { int val; linkmode_set_bit_array(phy_basic_ports_array, ARRAY_SIZE(phy_basic_ports_array), phydev->supported); val = phy_read(phydev, MII_BMSR); if (val < 0) return val; linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported, val & BMSR_ANEGCAPABLE); linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, phydev->supported, val & BMSR_100FULL); linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, phydev->supported, val & BMSR_100HALF); linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, phydev->supported, val & BMSR_10FULL); linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, phydev->supported, val & BMSR_10HALF); if (val & BMSR_ESTATEN) { val = phy_read(phydev, MII_ESTATUS); if (val < 0) return val; linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, phydev->supported, val & ESTATUS_1000_TFULL); linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, phydev->supported, val & ESTATUS_1000_THALF); linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, phydev->supported, val & ESTATUS_1000_XFULL); } /* This is optional functionality. If not supported, we may get an error * which should be ignored. */ genphy_c45_read_eee_abilities(phydev); return 0; } EXPORT_SYMBOL(genphy_read_abilities); /* This is used for the phy device which doesn't support the MMD extended * register access, but it does have side effect when we are trying to access * the MMD register via indirect method. 
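 *
 * Such PHYs typically just wire these stubs into their phy_driver ops,
 * e.g. (sketch):
 *
 *	.read_mmd	= genphy_read_mmd_unsupported,
 *	.write_mmd	= genphy_write_mmd_unsupported,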
*/ int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad, u16 regnum) { return -EOPNOTSUPP; } EXPORT_SYMBOL(genphy_read_mmd_unsupported); int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum, u16 regnum, u16 val) { return -EOPNOTSUPP; } EXPORT_SYMBOL(genphy_write_mmd_unsupported); int genphy_suspend(struct phy_device *phydev) { return phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN); } EXPORT_SYMBOL(genphy_suspend); int genphy_resume(struct phy_device *phydev) { return phy_clear_bits(phydev, MII_BMCR, BMCR_PDOWN); } EXPORT_SYMBOL(genphy_resume); int genphy_loopback(struct phy_device *phydev, bool enable) { if (enable) { u16 ctl = BMCR_LOOPBACK; int ret, val; ctl |= mii_bmcr_encode_fixed(phydev->speed, phydev->duplex); phy_modify(phydev, MII_BMCR, ~0, ctl); ret = phy_read_poll_timeout(phydev, MII_BMSR, val, val & BMSR_LSTATUS, 5000, 500000, true); if (ret) return ret; } else { phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, 0); phy_config_aneg(phydev); } return 0; } EXPORT_SYMBOL(genphy_loopback); /** * phy_remove_link_mode - Remove a supported link mode * @phydev: phy_device structure to remove link mode from * @link_mode: Link mode to be removed * * Description: Some MACs don't support all link modes which the PHY * does. e.g. a 1G MAC often does not support 1000Half. Add a helper * to remove a link mode. */ void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode) { linkmode_clear_bit(link_mode, phydev->supported); phy_advertise_supported(phydev); } EXPORT_SYMBOL(phy_remove_link_mode); static void phy_copy_pause_bits(unsigned long *dst, unsigned long *src) { linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, dst, linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, src)); linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, dst, linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, src)); } /** * phy_advertise_supported - Advertise all supported modes * @phydev: target phy_device struct * * Description: Called to advertise all supported modes, doesn't touch * pause mode advertising. */ void phy_advertise_supported(struct phy_device *phydev) { __ETHTOOL_DECLARE_LINK_MODE_MASK(new); linkmode_copy(new, phydev->supported); phy_copy_pause_bits(new, phydev->advertising); linkmode_copy(phydev->advertising, new); } EXPORT_SYMBOL(phy_advertise_supported); /** * phy_advertise_eee_all - Advertise all supported EEE modes * @phydev: target phy_device struct * * Description: Per default phylib preserves the EEE advertising at the time of * phy probing, which might be a subset of the supported EEE modes. Use this * function when all supported EEE modes should be advertised. This does not * trigger auto-negotiation, so must be called before phy_start()/ * phylink_start() which will start auto-negotiation. */ void phy_advertise_eee_all(struct phy_device *phydev) { linkmode_copy(phydev->advertising_eee, phydev->supported_eee); } EXPORT_SYMBOL_GPL(phy_advertise_eee_all); /** * phy_support_eee - Set initial EEE policy configuration * @phydev: Target phy_device struct * * This function configures the initial policy for Energy Efficient Ethernet * (EEE) on the specified PHY device, influencing that EEE capabilities are * advertised before the link is established. It should be called during PHY * registration by the MAC driver and/or the PHY driver (for SmartEEE PHYs) * if MAC supports LPI or PHY is capable to compensate missing LPI functionality * of the MAC. 
* * The function sets default EEE policy parameters, including preparing the PHY * to advertise EEE capabilities based on hardware support. * * It also sets the expected configuration for Low Power Idle (LPI) in the MAC * driver. If the PHY framework determines that both local and remote * advertisements support EEE, and the negotiated link mode is compatible with * EEE, it will set enable_tx_lpi = true. The MAC driver is expected to act on * this setting by enabling the LPI timer if enable_tx_lpi is set. */ void phy_support_eee(struct phy_device *phydev) { linkmode_copy(phydev->advertising_eee, phydev->supported_eee); phydev->eee_cfg.tx_lpi_enabled = true; phydev->eee_cfg.eee_enabled = true; } EXPORT_SYMBOL(phy_support_eee); /** * phy_disable_eee - Disable EEE for the PHY * @phydev: Target phy_device struct * * This function is used by MAC drivers for MAC's which don't support EEE. * It disables EEE on the PHY layer. */ void phy_disable_eee(struct phy_device *phydev) { linkmode_zero(phydev->advertising_eee); phydev->eee_cfg.tx_lpi_enabled = false; phydev->eee_cfg.eee_enabled = false; /* don't let userspace re-enable EEE advertisement */ linkmode_fill(phydev->eee_disabled_modes); } EXPORT_SYMBOL_GPL(phy_disable_eee); /** * phy_support_sym_pause - Enable support of symmetrical pause * @phydev: target phy_device struct * * Description: Called by the MAC to indicate is supports symmetrical * Pause, but not asym pause. */ void phy_support_sym_pause(struct phy_device *phydev) { linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported); phy_copy_pause_bits(phydev->advertising, phydev->supported); } EXPORT_SYMBOL(phy_support_sym_pause); /** * phy_support_asym_pause - Enable support of asym pause * @phydev: target phy_device struct * * Description: Called by the MAC to indicate is supports Asym Pause. */ void phy_support_asym_pause(struct phy_device *phydev) { phy_copy_pause_bits(phydev->advertising, phydev->supported); } EXPORT_SYMBOL(phy_support_asym_pause); /** * phy_set_sym_pause - Configure symmetric Pause * @phydev: target phy_device struct * @rx: Receiver Pause is supported * @tx: Transmit Pause is supported * @autoneg: Auto neg should be used * * Description: Configure advertised Pause support depending on if * receiver pause and pause auto neg is supported. Generally called * from the set_pauseparam .ndo. */ void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx, bool autoneg) { linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported); if (rx && tx && autoneg) linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported); linkmode_copy(phydev->advertising, phydev->supported); } EXPORT_SYMBOL(phy_set_sym_pause); /** * phy_set_asym_pause - Configure Pause and Asym Pause * @phydev: target phy_device struct * @rx: Receiver Pause is supported * @tx: Transmit Pause is supported * * Description: Configure advertised Pause support depending on if * transmit and receiver pause is supported. If there has been a * change in adverting, trigger a new autoneg. Generally called from * the set_pauseparam .ndo. 
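 *
 * Sketch of use from a MAC driver's .set_pauseparam (error handling
 * trimmed; @pp is the ethtool_pauseparam passed in by ethtool):
 *
 *	if (!phy_validate_pause(phydev, pp))
 *		return -EINVAL;
 *	phy_set_asym_pause(phydev, pp->rx_pause, pp->tx_pause);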
*/ void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx) { __ETHTOOL_DECLARE_LINK_MODE_MASK(oldadv); linkmode_copy(oldadv, phydev->advertising); linkmode_set_pause(phydev->advertising, tx, rx); if (!linkmode_equal(oldadv, phydev->advertising) && phydev->autoneg) phy_start_aneg(phydev); } EXPORT_SYMBOL(phy_set_asym_pause); /** * phy_validate_pause - Test if the PHY/MAC support the pause configuration * @phydev: phy_device struct * @pp: requested pause configuration * * Description: Test if the PHY/MAC combination supports the Pause * configuration the user is requesting. Returns True if it is * supported, false otherwise. */ bool phy_validate_pause(struct phy_device *phydev, struct ethtool_pauseparam *pp) { if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported) && pp->rx_pause) return false; if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported) && pp->rx_pause != pp->tx_pause) return false; return true; } EXPORT_SYMBOL(phy_validate_pause); /** * phy_get_pause - resolve negotiated pause modes * @phydev: phy_device struct * @tx_pause: pointer to bool to indicate whether transmit pause should be * enabled. * @rx_pause: pointer to bool to indicate whether receive pause should be * enabled. * * Resolve and return the flow control modes according to the negotiation * result. This includes checking that we are operating in full duplex mode. * See linkmode_resolve_pause() for further details. */ void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause) { if (phydev->duplex != DUPLEX_FULL) { *tx_pause = false; *rx_pause = false; return; } return linkmode_resolve_pause(phydev->advertising, phydev->lp_advertising, tx_pause, rx_pause); } EXPORT_SYMBOL(phy_get_pause); #if IS_ENABLED(CONFIG_OF_MDIO) static int phy_get_int_delay_property(struct device *dev, const char *name) { s32 int_delay; int ret; ret = device_property_read_u32(dev, name, &int_delay); if (ret) return ret; return int_delay; } #else static int phy_get_int_delay_property(struct device *dev, const char *name) { return -EINVAL; } #endif /** * phy_get_internal_delay - returns the index of the internal delay * @phydev: phy_device struct * @dev: pointer to the devices device struct * @delay_values: array of delays the PHY supports * @size: the size of the delay array * @is_rx: boolean to indicate to get the rx internal delay * * Returns the index within the array of internal delay passed in. * If the device property is not present then the interface type is checked * if the interface defines use of internal delay then a 1 is returned otherwise * a 0 is returned. * The array must be in ascending order. If PHY does not have an ascending order * array then size = 0 and the value of the delay property is returned. * Return -EINVAL if the delay is invalid or cannot be found. 
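 *
 * Sketch of a caller with an ascending delay table in picoseconds (the
 * table values below are made up for illustration):
 *
 *	static const int example_delays_ps[] = { 1000, 1500, 2000, 2500 };
 *
 *	idx = phy_get_internal_delay(phydev, &phydev->mdio.dev,
 *				     example_delays_ps,
 *				     ARRAY_SIZE(example_delays_ps), true);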
*/ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev, const int *delay_values, int size, bool is_rx) { s32 delay; int i; if (is_rx) { delay = phy_get_int_delay_property(dev, "rx-internal-delay-ps"); if (delay < 0 && size == 0) { if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) return 1; else return 0; } } else { delay = phy_get_int_delay_property(dev, "tx-internal-delay-ps"); if (delay < 0 && size == 0) { if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) return 1; else return 0; } } if (delay < 0) return delay; if (size == 0) return delay; if (delay < delay_values[0] || delay > delay_values[size - 1]) { phydev_err(phydev, "Delay %d is out of range\n", delay); return -EINVAL; } if (delay == delay_values[0]) return 0; for (i = 1; i < size; i++) { if (delay == delay_values[i]) return i; /* Find an approximate index by looking up the table */ if (delay > delay_values[i - 1] && delay < delay_values[i]) { if (delay - delay_values[i - 1] < delay_values[i] - delay) return i - 1; else return i; } } phydev_err(phydev, "error finding internal delay index for %d\n", delay); return -EINVAL; } EXPORT_SYMBOL(phy_get_internal_delay); static int phy_led_set_brightness(struct led_classdev *led_cdev, enum led_brightness value) { struct phy_led *phyled = to_phy_led(led_cdev); struct phy_device *phydev = phyled->phydev; int err; mutex_lock(&phydev->lock); err = phydev->drv->led_brightness_set(phydev, phyled->index, value); mutex_unlock(&phydev->lock); return err; } static int phy_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { struct phy_led *phyled = to_phy_led(led_cdev); struct phy_device *phydev = phyled->phydev; int err; mutex_lock(&phydev->lock); err = phydev->drv->led_blink_set(phydev, phyled->index, delay_on, delay_off); mutex_unlock(&phydev->lock); return err; } static __maybe_unused struct device * phy_led_hw_control_get_device(struct led_classdev *led_cdev) { struct phy_led *phyled = to_phy_led(led_cdev); struct phy_device *phydev = phyled->phydev; if (phydev->attached_dev) return &phydev->attached_dev->dev; return NULL; } static int __maybe_unused phy_led_hw_control_get(struct led_classdev *led_cdev, unsigned long *rules) { struct phy_led *phyled = to_phy_led(led_cdev); struct phy_device *phydev = phyled->phydev; int err; mutex_lock(&phydev->lock); err = phydev->drv->led_hw_control_get(phydev, phyled->index, rules); mutex_unlock(&phydev->lock); return err; } static int __maybe_unused phy_led_hw_control_set(struct led_classdev *led_cdev, unsigned long rules) { struct phy_led *phyled = to_phy_led(led_cdev); struct phy_device *phydev = phyled->phydev; int err; mutex_lock(&phydev->lock); err = phydev->drv->led_hw_control_set(phydev, phyled->index, rules); mutex_unlock(&phydev->lock); return err; } static __maybe_unused int phy_led_hw_is_supported(struct led_classdev *led_cdev, unsigned long rules) { struct phy_led *phyled = to_phy_led(led_cdev); struct phy_device *phydev = phyled->phydev; int err; mutex_lock(&phydev->lock); err = phydev->drv->led_hw_is_supported(phydev, phyled->index, rules); mutex_unlock(&phydev->lock); return err; } static void phy_leds_unregister(struct phy_device *phydev) { struct phy_led *phyled, *tmp; list_for_each_entry_safe(phyled, tmp, &phydev->leds, list) { led_classdev_unregister(&phyled->led_cdev); list_del(&phyled->list); } } static int of_phy_led(struct phy_device *phydev, struct 
device_node *led) { struct device *dev = &phydev->mdio.dev; struct led_init_data init_data = {}; struct led_classdev *cdev; unsigned long modes = 0; struct phy_led *phyled; u32 index; int err; phyled = devm_kzalloc(dev, sizeof(*phyled), GFP_KERNEL); if (!phyled) return -ENOMEM; cdev = &phyled->led_cdev; phyled->phydev = phydev; err = of_property_read_u32(led, "reg", &index); if (err) return err; if (index > U8_MAX) return -EINVAL; if (of_property_read_bool(led, "active-high")) set_bit(PHY_LED_ACTIVE_HIGH, &modes); if (of_property_read_bool(led, "active-low")) set_bit(PHY_LED_ACTIVE_LOW, &modes); if (of_property_read_bool(led, "inactive-high-impedance")) set_bit(PHY_LED_INACTIVE_HIGH_IMPEDANCE, &modes); if (WARN_ON(modes & BIT(PHY_LED_ACTIVE_LOW) && modes & BIT(PHY_LED_ACTIVE_HIGH))) return -EINVAL; if (modes) { /* Return error if asked to set polarity modes but not supported */ if (!phydev->drv->led_polarity_set) return -EINVAL; err = phydev->drv->led_polarity_set(phydev, index, modes); if (err) return err; } phyled->index = index; if (phydev->drv->led_brightness_set) cdev->brightness_set_blocking = phy_led_set_brightness; if (phydev->drv->led_blink_set) cdev->blink_set = phy_led_blink_set; #ifdef CONFIG_LEDS_TRIGGERS if (phydev->drv->led_hw_is_supported && phydev->drv->led_hw_control_set && phydev->drv->led_hw_control_get) { cdev->hw_control_is_supported = phy_led_hw_is_supported; cdev->hw_control_set = phy_led_hw_control_set; cdev->hw_control_get = phy_led_hw_control_get; cdev->hw_control_trigger = "netdev"; } cdev->hw_control_get_device = phy_led_hw_control_get_device; #endif cdev->max_brightness = 1; init_data.devicename = dev_name(&phydev->mdio.dev); init_data.fwnode = of_fwnode_handle(led); init_data.devname_mandatory = true; err = led_classdev_register_ext(dev, cdev, &init_data); if (err) return err; list_add(&phyled->list, &phydev->leds); return 0; } static int of_phy_leds(struct phy_device *phydev) { struct device_node *node = phydev->mdio.dev.of_node; struct device_node *leds; int err; if (!IS_ENABLED(CONFIG_OF_MDIO)) return 0; if (!node) return 0; leds = of_get_child_by_name(node, "leds"); if (!leds) return 0; /* Check if the PHY driver have at least an OP to * set the LEDs. */ if (!(phydev->drv->led_brightness_set || phydev->drv->led_blink_set || phydev->drv->led_hw_control_set)) { phydev_dbg(phydev, "ignoring leds node defined with no PHY driver support\n"); goto exit; } for_each_available_child_of_node_scoped(leds, led) { err = of_phy_led(phydev, led); if (err) { of_node_put(leds); phy_leds_unregister(phydev); return err; } } exit: of_node_put(leds); return 0; } /** * fwnode_mdio_find_device - Given a fwnode, find the mdio_device * @fwnode: pointer to the mdio_device's fwnode * * If successful, returns a pointer to the mdio_device with the embedded * struct device refcount incremented by one, or NULL on failure. * The caller should call put_device() on the mdio_device after its use. */ struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode) { struct device *d; if (!fwnode) return NULL; d = bus_find_device_by_fwnode(&mdio_bus_type, fwnode); if (!d) return NULL; return to_mdio_device(d); } EXPORT_SYMBOL(fwnode_mdio_find_device); /** * fwnode_phy_find_device - For provided phy_fwnode, find phy_device. * * @phy_fwnode: Pointer to the phy's fwnode. * * If successful, returns a pointer to the phy_device with the embedded * struct device refcount incremented by one, or NULL on failure. 
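 *
 * Callers are expected to drop the reference when done, e.g. (sketch):
 *
 *	phydev = fwnode_phy_find_device(fwnode);
 *	if (phydev) {
 *		// ... use phydev ...
 *		put_device(&phydev->mdio.dev);
 *	}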
*/ struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode) { struct mdio_device *mdiodev; mdiodev = fwnode_mdio_find_device(phy_fwnode); if (!mdiodev) return NULL; if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY) return to_phy_device(&mdiodev->dev); put_device(&mdiodev->dev); return NULL; } EXPORT_SYMBOL(fwnode_phy_find_device); /** * device_phy_find_device - For the given device, get the phy_device * @dev: Pointer to the given device * * Refer return conditions of fwnode_phy_find_device(). */ struct phy_device *device_phy_find_device(struct device *dev) { return fwnode_phy_find_device(dev_fwnode(dev)); } EXPORT_SYMBOL_GPL(device_phy_find_device); /** * fwnode_get_phy_node - Get the phy_node using the named reference. * @fwnode: Pointer to fwnode from which phy_node has to be obtained. * * Refer return conditions of fwnode_find_reference(). * For ACPI, only "phy-handle" is supported. Legacy DT properties "phy" * and "phy-device" are not supported in ACPI. DT supports all the three * named references to the phy node. */ struct fwnode_handle *fwnode_get_phy_node(const struct fwnode_handle *fwnode) { struct fwnode_handle *phy_node; /* Only phy-handle is used for ACPI */ phy_node = fwnode_find_reference(fwnode, "phy-handle", 0); if (is_acpi_node(fwnode) || !IS_ERR(phy_node)) return phy_node; phy_node = fwnode_find_reference(fwnode, "phy", 0); if (IS_ERR(phy_node)) phy_node = fwnode_find_reference(fwnode, "phy-device", 0); return phy_node; } EXPORT_SYMBOL_GPL(fwnode_get_phy_node); /** * phy_probe - probe and init a PHY device * @dev: device to probe and init * * Take care of setting up the phy_device structure, set the state to READY. */ static int phy_probe(struct device *dev) { struct phy_device *phydev = to_phy_device(dev); struct device_driver *drv = phydev->mdio.dev.driver; struct phy_driver *phydrv = to_phy_driver(drv); int err = 0; phydev->drv = phydrv; /* Disable the interrupt if the PHY doesn't support it * but the interrupt is still a valid one */ if (!phy_drv_supports_irq(phydrv) && phy_interrupt_is_valid(phydev)) phydev->irq = PHY_POLL; if (phydrv->flags & PHY_IS_INTERNAL) phydev->is_internal = true; /* Deassert the reset signal */ phy_device_reset(phydev, 0); if (phydev->drv->probe) { err = phydev->drv->probe(phydev); if (err) goto out; } phy_disable_interrupts(phydev); /* Start out supporting everything. Eventually, * a controller will attach, and may modify one * or both of these values */ if (phydrv->features) { linkmode_copy(phydev->supported, phydrv->features); genphy_c45_read_eee_abilities(phydev); } else if (phydrv->get_features) err = phydrv->get_features(phydev); else if (phydev->is_c45) err = genphy_c45_pma_read_abilities(phydev); else err = genphy_read_abilities(phydev); if (err) goto out; if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported)) phydev->autoneg = 0; if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, phydev->supported)) phydev->is_gigabit_capable = 1; if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, phydev->supported)) phydev->is_gigabit_capable = 1; of_set_phy_supported(phydev); phy_advertise_supported(phydev); /* Get PHY default EEE advertising modes and handle them as potentially * safe initial configuration. */ err = genphy_c45_read_eee_adv(phydev, phydev->advertising_eee); if (err) goto out; /* There is no "enabled" flag. If PHY is advertising, assume it is * kind of enabled. 
*/ phydev->eee_cfg.eee_enabled = !linkmode_empty(phydev->advertising_eee); /* Some PHYs may advertise, by default, not support EEE modes. So, * we need to clean them. */ if (phydev->eee_cfg.eee_enabled) linkmode_and(phydev->advertising_eee, phydev->supported_eee, phydev->advertising_eee); /* Get the EEE modes we want to prohibit. We will ask * the PHY stop advertising these mode later on */ of_set_phy_eee_broken(phydev); /* Get master/slave strap overrides */ of_set_phy_timing_role(phydev); /* The Pause Frame bits indicate that the PHY can support passing * pause frames. During autonegotiation, the PHYs will determine if * they should allow pause frames to pass. The MAC driver should then * use that result to determine whether to enable flow control via * pause frames. * * Normally, PHY drivers should not set the Pause bits, and instead * allow phylib to do that. However, there may be some situations * (e.g. hardware erratum) where the driver wants to set only one * of these bits. */ if (!test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported) && !test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported)) { linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported); } /* Set the state to READY by default */ phydev->state = PHY_READY; /* Get the LEDs from the device tree, and instantiate standard * LEDs for them. */ if (IS_ENABLED(CONFIG_PHYLIB_LEDS)) err = of_phy_leds(phydev); out: /* Re-assert the reset signal on error */ if (err) phy_device_reset(phydev, 1); return err; } static int phy_remove(struct device *dev) { struct phy_device *phydev = to_phy_device(dev); cancel_delayed_work_sync(&phydev->state_queue); if (IS_ENABLED(CONFIG_PHYLIB_LEDS)) phy_leds_unregister(phydev); phydev->state = PHY_DOWN; sfp_bus_del_upstream(phydev->sfp_bus); phydev->sfp_bus = NULL; if (phydev->drv && phydev->drv->remove) phydev->drv->remove(phydev); /* Assert the reset signal */ phy_device_reset(phydev, 1); phydev->drv = NULL; return 0; } /** * phy_driver_register - register a phy_driver with the PHY layer * @new_driver: new phy_driver to register * @owner: module owning this PHY */ int phy_driver_register(struct phy_driver *new_driver, struct module *owner) { int retval; /* Either the features are hard coded, or dynamically * determined. It cannot be both. */ if (WARN_ON(new_driver->features && new_driver->get_features)) { pr_err("%s: features and get_features must not both be set\n", new_driver->name); return -EINVAL; } /* PHYLIB device drivers must not match using a DT compatible table * as this bypasses our checks that the mdiodev that is being matched * is backed by a struct phy_device. If such a case happens, we will * make out-of-bounds accesses and lockup in phydev->lock. 
*/ if (WARN(new_driver->mdiodrv.driver.of_match_table, "%s: driver must not provide a DT match table\n", new_driver->name)) return -EINVAL; new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY; new_driver->mdiodrv.driver.name = new_driver->name; new_driver->mdiodrv.driver.bus = &mdio_bus_type; new_driver->mdiodrv.driver.probe = phy_probe; new_driver->mdiodrv.driver.remove = phy_remove; new_driver->mdiodrv.driver.owner = owner; new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS; retval = driver_register(&new_driver->mdiodrv.driver); if (retval) { pr_err("%s: Error %d in registering driver\n", new_driver->name, retval); return retval; } pr_debug("%s: Registered new driver\n", new_driver->name); return 0; } EXPORT_SYMBOL(phy_driver_register); int phy_drivers_register(struct phy_driver *new_driver, int n, struct module *owner) { int i, ret = 0; for (i = 0; i < n; i++) { ret = phy_driver_register(new_driver + i, owner); if (ret) { while (i-- > 0) phy_driver_unregister(new_driver + i); break; } } return ret; } EXPORT_SYMBOL(phy_drivers_register); void phy_driver_unregister(struct phy_driver *drv) { driver_unregister(&drv->mdiodrv.driver); } EXPORT_SYMBOL(phy_driver_unregister); void phy_drivers_unregister(struct phy_driver *drv, int n) { int i; for (i = 0; i < n; i++) phy_driver_unregister(drv + i); } EXPORT_SYMBOL(phy_drivers_unregister); static struct phy_driver genphy_driver = { .phy_id = 0xffffffff, .phy_id_mask = 0xffffffff, .name = "Generic PHY", .get_features = genphy_read_abilities, .suspend = genphy_suspend, .resume = genphy_resume, .set_loopback = genphy_loopback, }; static const struct ethtool_phy_ops phy_ethtool_phy_ops = { .get_sset_count = phy_ethtool_get_sset_count, .get_strings = phy_ethtool_get_strings, .get_stats = phy_ethtool_get_stats, .get_plca_cfg = phy_ethtool_get_plca_cfg, .set_plca_cfg = phy_ethtool_set_plca_cfg, .get_plca_status = phy_ethtool_get_plca_status, .start_cable_test = phy_start_cable_test, .start_cable_test_tdr = phy_start_cable_test_tdr, }; static const struct phylib_stubs __phylib_stubs = { .hwtstamp_get = __phy_hwtstamp_get, .hwtstamp_set = __phy_hwtstamp_set, .get_phy_stats = __phy_ethtool_get_phy_stats, .get_link_ext_stats = __phy_ethtool_get_link_ext_stats, }; static void phylib_register_stubs(void) { phylib_stubs = &__phylib_stubs; } static void phylib_unregister_stubs(void) { phylib_stubs = NULL; } static int __init phy_init(void) { int rc; rtnl_lock(); ethtool_set_ethtool_phy_ops(&phy_ethtool_phy_ops); phylib_register_stubs(); rtnl_unlock(); rc = mdio_bus_init(); if (rc) goto err_ethtool_phy_ops; features_init(); rc = phy_driver_register(&genphy_c45_driver, THIS_MODULE); if (rc) goto err_mdio_bus; rc = phy_driver_register(&genphy_driver, THIS_MODULE); if (rc) goto err_c45; return 0; err_c45: phy_driver_unregister(&genphy_c45_driver); err_mdio_bus: mdio_bus_exit(); err_ethtool_phy_ops: rtnl_lock(); phylib_unregister_stubs(); ethtool_set_ethtool_phy_ops(NULL); rtnl_unlock(); return rc; } static void __exit phy_exit(void) { phy_driver_unregister(&genphy_c45_driver); phy_driver_unregister(&genphy_driver); mdio_bus_exit(); rtnl_lock(); phylib_unregister_stubs(); ethtool_set_ethtool_phy_ops(NULL); rtnl_unlock(); } subsys_initcall(phy_init); module_exit(phy_exit);
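The registration machinery above (phy_probe(), phy_driver_register() and the genphy_* helpers) is what an actual vendor PHY driver builds on. The sketch below is illustrative only: the PHY ID, driver name and the choice to rely purely on the generic helpers are assumptions made for the example, not taken from this file.

	/* Hypothetical minimal PHY driver built on the generic helpers above;
	 * the ID below is a placeholder, not a real OUI/model number.
	 */
	static struct phy_driver example_phy_driver[] = {
		{
			PHY_ID_MATCH_EXACT(0x001cc800),	/* placeholder */
			.name		= "Example Gigabit PHY",
			/* no .features or .get_features: phy_probe() then
			 * falls back to genphy_read_abilities()
			 */
			.config_aneg	= genphy_config_aneg,
			.read_status	= genphy_read_status,
			.suspend	= genphy_suspend,
			.resume		= genphy_resume,
		},
	};
	module_phy_driver(example_phy_driver);

	static const struct mdio_device_id __maybe_unused example_phy_tbl[] = {
		{ PHY_ID_MATCH_EXACT(0x001cc800) },
		{ }
	};
	MODULE_DEVICE_TABLE(mdio, example_phy_tbl);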
// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for Realtek RTS51xx USB card reader
 *
 * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
 *
 * Author:
 *   wwang (wei_wang@realsil.com.cn)
 *   No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
 */

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/kernel.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/cdrom.h>

#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/usb_usual.h>

#include "usb.h"
#include "transport.h"
#include "protocol.h"
#include "debug.h"
#include "scsiglue.h"

#define DRV_NAME "ums-realtek"

MODULE_DESCRIPTION("Driver for Realtek USB Card Reader");
MODULE_AUTHOR("wwang <wei_wang@realsil.com.cn>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("USB_STORAGE");

static int auto_delink_en = 1;
module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(auto_delink_en, "auto delink mode (0=firmware, 1=software [default])");

#ifdef CONFIG_REALTEK_AUTOPM
static int ss_en = 1;
module_param(ss_en, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ss_en, "enable selective suspend");

static int ss_delay = 50;
module_param(ss_delay, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ss_delay, "seconds to delay before entering selective suspend");

enum RTS51X_STAT {
	RTS51X_STAT_INIT,
	RTS51X_STAT_IDLE,
	RTS51X_STAT_RUN,
	RTS51X_STAT_SS
};

#define POLLING_INTERVAL	50

#define rts51x_set_stat(chip, stat)	\
	((chip)->state = (enum RTS51X_STAT)(stat))
#define rts51x_get_stat(chip)		((chip)->state)

#define SET_LUN_READY(chip, lun)	((chip)->lun_ready |= ((u8)1 << (lun)))
#define CLR_LUN_READY(chip, lun)	((chip)->lun_ready &= ~((u8)1 << (lun)))
#define TST_LUN_READY(chip, lun)	((chip)->lun_ready & ((u8)1 << (lun)))

#endif

struct rts51x_status {
	u16 vid;
	u16 pid;
	u8 cur_lun;
	u8 card_type;
	u8 total_lun;
	u16 fw_ver;
	u8 phy_exist;
	u8 multi_flag;
	u8 multi_card;
	u8 log_exist;
	union {
		u8 detailed_type1;
		u8 detailed_type2;
	} detailed_type;
	u8 function[2];
};

struct rts51x_chip {
	u16 vendor_id;
	u16 product_id;
	char max_lun;

	struct rts51x_status *status;
	int status_len;

	u32 flag;
	struct us_data *us;

#ifdef CONFIG_REALTEK_AUTOPM
	struct timer_list rts51x_suspend_timer;
	unsigned long timer_expires;
	int pwr_state;
	u8 lun_ready;
	enum RTS51X_STAT state;
	int support_auto_delink;
#endif
	/* used to back up the protocol chosen in probe1 phase */
	proto_cmnd proto_handler_backup;
};

/* flag definition */
#define FLIDX_AUTO_DELINK	0x01

#define SCSI_LUN(srb)		((srb)->device->lun)

/* Bit Operation */
#define SET_BIT(data, idx)	((data) |= 1 << (idx))
#define CLR_BIT(data, idx)	((data) &= ~(1 << (idx)))
#define CHK_BIT(data, idx)	((data) & (1 << (idx)))

#define SET_AUTO_DELINK(chip)	((chip)->flag |= FLIDX_AUTO_DELINK)
#define CLR_AUTO_DELINK(chip)	\
((chip)->flag &= ~FLIDX_AUTO_DELINK) #define CHK_AUTO_DELINK(chip) ((chip)->flag & FLIDX_AUTO_DELINK) #define RTS51X_GET_VID(chip) ((chip)->vendor_id) #define RTS51X_GET_PID(chip) ((chip)->product_id) #define VENDOR_ID(chip) ((chip)->status[0].vid) #define PRODUCT_ID(chip) ((chip)->status[0].pid) #define FW_VERSION(chip) ((chip)->status[0].fw_ver) #define STATUS_LEN(chip) ((chip)->status_len) #define STATUS_SUCCESS 0 #define STATUS_FAIL 1 /* Check card reader function */ #define SUPPORT_DETAILED_TYPE1(chip) \ CHK_BIT((chip)->status[0].function[0], 1) #define SUPPORT_OT(chip) \ CHK_BIT((chip)->status[0].function[0], 2) #define SUPPORT_OC(chip) \ CHK_BIT((chip)->status[0].function[0], 3) #define SUPPORT_AUTO_DELINK(chip) \ CHK_BIT((chip)->status[0].function[0], 4) #define SUPPORT_SDIO(chip) \ CHK_BIT((chip)->status[0].function[1], 0) #define SUPPORT_DETAILED_TYPE2(chip) \ CHK_BIT((chip)->status[0].function[1], 1) #define CHECK_PID(chip, pid) (RTS51X_GET_PID(chip) == (pid)) #define CHECK_FW_VER(chip, fw_ver) (FW_VERSION(chip) == (fw_ver)) #define CHECK_ID(chip, pid, fw_ver) \ (CHECK_PID((chip), (pid)) && CHECK_FW_VER((chip), (fw_ver))) static int init_realtek_cr(struct us_data *us); /* * The table of devices */ #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ {\ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) \ } static const struct usb_device_id realtek_cr_ids[] = { # include "unusual_realtek.h" {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, realtek_cr_ids); #undef UNUSUAL_DEV /* * The flags table */ #define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \ vendor_name, product_name, use_protocol, use_transport, \ init_function, Flags) \ { \ .vendorName = vendor_name, \ .productName = product_name, \ .useProtocol = use_protocol, \ .useTransport = use_transport, \ .initFunction = init_function, \ } static struct us_unusual_dev realtek_cr_unusual_dev_list[] = { # include "unusual_realtek.h" {} /* Terminating entry */ }; #undef UNUSUAL_DEV static int rts51x_bulk_transport(struct us_data *us, u8 lun, u8 *cmd, int cmd_len, u8 *buf, int buf_len, enum dma_data_direction dir, int *act_len) { struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *)us->iobuf; struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *)us->iobuf; int result; unsigned int residue; unsigned int cswlen; unsigned int cbwlen = US_BULK_CB_WRAP_LEN; /* set up the command wrapper */ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = cpu_to_le32(buf_len); bcb->Flags = (dir == DMA_FROM_DEVICE) ? US_BULK_FLAG_IN : US_BULK_FLAG_OUT; bcb->Tag = ++us->tag; bcb->Lun = lun; bcb->Length = cmd_len; /* copy the command payload */ memset(bcb->CDB, 0, sizeof(bcb->CDB)); memcpy(bcb->CDB, cmd, bcb->Length); /* send it to out endpoint */ result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb, cbwlen, NULL); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; /* DATA STAGE */ /* send/receive data payload, if there is any */ if (buf && buf_len) { unsigned int pipe = (dir == DMA_FROM_DEVICE) ? 
us->recv_bulk_pipe : us->send_bulk_pipe; result = usb_stor_bulk_transfer_buf(us, pipe, buf, buf_len, NULL); if (result == USB_STOR_XFER_ERROR) return USB_STOR_TRANSPORT_ERROR; } /* get CSW for device status */ result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, &cswlen); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; /* check bulk status */ if (bcs->Signature != cpu_to_le32(US_BULK_CS_SIGN)) { usb_stor_dbg(us, "Signature mismatch: got %08X, expecting %08X\n", le32_to_cpu(bcs->Signature), US_BULK_CS_SIGN); return USB_STOR_TRANSPORT_ERROR; } residue = bcs->Residue; if (bcs->Tag != us->tag) return USB_STOR_TRANSPORT_ERROR; /* * try to compute the actual residue, based on how much data * was really transferred and what the device tells us */ if (residue) residue = residue < buf_len ? residue : buf_len; if (act_len) *act_len = buf_len - residue; /* based on the status code, we report good or bad */ switch (bcs->Status) { case US_BULK_STAT_OK: /* command good -- note that data could be short */ return USB_STOR_TRANSPORT_GOOD; case US_BULK_STAT_FAIL: /* command failed */ return USB_STOR_TRANSPORT_FAILED; case US_BULK_STAT_PHASE: /* * phase error -- note that a transport reset will be * invoked by the invoke_transport() function */ return USB_STOR_TRANSPORT_ERROR; } /* we should never get here, but if we do, we're in trouble */ return USB_STOR_TRANSPORT_ERROR; } static int rts51x_bulk_transport_special(struct us_data *us, u8 lun, u8 *cmd, int cmd_len, u8 *buf, int buf_len, enum dma_data_direction dir, int *act_len) { struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf; int result; unsigned int cswlen; unsigned int cbwlen = US_BULK_CB_WRAP_LEN; /* set up the command wrapper */ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = cpu_to_le32(buf_len); bcb->Flags = (dir == DMA_FROM_DEVICE) ? US_BULK_FLAG_IN : US_BULK_FLAG_OUT; bcb->Tag = ++us->tag; bcb->Lun = lun; bcb->Length = cmd_len; /* copy the command payload */ memset(bcb->CDB, 0, sizeof(bcb->CDB)); memcpy(bcb->CDB, cmd, bcb->Length); /* send it to out endpoint */ result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb, cbwlen, NULL); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; /* DATA STAGE */ /* send/receive data payload, if there is any */ if (buf && buf_len) { unsigned int pipe = (dir == DMA_FROM_DEVICE) ? 
us->recv_bulk_pipe : us->send_bulk_pipe; result = usb_stor_bulk_transfer_buf(us, pipe, buf, buf_len, NULL); if (result == USB_STOR_XFER_ERROR) return USB_STOR_TRANSPORT_ERROR; } /* get CSW for device status */ result = usb_bulk_msg(us->pusb_dev, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, &cswlen, 250); return result; } /* Determine what the maximum LUN supported is */ static int rts51x_get_max_lun(struct us_data *us) { int result; /* issue the command */ us->iobuf[0] = 0; result = usb_stor_control_msg(us, us->recv_ctrl_pipe, US_BULK_GET_MAX_LUN, USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, us->ifnum, us->iobuf, 1, 10 * HZ); usb_stor_dbg(us, "GetMaxLUN command result is %d, data is %d\n", result, us->iobuf[0]); /* if we have a successful request, return the result */ if (result > 0) return us->iobuf[0]; return 0; } static int rts51x_read_mem(struct us_data *us, u16 addr, u8 *data, u16 len) { int retval; u8 cmnd[12] = { 0 }; u8 *buf; buf = kmalloc(len, GFP_NOIO); if (buf == NULL) return -ENOMEM; usb_stor_dbg(us, "addr = 0x%x, len = %d\n", addr, len); cmnd[0] = 0xF0; cmnd[1] = 0x0D; cmnd[2] = (u8) (addr >> 8); cmnd[3] = (u8) addr; cmnd[4] = (u8) (len >> 8); cmnd[5] = (u8) len; retval = rts51x_bulk_transport(us, 0, cmnd, 12, buf, len, DMA_FROM_DEVICE, NULL); if (retval != USB_STOR_TRANSPORT_GOOD) { kfree(buf); return -EIO; } memcpy(data, buf, len); kfree(buf); return 0; } static int rts51x_write_mem(struct us_data *us, u16 addr, u8 *data, u16 len) { int retval; u8 cmnd[12] = { 0 }; u8 *buf; buf = kmemdup(data, len, GFP_NOIO); if (buf == NULL) return USB_STOR_TRANSPORT_ERROR; usb_stor_dbg(us, "addr = 0x%x, len = %d\n", addr, len); cmnd[0] = 0xF0; cmnd[1] = 0x0E; cmnd[2] = (u8) (addr >> 8); cmnd[3] = (u8) addr; cmnd[4] = (u8) (len >> 8); cmnd[5] = (u8) len; retval = rts51x_bulk_transport(us, 0, cmnd, 12, buf, len, DMA_TO_DEVICE, NULL); kfree(buf); if (retval != USB_STOR_TRANSPORT_GOOD) return -EIO; return 0; } static int rts51x_read_status(struct us_data *us, u8 lun, u8 *status, int len, int *actlen) { int retval; u8 cmnd[12] = { 0 }; u8 *buf; buf = kmalloc(len, GFP_NOIO); if (buf == NULL) return USB_STOR_TRANSPORT_ERROR; usb_stor_dbg(us, "lun = %d\n", lun); cmnd[0] = 0xF0; cmnd[1] = 0x09; retval = rts51x_bulk_transport(us, lun, cmnd, 12, buf, len, DMA_FROM_DEVICE, actlen); if (retval != USB_STOR_TRANSPORT_GOOD) { kfree(buf); return -EIO; } memcpy(status, buf, len); kfree(buf); return 0; } static int rts51x_check_status(struct us_data *us, u8 lun) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 buf[16]; retval = rts51x_read_status(us, lun, buf, 16, &(chip->status_len)); if (retval != STATUS_SUCCESS) return -EIO; usb_stor_dbg(us, "chip->status_len = %d\n", chip->status_len); chip->status[lun].vid = ((u16) buf[0] << 8) | buf[1]; chip->status[lun].pid = ((u16) buf[2] << 8) | buf[3]; chip->status[lun].cur_lun = buf[4]; chip->status[lun].card_type = buf[5]; chip->status[lun].total_lun = buf[6]; chip->status[lun].fw_ver = ((u16) buf[7] << 8) | buf[8]; chip->status[lun].phy_exist = buf[9]; chip->status[lun].multi_flag = buf[10]; chip->status[lun].multi_card = buf[11]; chip->status[lun].log_exist = buf[12]; if (chip->status_len == 16) { chip->status[lun].detailed_type.detailed_type1 = buf[13]; chip->status[lun].function[0] = buf[14]; chip->status[lun].function[1] = buf[15]; } return 0; } static int enable_oscillator(struct us_data *us) { int retval; u8 value; retval = rts51x_read_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; value |= 0x04; retval = 
rts51x_write_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; retval = rts51x_read_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; if (!(value & 0x04)) return -EIO; return 0; } static int __do_config_autodelink(struct us_data *us, u8 *data, u16 len) { int retval; u8 cmnd[12] = {0}; u8 *buf; usb_stor_dbg(us, "addr = 0xfe47, len = %d\n", len); buf = kmemdup(data, len, GFP_NOIO); if (!buf) return USB_STOR_TRANSPORT_ERROR; cmnd[0] = 0xF0; cmnd[1] = 0x0E; cmnd[2] = 0xfe; cmnd[3] = 0x47; cmnd[4] = (u8)(len >> 8); cmnd[5] = (u8)len; retval = rts51x_bulk_transport_special(us, 0, cmnd, 12, buf, len, DMA_TO_DEVICE, NULL); kfree(buf); if (retval != USB_STOR_TRANSPORT_GOOD) { return -EIO; } return 0; } static int do_config_autodelink(struct us_data *us, int enable, int force) { int retval; u8 value; retval = rts51x_read_mem(us, 0xFE47, &value, 1); if (retval < 0) return -EIO; if (enable) { if (force) value |= 0x03; else value |= 0x01; } else { value &= ~0x03; } usb_stor_dbg(us, "set 0xfe47 to 0x%x\n", value); /* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */ retval = __do_config_autodelink(us, &value, 1); if (retval < 0) return -EIO; return 0; } static int config_autodelink_after_power_on(struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 value; if (!CHK_AUTO_DELINK(chip)) return 0; retval = rts51x_read_mem(us, 0xFE47, &value, 1); if (retval < 0) return -EIO; if (auto_delink_en) { CLR_BIT(value, 0); CLR_BIT(value, 1); SET_BIT(value, 2); if (CHECK_ID(chip, 0x0138, 0x3882)) CLR_BIT(value, 2); SET_BIT(value, 7); /* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */ retval = __do_config_autodelink(us, &value, 1); if (retval < 0) return -EIO; retval = enable_oscillator(us); if (retval == 0) (void)do_config_autodelink(us, 1, 0); } else { /* Autodelink controlled by firmware */ SET_BIT(value, 2); if (CHECK_ID(chip, 0x0138, 0x3882)) CLR_BIT(value, 2); if (CHECK_ID(chip, 0x0159, 0x5889) || CHECK_ID(chip, 0x0138, 0x3880)) { CLR_BIT(value, 0); CLR_BIT(value, 7); } /* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */ retval = __do_config_autodelink(us, &value, 1); if (retval < 0) return -EIO; if (CHECK_ID(chip, 0x0159, 0x5888)) { value = 0xFF; retval = rts51x_write_mem(us, 0xFE79, &value, 1); if (retval < 0) return -EIO; value = 0x01; retval = rts51x_write_mem(us, 0x48, &value, 1); if (retval < 0) return -EIO; } } return 0; } #ifdef CONFIG_PM static int config_autodelink_before_power_down(struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 value; if (!CHK_AUTO_DELINK(chip)) return 0; if (auto_delink_en) { retval = rts51x_read_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; SET_BIT(value, 2); retval = rts51x_write_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; if (CHECK_ID(chip, 0x0159, 0x5888)) { value = 0x01; retval = rts51x_write_mem(us, 0x48, &value, 1); if (retval < 0) return -EIO; } retval = rts51x_read_mem(us, 0xFE47, &value, 1); if (retval < 0) return -EIO; SET_BIT(value, 0); if (CHECK_ID(chip, 0x0138, 0x3882)) SET_BIT(value, 2); retval = rts51x_write_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; } else { if (CHECK_ID(chip, 0x0159, 0x5889) || CHECK_ID(chip, 0x0138, 0x3880) || CHECK_ID(chip, 0x0138, 0x3882)) { retval = rts51x_read_mem(us, 0xFE47, &value, 1); if (retval < 0) return -EIO; if (CHECK_ID(chip, 0x0159, 0x5889) || CHECK_ID(chip, 0x0138, 0x3880)) { SET_BIT(value, 0); SET_BIT(value, 7); } if (CHECK_ID(chip, 0x0138, 0x3882)) SET_BIT(value, 2); /* 
retval = rts51x_write_mem(us, 0xFE47, &value, 1); */ retval = __do_config_autodelink(us, &value, 1); if (retval < 0) return -EIO; } if (CHECK_ID(chip, 0x0159, 0x5888)) { value = 0x01; retval = rts51x_write_mem(us, 0x48, &value, 1); if (retval < 0) return -EIO; } } return 0; } static void fw5895_init(struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 val; if ((PRODUCT_ID(chip) != 0x0158) || (FW_VERSION(chip) != 0x5895)) { usb_stor_dbg(us, "Not the specified device, return immediately!\n"); } else { retval = rts51x_read_mem(us, 0xFD6F, &val, 1); if (retval == STATUS_SUCCESS && (val & 0x1F) == 0) { val = 0x1F; retval = rts51x_write_mem(us, 0xFD70, &val, 1); if (retval != STATUS_SUCCESS) usb_stor_dbg(us, "Write memory fail\n"); } else { usb_stor_dbg(us, "Read memory fail, OR (val & 0x1F) != 0\n"); } } } #endif #ifdef CONFIG_REALTEK_AUTOPM static void fw5895_set_mmc_wp(struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 buf[13]; if ((PRODUCT_ID(chip) != 0x0158) || (FW_VERSION(chip) != 0x5895)) { usb_stor_dbg(us, "Not the specified device, return immediately!\n"); } else { retval = rts51x_read_mem(us, 0xFD6F, buf, 1); if (retval == STATUS_SUCCESS && (buf[0] & 0x24) == 0x24) { /* SD Exist and SD WP */ retval = rts51x_read_mem(us, 0xD04E, buf, 1); if (retval == STATUS_SUCCESS) { buf[0] |= 0x04; retval = rts51x_write_mem(us, 0xFD70, buf, 1); if (retval != STATUS_SUCCESS) usb_stor_dbg(us, "Write memory fail\n"); } else { usb_stor_dbg(us, "Read memory fail\n"); } } else { usb_stor_dbg(us, "Read memory fail, OR (buf[0]&0x24)!=0x24\n"); } } } static void rts51x_modi_suspend_timer(struct rts51x_chip *chip) { struct us_data *us = chip->us; usb_stor_dbg(us, "state:%d\n", rts51x_get_stat(chip)); chip->timer_expires = jiffies + msecs_to_jiffies(1000*ss_delay); mod_timer(&chip->rts51x_suspend_timer, chip->timer_expires); } static void rts51x_suspend_timer_fn(struct timer_list *t) { struct rts51x_chip *chip = from_timer(chip, t, rts51x_suspend_timer); struct us_data *us = chip->us; switch (rts51x_get_stat(chip)) { case RTS51X_STAT_INIT: case RTS51X_STAT_RUN: rts51x_modi_suspend_timer(chip); break; case RTS51X_STAT_IDLE: case RTS51X_STAT_SS: usb_stor_dbg(us, "RTS51X_STAT_SS, power.usage:%d\n", atomic_read(&us->pusb_intf->dev.power.usage_count)); if (atomic_read(&us->pusb_intf->dev.power.usage_count) > 0) { usb_stor_dbg(us, "Ready to enter SS state\n"); rts51x_set_stat(chip, RTS51X_STAT_SS); /* ignore mass storage interface's children */ pm_suspend_ignore_children(&us->pusb_intf->dev, true); usb_autopm_put_interface_async(us->pusb_intf); usb_stor_dbg(us, "RTS51X_STAT_SS 01, power.usage:%d\n", atomic_read(&us->pusb_intf->dev.power.usage_count)); } break; default: usb_stor_dbg(us, "Unknown state !!!\n"); break; } } static inline int working_scsi(struct scsi_cmnd *srb) { if ((srb->cmnd[0] == TEST_UNIT_READY) || (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL)) { return 0; } return 1; } static void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); static int card_first_show = 1; static u8 media_not_present[] = { 0x70, 0, 0x02, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0x3A, 0, 0, 0, 0, 0 }; static u8 invalid_cmd_field[] = { 0x70, 0, 0x05, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0x24, 0, 0, 0, 0, 0 }; int ret; if (working_scsi(srb)) { usb_stor_dbg(us, "working scsi, power.usage:%d\n", atomic_read(&us->pusb_intf->dev.power.usage_count)); if 
(atomic_read(&us->pusb_intf->dev.power.usage_count) <= 0) { ret = usb_autopm_get_interface(us->pusb_intf); usb_stor_dbg(us, "working scsi, ret=%d\n", ret); } if (rts51x_get_stat(chip) != RTS51X_STAT_RUN) rts51x_set_stat(chip, RTS51X_STAT_RUN); chip->proto_handler_backup(srb, us); } else { if (rts51x_get_stat(chip) == RTS51X_STAT_SS) { usb_stor_dbg(us, "NOT working scsi\n"); if ((srb->cmnd[0] == TEST_UNIT_READY) && (chip->pwr_state == US_SUSPEND)) { if (TST_LUN_READY(chip, srb->device->lun)) { srb->result = SAM_STAT_GOOD; } else { srb->result = SAM_STAT_CHECK_CONDITION; memcpy(srb->sense_buffer, media_not_present, US_SENSE_SIZE); } usb_stor_dbg(us, "TEST_UNIT_READY\n"); goto out; } if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) { int prevent = srb->cmnd[4] & 0x1; if (prevent) { srb->result = SAM_STAT_CHECK_CONDITION; memcpy(srb->sense_buffer, invalid_cmd_field, US_SENSE_SIZE); } else { srb->result = SAM_STAT_GOOD; } usb_stor_dbg(us, "ALLOW_MEDIUM_REMOVAL\n"); goto out; } } else { usb_stor_dbg(us, "NOT working scsi, not SS\n"); chip->proto_handler_backup(srb, us); /* Check whether card is plugged in */ if (srb->cmnd[0] == TEST_UNIT_READY) { if (srb->result == SAM_STAT_GOOD) { SET_LUN_READY(chip, srb->device->lun); if (card_first_show) { card_first_show = 0; fw5895_set_mmc_wp(us); } } else { CLR_LUN_READY(chip, srb->device->lun); card_first_show = 1; } } if (rts51x_get_stat(chip) != RTS51X_STAT_IDLE) rts51x_set_stat(chip, RTS51X_STAT_IDLE); } } out: usb_stor_dbg(us, "state:%d\n", rts51x_get_stat(chip)); if (rts51x_get_stat(chip) == RTS51X_STAT_RUN) rts51x_modi_suspend_timer(chip); } static int realtek_cr_autosuspend_setup(struct us_data *us) { struct rts51x_chip *chip; struct rts51x_status *status = NULL; u8 buf[16]; int retval; chip = (struct rts51x_chip *)us->extra; chip->support_auto_delink = 0; chip->pwr_state = US_RESUME; chip->lun_ready = 0; rts51x_set_stat(chip, RTS51X_STAT_INIT); retval = rts51x_read_status(us, 0, buf, 16, &(chip->status_len)); if (retval != STATUS_SUCCESS) { usb_stor_dbg(us, "Read status fail\n"); return -EIO; } status = chip->status; status->vid = ((u16) buf[0] << 8) | buf[1]; status->pid = ((u16) buf[2] << 8) | buf[3]; status->cur_lun = buf[4]; status->card_type = buf[5]; status->total_lun = buf[6]; status->fw_ver = ((u16) buf[7] << 8) | buf[8]; status->phy_exist = buf[9]; status->multi_flag = buf[10]; status->multi_card = buf[11]; status->log_exist = buf[12]; if (chip->status_len == 16) { status->detailed_type.detailed_type1 = buf[13]; status->function[0] = buf[14]; status->function[1] = buf[15]; } /* back up the proto_handler in us->extra */ chip = (struct rts51x_chip *)(us->extra); chip->proto_handler_backup = us->proto_handler; /* Set the autosuspend_delay to 0 */ pm_runtime_set_autosuspend_delay(&us->pusb_dev->dev, 0); /* override us->proto_handler setted in get_protocol() */ us->proto_handler = rts51x_invoke_transport; chip->timer_expires = 0; timer_setup(&chip->rts51x_suspend_timer, rts51x_suspend_timer_fn, 0); fw5895_init(us); /* enable autosuspend function of the usb device */ usb_enable_autosuspend(us->pusb_dev); return 0; } #endif static void realtek_cr_destructor(void *extra) { struct rts51x_chip *chip = extra; if (!chip) return; #ifdef CONFIG_REALTEK_AUTOPM if (ss_en) { del_timer(&chip->rts51x_suspend_timer); chip->timer_expires = 0; } #endif kfree(chip->status); } #ifdef CONFIG_PM static int realtek_cr_suspend(struct usb_interface *iface, pm_message_t message) { struct us_data *us = usb_get_intfdata(iface); /* wait until no command is running */ 
mutex_lock(&us->dev_mutex); config_autodelink_before_power_down(us); mutex_unlock(&us->dev_mutex); return 0; } static int realtek_cr_resume(struct usb_interface *iface) { struct us_data *us = usb_get_intfdata(iface); fw5895_init(us); config_autodelink_after_power_on(us); return 0; } #else #define realtek_cr_suspend NULL #define realtek_cr_resume NULL #endif static int init_realtek_cr(struct us_data *us) { struct rts51x_chip *chip; int size, i, retval; chip = kzalloc(sizeof(struct rts51x_chip), GFP_KERNEL); if (!chip) return -ENOMEM; us->extra = chip; us->extra_destructor = realtek_cr_destructor; us->max_lun = chip->max_lun = rts51x_get_max_lun(us); chip->us = us; usb_stor_dbg(us, "chip->max_lun = %d\n", chip->max_lun); size = (chip->max_lun + 1) * sizeof(struct rts51x_status); chip->status = kzalloc(size, GFP_KERNEL); if (!chip->status) goto INIT_FAIL; for (i = 0; i <= (int)(chip->max_lun); i++) { retval = rts51x_check_status(us, (u8) i); if (retval < 0) goto INIT_FAIL; } if (CHECK_PID(chip, 0x0138) || CHECK_PID(chip, 0x0158) || CHECK_PID(chip, 0x0159)) { if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) || CHECK_FW_VER(chip, 0x5901)) SET_AUTO_DELINK(chip); if (STATUS_LEN(chip) == 16) { if (SUPPORT_AUTO_DELINK(chip)) SET_AUTO_DELINK(chip); } } #ifdef CONFIG_REALTEK_AUTOPM if (ss_en) realtek_cr_autosuspend_setup(us); #endif usb_stor_dbg(us, "chip->flag = 0x%x\n", chip->flag); (void)config_autodelink_after_power_on(us); return 0; INIT_FAIL: if (us->extra) { kfree(chip->status); kfree(us->extra); us->extra = NULL; } return -EIO; } static struct scsi_host_template realtek_cr_host_template; static int realtek_cr_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct us_data *us; int result; dev_dbg(&intf->dev, "Probe Realtek Card Reader!\n"); result = usb_stor_probe1(&us, intf, id, (id - realtek_cr_ids) + realtek_cr_unusual_dev_list, &realtek_cr_host_template); if (result) return result; result = usb_stor_probe2(us); return result; } static struct usb_driver realtek_cr_driver = { .name = DRV_NAME, .probe = realtek_cr_probe, .disconnect = usb_stor_disconnect, /* .suspend = usb_stor_suspend, */ /* .resume = usb_stor_resume, */ .reset_resume = usb_stor_reset_resume, .suspend = realtek_cr_suspend, .resume = realtek_cr_resume, .pre_reset = usb_stor_pre_reset, .post_reset = usb_stor_post_reset, .id_table = realtek_cr_ids, .soft_unbind = 1, .supports_autosuspend = 1, .no_dynamic_id = 1, }; module_usb_stor_driver(realtek_cr_driver, realtek_cr_host_template, DRV_NAME);
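The bulk helpers above (rts51x_bulk_transport() and friends) all build the same 31-byte Bulk-Only command block wrapper: signature, tag, transfer length, direction flag, LUN, CDB length, then a 16-byte CDB carrying the vendor opcodes (0xF0 0x0D for a register read, 0xF0 0x0E for a write). As an illustration only, here is a small stand-alone user-space sketch that lays out such a wrapper the way rts51x_read_mem() fills it in for the 0xFE77 oscillator register. The struct below is a hypothetical, simplified stand-in for the kernel's struct bulk_cb_wrap, it assumes a little-endian host so that the integer fields land in wire order, and it does not talk to any device.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the kernel's struct bulk_cb_wrap (31 bytes). */
struct cbw {
	uint32_t signature;		/* "USBC" = 0x43425355, little-endian */
	uint32_t tag;			/* echoed back by the device in the CSW */
	uint32_t data_transfer_length;
	uint8_t  flags;			/* 0x80 = device-to-host (IN) data stage */
	uint8_t  lun;
	uint8_t  cb_length;		/* valid bytes in cb[] */
	uint8_t  cb[16];		/* command block (CDB) */
} __attribute__((packed));

int main(void)
{
	uint16_t addr = 0xFE77, len = 1;	/* register read, as in enable_oscillator() */
	struct cbw cbw;

	memset(&cbw, 0, sizeof(cbw));
	cbw.signature = 0x43425355;
	cbw.tag = 1;
	cbw.data_transfer_length = len;
	cbw.flags = 0x80;			/* data flows from the device */
	cbw.lun = 0;
	cbw.cb_length = 12;
	cbw.cb[0] = 0xF0;			/* vendor command group */
	cbw.cb[1] = 0x0D;			/* "read memory", as in rts51x_read_mem() */
	cbw.cb[2] = addr >> 8;
	cbw.cb[3] = addr & 0xFF;
	cbw.cb[4] = len >> 8;
	cbw.cb[5] = len & 0xFF;

	/* Dump the 31 wire bytes that would precede the data stage. */
	for (size_t i = 0; i < sizeof(cbw); i++)
		printf("%02x%c", ((uint8_t *)&cbw)[i],
		       i + 1 == sizeof(cbw) ? '\n' : ' ');
	return 0;
}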
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);

void ksm_add_vma(struct vm_area_struct *vma);
int ksm_enable_merge_any(struct mm_struct *mm);
int ksm_disable_merge_any(struct mm_struct *mm);
int ksm_disable(struct mm_struct *mm);

int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
/*
 * To identify zeropages that were mapped by KSM, we reuse the dirty bit
 * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
 * deduplicating memory.
 */
#define is_ksm_zero_pte(pte)	(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))

extern atomic_long_t ksm_zero_pages;

static inline void ksm_map_zero_page(struct mm_struct *mm)
{
	atomic_long_inc(&ksm_zero_pages);
	atomic_long_inc(&mm->ksm_zero_pages);
}

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
	if (is_ksm_zero_pte(pte)) {
		atomic_long_dec(&ksm_zero_pages);
		atomic_long_dec(&mm->ksm_zero_pages);
	}
}

static inline long mm_ksm_zero_pages(struct mm_struct *mm)
{
	return atomic_long_read(&mm->ksm_zero_pages);
}

static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	/* Adding mm to ksm is best effort on fork. */
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		__ksm_enter(mm);
}

static inline int ksm_execve(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
		return __ksm_enter(mm);

	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma). do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct folio *ksm_might_need_to_copy(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr);

void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
void collect_procs_ksm(const struct folio *folio, const struct page *page,
		       struct list_head *to_kill, int force_early);
long ksm_process_profit(struct mm_struct *);
bool ksm_process_mergeable(struct mm_struct *mm);

#else  /* !CONFIG_KSM */

static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

static inline int ksm_disable(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
}

static inline int ksm_execve(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
}

static inline void collect_procs_ksm(const struct folio *folio,
				     const struct page *page,
				     struct list_head *to_kill,
				     int force_early)
{
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr)
{
	return folio;
}

static inline void rmap_walk_ksm(struct folio *folio,
				 struct rmap_walk_control *rwc)
{
}

static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */
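The declarations above are the kernel-internal half of KSM. The user-visible half is madvise(MADV_MERGEABLE), which is what eventually reaches ksm_madvise() and marks a VMA mergeable; newer kernels additionally offer prctl(PR_SET_MEMORY_MERGE) for the per-process MMF_VM_MERGE_ANY mode declared here as ksm_enable_merge_any(). Below is a minimal user-space sketch, assuming a kernel built with CONFIG_KSM and ksmd enabled via /sys/kernel/mm/ksm/run; it only opts two identical anonymous ranges into merging and then waits.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 64 * 4096;

	/* KSM only scans anonymous, private memory. */
	char *a = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *b = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (a == MAP_FAILED || b == MAP_FAILED)
		return 1;

	/* Identical content in both ranges gives ksmd something to merge. */
	memset(a, 0x5a, len);
	memset(b, 0x5a, len);

	/* Opt the ranges into KSM; this is the user-space path into ksm_madvise(). */
	if (madvise(a, len, MADV_MERGEABLE) || madvise(b, len, MADV_MERGEABLE))
		perror("madvise(MADV_MERGEABLE)");

	/*
	 * Merging is asynchronous: ksmd deduplicates the pages over time,
	 * and progress is visible in /sys/kernel/mm/ksm/pages_sharing.
	 */
	pause();
	return 0;
}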
875 876 877 // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2005,2006,2007,2008 IBM Corporation * * Authors: * Mimi Zohar <zohar@us.ibm.com> * Kylene Hall <kjhall@us.ibm.com> * * File: ima_crypto.c * Calculates md5/sha1 file hash, template hash, boot-aggreate hash */ #include <linux/kernel.h> #include <linux/moduleparam.h> #include <linux/ratelimit.h> #include <linux/file.h> #include <linux/crypto.h> #include <linux/scatterlist.h> #include <linux/err.h> #include <linux/slab.h> #include <crypto/hash.h> #include "ima.h" /* minimum file size for ahash use */ static unsigned long ima_ahash_minsize; module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644); MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use"); /* default is 0 - 1 page. */ static int ima_maxorder; static unsigned int ima_bufsize = PAGE_SIZE; static int param_set_bufsize(const char *val, const struct kernel_param *kp) { unsigned long long size; int order; size = memparse(val, NULL); order = get_order(size); if (order > MAX_PAGE_ORDER) return -EINVAL; ima_maxorder = order; ima_bufsize = PAGE_SIZE << order; return 0; } static const struct kernel_param_ops param_ops_bufsize = { .set = param_set_bufsize, .get = param_get_uint, }; #define param_check_bufsize(name, p) __param_check(name, p, unsigned int) module_param_named(ahash_bufsize, ima_bufsize, bufsize, 0644); MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size"); static struct crypto_shash *ima_shash_tfm; static struct crypto_ahash *ima_ahash_tfm; int ima_sha1_idx __ro_after_init; int ima_hash_algo_idx __ro_after_init; /* * Additional number of slots reserved, as needed, for SHA1 * and IMA default algo. */ int ima_extra_slots __ro_after_init; struct ima_algo_desc *ima_algo_array __ro_after_init; static int __init ima_init_ima_crypto(void) { long rc; ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0); if (IS_ERR(ima_shash_tfm)) { rc = PTR_ERR(ima_shash_tfm); pr_err("Can not allocate %s (reason: %ld)\n", hash_algo_name[ima_hash_algo], rc); return rc; } pr_info("Allocated hash algorithm: %s\n", hash_algo_name[ima_hash_algo]); return 0; } static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo) { struct crypto_shash *tfm = ima_shash_tfm; int rc, i; if (algo < 0 || algo >= HASH_ALGO__LAST) algo = ima_hash_algo; if (algo == ima_hash_algo) return tfm; for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) if (ima_algo_array[i].tfm && ima_algo_array[i].algo == algo) return ima_algo_array[i].tfm; tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0); if (IS_ERR(tfm)) { rc = PTR_ERR(tfm); pr_err("Can not allocate %s (reason: %d)\n", hash_algo_name[algo], rc); } return tfm; } int __init ima_init_crypto(void) { enum hash_algo algo; long rc; int i; rc = ima_init_ima_crypto(); if (rc) return rc; ima_sha1_idx = -1; ima_hash_algo_idx = -1; for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) { algo = ima_tpm_chip->allocated_banks[i].crypto_id; if (algo == HASH_ALGO_SHA1) ima_sha1_idx = i; if (algo == ima_hash_algo) ima_hash_algo_idx = i; } if (ima_sha1_idx < 0) { ima_sha1_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++; if (ima_hash_algo == HASH_ALGO_SHA1) ima_hash_algo_idx = ima_sha1_idx; } if (ima_hash_algo_idx < 0) ima_hash_algo_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++; ima_algo_array = kcalloc(NR_BANKS(ima_tpm_chip) + ima_extra_slots, sizeof(*ima_algo_array), GFP_KERNEL); if (!ima_algo_array) { rc = -ENOMEM; goto out; } for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) { algo = ima_tpm_chip->allocated_banks[i].crypto_id; 
ima_algo_array[i].algo = algo; /* unknown TPM algorithm */ if (algo == HASH_ALGO__LAST) continue; if (algo == ima_hash_algo) { ima_algo_array[i].tfm = ima_shash_tfm; continue; } ima_algo_array[i].tfm = ima_alloc_tfm(algo); if (IS_ERR(ima_algo_array[i].tfm)) { if (algo == HASH_ALGO_SHA1) { rc = PTR_ERR(ima_algo_array[i].tfm); ima_algo_array[i].tfm = NULL; goto out_array; } ima_algo_array[i].tfm = NULL; } } if (ima_sha1_idx >= NR_BANKS(ima_tpm_chip)) { if (ima_hash_algo == HASH_ALGO_SHA1) { ima_algo_array[ima_sha1_idx].tfm = ima_shash_tfm; } else { ima_algo_array[ima_sha1_idx].tfm = ima_alloc_tfm(HASH_ALGO_SHA1); if (IS_ERR(ima_algo_array[ima_sha1_idx].tfm)) { rc = PTR_ERR(ima_algo_array[ima_sha1_idx].tfm); goto out_array; } } ima_algo_array[ima_sha1_idx].algo = HASH_ALGO_SHA1; } if (ima_hash_algo_idx >= NR_BANKS(ima_tpm_chip) && ima_hash_algo_idx != ima_sha1_idx) { ima_algo_array[ima_hash_algo_idx].tfm = ima_shash_tfm; ima_algo_array[ima_hash_algo_idx].algo = ima_hash_algo; } return 0; out_array: for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) { if (!ima_algo_array[i].tfm || ima_algo_array[i].tfm == ima_shash_tfm) continue; crypto_free_shash(ima_algo_array[i].tfm); } kfree(ima_algo_array); out: crypto_free_shash(ima_shash_tfm); return rc; } static void ima_free_tfm(struct crypto_shash *tfm) { int i; if (tfm == ima_shash_tfm) return; for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) if (ima_algo_array[i].tfm == tfm) return; crypto_free_shash(tfm); } /** * ima_alloc_pages() - Allocate contiguous pages. * @max_size: Maximum amount of memory to allocate. * @allocated_size: Returned size of actual allocation. * @last_warn: Should the min_size allocation warn or not. * * Tries to do opportunistic allocation for memory first trying to allocate * max_size amount of memory and then splitting that until zero order is * reached. Allocation is tried without generating allocation warnings unless * last_warn is set. Last_warn set affects only last allocation of zero order. * * By default, ima_maxorder is 0 and it is equivalent to kmalloc(GFP_KERNEL) * * Return pointer to allocated memory, or NULL on failure. */ static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size, int last_warn) { void *ptr; int order = ima_maxorder; gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY; if (order) order = min(get_order(max_size), order); for (; order; order--) { ptr = (void *)__get_free_pages(gfp_mask, order); if (ptr) { *allocated_size = PAGE_SIZE << order; return ptr; } } /* order is zero - one page */ gfp_mask = GFP_KERNEL; if (!last_warn) gfp_mask |= __GFP_NOWARN; ptr = (void *)__get_free_pages(gfp_mask, 0); if (ptr) { *allocated_size = PAGE_SIZE; return ptr; } *allocated_size = 0; return NULL; } /** * ima_free_pages() - Free pages allocated by ima_alloc_pages(). * @ptr: Pointer to allocated pages. * @size: Size of allocated buffer. 
*/ static void ima_free_pages(void *ptr, size_t size) { if (!ptr) return; free_pages((unsigned long)ptr, get_order(size)); } static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo) { struct crypto_ahash *tfm = ima_ahash_tfm; int rc; if (algo < 0 || algo >= HASH_ALGO__LAST) algo = ima_hash_algo; if (algo != ima_hash_algo || !tfm) { tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0); if (!IS_ERR(tfm)) { if (algo == ima_hash_algo) ima_ahash_tfm = tfm; } else { rc = PTR_ERR(tfm); pr_err("Can not allocate %s (reason: %d)\n", hash_algo_name[algo], rc); } } return tfm; } static void ima_free_atfm(struct crypto_ahash *tfm) { if (tfm != ima_ahash_tfm) crypto_free_ahash(tfm); } static inline int ahash_wait(int err, struct crypto_wait *wait) { err = crypto_wait_req(err, wait); if (err) pr_crit_ratelimited("ahash calculation failed: err: %d\n", err); return err; } static int ima_calc_file_hash_atfm(struct file *file, struct ima_digest_data *hash, struct crypto_ahash *tfm) { loff_t i_size, offset; char *rbuf[2] = { NULL, }; int rc, rbuf_len, active = 0, ahash_rc = 0; struct ahash_request *req; struct scatterlist sg[1]; struct crypto_wait wait; size_t rbuf_size[2]; hash->length = crypto_ahash_digestsize(tfm); req = ahash_request_alloc(tfm, GFP_KERNEL); if (!req) return -ENOMEM; crypto_init_wait(&wait); ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &wait); rc = ahash_wait(crypto_ahash_init(req), &wait); if (rc) goto out1; i_size = i_size_read(file_inode(file)); if (i_size == 0) goto out2; /* * Try to allocate maximum size of memory. * Fail if even a single page cannot be allocated. */ rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1); if (!rbuf[0]) { rc = -ENOMEM; goto out1; } /* Only allocate one buffer if that is enough. */ if (i_size > rbuf_size[0]) { /* * Try to allocate secondary buffer. If that fails fallback to * using single buffering. Use previous memory allocation size * as baseline for possible allocation size. */ rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0], &rbuf_size[1], 0); } for (offset = 0; offset < i_size; offset += rbuf_len) { if (!rbuf[1] && offset) { /* Not using two buffers, and it is not the first * read/request, wait for the completion of the * previous ahash_update() request. */ rc = ahash_wait(ahash_rc, &wait); if (rc) goto out3; } /* read buffer */ rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]); rc = integrity_kernel_read(file, offset, rbuf[active], rbuf_len); if (rc != rbuf_len) { if (rc >= 0) rc = -EINVAL; /* * Forward current rc, do not overwrite with return value * from ahash_wait() */ ahash_wait(ahash_rc, &wait); goto out3; } if (rbuf[1] && offset) { /* Using two buffers, and it is not the first * read/request, wait for the completion of the * previous ahash_update() request. 
*/ rc = ahash_wait(ahash_rc, &wait); if (rc) goto out3; } sg_init_one(&sg[0], rbuf[active], rbuf_len); ahash_request_set_crypt(req, sg, NULL, rbuf_len); ahash_rc = crypto_ahash_update(req); if (rbuf[1]) active = !active; /* swap buffers, if we use two */ } /* wait for the last update request to complete */ rc = ahash_wait(ahash_rc, &wait); out3: ima_free_pages(rbuf[0], rbuf_size[0]); ima_free_pages(rbuf[1], rbuf_size[1]); out2: if (!rc) { ahash_request_set_crypt(req, NULL, hash->digest, 0); rc = ahash_wait(crypto_ahash_final(req), &wait); } out1: ahash_request_free(req); return rc; } static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash) { struct crypto_ahash *tfm; int rc; tfm = ima_alloc_atfm(hash->algo); if (IS_ERR(tfm)) return PTR_ERR(tfm); rc = ima_calc_file_hash_atfm(file, hash, tfm); ima_free_atfm(tfm); return rc; } static int ima_calc_file_hash_tfm(struct file *file, struct ima_digest_data *hash, struct crypto_shash *tfm) { loff_t i_size, offset = 0; char *rbuf; int rc; SHASH_DESC_ON_STACK(shash, tfm); shash->tfm = tfm; hash->length = crypto_shash_digestsize(tfm); rc = crypto_shash_init(shash); if (rc != 0) return rc; i_size = i_size_read(file_inode(file)); if (i_size == 0) goto out; rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!rbuf) return -ENOMEM; while (offset < i_size) { int rbuf_len; rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE); if (rbuf_len < 0) { rc = rbuf_len; break; } if (rbuf_len == 0) { /* unexpected EOF */ rc = -EINVAL; break; } offset += rbuf_len; rc = crypto_shash_update(shash, rbuf, rbuf_len); if (rc) break; } kfree(rbuf); out: if (!rc) rc = crypto_shash_final(shash, hash->digest); return rc; } static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash) { struct crypto_shash *tfm; int rc; tfm = ima_alloc_tfm(hash->algo); if (IS_ERR(tfm)) return PTR_ERR(tfm); rc = ima_calc_file_hash_tfm(file, hash, tfm); ima_free_tfm(tfm); return rc; } /* * ima_calc_file_hash - calculate file hash * * Asynchronous hash (ahash) allows using HW acceleration for calculating * a hash. ahash performance varies for different data sizes on different * crypto accelerators. shash performance might be better for smaller files. * The 'ima.ahash_minsize' module parameter allows specifying the best * minimum file size for using ahash on the system. * * If the ima.ahash_minsize parameter is not specified, this function uses * shash for the hash calculation. If ahash fails, it falls back to using * shash. */ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash) { loff_t i_size; int rc; struct file *f = file; bool new_file_instance = false; /* * For consistency, fail file's opened with the O_DIRECT flag on * filesystems mounted with/without DAX option. 
*/ if (file->f_flags & O_DIRECT) { hash->length = hash_digest_size[ima_hash_algo]; hash->algo = ima_hash_algo; return -EINVAL; } /* Open a new file instance in O_RDONLY if we cannot read */ if (!(file->f_mode & FMODE_READ)) { int flags = file->f_flags & ~(O_WRONLY | O_APPEND | O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL); flags |= O_RDONLY; f = dentry_open(&file->f_path, flags, file->f_cred); if (IS_ERR(f)) return PTR_ERR(f); new_file_instance = true; } i_size = i_size_read(file_inode(f)); if (ima_ahash_minsize && i_size >= ima_ahash_minsize) { rc = ima_calc_file_ahash(f, hash); if (!rc) goto out; } rc = ima_calc_file_shash(f, hash); out: if (new_file_instance) fput(f); return rc; } /* * Calculate the hash of template data */ static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data, struct ima_template_entry *entry, int tfm_idx) { SHASH_DESC_ON_STACK(shash, ima_algo_array[tfm_idx].tfm); struct ima_template_desc *td = entry->template_desc; int num_fields = entry->template_desc->num_fields; int rc, i; shash->tfm = ima_algo_array[tfm_idx].tfm; rc = crypto_shash_init(shash); if (rc != 0) return rc; for (i = 0; i < num_fields; i++) { u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 }; u8 *data_to_hash = field_data[i].data; u32 datalen = field_data[i].len; u32 datalen_to_hash = !ima_canonical_fmt ? datalen : (__force u32)cpu_to_le32(datalen); if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) { rc = crypto_shash_update(shash, (const u8 *) &datalen_to_hash, sizeof(datalen_to_hash)); if (rc) break; } else if (strcmp(td->fields[i]->field_id, "n") == 0) { memcpy(buffer, data_to_hash, datalen); data_to_hash = buffer; datalen = IMA_EVENT_NAME_LEN_MAX + 1; } rc = crypto_shash_update(shash, data_to_hash, datalen); if (rc) break; } if (!rc) rc = crypto_shash_final(shash, entry->digests[tfm_idx].digest); return rc; } int ima_calc_field_array_hash(struct ima_field_data *field_data, struct ima_template_entry *entry) { u16 alg_id; int rc, i; rc = ima_calc_field_array_hash_tfm(field_data, entry, ima_sha1_idx); if (rc) return rc; entry->digests[ima_sha1_idx].alg_id = TPM_ALG_SHA1; for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) { if (i == ima_sha1_idx) continue; if (i < NR_BANKS(ima_tpm_chip)) { alg_id = ima_tpm_chip->allocated_banks[i].alg_id; entry->digests[i].alg_id = alg_id; } /* for unmapped TPM algorithms digest is still a padded SHA1 */ if (!ima_algo_array[i].tfm) { memcpy(entry->digests[i].digest, entry->digests[ima_sha1_idx].digest, TPM_DIGEST_SIZE); continue; } rc = ima_calc_field_array_hash_tfm(field_data, entry, i); if (rc) return rc; } return rc; } static int calc_buffer_ahash_atfm(const void *buf, loff_t len, struct ima_digest_data *hash, struct crypto_ahash *tfm) { struct ahash_request *req; struct scatterlist sg; struct crypto_wait wait; int rc, ahash_rc = 0; hash->length = crypto_ahash_digestsize(tfm); req = ahash_request_alloc(tfm, GFP_KERNEL); if (!req) return -ENOMEM; crypto_init_wait(&wait); ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &wait); rc = ahash_wait(crypto_ahash_init(req), &wait); if (rc) goto out; sg_init_one(&sg, buf, len); ahash_request_set_crypt(req, &sg, NULL, len); ahash_rc = crypto_ahash_update(req); /* wait for the update request to complete */ rc = ahash_wait(ahash_rc, &wait); if (!rc) { ahash_request_set_crypt(req, NULL, hash->digest, 0); rc = ahash_wait(crypto_ahash_final(req), &wait); } out: ahash_request_free(req); return rc; } static int calc_buffer_ahash(const void *buf, loff_t len, 
struct ima_digest_data *hash) { struct crypto_ahash *tfm; int rc; tfm = ima_alloc_atfm(hash->algo); if (IS_ERR(tfm)) return PTR_ERR(tfm); rc = calc_buffer_ahash_atfm(buf, len, hash, tfm); ima_free_atfm(tfm); return rc; } static int calc_buffer_shash_tfm(const void *buf, loff_t size, struct ima_digest_data *hash, struct crypto_shash *tfm) { SHASH_DESC_ON_STACK(shash, tfm); unsigned int len; int rc; shash->tfm = tfm; hash->length = crypto_shash_digestsize(tfm); rc = crypto_shash_init(shash); if (rc != 0) return rc; while (size) { len = size < PAGE_SIZE ? size : PAGE_SIZE; rc = crypto_shash_update(shash, buf, len); if (rc) break; buf += len; size -= len; } if (!rc) rc = crypto_shash_final(shash, hash->digest); return rc; } static int calc_buffer_shash(const void *buf, loff_t len, struct ima_digest_data *hash) { struct crypto_shash *tfm; int rc; tfm = ima_alloc_tfm(hash->algo); if (IS_ERR(tfm)) return PTR_ERR(tfm); rc = calc_buffer_shash_tfm(buf, len, hash, tfm); ima_free_tfm(tfm); return rc; } int ima_calc_buffer_hash(const void *buf, loff_t len, struct ima_digest_data *hash) { int rc; if (ima_ahash_minsize && len >= ima_ahash_minsize) { rc = calc_buffer_ahash(buf, len, hash); if (!rc) return 0; } return calc_buffer_shash(buf, len, hash); } static void ima_pcrread(u32 idx, struct tpm_digest *d) { if (!ima_tpm_chip) return; if (tpm_pcr_read(ima_tpm_chip, idx, d) != 0) pr_err("Error Communicating to TPM chip\n"); } /* * The boot_aggregate is a cumulative hash over TPM registers 0 - 7. With * TPM 1.2 the boot_aggregate was based on reading the SHA1 PCRs, but with * TPM 2.0 hash agility, TPM chips could support multiple TPM PCR banks, * allowing firmware to configure and enable different banks. * * Knowing which TPM bank is read to calculate the boot_aggregate digest * needs to be conveyed to a verifier. For this reason, use the same * hash algorithm for reading the TPM PCRs as for calculating the boot * aggregate digest as stored in the measurement list. */ static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id, struct crypto_shash *tfm) { struct tpm_digest d = { .alg_id = alg_id, .digest = {0} }; int rc; u32 i; SHASH_DESC_ON_STACK(shash, tfm); shash->tfm = tfm; pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n", d.alg_id); rc = crypto_shash_init(shash); if (rc != 0) return rc; /* cumulative digest over TPM registers 0-7 */ for (i = TPM_PCR0; i < TPM_PCR8; i++) { ima_pcrread(i, &d); /* now accumulate with current aggregate */ rc = crypto_shash_update(shash, d.digest, crypto_shash_digestsize(tfm)); if (rc != 0) return rc; } /* * Extend cumulative digest over TPM registers 8-9, which contain * measurement for the kernel command line (reg. 8) and image (reg. 9) * in a typical PCR allocation. Registers 8-9 are only included in * non-SHA1 boot_aggregate digests to avoid ambiguity. 
*/ if (alg_id != TPM_ALG_SHA1) { for (i = TPM_PCR8; i < TPM_PCR10; i++) { ima_pcrread(i, &d); rc = crypto_shash_update(shash, d.digest, crypto_shash_digestsize(tfm)); } } if (!rc) crypto_shash_final(shash, digest); return rc; } int ima_calc_boot_aggregate(struct ima_digest_data *hash) { struct crypto_shash *tfm; u16 crypto_id, alg_id; int rc, i, bank_idx = -1; for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) { crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id; if (crypto_id == hash->algo) { bank_idx = i; break; } if (crypto_id == HASH_ALGO_SHA256) bank_idx = i; if (bank_idx == -1 && crypto_id == HASH_ALGO_SHA1) bank_idx = i; } if (bank_idx == -1) { pr_err("No suitable TPM algorithm for boot aggregate\n"); return 0; } hash->algo = ima_tpm_chip->allocated_banks[bank_idx].crypto_id; tfm = ima_alloc_tfm(hash->algo); if (IS_ERR(tfm)) return PTR_ERR(tfm); hash->length = crypto_shash_digestsize(tfm); alg_id = ima_tpm_chip->allocated_banks[bank_idx].alg_id; rc = ima_calc_boot_aggregate_tfm(hash->digest, alg_id, tfm); ima_free_tfm(tfm); return rc; }
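ima_calc_file_hash_tfm() above is the classic init/update/final pattern: read the file in PAGE_SIZE chunks and pass each chunk to crypto_shash_update() before a single crypto_shash_final(). The same in-kernel hash implementations can be driven from user space through the AF_ALG socket interface, which is a convenient way to sanity-check a digest. The sketch below assumes a kernel with CONFIG_CRYPTO_USER_API_HASH and, purely for illustration, hashes a fixed in-memory buffer in two chunks rather than reading a real file.

#include <linux/if_alg.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha256",
	};
	static unsigned char data[8192];	/* pretend file contents */
	unsigned char digest[32];
	size_t off, chunk = 4096;		/* stand-in for the PAGE_SIZE reads */
	int tfmfd, opfd;

	memset(data, 'A', sizeof(data));

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return 1;
	opfd = accept(tfmfd, NULL, 0);		/* one hash operation instance */
	if (opfd < 0)
		return 1;

	/*
	 * Chunked updates: MSG_MORE on all but the last chunk, mirroring the
	 * crypto_shash_update() loop in ima_calc_file_hash_tfm().
	 */
	for (off = 0; off < sizeof(data); off += chunk) {
		int more = (off + chunk < sizeof(data)) ? MSG_MORE : 0;

		if (send(opfd, data + off, chunk, more) != (ssize_t)chunk)
			return 1;
	}

	/* Reading the socket finalizes the digest (the "final" step). */
	if (read(opfd, digest, sizeof(digest)) != (ssize_t)sizeof(digest))
		return 1;

	for (off = 0; off < sizeof(digest); off++)
		printf("%02x", digest[off]);
	putchar('\n');
	return 0;
}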
// SPDX-License-Identifier: GPL-2.0-only
/*
 * dma-fence-util: misc functions for dma_fence objects
 *
 * Copyright (C) 2022 Advanced Micro Devices, Inc.
 * Authors:
 *	Christian König <christian.koenig@amd.com>
 */

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/dma-fence-chain.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/slab.h>
#include <linux/sort.h>

/* Internal helper to start new array iteration, don't use directly */
static struct dma_fence *
__dma_fence_unwrap_array(struct dma_fence_unwrap *cursor)
{
	cursor->array = dma_fence_chain_contained(cursor->chain);
	cursor->index = 0;
	return dma_fence_array_first(cursor->array);
}

/**
 * dma_fence_unwrap_first - return the first fence from fence containers
 * @head: the entrypoint into the containers
 * @cursor: current position inside the containers
 *
 * Unwraps potential dma_fence_chain/dma_fence_array containers and return the
 * first fence.
 */
struct dma_fence *dma_fence_unwrap_first(struct dma_fence *head,
					 struct dma_fence_unwrap *cursor)
{
	cursor->chain = dma_fence_get(head);
	return __dma_fence_unwrap_array(cursor);
}
EXPORT_SYMBOL_GPL(dma_fence_unwrap_first);

/**
 * dma_fence_unwrap_next - return the next fence from the fence containers
 * @cursor: current position inside the containers
 *
 * Continue unwrapping the dma_fence_chain/dma_fence_array containers and return
 * the next fence from them.
 */
struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
{
	struct dma_fence *tmp;

	++cursor->index;
	tmp = dma_fence_array_next(cursor->array, cursor->index);
	if (tmp)
		return tmp;

	cursor->chain = dma_fence_chain_walk(cursor->chain);
	return __dma_fence_unwrap_array(cursor);
}
EXPORT_SYMBOL_GPL(dma_fence_unwrap_next);

static int fence_cmp(const void *_a, const void *_b)
{
	struct dma_fence *a = *(struct dma_fence **)_a;
	struct dma_fence *b = *(struct dma_fence **)_b;

	if (a->context < b->context)
		return -1;
	else if (a->context > b->context)
		return 1;

	if (dma_fence_is_later(b, a))
		return 1;
	else if (dma_fence_is_later(a, b))
		return -1;

	return 0;
}

/* Implementation for the dma_fence_merge() macro, don't use directly */
struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
					   struct dma_fence **fences,
					   struct dma_fence_unwrap *iter)
{
	struct dma_fence *tmp, *unsignaled = NULL, **array;
	struct dma_fence_array *result;
	ktime_t timestamp;
	int i, j, count;

	count = 0;
	timestamp = ns_to_ktime(0);
	for (i = 0; i < num_fences; ++i) {
		dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
			if (!dma_fence_is_signaled(tmp)) {
				dma_fence_put(unsignaled);
				unsignaled = dma_fence_get(tmp);
				++count;
			} else {
				ktime_t t = dma_fence_timestamp(tmp);

				if (ktime_after(t, timestamp))
					timestamp = t;
			}
		}
	}

	/*
	 * If we couldn't find a pending fence just return a private signaled
	 * fence with the timestamp of the last signaled one.
* * Or if there was a single unsignaled fence left we can return it * directly and early since that is a major path on many workloads. */ if (count == 0) return dma_fence_allocate_private_stub(timestamp); else if (count == 1) return unsignaled; dma_fence_put(unsignaled); array = kmalloc_array(count, sizeof(*array), GFP_KERNEL); if (!array) return NULL; count = 0; for (i = 0; i < num_fences; ++i) { dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) { if (!dma_fence_is_signaled(tmp)) { array[count++] = dma_fence_get(tmp); } else { ktime_t t = dma_fence_timestamp(tmp); if (ktime_after(t, timestamp)) timestamp = t; } } } if (count == 0 || count == 1) goto return_fastpath; sort(array, count, sizeof(*array), fence_cmp, NULL); /* * Only keep the most recent fence for each context. */ j = 0; for (i = 1; i < count; i++) { if (array[i]->context == array[j]->context) dma_fence_put(array[i]); else array[++j] = array[i]; } count = ++j; if (count > 1) { result = dma_fence_array_create(count, array, dma_fence_context_alloc(1), 1, false); if (!result) { for (i = 0; i < count; i++) dma_fence_put(array[i]); tmp = NULL; goto return_tmp; } return &result->base; } return_fastpath: if (count == 0) tmp = dma_fence_allocate_private_stub(timestamp); else tmp = array[0]; return_tmp: kfree(array); return tmp; } EXPORT_SYMBOL_GPL(__dma_fence_unwrap_merge);
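The heart of __dma_fence_unwrap_merge() is the dedup step: sort the collected fences so they group by context with the most recent fence first, then keep only the first entry of each context. The stand-alone sketch below replays that step with a simplified fence record and qsort(); struct fake_fence and fake_fence_cmp() are illustrative stand-ins, and plain sequence-number comparison is assumed where the kernel uses dma_fence_is_later() (which also copes with sequence-number wraparound).

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Simplified stand-in for struct dma_fence: only the fields the dedup looks at. */
struct fake_fence {
	uint64_t context;
	uint64_t seqno;
};

/* Same ordering as fence_cmp(): group by context, most recent fence first. */
static int fake_fence_cmp(const void *_a, const void *_b)
{
	const struct fake_fence *a = _a, *b = _b;

	if (a->context != b->context)
		return a->context < b->context ? -1 : 1;
	if (a->seqno != b->seqno)
		return a->seqno < b->seqno ? 1 : -1;	/* later fence sorts first */
	return 0;
}

int main(void)
{
	struct fake_fence fences[] = {
		{ .context = 7, .seqno = 2 },
		{ .context = 3, .seqno = 9 },
		{ .context = 7, .seqno = 5 },
		{ .context = 3, .seqno = 1 },
	};
	size_t count = sizeof(fences) / sizeof(fences[0]);
	size_t i, j;

	qsort(fences, count, sizeof(fences[0]), fake_fence_cmp);

	/*
	 * Keep only the first (i.e. most recent) fence of each context,
	 * mirroring the in-place compaction in __dma_fence_unwrap_merge().
	 */
	j = 0;
	for (i = 1; i < count; i++) {
		if (fences[i].context != fences[j].context)
			fences[++j] = fences[i];
	}
	count = j + 1;

	for (i = 0; i < count; i++)
		printf("context %llu -> seqno %llu\n",
		       (unsigned long long)fences[i].context,
		       (unsigned long long)fences[i].seqno);
	return 0;
}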
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2017 Netronome Systems, Inc.
 * Copyright (C) 2019 Mellanox Technologies. All rights reserved
 */

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/sysfs.h>

#include "netdevsim.h"

static DEFINE_IDA(nsim_bus_dev_ids);
static LIST_HEAD(nsim_bus_dev_list);
static DEFINE_MUTEX(nsim_bus_dev_list_lock);
static bool nsim_bus_enable;
static refcount_t nsim_bus_devs; /* Including the bus itself. */
static DECLARE_COMPLETION(nsim_bus_devs_released);

static struct nsim_bus_dev *to_nsim_bus_dev(struct device *dev)
{
	return container_of(dev, struct nsim_bus_dev, dev);
}

static ssize_t
nsim_bus_dev_numvfs_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
	unsigned int num_vfs;
	int ret;

	ret = kstrtouint(buf, 0, &num_vfs);
	if (ret)
		return ret;

	device_lock(dev);
	ret = -ENOENT;
	if (dev_get_drvdata(dev))
		ret = nsim_drv_configure_vfs(nsim_bus_dev, num_vfs);
	device_unlock(dev);

	return ret ?
ret : count; } static ssize_t nsim_bus_dev_numvfs_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev); return sprintf(buf, "%u\n", nsim_bus_dev->num_vfs); } static struct device_attribute nsim_bus_dev_numvfs_attr = __ATTR(sriov_numvfs, 0664, nsim_bus_dev_numvfs_show, nsim_bus_dev_numvfs_store); static ssize_t new_port_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev); unsigned int port_index; int ret; /* Prevent to use nsim_bus_dev before initialization. */ if (!smp_load_acquire(&nsim_bus_dev->init)) return -EBUSY; ret = kstrtouint(buf, 0, &port_index); if (ret) return ret; ret = nsim_drv_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index); return ret ? ret : count; } static struct device_attribute nsim_bus_dev_new_port_attr = __ATTR_WO(new_port); static ssize_t del_port_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev); unsigned int port_index; int ret; /* Prevent to use nsim_bus_dev before initialization. */ if (!smp_load_acquire(&nsim_bus_dev->init)) return -EBUSY; ret = kstrtouint(buf, 0, &port_index); if (ret) return ret; ret = nsim_drv_port_del(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index); return ret ? ret : count; } static struct device_attribute nsim_bus_dev_del_port_attr = __ATTR_WO(del_port); static struct attribute *nsim_bus_dev_attrs[] = { &nsim_bus_dev_numvfs_attr.attr, &nsim_bus_dev_new_port_attr.attr, &nsim_bus_dev_del_port_attr.attr, NULL, }; static const struct attribute_group nsim_bus_dev_attr_group = { .attrs = nsim_bus_dev_attrs, }; static const struct attribute_group *nsim_bus_dev_attr_groups[] = { &nsim_bus_dev_attr_group, NULL, }; static void nsim_bus_dev_release(struct device *dev) { struct nsim_bus_dev *nsim_bus_dev; nsim_bus_dev = container_of(dev, struct nsim_bus_dev, dev); kfree(nsim_bus_dev); if (refcount_dec_and_test(&nsim_bus_devs)) complete(&nsim_bus_devs_released); } static const struct device_type nsim_bus_dev_type = { .groups = nsim_bus_dev_attr_groups, .release = nsim_bus_dev_release, }; static struct nsim_bus_dev * nsim_bus_dev_new(unsigned int id, unsigned int port_count, unsigned int num_queues); static ssize_t new_device_store(const struct bus_type *bus, const char *buf, size_t count) { unsigned int id, port_count, num_queues; struct nsim_bus_dev *nsim_bus_dev; int err; err = sscanf(buf, "%u %u %u", &id, &port_count, &num_queues); switch (err) { case 1: port_count = 1; fallthrough; case 2: num_queues = 1; fallthrough; case 3: if (id > INT_MAX) { pr_err("Value of \"id\" is too big.\n"); return -EINVAL; } break; default: pr_err("Format for adding new device is \"id port_count num_queues\" (uint uint unit).\n"); return -EINVAL; } mutex_lock(&nsim_bus_dev_list_lock); /* Prevent to use resource before initialization. 
*/ if (!smp_load_acquire(&nsim_bus_enable)) { err = -EBUSY; goto err; } nsim_bus_dev = nsim_bus_dev_new(id, port_count, num_queues); if (IS_ERR(nsim_bus_dev)) { err = PTR_ERR(nsim_bus_dev); goto err; } refcount_inc(&nsim_bus_devs); /* Allow using nsim_bus_dev */ smp_store_release(&nsim_bus_dev->init, true); list_add_tail(&nsim_bus_dev->list, &nsim_bus_dev_list); mutex_unlock(&nsim_bus_dev_list_lock); return count; err: mutex_unlock(&nsim_bus_dev_list_lock); return err; } static BUS_ATTR_WO(new_device); static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev); static ssize_t del_device_store(const struct bus_type *bus, const char *buf, size_t count) { struct nsim_bus_dev *nsim_bus_dev, *tmp; unsigned int id; int err; err = sscanf(buf, "%u", &id); switch (err) { case 1: if (id > INT_MAX) { pr_err("Value of \"id\" is too big.\n"); return -EINVAL; } break; default: pr_err("Format for deleting device is \"id\" (uint).\n"); return -EINVAL; } err = -ENOENT; mutex_lock(&nsim_bus_dev_list_lock); /* Prevent to use resource before initialization. */ if (!smp_load_acquire(&nsim_bus_enable)) { mutex_unlock(&nsim_bus_dev_list_lock); return -EBUSY; } list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) { if (nsim_bus_dev->dev.id != id) continue; list_del(&nsim_bus_dev->list); nsim_bus_dev_del(nsim_bus_dev); err = 0; break; } mutex_unlock(&nsim_bus_dev_list_lock); return !err ? count : err; } static BUS_ATTR_WO(del_device); static ssize_t link_device_store(const struct bus_type *bus, const char *buf, size_t count) { struct netdevsim *nsim_a, *nsim_b, *peer; struct net_device *dev_a, *dev_b; unsigned int ifidx_a, ifidx_b; int netnsfd_a, netnsfd_b, err; struct net *ns_a, *ns_b; err = sscanf(buf, "%d:%u %d:%u", &netnsfd_a, &ifidx_a, &netnsfd_b, &ifidx_b); if (err != 4) { pr_err("Format for linking two devices is \"netnsfd_a:ifidx_a netnsfd_b:ifidx_b\" (int uint int uint).\n"); return -EINVAL; } ns_a = get_net_ns_by_fd(netnsfd_a); if (IS_ERR(ns_a)) { pr_err("Could not find netns with fd: %d\n", netnsfd_a); return -EINVAL; } ns_b = get_net_ns_by_fd(netnsfd_b); if (IS_ERR(ns_b)) { pr_err("Could not find netns with fd: %d\n", netnsfd_b); put_net(ns_a); return -EINVAL; } err = -EINVAL; rtnl_lock(); dev_a = __dev_get_by_index(ns_a, ifidx_a); if (!dev_a) { pr_err("Could not find device with ifindex %u in netnsfd %d\n", ifidx_a, netnsfd_a); goto out_err; } if (!netdev_is_nsim(dev_a)) { pr_err("Device with ifindex %u in netnsfd %d is not a netdevsim\n", ifidx_a, netnsfd_a); goto out_err; } dev_b = __dev_get_by_index(ns_b, ifidx_b); if (!dev_b) { pr_err("Could not find device with ifindex %u in netnsfd %d\n", ifidx_b, netnsfd_b); goto out_err; } if (!netdev_is_nsim(dev_b)) { pr_err("Device with ifindex %u in netnsfd %d is not a netdevsim\n", ifidx_b, netnsfd_b); goto out_err; } if (dev_a == dev_b) { pr_err("Cannot link a netdevsim to itself\n"); goto out_err; } err = -EBUSY; nsim_a = netdev_priv(dev_a); peer = rtnl_dereference(nsim_a->peer); if (peer) { pr_err("Netdevsim %d:%u is already linked\n", netnsfd_a, ifidx_a); goto out_err; } nsim_b = netdev_priv(dev_b); peer = rtnl_dereference(nsim_b->peer); if (peer) { pr_err("Netdevsim %d:%u is already linked\n", netnsfd_b, ifidx_b); goto out_err; } err = 0; rcu_assign_pointer(nsim_a->peer, nsim_b); rcu_assign_pointer(nsim_b->peer, nsim_a); out_err: put_net(ns_b); put_net(ns_a); rtnl_unlock(); return !err ? 
count : err; } static BUS_ATTR_WO(link_device); static ssize_t unlink_device_store(const struct bus_type *bus, const char *buf, size_t count) { struct netdevsim *nsim, *peer; struct net_device *dev; unsigned int ifidx; int netnsfd, err; struct net *ns; err = sscanf(buf, "%u:%u", &netnsfd, &ifidx); if (err != 2) { pr_err("Format for unlinking a device is \"netnsfd:ifidx\" (int uint).\n"); return -EINVAL; } ns = get_net_ns_by_fd(netnsfd); if (IS_ERR(ns)) { pr_err("Could not find netns with fd: %d\n", netnsfd); return -EINVAL; } err = -EINVAL; rtnl_lock(); dev = __dev_get_by_index(ns, ifidx); if (!dev) { pr_err("Could not find device with ifindex %u in netnsfd %d\n", ifidx, netnsfd); goto out_put_netns; } if (!netdev_is_nsim(dev)) { pr_err("Device with ifindex %u in netnsfd %d is not a netdevsim\n", ifidx, netnsfd); goto out_put_netns; } nsim = netdev_priv(dev); peer = rtnl_dereference(nsim->peer); if (!peer) goto out_put_netns; err = 0; RCU_INIT_POINTER(nsim->peer, NULL); RCU_INIT_POINTER(peer->peer, NULL); out_put_netns: put_net(ns); rtnl_unlock(); return !err ? count : err; } static BUS_ATTR_WO(unlink_device); static struct attribute *nsim_bus_attrs[] = { &bus_attr_new_device.attr, &bus_attr_del_device.attr, &bus_attr_link_device.attr, &bus_attr_unlink_device.attr, NULL }; ATTRIBUTE_GROUPS(nsim_bus); static int nsim_bus_probe(struct device *dev) { struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev); return nsim_drv_probe(nsim_bus_dev); } static void nsim_bus_remove(struct device *dev) { struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev); nsim_drv_remove(nsim_bus_dev); } static int nsim_num_vf(struct device *dev) { struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev); return nsim_bus_dev->num_vfs; } static const struct bus_type nsim_bus = { .name = DRV_NAME, .dev_name = DRV_NAME, .bus_groups = nsim_bus_groups, .probe = nsim_bus_probe, .remove = nsim_bus_remove, .num_vf = nsim_num_vf, }; #define NSIM_BUS_DEV_MAX_VFS 4 static struct nsim_bus_dev * nsim_bus_dev_new(unsigned int id, unsigned int port_count, unsigned int num_queues) { struct nsim_bus_dev *nsim_bus_dev; int err; nsim_bus_dev = kzalloc(sizeof(*nsim_bus_dev), GFP_KERNEL); if (!nsim_bus_dev) return ERR_PTR(-ENOMEM); err = ida_alloc_range(&nsim_bus_dev_ids, id, id, GFP_KERNEL); if (err < 0) goto err_nsim_bus_dev_free; nsim_bus_dev->dev.id = err; nsim_bus_dev->dev.bus = &nsim_bus; nsim_bus_dev->dev.type = &nsim_bus_dev_type; nsim_bus_dev->port_count = port_count; nsim_bus_dev->num_queues = num_queues; nsim_bus_dev->initial_net = current->nsproxy->net_ns; nsim_bus_dev->max_vfs = NSIM_BUS_DEV_MAX_VFS; /* Disallow using nsim_bus_dev */ smp_store_release(&nsim_bus_dev->init, false); err = device_register(&nsim_bus_dev->dev); if (err) goto err_nsim_bus_dev_id_free; return nsim_bus_dev; err_nsim_bus_dev_id_free: ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id); put_device(&nsim_bus_dev->dev); nsim_bus_dev = NULL; err_nsim_bus_dev_free: kfree(nsim_bus_dev); return ERR_PTR(err); } static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev) { /* Disallow using nsim_bus_dev */ smp_store_release(&nsim_bus_dev->init, false); ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id); device_unregister(&nsim_bus_dev->dev); } static struct device_driver nsim_driver = { .name = DRV_NAME, .bus = &nsim_bus, .owner = THIS_MODULE, }; int nsim_bus_init(void) { int err; err = bus_register(&nsim_bus); if (err) return err; err = driver_register(&nsim_driver); if (err) goto err_bus_unregister; refcount_set(&nsim_bus_devs, 1); /* Allow using 
resources */ smp_store_release(&nsim_bus_enable, true); return 0; err_bus_unregister: bus_unregister(&nsim_bus); return err; } void nsim_bus_exit(void) { struct nsim_bus_dev *nsim_bus_dev, *tmp; /* Disallow using resources */ smp_store_release(&nsim_bus_enable, false); if (refcount_dec_and_test(&nsim_bus_devs)) complete(&nsim_bus_devs_released); mutex_lock(&nsim_bus_dev_list_lock); list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) { list_del(&nsim_bus_dev->list); nsim_bus_dev_del(nsim_bus_dev); } mutex_unlock(&nsim_bus_dev_list_lock); wait_for_completion(&nsim_bus_devs_released); driver_unregister(&nsim_driver); bus_unregister(&nsim_bus); }
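/*
 * Hedged userspace sketch (not part of the driver above): exercising the
 * new_device/del_device bus attributes that new_device_store() and
 * del_device_store() implement. The sysfs paths assume the bus is
 * registered under the name "netdevsim" (DRV_NAME); adjust if different.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_attr(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return -1;
	}
	/* Kernel side parses this with sscanf("%u %u %u", ...). */
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}

int main(void)
{
	/* id=10; port_count and num_queues default to 1 if omitted. */
	write_attr("/sys/bus/netdevsim/new_device", "10 1 1");
	/* ... use the simulated device ... */
	write_attr("/sys/bus/netdevsim/del_device", "10");
	return 0;
}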
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef LINUX_RESUME_USER_MODE_H
#define LINUX_RESUME_USER_MODE_H

#include <linux/sched.h>
#include <linux/task_work.h>
#include <linux/memcontrol.h>
#include <linux/rseq.h>
#include <linux/blk-cgroup.h>

/**
 * set_notify_resume - cause resume_user_mode_work() to be called
 * @task: task that will call resume_user_mode_work()
 *
 * Calling this arranges that @task will call resume_user_mode_work()
 * before returning to user mode. If it's already running in user mode,
 * it will enter the kernel and call resume_user_mode_work() soon.
 * If it's blocked, it will not be woken.
 */
static inline void set_notify_resume(struct task_struct *task)
{
	if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME))
		kick_process(task);
}

/**
 * resume_user_mode_work - Perform work before returning to user mode
 * @regs: user-mode registers of @current task
 *
 * This is called when %TIF_NOTIFY_RESUME has been set. Now we are
 * about to return to user mode, and the user state in @regs can be
 * inspected or adjusted. The caller in arch code has cleared
 * %TIF_NOTIFY_RESUME before the call. If the flag gets set again
 * asynchronously, this will be called again before we return to
 * user mode.
 *
 * Called without locks.
 */
static inline void resume_user_mode_work(struct pt_regs *regs)
{
	clear_thread_flag(TIF_NOTIFY_RESUME);
	/*
	 * This barrier pairs with task_work_add()->set_notify_resume() after
	 * hlist_add_head(task->task_works);
	 */
	smp_mb__after_atomic();
	if (unlikely(task_work_pending(current)))
		task_work_run();

#ifdef CONFIG_KEYS_REQUEST_CACHE
	if (unlikely(current->cached_requested_key)) {
		key_put(current->cached_requested_key);
		current->cached_requested_key = NULL;
	}
#endif

	mem_cgroup_handle_over_high(GFP_KERNEL);
	blkcg_maybe_throttle_current();

	rseq_handle_notify_resume(NULL, regs);
}

#endif /* LINUX_RESUME_USER_MODE_H */
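/*
 * Hedged, illustrative sketch (not from the header above): the rough shape
 * of an architecture's exit-to-user path that consumes these helpers.
 * test_thread_flag() and struct pt_regs are the usual kernel facilities;
 * the loop itself is simplified and only shows the calling pattern.
 */
static void example_exit_to_user(struct pt_regs *regs)
{
	while (test_thread_flag(TIF_NOTIFY_RESUME)) {
		/*
		 * resume_user_mode_work() clears TIF_NOTIFY_RESUME itself
		 * and runs any queued task_work before return to user mode.
		 */
		resume_user_mode_work(regs);
	}
}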
3 1 2 1 1 1 1 1 3 3 1 1 1 3 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 // SPDX-License-Identifier: GPL-2.0-or-later /* * SQ905 subdriver * * Copyright (C) 2008, 2009 Adam Baker and Theodore Kilgore */ /* * History and Acknowledgments * * The original Linux driver for SQ905 based cameras was written by * Marcell Lengyel and further developed by many other contributors * and is available from http://sourceforge.net/projects/sqcam/ * * This driver takes advantage of the reverse engineering work done for * that driver and for libgphoto2 but shares no code with them. * * This driver has used as a base the finepix driver and other gspca * based drivers and may still contain code fragments taken from those * drivers. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "sq905" #include <linux/workqueue.h> #include <linux/slab.h> #include "gspca.h" MODULE_AUTHOR("Adam Baker <linux@baker-net.org.uk>, Theodore Kilgore <kilgota@auburn.edu>"); MODULE_DESCRIPTION("GSPCA/SQ905 USB Camera Driver"); MODULE_LICENSE("GPL"); /* Default timeouts, in ms */ #define SQ905_CMD_TIMEOUT 500 #define SQ905_DATA_TIMEOUT 1000 /* Maximum transfer size to use. */ #define SQ905_MAX_TRANSFER 0x8000 #define FRAME_HEADER_LEN 64 /* The known modes, or registers. These go in the "value" slot. */ /* 00 is "none" obviously */ #define SQ905_BULK_READ 0x03 /* precedes any bulk read */ #define SQ905_COMMAND 0x06 /* precedes the command codes below */ #define SQ905_PING 0x07 /* when reading an "idling" command */ #define SQ905_READ_DONE 0xc0 /* ack bulk read completed */ /* Any non-zero value in the bottom 2 bits of the 2nd byte of * the ID appears to indicate the camera can do 640*480. If the * LSB of that byte is set the image is just upside down, otherwise * it is rotated 180 degrees. */ #define SQ905_HIRES_MASK 0x00000300 #define SQ905_ORIENTATION_MASK 0x00000100 /* Some command codes. These go in the "index" slot. 
*/ #define SQ905_ID 0xf0 /* asks for model string */ #define SQ905_CONFIG 0x20 /* gets photo alloc. table, not used here */ #define SQ905_DATA 0x30 /* accesses photo data, not used here */ #define SQ905_CLEAR 0xa0 /* clear everything */ #define SQ905_CAPTURE_LOW 0x60 /* Starts capture at 160x120 */ #define SQ905_CAPTURE_MED 0x61 /* Starts capture at 320x240 */ #define SQ905_CAPTURE_HIGH 0x62 /* Starts capture at 640x480 (some cams only) */ /* note that the capture command also controls the output dimensions */ /* Structure to hold all of our device specific stuff */ struct sd { struct gspca_dev gspca_dev; /* !! must be the first item */ /* * Driver stuff */ struct work_struct work_struct; struct workqueue_struct *work_thread; }; static struct v4l2_pix_format sq905_mode[] = { { 160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, { 320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, { 640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0} }; /* * Send a command to the camera. */ static int sq905_command(struct gspca_dev *gspca_dev, u16 index) { int ret; gspca_dev->usb_buf[0] = '\0'; ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, SQ905_COMMAND, index, gspca_dev->usb_buf, 1, SQ905_CMD_TIMEOUT); if (ret < 0) { pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret); return ret; } ret = usb_control_msg(gspca_dev->dev, usb_rcvctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, SQ905_PING, 0, gspca_dev->usb_buf, 1, SQ905_CMD_TIMEOUT); if (ret < 0) { pr_err("%s: usb_control_msg failed 2 (%d)\n", __func__, ret); return ret; } return 0; } /* * Acknowledge the end of a frame - see warning on sq905_command. */ static int sq905_ack_frame(struct gspca_dev *gspca_dev) { int ret; gspca_dev->usb_buf[0] = '\0'; ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, SQ905_READ_DONE, 0, gspca_dev->usb_buf, 1, SQ905_CMD_TIMEOUT); if (ret < 0) { pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret); return ret; } return 0; } /* * request and read a block of data - see warning on sq905_command. */ static int sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size, int need_lock) { int ret; int act_len = 0; gspca_dev->usb_buf[0] = '\0'; if (need_lock) mutex_lock(&gspca_dev->usb_lock); ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, SQ905_BULK_READ, size, gspca_dev->usb_buf, 1, SQ905_CMD_TIMEOUT); if (need_lock) mutex_unlock(&gspca_dev->usb_lock); if (ret < 0) { pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret); return ret; } ret = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x81), data, size, &act_len, SQ905_DATA_TIMEOUT); /* successful, it returns 0, otherwise negative */ if (ret < 0 || act_len != size) { pr_err("bulk read fail (%d) len %d/%d\n", ret, act_len, size); return -EIO; } return 0; } /* * This function is called as a workqueue function and runs whenever the camera * is streaming data. 
Because it is a workqueue function it is allowed to sleep * so we can use synchronous USB calls. To avoid possible collisions with other * threads attempting to use gspca_dev->usb_buf we take the usb_lock when * performing USB operations using it. In practice we don't really need this * as the camera doesn't provide any controls. */ static void sq905_dostream(struct work_struct *work) { struct sd *dev = container_of(work, struct sd, work_struct); struct gspca_dev *gspca_dev = &dev->gspca_dev; int bytes_left; /* bytes remaining in current frame. */ int data_len; /* size to use for the next read. */ int header_read; /* true if we have already read the frame header. */ int packet_type; int frame_sz; int ret; u8 *data; u8 *buffer; buffer = kmalloc(SQ905_MAX_TRANSFER, GFP_KERNEL); if (!buffer) { pr_err("Couldn't allocate USB buffer\n"); goto quit_stream; } frame_sz = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].sizeimage + FRAME_HEADER_LEN; while (gspca_dev->present && gspca_dev->streaming) { #ifdef CONFIG_PM if (gspca_dev->frozen) break; #endif /* request some data and then read it until we have * a complete frame. */ bytes_left = frame_sz; header_read = 0; /* Note we do not check for gspca_dev->streaming here, as we must finish reading an entire frame, otherwise the next time we stream we start reading in the middle of a frame. */ while (bytes_left > 0 && gspca_dev->present) { data_len = bytes_left > SQ905_MAX_TRANSFER ? SQ905_MAX_TRANSFER : bytes_left; ret = sq905_read_data(gspca_dev, buffer, data_len, 1); if (ret < 0) goto quit_stream; gspca_dbg(gspca_dev, D_PACK, "Got %d bytes out of %d for frame\n", data_len, bytes_left); bytes_left -= data_len; data = buffer; if (!header_read) { packet_type = FIRST_PACKET; /* The first 64 bytes of each frame are * a header full of FF 00 bytes */ data += FRAME_HEADER_LEN; data_len -= FRAME_HEADER_LEN; header_read = 1; } else if (bytes_left == 0) { packet_type = LAST_PACKET; } else { packet_type = INTER_PACKET; } gspca_frame_add(gspca_dev, packet_type, data, data_len); /* If entire frame fits in one packet we still need to add a LAST_PACKET */ if (packet_type == FIRST_PACKET && bytes_left == 0) gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); } if (gspca_dev->present) { /* acknowledge the frame */ mutex_lock(&gspca_dev->usb_lock); ret = sq905_ack_frame(gspca_dev); mutex_unlock(&gspca_dev->usb_lock); if (ret < 0) goto quit_stream; } } quit_stream: if (gspca_dev->present) { mutex_lock(&gspca_dev->usb_lock); sq905_command(gspca_dev, SQ905_CLEAR); mutex_unlock(&gspca_dev->usb_lock); } kfree(buffer); } /* This function is called at probe time just before sd_init */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct cam *cam = &gspca_dev->cam; struct sd *dev = (struct sd *) gspca_dev; /* We don't use the buffer gspca allocates so make it small. 
*/ cam->bulk = 1; cam->bulk_size = 64; INIT_WORK(&dev->work_struct, sq905_dostream); return 0; } /* called on streamoff with alt==0 and on disconnect */ /* the usb_lock is held at entry - restore on exit */ static void sd_stop0(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *) gspca_dev; /* wait for the work queue to terminate */ mutex_unlock(&gspca_dev->usb_lock); /* This waits for sq905_dostream to finish */ destroy_workqueue(dev->work_thread); dev->work_thread = NULL; mutex_lock(&gspca_dev->usb_lock); } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { u32 ident; int ret; /* connect to the camera and read * the model ID and process that and put it away. */ ret = sq905_command(gspca_dev, SQ905_CLEAR); if (ret < 0) return ret; ret = sq905_command(gspca_dev, SQ905_ID); if (ret < 0) return ret; ret = sq905_read_data(gspca_dev, gspca_dev->usb_buf, 4, 0); if (ret < 0) return ret; /* usb_buf is allocated with kmalloc so is aligned. * Camera model number is the right way round if we assume this * reverse engineered ID is supposed to be big endian. */ ident = be32_to_cpup((__be32 *)gspca_dev->usb_buf); ret = sq905_command(gspca_dev, SQ905_CLEAR); if (ret < 0) return ret; gspca_dbg(gspca_dev, D_CONF, "SQ905 camera ID %08x detected\n", ident); gspca_dev->cam.cam_mode = sq905_mode; gspca_dev->cam.nmodes = ARRAY_SIZE(sq905_mode); if (!(ident & SQ905_HIRES_MASK)) gspca_dev->cam.nmodes--; if (ident & SQ905_ORIENTATION_MASK) gspca_dev->cam.input_flags = V4L2_IN_ST_VFLIP; else gspca_dev->cam.input_flags = V4L2_IN_ST_VFLIP | V4L2_IN_ST_HFLIP; return 0; } /* Set up for getting frames. */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *) gspca_dev; int ret; /* "Open the shutter" and set size, to start capture */ switch (gspca_dev->curr_mode) { default: /* case 2: */ gspca_dbg(gspca_dev, D_STREAM, "Start streaming at high resolution\n"); ret = sq905_command(&dev->gspca_dev, SQ905_CAPTURE_HIGH); break; case 1: gspca_dbg(gspca_dev, D_STREAM, "Start streaming at medium resolution\n"); ret = sq905_command(&dev->gspca_dev, SQ905_CAPTURE_MED); break; case 0: gspca_dbg(gspca_dev, D_STREAM, "Start streaming at low resolution\n"); ret = sq905_command(&dev->gspca_dev, SQ905_CAPTURE_LOW); } if (ret < 0) { gspca_err(gspca_dev, "Start streaming command failed\n"); return ret; } /* Start the workqueue function to do the streaming */ dev->work_thread = create_singlethread_workqueue(MODULE_NAME); if (!dev->work_thread) return -ENOMEM; queue_work(dev->work_thread, &dev->work_struct); return 0; } /* Table of supported USB devices */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x2770, 0x9120)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .start = sd_start, .stop0 = sd_stop0, }; /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
3 2 34 6 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __NET_GUE_H #define __NET_GUE_H /* Definitions for the GUE header, standard and private flags, lengths * of optional fields are below. * * Diagram of GUE header: * * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |Ver|C| Hlen | Proto/ctype | Standard flags |P| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | * ~ Fields (optional) ~ * | | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Private flags (optional, P bit is set) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | * ~ Private fields (optional) ~ * | | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * C bit indicates control message when set, data message when unset. * For a control message, proto/ctype is interpreted as a type of * control message. For data messages, proto/ctype is the IP protocol * of the next header. * * P bit indicates private flags field is present. The private flags * may refer to options placed after this field. */ #include <asm/byteorder.h> #include <linux/types.h> struct guehdr { union { struct { #if defined(__LITTLE_ENDIAN_BITFIELD) __u8 hlen:5, control:1, version:2; #elif defined (__BIG_ENDIAN_BITFIELD) __u8 version:2, control:1, hlen:5; #else #error "Please fix <asm/byteorder.h>" #endif __u8 proto_ctype; __be16 flags; }; __be32 word; }; }; /* Standard flags in GUE header */ #define GUE_FLAG_PRIV htons(1<<0) /* Private flags are in options */ #define GUE_LEN_PRIV 4 #define GUE_FLAGS_ALL (GUE_FLAG_PRIV) /* Private flags in the private option extension */ #define GUE_PFLAG_REMCSUM htonl(1U << 31) #define GUE_PLEN_REMCSUM 4 #define GUE_PFLAGS_ALL (GUE_PFLAG_REMCSUM) /* Functions to compute options length corresponding to flags. * If we ever have a lot of flags this can be potentially be * converted to a more optimized algorithm (table lookup * for instance). */ static inline size_t guehdr_flags_len(__be16 flags) { return ((flags & GUE_FLAG_PRIV) ? GUE_LEN_PRIV : 0); } static inline size_t guehdr_priv_flags_len(__be32 flags) { return 0; } /* Validate standard and private flags. Returns non-zero (meaning invalid) * if there is an unknown standard or private flags, or the options length for * the flags exceeds the options length specific in hlen of the GUE header. */ static inline int validate_gue_flags(struct guehdr *guehdr, size_t optlen) { __be16 flags = guehdr->flags; size_t len; if (flags & ~GUE_FLAGS_ALL) return 1; len = guehdr_flags_len(flags); if (len > optlen) return 1; if (flags & GUE_FLAG_PRIV) { /* Private flags are last four bytes accounted in * guehdr_flags_len */ __be32 pflags = *(__be32 *)((void *)&guehdr[1] + len - GUE_LEN_PRIV); if (pflags & ~GUE_PFLAGS_ALL) return 1; len += guehdr_priv_flags_len(pflags); if (len > optlen) return 1; } return 0; } #endif
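/*
 * Hedged sketch: a minimal receive-side check of a GUE header using the
 * definitions above. "pkt" and "len" are illustrative parameters; real
 * users pull the header out of an skb. No byte-swapping is needed because
 * the flag constants are already defined in network byte order.
 */
static int example_parse_gue(void *pkt, size_t len)
{
	struct guehdr *guehdr = pkt;
	size_t optlen;

	if (len < sizeof(*guehdr))
		return -1;

	/* hlen counts 32-bit words of optional fields after the 4-byte base. */
	optlen = guehdr->hlen << 2;
	if (len < sizeof(*guehdr) + optlen)
		return -1;

	/* Non-zero means an unknown flag or options longer than hlen allows. */
	if (validate_gue_flags(guehdr, optlen))
		return -1;

	return guehdr->control ? 1 : 0;	/* 1 = control message, 0 = data */
}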
19 19 9 13 34 34 34 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 // SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2015, Heiner Kallweit <hkallweit1@gmail.com> */ #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "leds.h" DEFINE_LED_TRIGGER(bt_power_led_trigger); struct hci_basic_led_trigger { struct led_trigger led_trigger; struct hci_dev *hdev; }; #define to_hci_basic_led_trigger(arg) container_of(arg, \ struct hci_basic_led_trigger, led_trigger) void hci_leds_update_powered(struct hci_dev *hdev, bool enabled) { if (hdev->power_led) led_trigger_event(hdev->power_led, enabled ? LED_FULL : LED_OFF); if (!enabled) { struct hci_dev *d; read_lock(&hci_dev_list_lock); list_for_each_entry(d, &hci_dev_list, list) { if (test_bit(HCI_UP, &d->flags)) enabled = true; } read_unlock(&hci_dev_list_lock); } led_trigger_event(bt_power_led_trigger, enabled ? LED_FULL : LED_OFF); } static int power_activate(struct led_classdev *led_cdev) { struct hci_basic_led_trigger *htrig; bool powered; htrig = to_hci_basic_led_trigger(led_cdev->trigger); powered = test_bit(HCI_UP, &htrig->hdev->flags); led_set_brightness(led_cdev, powered ? LED_FULL : LED_OFF); return 0; } static struct led_trigger *led_allocate_basic(struct hci_dev *hdev, int (*activate)(struct led_classdev *led_cdev), const char *name) { struct hci_basic_led_trigger *htrig; htrig = devm_kzalloc(&hdev->dev, sizeof(*htrig), GFP_KERNEL); if (!htrig) return NULL; htrig->hdev = hdev; htrig->led_trigger.activate = activate; htrig->led_trigger.name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s-%s", hdev->name, name); if (!htrig->led_trigger.name) goto err_alloc; if (devm_led_trigger_register(&hdev->dev, &htrig->led_trigger)) goto err_register; return &htrig->led_trigger; err_register: devm_kfree(&hdev->dev, (void *)htrig->led_trigger.name); err_alloc: devm_kfree(&hdev->dev, htrig); return NULL; } void hci_leds_init(struct hci_dev *hdev) { /* initialize power_led */ hdev->power_led = led_allocate_basic(hdev, power_activate, "power"); } void bt_leds_init(void) { led_trigger_register_simple("bluetooth-power", &bt_power_led_trigger); } void bt_leds_cleanup(void) { led_trigger_unregister_simple(bt_power_led_trigger); }
3 3 2348 47 49 49 49 49 1971 121 1836 1838 728 61 58 71 11 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_INETDEVICE_H #define _LINUX_INETDEVICE_H #ifdef __KERNEL__ #include <linux/bitmap.h> #include <linux/if.h> #include <linux/ip.h> #include <linux/netdevice.h> #include <linux/rcupdate.h> #include <linux/timer.h> #include <linux/sysctl.h> #include <linux/rtnetlink.h> #include <linux/refcount.h> struct ipv4_devconf { void *sysctl; int data[IPV4_DEVCONF_MAX]; DECLARE_BITMAP(state, IPV4_DEVCONF_MAX); }; #define MC_HASH_SZ_LOG 9 struct in_device { struct net_device *dev; netdevice_tracker dev_tracker; refcount_t refcnt; int dead; struct in_ifaddr __rcu *ifa_list;/* IP ifaddr chain */ struct ip_mc_list __rcu *mc_list; /* IP multicast filter chain */ struct ip_mc_list __rcu * __rcu *mc_hash; int mc_count; /* Number of installed mcasts */ spinlock_t mc_tomb_lock; struct ip_mc_list *mc_tomb; unsigned long mr_v1_seen; unsigned long mr_v2_seen; unsigned long mr_maxdelay; unsigned long mr_qi; /* Query Interval */ unsigned long mr_qri; /* Query Response Interval */ unsigned char mr_qrv; /* Query Robustness Variable */ unsigned char mr_gq_running; u32 mr_ifc_count; struct timer_list mr_gq_timer; /* general query timer */ struct timer_list mr_ifc_timer; /* interface change timer */ struct neigh_parms *arp_parms; struct ipv4_devconf cnf; struct rcu_head rcu_head; }; #define IPV4_DEVCONF(cnf, attr) ((cnf).data[IPV4_DEVCONF_ ## attr - 1]) #define IPV4_DEVCONF_RO(cnf, attr) READ_ONCE(IPV4_DEVCONF(cnf, attr)) #define IPV4_DEVCONF_ALL(net, attr) \ IPV4_DEVCONF((*(net)->ipv4.devconf_all), attr) #define IPV4_DEVCONF_ALL_RO(net, attr) READ_ONCE(IPV4_DEVCONF_ALL(net, attr)) static inline int ipv4_devconf_get(const struct in_device *in_dev, int index) { index--; return READ_ONCE(in_dev->cnf.data[index]); } static inline void ipv4_devconf_set(struct in_device *in_dev, int index, int val) { index--; set_bit(index, in_dev->cnf.state); WRITE_ONCE(in_dev->cnf.data[index], val); } static inline void ipv4_devconf_setall(struct in_device *in_dev) { bitmap_fill(in_dev->cnf.state, IPV4_DEVCONF_MAX); } #define IN_DEV_CONF_GET(in_dev, attr) \ ipv4_devconf_get((in_dev), IPV4_DEVCONF_ ## attr) #define IN_DEV_CONF_SET(in_dev, attr, val) \ ipv4_devconf_set((in_dev), IPV4_DEVCONF_ ## attr, (val)) #define IN_DEV_ANDCONF(in_dev, attr) \ (IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), attr) && \ 
IN_DEV_CONF_GET((in_dev), attr)) #define IN_DEV_NET_ORCONF(in_dev, net, attr) \ (IPV4_DEVCONF_ALL_RO(net, attr) || \ IN_DEV_CONF_GET((in_dev), attr)) #define IN_DEV_ORCONF(in_dev, attr) \ IN_DEV_NET_ORCONF(in_dev, dev_net(in_dev->dev), attr) #define IN_DEV_MAXCONF(in_dev, attr) \ (max(IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), attr), \ IN_DEV_CONF_GET((in_dev), attr))) #define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING) #define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING) #define IN_DEV_BFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), BC_FORWARDING) #define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER) #define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK) #define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \ ACCEPT_SOURCE_ROUTE) #define IN_DEV_ACCEPT_LOCAL(in_dev) IN_DEV_ORCONF((in_dev), ACCEPT_LOCAL) #define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY) #define IN_DEV_LOG_MARTIANS(in_dev) IN_DEV_ORCONF((in_dev), LOG_MARTIANS) #define IN_DEV_PROXY_ARP(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP) #define IN_DEV_PROXY_ARP_PVLAN(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP_PVLAN) #define IN_DEV_SHARED_MEDIA(in_dev) IN_DEV_ORCONF((in_dev), SHARED_MEDIA) #define IN_DEV_TX_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), SEND_REDIRECTS) #define IN_DEV_SEC_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), \ SECURE_REDIRECTS) #define IN_DEV_IDTAG(in_dev) IN_DEV_CONF_GET(in_dev, TAG) #define IN_DEV_MEDIUM_ID(in_dev) IN_DEV_CONF_GET(in_dev, MEDIUM_ID) #define IN_DEV_PROMOTE_SECONDARIES(in_dev) \ IN_DEV_ORCONF((in_dev), \ PROMOTE_SECONDARIES) #define IN_DEV_ROUTE_LOCALNET(in_dev) IN_DEV_ORCONF(in_dev, ROUTE_LOCALNET) #define IN_DEV_NET_ROUTE_LOCALNET(in_dev, net) \ IN_DEV_NET_ORCONF(in_dev, net, ROUTE_LOCALNET) #define IN_DEV_RX_REDIRECTS(in_dev) \ ((IN_DEV_FORWARD(in_dev) && \ IN_DEV_ANDCONF((in_dev), ACCEPT_REDIRECTS)) \ || (!IN_DEV_FORWARD(in_dev) && \ IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS))) #define IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) \ IN_DEV_ORCONF((in_dev), IGNORE_ROUTES_WITH_LINKDOWN) #define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER) #define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ACCEPT) #define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE) #define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE) #define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY) #define IN_DEV_ARP_EVICT_NOCARRIER(in_dev) IN_DEV_ANDCONF((in_dev), \ ARP_EVICT_NOCARRIER) struct in_ifaddr { struct hlist_node addr_lst; struct in_ifaddr __rcu *ifa_next; struct in_device *ifa_dev; struct rcu_head rcu_head; __be32 ifa_local; __be32 ifa_address; __be32 ifa_mask; __u32 ifa_rt_priority; __be32 ifa_broadcast; unsigned char ifa_scope; unsigned char ifa_prefixlen; unsigned char ifa_proto; __u32 ifa_flags; char ifa_label[IFNAMSIZ]; /* In seconds, relative to tstamp. Expiry is at tstamp + HZ * lft. 
*/ __u32 ifa_valid_lft; __u32 ifa_preferred_lft; unsigned long ifa_cstamp; /* created timestamp */ unsigned long ifa_tstamp; /* updated timestamp */ }; struct in_validator_info { __be32 ivi_addr; struct in_device *ivi_dev; struct netlink_ext_ack *extack; }; int register_inetaddr_notifier(struct notifier_block *nb); int unregister_inetaddr_notifier(struct notifier_block *nb); int register_inetaddr_validator_notifier(struct notifier_block *nb); int unregister_inetaddr_validator_notifier(struct notifier_block *nb); void inet_netconf_notify_devconf(struct net *net, int event, int type, int ifindex, struct ipv4_devconf *devconf); struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref); static inline struct net_device *ip_dev_find(struct net *net, __be32 addr) { return __ip_dev_find(net, addr, true); } int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *); #ifdef CONFIG_INET int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size); #else static inline int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size) { return 0; } #endif void devinet_init(void); struct in_device *inetdev_by_index(struct net *, int); __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope); __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev, __be32 dst, __be32 local, int scope); struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, __be32 mask); struct in_ifaddr *inet_lookup_ifaddr_rcu(struct net *net, __be32 addr); static inline bool inet_ifa_match(__be32 addr, const struct in_ifaddr *ifa) { return !((addr^ifa->ifa_address)&ifa->ifa_mask); } /* * Check if a mask is acceptable. */ static __inline__ bool bad_mask(__be32 mask, __be32 addr) { __u32 hmask; if (addr & (mask = ~mask)) return true; hmask = ntohl(mask); if (hmask & (hmask+1)) return true; return false; } #define in_dev_for_each_ifa_rtnl(ifa, in_dev) \ for (ifa = rtnl_dereference((in_dev)->ifa_list); ifa; \ ifa = rtnl_dereference(ifa->ifa_next)) #define in_dev_for_each_ifa_rtnl_net(net, ifa, in_dev) \ for (ifa = rtnl_net_dereference(net, (in_dev)->ifa_list); ifa; \ ifa = rtnl_net_dereference(net, ifa->ifa_next)) #define in_dev_for_each_ifa_rcu(ifa, in_dev) \ for (ifa = rcu_dereference((in_dev)->ifa_list); ifa; \ ifa = rcu_dereference(ifa->ifa_next)) static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev) { return rcu_dereference(dev->ip_ptr); } static inline struct in_device *in_dev_get(const struct net_device *dev) { struct in_device *in_dev; rcu_read_lock(); in_dev = __in_dev_get_rcu(dev); if (in_dev) refcount_inc(&in_dev->refcnt); rcu_read_unlock(); return in_dev; } static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev) { return rtnl_dereference(dev->ip_ptr); } static inline struct in_device *__in_dev_get_rtnl_net(const struct net_device *dev) { return rtnl_net_dereference(dev_net(dev), dev->ip_ptr); } /* called with rcu_read_lock or rtnl held */ static inline bool ip_ignore_linkdown(const struct net_device *dev) { struct in_device *in_dev; bool rc = false; in_dev = rcu_dereference_rtnl(dev->ip_ptr); if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev)) rc = true; return rc; } static inline struct neigh_parms *__in_dev_arp_parms_get_rcu(const struct net_device *dev) { struct in_device *in_dev = __in_dev_get_rcu(dev); return in_dev ? 
in_dev->arp_parms : NULL; } void in_dev_finish_destroy(struct in_device *idev); static inline void in_dev_put(struct in_device *idev) { if (refcount_dec_and_test(&idev->refcnt)) in_dev_finish_destroy(idev); } #define __in_dev_put(idev) refcount_dec(&(idev)->refcnt) #define in_dev_hold(idev) refcount_inc(&(idev)->refcnt) #endif /* __KERNEL__ */ static __inline__ __be32 inet_make_mask(int logmask) { if (logmask) return htonl(~((1U<<(32-logmask))-1)); return 0; } static __inline__ int inet_mask_len(__be32 mask) { __u32 hmask = ntohl(mask); if (!hmask) return 0; return 32 - ffz(~hmask); } #endif /* _LINUX_INETDEVICE_H */
119 28 42 35 27 193 43 43 35 43 8 8 193 193 193 149 44 44 44 7392 7396 7398 7388 1752 1751 7422 7432 1742 131 126 107 30 132 27 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 // SPDX-License-Identifier: GPL-2.0 /* * linux/mm/mempool.c * * memory buffer pool support. Such pools are mostly used * for guaranteed, deadlock-free memory allocations during * extreme VM load. * * started by Ingo Molnar, Copyright (C) 2001 * debugging by David Rientjes, Copyright (C) 2015 */ #include <linux/mm.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/kasan.h> #include <linux/kmemleak.h> #include <linux/export.h> #include <linux/mempool.h> #include <linux/writeback.h> #include "slab.h" #ifdef CONFIG_SLUB_DEBUG_ON static void poison_error(mempool_t *pool, void *element, size_t size, size_t byte) { const int nr = pool->curr_nr; const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0); const int end = min_t(int, byte + (BITS_PER_LONG / 8), size); int i; pr_err("BUG: mempool element poison mismatch\n"); pr_err("Mempool %p size %zu\n", pool, size); pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : ""); for (i = start; i < end; i++) pr_cont("%x ", *(u8 *)(element + i)); pr_cont("%s\n", end < size ? "..." 
: ""); dump_stack(); } static void __check_element(mempool_t *pool, void *element, size_t size) { u8 *obj = element; size_t i; for (i = 0; i < size; i++) { u8 exp = (i < size - 1) ? POISON_FREE : POISON_END; if (obj[i] != exp) { poison_error(pool, element, size, i); return; } } memset(obj, POISON_INUSE, size); } static void check_element(mempool_t *pool, void *element) { /* Skip checking: KASAN might save its metadata in the element. */ if (kasan_enabled()) return; /* Mempools backed by slab allocator */ if (pool->free == mempool_kfree) { __check_element(pool, element, (size_t)pool->pool_data); } else if (pool->free == mempool_free_slab) { __check_element(pool, element, kmem_cache_size(pool->pool_data)); } else if (pool->free == mempool_free_pages) { /* Mempools backed by page allocator */ int order = (int)(long)pool->pool_data; void *addr = kmap_local_page((struct page *)element); __check_element(pool, addr, 1UL << (PAGE_SHIFT + order)); kunmap_local(addr); } } static void __poison_element(void *element, size_t size) { u8 *obj = element; memset(obj, POISON_FREE, size - 1); obj[size - 1] = POISON_END; } static void poison_element(mempool_t *pool, void *element) { /* Skip poisoning: KASAN might save its metadata in the element. */ if (kasan_enabled()) return; /* Mempools backed by slab allocator */ if (pool->alloc == mempool_kmalloc) { __poison_element(element, (size_t)pool->pool_data); } else if (pool->alloc == mempool_alloc_slab) { __poison_element(element, kmem_cache_size(pool->pool_data)); } else if (pool->alloc == mempool_alloc_pages) { /* Mempools backed by page allocator */ int order = (int)(long)pool->pool_data; void *addr = kmap_local_page((struct page *)element); __poison_element(addr, 1UL << (PAGE_SHIFT + order)); kunmap_local(addr); } } #else /* CONFIG_SLUB_DEBUG_ON */ static inline void check_element(mempool_t *pool, void *element) { } static inline void poison_element(mempool_t *pool, void *element) { } #endif /* CONFIG_SLUB_DEBUG_ON */ static __always_inline bool kasan_poison_element(mempool_t *pool, void *element) { if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) return kasan_mempool_poison_object(element); else if (pool->alloc == mempool_alloc_pages) return kasan_mempool_poison_pages(element, (unsigned long)pool->pool_data); return true; } static void kasan_unpoison_element(mempool_t *pool, void *element) { if (pool->alloc == mempool_kmalloc) kasan_mempool_unpoison_object(element, (size_t)pool->pool_data); else if (pool->alloc == mempool_alloc_slab) kasan_mempool_unpoison_object(element, kmem_cache_size(pool->pool_data)); else if (pool->alloc == mempool_alloc_pages) kasan_mempool_unpoison_pages(element, (unsigned long)pool->pool_data); } static __always_inline void add_element(mempool_t *pool, void *element) { BUG_ON(pool->curr_nr >= pool->min_nr); poison_element(pool, element); if (kasan_poison_element(pool, element)) pool->elements[pool->curr_nr++] = element; } static void *remove_element(mempool_t *pool) { void *element = pool->elements[--pool->curr_nr]; BUG_ON(pool->curr_nr < 0); kasan_unpoison_element(pool, element); check_element(pool, element); return element; } /** * mempool_exit - exit a mempool initialized with mempool_init() * @pool: pointer to the memory pool which was initialized with * mempool_init(). * * Free all reserved elements in @pool and @pool itself. This function * only sleeps if the free_fn() function sleeps. * * May be called on a zeroed but uninitialized mempool (i.e. allocated with * kzalloc()). 
*/ void mempool_exit(mempool_t *pool) { while (pool->curr_nr) { void *element = remove_element(pool); pool->free(element, pool->pool_data); } kfree(pool->elements); pool->elements = NULL; } EXPORT_SYMBOL(mempool_exit); /** * mempool_destroy - deallocate a memory pool * @pool: pointer to the memory pool which was allocated via * mempool_create(). * * Free all reserved elements in @pool and @pool itself. This function * only sleeps if the free_fn() function sleeps. */ void mempool_destroy(mempool_t *pool) { if (unlikely(!pool)) return; mempool_exit(pool); kfree(pool); } EXPORT_SYMBOL(mempool_destroy); int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data, gfp_t gfp_mask, int node_id) { spin_lock_init(&pool->lock); pool->min_nr = min_nr; pool->pool_data = pool_data; pool->alloc = alloc_fn; pool->free = free_fn; init_waitqueue_head(&pool->wait); pool->elements = kmalloc_array_node(min_nr, sizeof(void *), gfp_mask, node_id); if (!pool->elements) return -ENOMEM; /* * First pre-allocate the guaranteed number of buffers. */ while (pool->curr_nr < pool->min_nr) { void *element; element = pool->alloc(gfp_mask, pool->pool_data); if (unlikely(!element)) { mempool_exit(pool); return -ENOMEM; } add_element(pool, element); } return 0; } EXPORT_SYMBOL(mempool_init_node); /** * mempool_init - initialize a memory pool * @pool: pointer to the memory pool that should be initialized * @min_nr: the minimum number of elements guaranteed to be * allocated for this pool. * @alloc_fn: user-defined element-allocation function. * @free_fn: user-defined element-freeing function. * @pool_data: optional private data available to the user-defined functions. * * Like mempool_create(), but initializes the pool in (i.e. embedded in another * structure). * * Return: %0 on success, negative error code otherwise. */ int mempool_init_noprof(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data) { return mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data, GFP_KERNEL, NUMA_NO_NODE); } EXPORT_SYMBOL(mempool_init_noprof); /** * mempool_create_node - create a memory pool * @min_nr: the minimum number of elements guaranteed to be * allocated for this pool. * @alloc_fn: user-defined element-allocation function. * @free_fn: user-defined element-freeing function. * @pool_data: optional private data available to the user-defined functions. * @gfp_mask: memory allocation flags * @node_id: numa node to allocate on * * this function creates and allocates a guaranteed size, preallocated * memory pool. The pool can be used from the mempool_alloc() and mempool_free() * functions. This function might sleep. Both the alloc_fn() and the free_fn() * functions might sleep - as long as the mempool_alloc() function is not called * from IRQ contexts. * * Return: pointer to the created memory pool object or %NULL on error. */ mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data, gfp_t gfp_mask, int node_id) { mempool_t *pool; pool = kmalloc_node_noprof(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id); if (!pool) return NULL; if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data, gfp_mask, node_id)) { kfree(pool); return NULL; } return pool; } EXPORT_SYMBOL(mempool_create_node_noprof); /** * mempool_resize - resize an existing memory pool * @pool: pointer to the memory pool which was allocated via * mempool_create(). 
* @new_min_nr: the new minimum number of elements guaranteed to be * allocated for this pool. * * This function shrinks/grows the pool. In the case of growing, * it cannot be guaranteed that the pool will be grown to the new * size immediately, but new mempool_free() calls will refill it. * This function may sleep. * * Note, the caller must guarantee that no mempool_destroy is called * while this function is running. mempool_alloc() & mempool_free() * might be called (eg. from IRQ contexts) while this function executes. * * Return: %0 on success, negative error code otherwise. */ int mempool_resize(mempool_t *pool, int new_min_nr) { void *element; void **new_elements; unsigned long flags; BUG_ON(new_min_nr <= 0); might_sleep(); spin_lock_irqsave(&pool->lock, flags); if (new_min_nr <= pool->min_nr) { while (new_min_nr < pool->curr_nr) { element = remove_element(pool); spin_unlock_irqrestore(&pool->lock, flags); pool->free(element, pool->pool_data); spin_lock_irqsave(&pool->lock, flags); } pool->min_nr = new_min_nr; goto out_unlock; } spin_unlock_irqrestore(&pool->lock, flags); /* Grow the pool */ new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements), GFP_KERNEL); if (!new_elements) return -ENOMEM; spin_lock_irqsave(&pool->lock, flags); if (unlikely(new_min_nr <= pool->min_nr)) { /* Raced, other resize will do our work */ spin_unlock_irqrestore(&pool->lock, flags); kfree(new_elements); goto out; } memcpy(new_elements, pool->elements, pool->curr_nr * sizeof(*new_elements)); kfree(pool->elements); pool->elements = new_elements; pool->min_nr = new_min_nr; while (pool->curr_nr < pool->min_nr) { spin_unlock_irqrestore(&pool->lock, flags); element = pool->alloc(GFP_KERNEL, pool->pool_data); if (!element) goto out; spin_lock_irqsave(&pool->lock, flags); if (pool->curr_nr < pool->min_nr) { add_element(pool, element); } else { spin_unlock_irqrestore(&pool->lock, flags); pool->free(element, pool->pool_data); /* Raced */ goto out; } } out_unlock: spin_unlock_irqrestore(&pool->lock, flags); out: return 0; } EXPORT_SYMBOL(mempool_resize); /** * mempool_alloc - allocate an element from a specific memory pool * @pool: pointer to the memory pool which was allocated via * mempool_create(). * @gfp_mask: the usual allocation bitmask. * * this function only sleeps if the alloc_fn() function sleeps or * returns NULL. Note that due to preallocation, this function * *never* fails when called from process contexts. (it might * fail if called from an IRQ context.) * Note: using __GFP_ZERO is not supported. * * Return: pointer to the allocated element or %NULL on error. */ void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask) { void *element; unsigned long flags; wait_queue_entry_t wait; gfp_t gfp_temp; VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO); might_alloc(gfp_mask); gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */ gfp_mask |= __GFP_NORETRY; /* don't loop in __alloc_pages */ gfp_mask |= __GFP_NOWARN; /* failures are OK */ gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO); repeat_alloc: element = pool->alloc(gfp_temp, pool->pool_data); if (likely(element != NULL)) return element; spin_lock_irqsave(&pool->lock, flags); if (likely(pool->curr_nr)) { element = remove_element(pool); spin_unlock_irqrestore(&pool->lock, flags); /* paired with rmb in mempool_free(), read comment there */ smp_wmb(); /* * Update the allocation stack trace as this is more useful * for debugging. 
*/ kmemleak_update_trace(element); return element; } /* * We use gfp mask w/o direct reclaim or IO for the first round. If * alloc failed with that and @pool was empty, retry immediately. */ if (gfp_temp != gfp_mask) { spin_unlock_irqrestore(&pool->lock, flags); gfp_temp = gfp_mask; goto repeat_alloc; } /* We must not sleep if !__GFP_DIRECT_RECLAIM */ if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) { spin_unlock_irqrestore(&pool->lock, flags); return NULL; } /* Let's wait for someone else to return an element to @pool */ init_wait(&wait); prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE); spin_unlock_irqrestore(&pool->lock, flags); /* * FIXME: this should be io_schedule(). The timeout is there as a * workaround for some DM problems in 2.6.18. */ io_schedule_timeout(5*HZ); finish_wait(&pool->wait, &wait); goto repeat_alloc; } EXPORT_SYMBOL(mempool_alloc_noprof); /** * mempool_alloc_preallocated - allocate an element from preallocated elements * belonging to a specific memory pool * @pool: pointer to the memory pool which was allocated via * mempool_create(). * * This function is similar to mempool_alloc, but it only attempts allocating * an element from the preallocated elements. It does not sleep and immediately * returns if no preallocated elements are available. * * Return: pointer to the allocated element or %NULL if no elements are * available. */ void *mempool_alloc_preallocated(mempool_t *pool) { void *element; unsigned long flags; spin_lock_irqsave(&pool->lock, flags); if (likely(pool->curr_nr)) { element = remove_element(pool); spin_unlock_irqrestore(&pool->lock, flags); /* paired with rmb in mempool_free(), read comment there */ smp_wmb(); /* * Update the allocation stack trace as this is more useful * for debugging. */ kmemleak_update_trace(element); return element; } spin_unlock_irqrestore(&pool->lock, flags); return NULL; } EXPORT_SYMBOL(mempool_alloc_preallocated); /** * mempool_free - return an element to the pool. * @element: pool element pointer. * @pool: pointer to the memory pool which was allocated via * mempool_create(). * * this function only sleeps if the free_fn() function sleeps. */ void mempool_free(void *element, mempool_t *pool) { unsigned long flags; if (unlikely(element == NULL)) return; /* * Paired with the wmb in mempool_alloc(). The preceding read is * for @element and the following @pool->curr_nr. This ensures * that the visible value of @pool->curr_nr is from after the * allocation of @element. This is necessary for fringe cases * where @element was passed to this task without going through * barriers. * * For example, assume @p is %NULL at the beginning and one task * performs "p = mempool_alloc(...);" while another task is doing * "while (!p) cpu_relax(); mempool_free(p, ...);". This function * may end up using curr_nr value which is from before allocation * of @p without the following rmb. */ smp_rmb(); /* * For correctness, we need a test which is guaranteed to trigger * if curr_nr + #allocated == min_nr. Testing curr_nr < min_nr * without locking achieves that and refilling as soon as possible * is desirable. * * Because curr_nr visible here is always a value after the * allocation of @element, any task which decremented curr_nr below * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets * incremented to min_nr afterwards. If curr_nr gets incremented * to min_nr after the allocation of @element, the elements * allocated after that are subject to the same guarantee. 
* * Waiters happen iff curr_nr is 0 and the above guarantee also * ensures that there will be frees which return elements to the * pool waking up the waiters. */ if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) { spin_lock_irqsave(&pool->lock, flags); if (likely(pool->curr_nr < pool->min_nr)) { add_element(pool, element); spin_unlock_irqrestore(&pool->lock, flags); wake_up(&pool->wait); return; } spin_unlock_irqrestore(&pool->lock, flags); } pool->free(element, pool->pool_data); } EXPORT_SYMBOL(mempool_free); /* * A commonly used alloc and free fn. */ void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data) { struct kmem_cache *mem = pool_data; VM_BUG_ON(mem->ctor); return kmem_cache_alloc_noprof(mem, gfp_mask); } EXPORT_SYMBOL(mempool_alloc_slab); void mempool_free_slab(void *element, void *pool_data) { struct kmem_cache *mem = pool_data; kmem_cache_free(mem, element); } EXPORT_SYMBOL(mempool_free_slab); /* * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory * specified by pool_data */ void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data) { size_t size = (size_t)pool_data; return kmalloc_noprof(size, gfp_mask); } EXPORT_SYMBOL(mempool_kmalloc); void mempool_kfree(void *element, void *pool_data) { kfree(element); } EXPORT_SYMBOL(mempool_kfree); void *mempool_kvmalloc(gfp_t gfp_mask, void *pool_data) { size_t size = (size_t)pool_data; return kvmalloc(size, gfp_mask); } EXPORT_SYMBOL(mempool_kvmalloc); void mempool_kvfree(void *element, void *pool_data) { kvfree(element); } EXPORT_SYMBOL(mempool_kvfree); /* * A simple mempool-backed page allocator that allocates pages * of the order specified by pool_data. */ void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data) { int order = (int)(long)pool_data; return alloc_pages_noprof(gfp_mask, order); } EXPORT_SYMBOL(mempool_alloc_pages); void mempool_free_pages(void *element, void *pool_data) { int order = (int)(long)pool_data; __free_pages(element, order); } EXPORT_SYMBOL(mempool_free_pages);
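/*
 * Hedged sketch: typical use of the API implemented above, backed by the
 * slab helpers mempool_alloc_slab()/mempool_free_slab(). "my_cache" is an
 * illustrative kmem_cache created elsewhere; mempool_create() is the
 * conventional wrapper around the node-aware constructor in this file.
 */
static mempool_t *example_pool;

static int example_pool_setup(struct kmem_cache *my_cache)
{
	/* Keep at least 4 preallocated objects for low-memory progress. */
	example_pool = mempool_create(4, mempool_alloc_slab,
				      mempool_free_slab, my_cache);
	if (!example_pool)
		return -ENOMEM;
	return 0;
}

static void example_pool_use(void)
{
	void *obj = mempool_alloc(example_pool, GFP_KERNEL);

	if (obj)
		mempool_free(obj, example_pool);
}

static void example_pool_teardown(void)
{
	mempool_destroy(example_pool);
}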
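/*
 * Editor's illustration, not part of mempool.c: the kmalloc- and page-based
 * helpers above receive their element size or page order through @pool_data,
 * so a pool can be built without a dedicated kmem_cache.  The names below
 * (foo_buf_pool, foo_page_pool, FOO_BUF_SIZE) are hypothetical; the two calls
 * are roughly what the mempool_create_kmalloc_pool() and
 * mempool_create_page_pool() convenience wrappers do.
 */
#include <linux/mempool.h>

#define FOO_BUF_SIZE	512	/* hypothetical element size for the kmalloc pool */

static mempool_t *foo_buf_pool;		/* elements come from kmalloc(FOO_BUF_SIZE, ...) */
static mempool_t *foo_page_pool;	/* elements are order-0 pages */

static int foo_pools_init(void)
{
	/* pool_data carries the size that mempool_kmalloc() casts back out. */
	foo_buf_pool = mempool_create(4, mempool_kmalloc, mempool_kfree,
				      (void *)(unsigned long)FOO_BUF_SIZE);
	if (!foo_buf_pool)
		return -ENOMEM;

	/* pool_data carries the page order used by mempool_alloc_pages(). */
	foo_page_pool = mempool_create(4, mempool_alloc_pages,
				       mempool_free_pages, (void *)0L);
	if (!foo_page_pool) {
		mempool_destroy(foo_buf_pool);
		return -ENOMEM;
	}
	return 0;
}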
// SPDX-License-Identifier: GPL-2.0-only
/*
 * xattr.c
 *
 * Copyright (C) 2004, 2008 Oracle.  All rights reserved.
 *
 * CREDITS:
 * Lots of code in this file is copy from linux/fs/ext3/xattr.c.
 * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
 */

#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/splice.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/sort.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/security.h>

#include <cluster/masklog.h>

#include "ocfs2.h"
#include "alloc.h"
#include "blockcheck.h"
#include "dlmglue.h"
#include "file.h"
#include "symlink.h"
#include "sysfile.h"
#include "inode.h"
#include "journal.h"
#include "ocfs2_fs.h"
#include "suballoc.h"
#include "uptodate.h"
#include "buffer_head_io.h"
#include "super.h"
#include "xattr.h"
#include "refcounttree.h"
#include "acl.h"
#include "ocfs2_trace.h"

struct ocfs2_xattr_def_value_root {
	struct ocfs2_xattr_value_root	xv;
	struct ocfs2_extent_rec		er;
};

struct ocfs2_xattr_bucket {
	/* The inode these xattrs are associated with */
	struct inode *bu_inode;

	/* The actual buffers that make up the bucket */
	struct buffer_head *bu_bhs[OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET];

	/* How many blocks make up one bucket for this filesystem */
	int bu_blocks;
};

struct ocfs2_xattr_set_ctxt {
	handle_t *handle;
	struct ocfs2_alloc_context *meta_ac;
	struct ocfs2_alloc_context *data_ac;
	struct ocfs2_cached_dealloc_ctxt dealloc;
	int set_abort;
};

#define OCFS2_XATTR_ROOT_SIZE	(sizeof(struct ocfs2_xattr_def_value_root))
#define OCFS2_XATTR_INLINE_SIZE	80
#define OCFS2_XATTR_HEADER_GAP	4
#define OCFS2_XATTR_FREE_IN_IBODY	(OCFS2_MIN_XATTR_INLINE_SIZE \
					 - sizeof(struct ocfs2_xattr_header) \
					 - OCFS2_XATTR_HEADER_GAP)
#define OCFS2_XATTR_FREE_IN_BLOCK(ptr)	((ptr)->i_sb->s_blocksize \
					 - sizeof(struct ocfs2_xattr_block) \
					 - sizeof(struct ocfs2_xattr_header) \
					 - OCFS2_XATTR_HEADER_GAP)

static struct ocfs2_xattr_def_value_root def_xv = {
	.xv.xr_list.l_count = cpu_to_le16(1),
};

const struct xattr_handler * const ocfs2_xattr_handlers[] = {
	&ocfs2_xattr_user_handler,
	&ocfs2_xattr_trusted_handler,
	&ocfs2_xattr_security_handler,
	NULL
};

static const struct xattr_handler * const ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = {
	[OCFS2_XATTR_INDEX_USER]		= &ocfs2_xattr_user_handler,
	[OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS]	= &nop_posix_acl_access,
	[OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT]	= &nop_posix_acl_default,
	[OCFS2_XATTR_INDEX_TRUSTED]		= &ocfs2_xattr_trusted_handler,
	[OCFS2_XATTR_INDEX_SECURITY]		= &ocfs2_xattr_security_handler,
};

struct ocfs2_xattr_info {
	int		xi_name_index;
	const char	*xi_name;
	int		xi_name_len;
	const void	*xi_value;
	size_t		xi_value_len;
};

struct ocfs2_xattr_search {
	struct buffer_head *inode_bh;
	/*
	 * xattr_bh points to the buffer head of the block that holds the
	 * extended attributes; when the extended attributes live in the
	 * inode itself, xattr_bh is equal to
inode_bh. */ struct buffer_head *xattr_bh; struct ocfs2_xattr_header *header; struct ocfs2_xattr_bucket *bucket; void *base; void *end; struct ocfs2_xattr_entry *here; int not_found; }; /* Operations on struct ocfs2_xa_entry */ struct ocfs2_xa_loc; struct ocfs2_xa_loc_operations { /* * Journal functions */ int (*xlo_journal_access)(handle_t *handle, struct ocfs2_xa_loc *loc, int type); void (*xlo_journal_dirty)(handle_t *handle, struct ocfs2_xa_loc *loc); /* * Return a pointer to the appropriate buffer in loc->xl_storage * at the given offset from loc->xl_header. */ void *(*xlo_offset_pointer)(struct ocfs2_xa_loc *loc, int offset); /* Can we reuse the existing entry for the new value? */ int (*xlo_can_reuse)(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_info *xi); /* How much space is needed for the new value? */ int (*xlo_check_space)(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_info *xi); /* * Return the offset of the first name+value pair. This is * the start of our downward-filling free space. */ int (*xlo_get_free_start)(struct ocfs2_xa_loc *loc); /* * Remove the name+value at this location. Do whatever is * appropriate with the remaining name+value pairs. */ void (*xlo_wipe_namevalue)(struct ocfs2_xa_loc *loc); /* Fill xl_entry with a new entry */ void (*xlo_add_entry)(struct ocfs2_xa_loc *loc, u32 name_hash); /* Add name+value storage to an entry */ void (*xlo_add_namevalue)(struct ocfs2_xa_loc *loc, int size); /* * Initialize the value buf's access and bh fields for this entry. * ocfs2_xa_fill_value_buf() will handle the xv pointer. */ void (*xlo_fill_value_buf)(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_value_buf *vb); }; /* * Describes an xattr entry location. This is a memory structure * tracking the on-disk structure. */ struct ocfs2_xa_loc { /* This xattr belongs to this inode */ struct inode *xl_inode; /* The ocfs2_xattr_header inside the on-disk storage. Not NULL. */ struct ocfs2_xattr_header *xl_header; /* Bytes from xl_header to the end of the storage */ int xl_size; /* * The ocfs2_xattr_entry this location describes. If this is * NULL, this location describes the on-disk structure where it * would have been. 
*/ struct ocfs2_xattr_entry *xl_entry; /* * Internal housekeeping */ /* Buffer(s) containing this entry */ void *xl_storage; /* Operations on the storage backing this location */ const struct ocfs2_xa_loc_operations *xl_ops; }; /* * Convenience functions to calculate how much space is needed for a * given name+value pair */ static int namevalue_size(int name_len, uint64_t value_len) { if (value_len > OCFS2_XATTR_INLINE_SIZE) return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE; else return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len); } static int namevalue_size_xi(struct ocfs2_xattr_info *xi) { return namevalue_size(xi->xi_name_len, xi->xi_value_len); } static int namevalue_size_xe(struct ocfs2_xattr_entry *xe) { u64 value_len = le64_to_cpu(xe->xe_value_size); BUG_ON((value_len > OCFS2_XATTR_INLINE_SIZE) && ocfs2_xattr_is_local(xe)); return namevalue_size(xe->xe_name_len, value_len); } static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb, struct ocfs2_xattr_header *xh, int index, int *block_off, int *new_offset); static int ocfs2_xattr_block_find(struct inode *inode, int name_index, const char *name, struct ocfs2_xattr_search *xs); static int ocfs2_xattr_index_block_find(struct inode *inode, struct buffer_head *root_bh, int name_index, const char *name, struct ocfs2_xattr_search *xs); static int ocfs2_xattr_tree_list_index_block(struct inode *inode, struct buffer_head *blk_bh, char *buffer, size_t buffer_size); static int ocfs2_xattr_create_index_block(struct inode *inode, struct ocfs2_xattr_search *xs, struct ocfs2_xattr_set_ctxt *ctxt); static int ocfs2_xattr_set_entry_index_block(struct inode *inode, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xs, struct ocfs2_xattr_set_ctxt *ctxt); typedef int (xattr_tree_rec_func)(struct inode *inode, struct buffer_head *root_bh, u64 blkno, u32 cpos, u32 len, void *para); static int ocfs2_iterate_xattr_index_block(struct inode *inode, struct buffer_head *root_bh, xattr_tree_rec_func *rec_func, void *para); static int ocfs2_delete_xattr_in_bucket(struct inode *inode, struct ocfs2_xattr_bucket *bucket, void *para); static int ocfs2_rm_xattr_cluster(struct inode *inode, struct buffer_head *root_bh, u64 blkno, u32 cpos, u32 len, void *para); static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle, u64 src_blk, u64 last_blk, u64 to_blk, unsigned int start_bucket, u32 *first_hash); static int ocfs2_prepare_refcount_xattr(struct inode *inode, struct ocfs2_dinode *di, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xis, struct ocfs2_xattr_search *xbs, struct ocfs2_refcount_tree **ref_tree, int *meta_need, int *credits); static int ocfs2_get_xattr_tree_value_root(struct super_block *sb, struct ocfs2_xattr_bucket *bucket, int offset, struct ocfs2_xattr_value_root **xv, struct buffer_head **bh); static inline u16 ocfs2_xattr_buckets_per_cluster(struct ocfs2_super *osb) { return (1 << osb->s_clustersize_bits) / OCFS2_XATTR_BUCKET_SIZE; } static inline u16 ocfs2_blocks_per_xattr_bucket(struct super_block *sb) { return OCFS2_XATTR_BUCKET_SIZE / (1 << sb->s_blocksize_bits); } #define bucket_blkno(_b) ((_b)->bu_bhs[0]->b_blocknr) #define bucket_block(_b, _n) ((_b)->bu_bhs[(_n)]->b_data) #define bucket_xh(_b) ((struct ocfs2_xattr_header *)bucket_block((_b), 0)) static struct ocfs2_xattr_bucket *ocfs2_xattr_bucket_new(struct inode *inode) { struct ocfs2_xattr_bucket *bucket; int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb); BUG_ON(blks > OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET); bucket = 
kzalloc(sizeof(struct ocfs2_xattr_bucket), GFP_NOFS); if (bucket) { bucket->bu_inode = inode; bucket->bu_blocks = blks; } return bucket; } static void ocfs2_xattr_bucket_relse(struct ocfs2_xattr_bucket *bucket) { int i; for (i = 0; i < bucket->bu_blocks; i++) { brelse(bucket->bu_bhs[i]); bucket->bu_bhs[i] = NULL; } } static void ocfs2_xattr_bucket_free(struct ocfs2_xattr_bucket *bucket) { if (bucket) { ocfs2_xattr_bucket_relse(bucket); bucket->bu_inode = NULL; kfree(bucket); } } /* * A bucket that has never been written to disk doesn't need to be * read. We just need the buffer_heads. Don't call this for * buckets that are already on disk. ocfs2_read_xattr_bucket() initializes * them fully. */ static int ocfs2_init_xattr_bucket(struct ocfs2_xattr_bucket *bucket, u64 xb_blkno, int new) { int i, rc = 0; for (i = 0; i < bucket->bu_blocks; i++) { bucket->bu_bhs[i] = sb_getblk(bucket->bu_inode->i_sb, xb_blkno + i); if (!bucket->bu_bhs[i]) { rc = -ENOMEM; mlog_errno(rc); break; } if (!ocfs2_buffer_uptodate(INODE_CACHE(bucket->bu_inode), bucket->bu_bhs[i])) { if (new) ocfs2_set_new_buffer_uptodate(INODE_CACHE(bucket->bu_inode), bucket->bu_bhs[i]); else { set_buffer_uptodate(bucket->bu_bhs[i]); ocfs2_set_buffer_uptodate(INODE_CACHE(bucket->bu_inode), bucket->bu_bhs[i]); } } } if (rc) ocfs2_xattr_bucket_relse(bucket); return rc; } /* Read the xattr bucket at xb_blkno */ static int ocfs2_read_xattr_bucket(struct ocfs2_xattr_bucket *bucket, u64 xb_blkno) { int rc; rc = ocfs2_read_blocks(INODE_CACHE(bucket->bu_inode), xb_blkno, bucket->bu_blocks, bucket->bu_bhs, 0, NULL); if (!rc) { spin_lock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock); rc = ocfs2_validate_meta_ecc_bhs(bucket->bu_inode->i_sb, bucket->bu_bhs, bucket->bu_blocks, &bucket_xh(bucket)->xh_check); spin_unlock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock); if (rc) mlog_errno(rc); } if (rc) ocfs2_xattr_bucket_relse(bucket); return rc; } static int ocfs2_xattr_bucket_journal_access(handle_t *handle, struct ocfs2_xattr_bucket *bucket, int type) { int i, rc = 0; for (i = 0; i < bucket->bu_blocks; i++) { rc = ocfs2_journal_access(handle, INODE_CACHE(bucket->bu_inode), bucket->bu_bhs[i], type); if (rc) { mlog_errno(rc); break; } } return rc; } static void ocfs2_xattr_bucket_journal_dirty(handle_t *handle, struct ocfs2_xattr_bucket *bucket) { int i; spin_lock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock); ocfs2_compute_meta_ecc_bhs(bucket->bu_inode->i_sb, bucket->bu_bhs, bucket->bu_blocks, &bucket_xh(bucket)->xh_check); spin_unlock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock); for (i = 0; i < bucket->bu_blocks; i++) ocfs2_journal_dirty(handle, bucket->bu_bhs[i]); } static void ocfs2_xattr_bucket_copy_data(struct ocfs2_xattr_bucket *dest, struct ocfs2_xattr_bucket *src) { int i; int blocksize = src->bu_inode->i_sb->s_blocksize; BUG_ON(dest->bu_blocks != src->bu_blocks); BUG_ON(dest->bu_inode != src->bu_inode); for (i = 0; i < src->bu_blocks; i++) { memcpy(bucket_block(dest, i), bucket_block(src, i), blocksize); } } static int ocfs2_validate_xattr_block(struct super_block *sb, struct buffer_head *bh) { int rc; struct ocfs2_xattr_block *xb = (struct ocfs2_xattr_block *)bh->b_data; trace_ocfs2_validate_xattr_block((unsigned long long)bh->b_blocknr); BUG_ON(!buffer_uptodate(bh)); /* * If the ecc fails, we return the error but otherwise * leave the filesystem running. We know any error is * local to this block. 
*/ rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &xb->xb_check); if (rc) return rc; /* * Errors after here are fatal */ if (!OCFS2_IS_VALID_XATTR_BLOCK(xb)) { return ocfs2_error(sb, "Extended attribute block #%llu has bad signature %.*s\n", (unsigned long long)bh->b_blocknr, 7, xb->xb_signature); } if (le64_to_cpu(xb->xb_blkno) != bh->b_blocknr) { return ocfs2_error(sb, "Extended attribute block #%llu has an invalid xb_blkno of %llu\n", (unsigned long long)bh->b_blocknr, (unsigned long long)le64_to_cpu(xb->xb_blkno)); } if (le32_to_cpu(xb->xb_fs_generation) != OCFS2_SB(sb)->fs_generation) { return ocfs2_error(sb, "Extended attribute block #%llu has an invalid xb_fs_generation of #%u\n", (unsigned long long)bh->b_blocknr, le32_to_cpu(xb->xb_fs_generation)); } return 0; } static int ocfs2_read_xattr_block(struct inode *inode, u64 xb_blkno, struct buffer_head **bh) { int rc; struct buffer_head *tmp = *bh; rc = ocfs2_read_block(INODE_CACHE(inode), xb_blkno, &tmp, ocfs2_validate_xattr_block); /* If ocfs2_read_block() got us a new bh, pass it up. */ if (!rc && !*bh) *bh = tmp; return rc; } static inline const char *ocfs2_xattr_prefix(int name_index) { const struct xattr_handler *handler = NULL; if (name_index > 0 && name_index < OCFS2_XATTR_MAX) handler = ocfs2_xattr_handler_map[name_index]; return handler ? xattr_prefix(handler) : NULL; } static u32 ocfs2_xattr_name_hash(struct inode *inode, const char *name, int name_len) { /* Get hash value of uuid from super block */ u32 hash = OCFS2_SB(inode->i_sb)->uuid_hash; int i; /* hash extended attribute name */ for (i = 0; i < name_len; i++) { hash = (hash << OCFS2_HASH_SHIFT) ^ (hash >> (8*sizeof(hash) - OCFS2_HASH_SHIFT)) ^ *name++; } return hash; } static int ocfs2_xattr_entry_real_size(int name_len, size_t value_len) { return namevalue_size(name_len, value_len) + sizeof(struct ocfs2_xattr_entry); } static int ocfs2_xi_entry_usage(struct ocfs2_xattr_info *xi) { return namevalue_size_xi(xi) + sizeof(struct ocfs2_xattr_entry); } static int ocfs2_xe_entry_usage(struct ocfs2_xattr_entry *xe) { return namevalue_size_xe(xe) + sizeof(struct ocfs2_xattr_entry); } int ocfs2_calc_security_init(struct inode *dir, struct ocfs2_security_xattr_info *si, int *want_clusters, int *xattr_credits, struct ocfs2_alloc_context **xattr_ac) { int ret = 0; struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); int s_size = ocfs2_xattr_entry_real_size(strlen(si->name), si->value_len); /* * The max space of security xattr taken inline is * 256(name) + 80(value) + 16(entry) = 352 bytes, * So reserve one metadata block for it is ok. 
*/ if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE || s_size > OCFS2_XATTR_FREE_IN_IBODY) { ret = ocfs2_reserve_new_metadata_blocks(osb, 1, xattr_ac); if (ret) { mlog_errno(ret); return ret; } *xattr_credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS; } /* reserve clusters for xattr value which will be set in B tree*/ if (si->value_len > OCFS2_XATTR_INLINE_SIZE) { int new_clusters = ocfs2_clusters_for_bytes(dir->i_sb, si->value_len); *xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb, new_clusters); *want_clusters += new_clusters; } return ret; } int ocfs2_calc_xattr_init(struct inode *dir, struct buffer_head *dir_bh, umode_t mode, struct ocfs2_security_xattr_info *si, int *want_clusters, int *xattr_credits, int *want_meta) { int ret = 0; struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); int s_size = 0, a_size = 0, acl_len = 0, new_clusters; if (si->enable) s_size = ocfs2_xattr_entry_real_size(strlen(si->name), si->value_len); if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) { down_read(&OCFS2_I(dir)->ip_xattr_sem); acl_len = ocfs2_xattr_get_nolock(dir, dir_bh, OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT, "", NULL, 0); up_read(&OCFS2_I(dir)->ip_xattr_sem); if (acl_len > 0) { a_size = ocfs2_xattr_entry_real_size(0, acl_len); if (S_ISDIR(mode)) a_size <<= 1; } else if (acl_len != 0 && acl_len != -ENODATA) { ret = acl_len; mlog_errno(ret); return ret; } } if (!(s_size + a_size)) return ret; /* * The max space of security xattr taken inline is * 256(name) + 80(value) + 16(entry) = 352 bytes, * The max space of acl xattr taken inline is * 80(value) + 16(entry) * 2(if directory) = 192 bytes, * when blocksize = 512, may reserve one more cluster for * xattr bucket, otherwise reserve one metadata block * for them is ok. * If this is a new directory with inline data, * we choose to reserve the entire inline area for * directory contents and force an external xattr block. */ if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE || (S_ISDIR(mode) && ocfs2_supports_inline_data(osb)) || (s_size + a_size) > OCFS2_XATTR_FREE_IN_IBODY) { *want_meta = *want_meta + 1; *xattr_credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS; } if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE && (s_size + a_size) > OCFS2_XATTR_FREE_IN_BLOCK(dir)) { *want_clusters += 1; *xattr_credits += ocfs2_blocks_per_xattr_bucket(dir->i_sb); } /* * reserve credits and clusters for xattrs which has large value * and have to be set outside */ if (si->enable && si->value_len > OCFS2_XATTR_INLINE_SIZE) { new_clusters = ocfs2_clusters_for_bytes(dir->i_sb, si->value_len); *xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb, new_clusters); *want_clusters += new_clusters; } if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL && acl_len > OCFS2_XATTR_INLINE_SIZE) { /* for directory, it has DEFAULT and ACCESS two types of acls */ new_clusters = (S_ISDIR(mode) ? 
2 : 1) * ocfs2_clusters_for_bytes(dir->i_sb, acl_len); *xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb, new_clusters); *want_clusters += new_clusters; } return ret; } static int ocfs2_xattr_extend_allocation(struct inode *inode, u32 clusters_to_add, struct ocfs2_xattr_value_buf *vb, struct ocfs2_xattr_set_ctxt *ctxt) { int status = 0, credits; handle_t *handle = ctxt->handle; enum ocfs2_alloc_restarted why; u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters); struct ocfs2_extent_tree et; ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb); while (clusters_to_add) { trace_ocfs2_xattr_extend_allocation(clusters_to_add); status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); break; } prev_clusters = le32_to_cpu(vb->vb_xv->xr_clusters); status = ocfs2_add_clusters_in_btree(handle, &et, &logical_start, clusters_to_add, 0, ctxt->data_ac, ctxt->meta_ac, &why); if ((status < 0) && (status != -EAGAIN)) { if (status != -ENOSPC) mlog_errno(status); break; } ocfs2_journal_dirty(handle, vb->vb_bh); clusters_to_add -= le32_to_cpu(vb->vb_xv->xr_clusters) - prev_clusters; if (why != RESTART_NONE && clusters_to_add) { /* * We can only fail in case the alloc file doesn't give * up enough clusters. */ BUG_ON(why == RESTART_META); credits = ocfs2_calc_extend_credits(inode->i_sb, &vb->vb_xv->xr_list); status = ocfs2_extend_trans(handle, credits); if (status < 0) { status = -ENOMEM; mlog_errno(status); break; } } } return status; } static int __ocfs2_remove_xattr_range(struct inode *inode, struct ocfs2_xattr_value_buf *vb, u32 cpos, u32 phys_cpos, u32 len, unsigned int ext_flags, struct ocfs2_xattr_set_ctxt *ctxt) { int ret; u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); handle_t *handle = ctxt->handle; struct ocfs2_extent_tree et; ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb); ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_remove_extent(handle, &et, cpos, len, ctxt->meta_ac, &ctxt->dealloc); if (ret) { mlog_errno(ret); goto out; } le32_add_cpu(&vb->vb_xv->xr_clusters, -len); ocfs2_journal_dirty(handle, vb->vb_bh); if (ext_flags & OCFS2_EXT_REFCOUNTED) ret = ocfs2_decrease_refcount(inode, handle, ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno), len, ctxt->meta_ac, &ctxt->dealloc, 1); else ret = ocfs2_cache_cluster_dealloc(&ctxt->dealloc, phys_blkno, len); if (ret) mlog_errno(ret); out: return ret; } static int ocfs2_xattr_shrink_size(struct inode *inode, u32 old_clusters, u32 new_clusters, struct ocfs2_xattr_value_buf *vb, struct ocfs2_xattr_set_ctxt *ctxt) { int ret = 0; unsigned int ext_flags; u32 trunc_len, cpos, phys_cpos, alloc_size; u64 block; if (old_clusters <= new_clusters) return 0; cpos = new_clusters; trunc_len = old_clusters - new_clusters; while (trunc_len) { ret = ocfs2_xattr_get_clusters(inode, cpos, &phys_cpos, &alloc_size, &vb->vb_xv->xr_list, &ext_flags); if (ret) { mlog_errno(ret); goto out; } if (alloc_size > trunc_len) alloc_size = trunc_len; ret = __ocfs2_remove_xattr_range(inode, vb, cpos, phys_cpos, alloc_size, ext_flags, ctxt); if (ret) { mlog_errno(ret); goto out; } block = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode), block, alloc_size); cpos += alloc_size; trunc_len -= alloc_size; } out: return ret; } static int ocfs2_xattr_value_truncate(struct inode *inode, struct 
ocfs2_xattr_value_buf *vb, int len, struct ocfs2_xattr_set_ctxt *ctxt) { int ret; u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb, len); u32 old_clusters = le32_to_cpu(vb->vb_xv->xr_clusters); if (new_clusters == old_clusters) return 0; if (new_clusters > old_clusters) ret = ocfs2_xattr_extend_allocation(inode, new_clusters - old_clusters, vb, ctxt); else ret = ocfs2_xattr_shrink_size(inode, old_clusters, new_clusters, vb, ctxt); return ret; } static int ocfs2_xattr_list_entry(struct super_block *sb, char *buffer, size_t size, size_t *result, int type, const char *name, int name_len) { char *p = buffer + *result; const char *prefix; int prefix_len; int total_len; switch(type) { case OCFS2_XATTR_INDEX_USER: if (OCFS2_SB(sb)->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR) return 0; break; case OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS: case OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT: if (!(sb->s_flags & SB_POSIXACL)) return 0; break; case OCFS2_XATTR_INDEX_TRUSTED: if (!capable(CAP_SYS_ADMIN)) return 0; break; } prefix = ocfs2_xattr_prefix(type); if (!prefix) return 0; prefix_len = strlen(prefix); total_len = prefix_len + name_len + 1; *result += total_len; /* we are just looking for how big our buffer needs to be */ if (!size) return 0; if (*result > size) return -ERANGE; memcpy(p, prefix, prefix_len); memcpy(p + prefix_len, name, name_len); p[prefix_len + name_len] = '\0'; return 0; } static int ocfs2_xattr_list_entries(struct inode *inode, struct ocfs2_xattr_header *header, char *buffer, size_t buffer_size) { size_t result = 0; int i, type, ret; const char *name; for (i = 0 ; i < le16_to_cpu(header->xh_count); i++) { struct ocfs2_xattr_entry *entry = &header->xh_entries[i]; type = ocfs2_xattr_get_type(entry); name = (const char *)header + le16_to_cpu(entry->xe_name_offset); ret = ocfs2_xattr_list_entry(inode->i_sb, buffer, buffer_size, &result, type, name, entry->xe_name_len); if (ret) return ret; } return result; } int ocfs2_has_inline_xattr_value_outside(struct inode *inode, struct ocfs2_dinode *di) { struct ocfs2_xattr_header *xh; int i; xh = (struct ocfs2_xattr_header *) ((void *)di + inode->i_sb->s_blocksize - le16_to_cpu(di->i_xattr_inline_size)); for (i = 0; i < le16_to_cpu(xh->xh_count); i++) if (!ocfs2_xattr_is_local(&xh->xh_entries[i])) return 1; return 0; } static int ocfs2_xattr_ibody_list(struct inode *inode, struct ocfs2_dinode *di, char *buffer, size_t buffer_size) { struct ocfs2_xattr_header *header = NULL; struct ocfs2_inode_info *oi = OCFS2_I(inode); int ret = 0; if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) return ret; header = (struct ocfs2_xattr_header *) ((void *)di + inode->i_sb->s_blocksize - le16_to_cpu(di->i_xattr_inline_size)); ret = ocfs2_xattr_list_entries(inode, header, buffer, buffer_size); return ret; } static int ocfs2_xattr_block_list(struct inode *inode, struct ocfs2_dinode *di, char *buffer, size_t buffer_size) { struct buffer_head *blk_bh = NULL; struct ocfs2_xattr_block *xb; int ret = 0; if (!di->i_xattr_loc) return ret; ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc), &blk_bh); if (ret < 0) { mlog_errno(ret); return ret; } xb = (struct ocfs2_xattr_block *)blk_bh->b_data; if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) { struct ocfs2_xattr_header *header = &xb->xb_attrs.xb_header; ret = ocfs2_xattr_list_entries(inode, header, buffer, buffer_size); } else ret = ocfs2_xattr_tree_list_index_block(inode, blk_bh, buffer, buffer_size); brelse(blk_bh); return ret; } ssize_t ocfs2_listxattr(struct dentry *dentry, char *buffer, size_t size) { 
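	/*
	 * Listing walks the two possible stores in order: entries kept
	 * inline in the inode body first, then (when i_xattr_loc is set)
	 * the external xattr block.  Each helper returns the number of
	 * bytes it produced (or would need when buffer is NULL), and the
	 * two counts are summed for the final return value.
	 */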
int ret = 0, i_ret = 0, b_ret = 0; struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di = NULL; struct ocfs2_inode_info *oi = OCFS2_I(d_inode(dentry)); if (!ocfs2_supports_xattr(OCFS2_SB(dentry->d_sb))) return -EOPNOTSUPP; if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) return ret; ret = ocfs2_inode_lock(d_inode(dentry), &di_bh, 0); if (ret < 0) { mlog_errno(ret); return ret; } di = (struct ocfs2_dinode *)di_bh->b_data; down_read(&oi->ip_xattr_sem); i_ret = ocfs2_xattr_ibody_list(d_inode(dentry), di, buffer, size); if (i_ret < 0) b_ret = 0; else { if (buffer) { buffer += i_ret; size -= i_ret; } b_ret = ocfs2_xattr_block_list(d_inode(dentry), di, buffer, size); if (b_ret < 0) i_ret = 0; } up_read(&oi->ip_xattr_sem); ocfs2_inode_unlock(d_inode(dentry), 0); brelse(di_bh); return i_ret + b_ret; } static int ocfs2_xattr_find_entry(struct inode *inode, int name_index, const char *name, struct ocfs2_xattr_search *xs) { struct ocfs2_xattr_entry *entry; size_t name_len; int i, name_offset, cmp = 1; if (name == NULL) return -EINVAL; name_len = strlen(name); entry = xs->here; for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) { if ((void *)entry >= xs->end) { ocfs2_error(inode->i_sb, "corrupted xattr entries"); return -EFSCORRUPTED; } cmp = name_index - ocfs2_xattr_get_type(entry); if (!cmp) cmp = name_len - entry->xe_name_len; if (!cmp) { name_offset = le16_to_cpu(entry->xe_name_offset); if ((xs->base + name_offset + name_len) > xs->end) { ocfs2_error(inode->i_sb, "corrupted xattr entries"); return -EFSCORRUPTED; } cmp = memcmp(name, (xs->base + name_offset), name_len); } if (cmp == 0) break; entry += 1; } xs->here = entry; return cmp ? -ENODATA : 0; } static int ocfs2_xattr_get_value_outside(struct inode *inode, struct ocfs2_xattr_value_root *xv, void *buffer, size_t len) { u32 cpos, p_cluster, num_clusters, bpc, clusters; u64 blkno; int i, ret = 0; size_t cplen, blocksize; struct buffer_head *bh = NULL; struct ocfs2_extent_list *el; el = &xv->xr_list; clusters = le32_to_cpu(xv->xr_clusters); bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); blocksize = inode->i_sb->s_blocksize; cpos = 0; while (cpos < clusters) { ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster, &num_clusters, el, NULL); if (ret) { mlog_errno(ret); goto out; } blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster); /* Copy ocfs2_xattr_value */ for (i = 0; i < num_clusters * bpc; i++, blkno++) { ret = ocfs2_read_block(INODE_CACHE(inode), blkno, &bh, NULL); if (ret) { mlog_errno(ret); goto out; } cplen = len >= blocksize ? 
blocksize : len; memcpy(buffer, bh->b_data, cplen); len -= cplen; buffer += cplen; brelse(bh); bh = NULL; if (len == 0) break; } cpos += num_clusters; } out: return ret; } static int ocfs2_xattr_ibody_get(struct inode *inode, int name_index, const char *name, void *buffer, size_t buffer_size, struct ocfs2_xattr_search *xs) { struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data; struct ocfs2_xattr_value_root *xv; size_t size; int ret = 0; if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) return -ENODATA; xs->end = (void *)di + inode->i_sb->s_blocksize; xs->header = (struct ocfs2_xattr_header *) (xs->end - le16_to_cpu(di->i_xattr_inline_size)); xs->base = (void *)xs->header; xs->here = xs->header->xh_entries; ret = ocfs2_xattr_find_entry(inode, name_index, name, xs); if (ret) return ret; size = le64_to_cpu(xs->here->xe_value_size); if (buffer) { if (size > buffer_size) return -ERANGE; if (ocfs2_xattr_is_local(xs->here)) { memcpy(buffer, (void *)xs->base + le16_to_cpu(xs->here->xe_name_offset) + OCFS2_XATTR_SIZE(xs->here->xe_name_len), size); } else { xv = (struct ocfs2_xattr_value_root *) (xs->base + le16_to_cpu( xs->here->xe_name_offset) + OCFS2_XATTR_SIZE(xs->here->xe_name_len)); ret = ocfs2_xattr_get_value_outside(inode, xv, buffer, size); if (ret < 0) { mlog_errno(ret); return ret; } } } return size; } static int ocfs2_xattr_block_get(struct inode *inode, int name_index, const char *name, void *buffer, size_t buffer_size, struct ocfs2_xattr_search *xs) { struct ocfs2_xattr_block *xb; struct ocfs2_xattr_value_root *xv; size_t size; int ret = -ENODATA, name_offset, name_len, i; int block_off; xs->bucket = ocfs2_xattr_bucket_new(inode); if (!xs->bucket) { ret = -ENOMEM; mlog_errno(ret); goto cleanup; } ret = ocfs2_xattr_block_find(inode, name_index, name, xs); if (ret) { mlog_errno(ret); goto cleanup; } if (xs->not_found) { ret = -ENODATA; goto cleanup; } xb = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data; size = le64_to_cpu(xs->here->xe_value_size); if (buffer) { ret = -ERANGE; if (size > buffer_size) goto cleanup; name_offset = le16_to_cpu(xs->here->xe_name_offset); name_len = OCFS2_XATTR_SIZE(xs->here->xe_name_len); i = xs->here - xs->header->xh_entries; if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) { ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb, bucket_xh(xs->bucket), i, &block_off, &name_offset); if (ret) { mlog_errno(ret); goto cleanup; } xs->base = bucket_block(xs->bucket, block_off); } if (ocfs2_xattr_is_local(xs->here)) { memcpy(buffer, (void *)xs->base + name_offset + name_len, size); } else { xv = (struct ocfs2_xattr_value_root *) (xs->base + name_offset + name_len); ret = ocfs2_xattr_get_value_outside(inode, xv, buffer, size); if (ret < 0) { mlog_errno(ret); goto cleanup; } } } ret = size; cleanup: ocfs2_xattr_bucket_free(xs->bucket); brelse(xs->xattr_bh); xs->xattr_bh = NULL; return ret; } int ocfs2_xattr_get_nolock(struct inode *inode, struct buffer_head *di_bh, int name_index, const char *name, void *buffer, size_t buffer_size) { int ret; struct ocfs2_dinode *di = NULL; struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_xattr_search xis = { .not_found = -ENODATA, }; struct ocfs2_xattr_search xbs = { .not_found = -ENODATA, }; if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb))) return -EOPNOTSUPP; if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) return -ENODATA; xis.inode_bh = xbs.inode_bh = di_bh; di = (struct ocfs2_dinode *)di_bh->b_data; ret = ocfs2_xattr_ibody_get(inode, name_index, 
name, buffer, buffer_size, &xis); if (ret == -ENODATA && di->i_xattr_loc) ret = ocfs2_xattr_block_get(inode, name_index, name, buffer, buffer_size, &xbs); return ret; } /* ocfs2_xattr_get() * * Copy an extended attribute into the buffer provided. * Buffer is NULL to compute the size of buffer required. */ static int ocfs2_xattr_get(struct inode *inode, int name_index, const char *name, void *buffer, size_t buffer_size) { int ret, had_lock; struct buffer_head *di_bh = NULL; struct ocfs2_lock_holder oh; had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 0, &oh); if (had_lock < 0) { mlog_errno(had_lock); return had_lock; } down_read(&OCFS2_I(inode)->ip_xattr_sem); ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index, name, buffer, buffer_size); up_read(&OCFS2_I(inode)->ip_xattr_sem); ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock); brelse(di_bh); return ret; } static int __ocfs2_xattr_set_value_outside(struct inode *inode, handle_t *handle, struct ocfs2_xattr_value_buf *vb, const void *value, int value_len) { int ret = 0, i, cp_len; u16 blocksize = inode->i_sb->s_blocksize; u32 p_cluster, num_clusters; u32 cpos = 0, bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); u32 clusters = ocfs2_clusters_for_bytes(inode->i_sb, value_len); u64 blkno; struct buffer_head *bh = NULL; unsigned int ext_flags; struct ocfs2_xattr_value_root *xv = vb->vb_xv; BUG_ON(clusters > le32_to_cpu(xv->xr_clusters)); while (cpos < clusters) { ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster, &num_clusters, &xv->xr_list, &ext_flags); if (ret) { mlog_errno(ret); goto out; } BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED); blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster); for (i = 0; i < num_clusters * bpc; i++, blkno++) { ret = ocfs2_read_block(INODE_CACHE(inode), blkno, &bh, NULL); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_journal_access(handle, INODE_CACHE(inode), bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); goto out; } cp_len = value_len > blocksize ? blocksize : value_len; memcpy(bh->b_data, value, cp_len); value_len -= cp_len; value += cp_len; if (cp_len < blocksize) memset(bh->b_data + cp_len, 0, blocksize - cp_len); ocfs2_journal_dirty(handle, bh); brelse(bh); bh = NULL; /* * XXX: do we need to empty all the following * blocks in this cluster? */ if (!value_len) break; } cpos += num_clusters; } out: brelse(bh); return ret; } static int ocfs2_xa_check_space_helper(int needed_space, int free_start, int num_entries) { int free_space; if (!needed_space) return 0; free_space = free_start - sizeof(struct ocfs2_xattr_header) - (num_entries * sizeof(struct ocfs2_xattr_entry)) - OCFS2_XATTR_HEADER_GAP; if (free_space < 0) return -EIO; if (free_space < needed_space) return -ENOSPC; return 0; } static int ocfs2_xa_journal_access(handle_t *handle, struct ocfs2_xa_loc *loc, int type) { return loc->xl_ops->xlo_journal_access(handle, loc, type); } static void ocfs2_xa_journal_dirty(handle_t *handle, struct ocfs2_xa_loc *loc) { loc->xl_ops->xlo_journal_dirty(handle, loc); } /* Give a pointer into the storage for the given offset */ static void *ocfs2_xa_offset_pointer(struct ocfs2_xa_loc *loc, int offset) { BUG_ON(offset >= loc->xl_size); return loc->xl_ops->xlo_offset_pointer(loc, offset); } /* * Wipe the name+value pair and allow the storage to reclaim it. This * must be followed by either removal of the entry or a call to * ocfs2_xa_add_namevalue(). 
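 *
 * Note that the two backends reclaim the space differently: block
 * storage compacts the remaining name+value pairs right away (see
 * ocfs2_xa_block_wipe_namevalue()), while bucket storage only adjusts
 * xh_name_value_len and leaves a hole until the bucket is defragmented.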
*/ static void ocfs2_xa_wipe_namevalue(struct ocfs2_xa_loc *loc) { loc->xl_ops->xlo_wipe_namevalue(loc); } /* * Find lowest offset to a name+value pair. This is the start of our * downward-growing free space. */ static int ocfs2_xa_get_free_start(struct ocfs2_xa_loc *loc) { return loc->xl_ops->xlo_get_free_start(loc); } /* Can we reuse loc->xl_entry for xi? */ static int ocfs2_xa_can_reuse_entry(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_info *xi) { return loc->xl_ops->xlo_can_reuse(loc, xi); } /* How much free space is needed to set the new value */ static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_info *xi) { return loc->xl_ops->xlo_check_space(loc, xi); } static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash) { loc->xl_ops->xlo_add_entry(loc, name_hash); loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash); /* * We can't leave the new entry's xe_name_offset at zero or * add_namevalue() will go nuts. We set it to the size of our * storage so that it can never be less than any other entry. */ loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size); } static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_info *xi) { int size = namevalue_size_xi(xi); int nameval_offset; char *nameval_buf; loc->xl_ops->xlo_add_namevalue(loc, size); loc->xl_entry->xe_value_size = cpu_to_le64(xi->xi_value_len); loc->xl_entry->xe_name_len = xi->xi_name_len; ocfs2_xattr_set_type(loc->xl_entry, xi->xi_name_index); ocfs2_xattr_set_local(loc->xl_entry, xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE); nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset); nameval_buf = ocfs2_xa_offset_pointer(loc, nameval_offset); memset(nameval_buf, 0, size); memcpy(nameval_buf, xi->xi_name, xi->xi_name_len); } static void ocfs2_xa_fill_value_buf(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_value_buf *vb) { int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset); int name_size = OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len); /* Value bufs are for value trees */ BUG_ON(ocfs2_xattr_is_local(loc->xl_entry)); BUG_ON(namevalue_size_xe(loc->xl_entry) != (name_size + OCFS2_XATTR_ROOT_SIZE)); loc->xl_ops->xlo_fill_value_buf(loc, vb); vb->vb_xv = (struct ocfs2_xattr_value_root *)ocfs2_xa_offset_pointer(loc, nameval_offset + name_size); } static int ocfs2_xa_block_journal_access(handle_t *handle, struct ocfs2_xa_loc *loc, int type) { struct buffer_head *bh = loc->xl_storage; ocfs2_journal_access_func access; if (loc->xl_size == (bh->b_size - offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header))) access = ocfs2_journal_access_xb; else access = ocfs2_journal_access_di; return access(handle, INODE_CACHE(loc->xl_inode), bh, type); } static void ocfs2_xa_block_journal_dirty(handle_t *handle, struct ocfs2_xa_loc *loc) { struct buffer_head *bh = loc->xl_storage; ocfs2_journal_dirty(handle, bh); } static void *ocfs2_xa_block_offset_pointer(struct ocfs2_xa_loc *loc, int offset) { return (char *)loc->xl_header + offset; } static int ocfs2_xa_block_can_reuse(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_info *xi) { /* * Block storage is strict. If the sizes aren't exact, we will * remove the old one and reinsert the new. 
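 * Bucket storage is more forgiving: ocfs2_xa_bucket_can_reuse() accepts
 * any existing slot that is at least as large as the new name+value
 * pair, since buckets tolerate internal holes.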
*/ return namevalue_size_xe(loc->xl_entry) == namevalue_size_xi(xi); } static int ocfs2_xa_block_get_free_start(struct ocfs2_xa_loc *loc) { struct ocfs2_xattr_header *xh = loc->xl_header; int i, count = le16_to_cpu(xh->xh_count); int offset, free_start = loc->xl_size; for (i = 0; i < count; i++) { offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset); if (offset < free_start) free_start = offset; } return free_start; } static int ocfs2_xa_block_check_space(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_info *xi) { int count = le16_to_cpu(loc->xl_header->xh_count); int free_start = ocfs2_xa_get_free_start(loc); int needed_space = ocfs2_xi_entry_usage(xi); /* * Block storage will reclaim the original entry before inserting * the new value, so we only need the difference. If the new * entry is smaller than the old one, we don't need anything. */ if (loc->xl_entry) { /* Don't need space if we're reusing! */ if (ocfs2_xa_can_reuse_entry(loc, xi)) needed_space = 0; else needed_space -= ocfs2_xe_entry_usage(loc->xl_entry); } if (needed_space < 0) needed_space = 0; return ocfs2_xa_check_space_helper(needed_space, free_start, count); } /* * Block storage for xattrs keeps the name+value pairs compacted. When * we remove one, we have to shift any that preceded it towards the end. */ static void ocfs2_xa_block_wipe_namevalue(struct ocfs2_xa_loc *loc) { int i, offset; int namevalue_offset, first_namevalue_offset, namevalue_size; struct ocfs2_xattr_entry *entry = loc->xl_entry; struct ocfs2_xattr_header *xh = loc->xl_header; int count = le16_to_cpu(xh->xh_count); namevalue_offset = le16_to_cpu(entry->xe_name_offset); namevalue_size = namevalue_size_xe(entry); first_namevalue_offset = ocfs2_xa_get_free_start(loc); /* Shift the name+value pairs */ memmove((char *)xh + first_namevalue_offset + namevalue_size, (char *)xh + first_namevalue_offset, namevalue_offset - first_namevalue_offset); memset((char *)xh + first_namevalue_offset, 0, namevalue_size); /* Now tell xh->xh_entries about it */ for (i = 0; i < count; i++) { offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset); if (offset <= namevalue_offset) le16_add_cpu(&xh->xh_entries[i].xe_name_offset, namevalue_size); } /* * Note that we don't update xh_free_start or xh_name_value_len * because they're not used in block-stored xattrs. */ } static void ocfs2_xa_block_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash) { int count = le16_to_cpu(loc->xl_header->xh_count); loc->xl_entry = &(loc->xl_header->xh_entries[count]); le16_add_cpu(&loc->xl_header->xh_count, 1); memset(loc->xl_entry, 0, sizeof(struct ocfs2_xattr_entry)); } static void ocfs2_xa_block_add_namevalue(struct ocfs2_xa_loc *loc, int size) { int free_start = ocfs2_xa_get_free_start(loc); loc->xl_entry->xe_name_offset = cpu_to_le16(free_start - size); } static void ocfs2_xa_block_fill_value_buf(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_value_buf *vb) { struct buffer_head *bh = loc->xl_storage; if (loc->xl_size == (bh->b_size - offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header))) vb->vb_access = ocfs2_journal_access_xb; else vb->vb_access = ocfs2_journal_access_di; vb->vb_bh = bh; } /* * Operations for xattrs stored in blocks. This includes inline inode * storage and unindexed ocfs2_xattr_blocks. 
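 *
 * The ocfs2_xa_loc_operations vtable is what lets the generic
 * ocfs2_xa_*() helpers (journal access, space checks, adding and wiping
 * name+value pairs) run unchanged over inline-inode storage, plain
 * xattr blocks and indexed buckets; only these backend callbacks differ.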
*/ static const struct ocfs2_xa_loc_operations ocfs2_xa_block_loc_ops = { .xlo_journal_access = ocfs2_xa_block_journal_access, .xlo_journal_dirty = ocfs2_xa_block_journal_dirty, .xlo_offset_pointer = ocfs2_xa_block_offset_pointer, .xlo_check_space = ocfs2_xa_block_check_space, .xlo_can_reuse = ocfs2_xa_block_can_reuse, .xlo_get_free_start = ocfs2_xa_block_get_free_start, .xlo_wipe_namevalue = ocfs2_xa_block_wipe_namevalue, .xlo_add_entry = ocfs2_xa_block_add_entry, .xlo_add_namevalue = ocfs2_xa_block_add_namevalue, .xlo_fill_value_buf = ocfs2_xa_block_fill_value_buf, }; static int ocfs2_xa_bucket_journal_access(handle_t *handle, struct ocfs2_xa_loc *loc, int type) { struct ocfs2_xattr_bucket *bucket = loc->xl_storage; return ocfs2_xattr_bucket_journal_access(handle, bucket, type); } static void ocfs2_xa_bucket_journal_dirty(handle_t *handle, struct ocfs2_xa_loc *loc) { struct ocfs2_xattr_bucket *bucket = loc->xl_storage; ocfs2_xattr_bucket_journal_dirty(handle, bucket); } static void *ocfs2_xa_bucket_offset_pointer(struct ocfs2_xa_loc *loc, int offset) { struct ocfs2_xattr_bucket *bucket = loc->xl_storage; int block, block_offset; /* The header is at the front of the bucket */ block = offset >> loc->xl_inode->i_sb->s_blocksize_bits; block_offset = offset % loc->xl_inode->i_sb->s_blocksize; return bucket_block(bucket, block) + block_offset; } static int ocfs2_xa_bucket_can_reuse(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_info *xi) { return namevalue_size_xe(loc->xl_entry) >= namevalue_size_xi(xi); } static int ocfs2_xa_bucket_get_free_start(struct ocfs2_xa_loc *loc) { struct ocfs2_xattr_bucket *bucket = loc->xl_storage; return le16_to_cpu(bucket_xh(bucket)->xh_free_start); } static int ocfs2_bucket_align_free_start(struct super_block *sb, int free_start, int size) { /* * We need to make sure that the name+value pair fits within * one block. */ if (((free_start - size) >> sb->s_blocksize_bits) != ((free_start - 1) >> sb->s_blocksize_bits)) free_start -= free_start % sb->s_blocksize; return free_start; } static int ocfs2_xa_bucket_check_space(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_info *xi) { int rc; int count = le16_to_cpu(loc->xl_header->xh_count); int free_start = ocfs2_xa_get_free_start(loc); int needed_space = ocfs2_xi_entry_usage(xi); int size = namevalue_size_xi(xi); struct super_block *sb = loc->xl_inode->i_sb; /* * Bucket storage does not reclaim name+value pairs it cannot * reuse. They live as holes until the bucket fills, and then * the bucket is defragmented. However, the bucket can reclaim * the ocfs2_xattr_entry. */ if (loc->xl_entry) { /* Don't need space if we're reusing! */ if (ocfs2_xa_can_reuse_entry(loc, xi)) needed_space = 0; else needed_space -= sizeof(struct ocfs2_xattr_entry); } BUG_ON(needed_space < 0); if (free_start < size) { if (needed_space) return -ENOSPC; } else { /* * First we check if it would fit in the first place. * Below, we align the free start to a block. This may * slide us below the minimum gap. By checking unaligned * first, we avoid that error. 
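 *
 * For example, with a 4096-byte block size, free_start == 4100 and a
 * 200-byte pair: 4100 - 200 = 3900 falls in block 0 while 4099 falls
 * in block 1, so ocfs2_bucket_align_free_start() pulls free_start back
 * to 4096 and the pair ends up entirely inside block 0.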
*/ rc = ocfs2_xa_check_space_helper(needed_space, free_start, count); if (rc) return rc; free_start = ocfs2_bucket_align_free_start(sb, free_start, size); } return ocfs2_xa_check_space_helper(needed_space, free_start, count); } static void ocfs2_xa_bucket_wipe_namevalue(struct ocfs2_xa_loc *loc) { le16_add_cpu(&loc->xl_header->xh_name_value_len, -namevalue_size_xe(loc->xl_entry)); } static void ocfs2_xa_bucket_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash) { struct ocfs2_xattr_header *xh = loc->xl_header; int count = le16_to_cpu(xh->xh_count); int low = 0, high = count - 1, tmp; struct ocfs2_xattr_entry *tmp_xe; /* * We keep buckets sorted by name_hash, so we need to find * our insert place. */ while (low <= high && count) { tmp = (low + high) / 2; tmp_xe = &xh->xh_entries[tmp]; if (name_hash > le32_to_cpu(tmp_xe->xe_name_hash)) low = tmp + 1; else if (name_hash < le32_to_cpu(tmp_xe->xe_name_hash)) high = tmp - 1; else { low = tmp; break; } } if (low != count) memmove(&xh->xh_entries[low + 1], &xh->xh_entries[low], ((count - low) * sizeof(struct ocfs2_xattr_entry))); le16_add_cpu(&xh->xh_count, 1); loc->xl_entry = &xh->xh_entries[low]; memset(loc->xl_entry, 0, sizeof(struct ocfs2_xattr_entry)); } static void ocfs2_xa_bucket_add_namevalue(struct ocfs2_xa_loc *loc, int size) { int free_start = ocfs2_xa_get_free_start(loc); struct ocfs2_xattr_header *xh = loc->xl_header; struct super_block *sb = loc->xl_inode->i_sb; int nameval_offset; free_start = ocfs2_bucket_align_free_start(sb, free_start, size); nameval_offset = free_start - size; loc->xl_entry->xe_name_offset = cpu_to_le16(nameval_offset); xh->xh_free_start = cpu_to_le16(nameval_offset); le16_add_cpu(&xh->xh_name_value_len, size); } static void ocfs2_xa_bucket_fill_value_buf(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_value_buf *vb) { struct ocfs2_xattr_bucket *bucket = loc->xl_storage; struct super_block *sb = loc->xl_inode->i_sb; int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset); int size = namevalue_size_xe(loc->xl_entry); int block_offset = nameval_offset >> sb->s_blocksize_bits; /* Values are not allowed to straddle block boundaries */ BUG_ON(block_offset != ((nameval_offset + size - 1) >> sb->s_blocksize_bits)); /* We expect the bucket to be filled in */ BUG_ON(!bucket->bu_bhs[block_offset]); vb->vb_access = ocfs2_journal_access; vb->vb_bh = bucket->bu_bhs[block_offset]; } /* Operations for xattrs stored in buckets. 
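 * Buckets keep their entries sorted by xe_name_hash (see the binary
 * search in ocfs2_xa_bucket_add_entry()), track the downward-growing
 * free space through xh_free_start and xh_name_value_len, and never
 * allow a name+value pair to straddle a block boundary.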
*/ static const struct ocfs2_xa_loc_operations ocfs2_xa_bucket_loc_ops = { .xlo_journal_access = ocfs2_xa_bucket_journal_access, .xlo_journal_dirty = ocfs2_xa_bucket_journal_dirty, .xlo_offset_pointer = ocfs2_xa_bucket_offset_pointer, .xlo_check_space = ocfs2_xa_bucket_check_space, .xlo_can_reuse = ocfs2_xa_bucket_can_reuse, .xlo_get_free_start = ocfs2_xa_bucket_get_free_start, .xlo_wipe_namevalue = ocfs2_xa_bucket_wipe_namevalue, .xlo_add_entry = ocfs2_xa_bucket_add_entry, .xlo_add_namevalue = ocfs2_xa_bucket_add_namevalue, .xlo_fill_value_buf = ocfs2_xa_bucket_fill_value_buf, }; static unsigned int ocfs2_xa_value_clusters(struct ocfs2_xa_loc *loc) { struct ocfs2_xattr_value_buf vb; if (ocfs2_xattr_is_local(loc->xl_entry)) return 0; ocfs2_xa_fill_value_buf(loc, &vb); return le32_to_cpu(vb.vb_xv->xr_clusters); } static int ocfs2_xa_value_truncate(struct ocfs2_xa_loc *loc, u64 bytes, struct ocfs2_xattr_set_ctxt *ctxt) { int trunc_rc, access_rc; struct ocfs2_xattr_value_buf vb; ocfs2_xa_fill_value_buf(loc, &vb); trunc_rc = ocfs2_xattr_value_truncate(loc->xl_inode, &vb, bytes, ctxt); /* * The caller of ocfs2_xa_value_truncate() has already called * ocfs2_xa_journal_access on the loc. However, The truncate code * calls ocfs2_extend_trans(). This may commit the previous * transaction and open a new one. If this is a bucket, truncate * could leave only vb->vb_bh set up for journaling. Meanwhile, * the caller is expecting to dirty the entire bucket. So we must * reset the journal work. We do this even if truncate has failed, * as it could have failed after committing the extend. */ access_rc = ocfs2_xa_journal_access(ctxt->handle, loc, OCFS2_JOURNAL_ACCESS_WRITE); /* Errors in truncate take precedence */ return trunc_rc ? trunc_rc : access_rc; } static void ocfs2_xa_remove_entry(struct ocfs2_xa_loc *loc) { int index, count; struct ocfs2_xattr_header *xh = loc->xl_header; struct ocfs2_xattr_entry *entry = loc->xl_entry; ocfs2_xa_wipe_namevalue(loc); loc->xl_entry = NULL; le16_add_cpu(&xh->xh_count, -1); count = le16_to_cpu(xh->xh_count); /* * Only zero out the entry if there are more remaining. This is * important for an empty bucket, as it keeps track of the * bucket's hash value. It doesn't hurt empty block storage. */ if (count) { index = ((char *)entry - (char *)&xh->xh_entries) / sizeof(struct ocfs2_xattr_entry); memmove(&xh->xh_entries[index], &xh->xh_entries[index + 1], (count - index) * sizeof(struct ocfs2_xattr_entry)); memset(&xh->xh_entries[count], 0, sizeof(struct ocfs2_xattr_entry)); } } /* * If we have a problem adjusting the size of an external value during * ocfs2_xa_prepare_entry() or ocfs2_xa_remove(), we may have an xattr * in an intermediate state. For example, the value may be partially * truncated. * * If the value tree hasn't changed, the extend/truncate went nowhere. * We have nothing to do. The caller can treat it as a straight error. * * If the value tree got partially truncated, we now have a corrupted * extended attribute. We're going to wipe its entry and leak the * clusters. Better to leak some storage than leave a corrupt entry. * * If the value tree grew, it obviously didn't grow enough for the * new entry. We're not going to try and reclaim those clusters either. * If there was already an external value there (orig_clusters != 0), * the new clusters are attached safely and we can just leave the old * value in place. If there was no external value there, we remove * the entry. * * This way, the xattr block we store in the journal will be consistent. 
* If the size change broke because of the journal, no changes will hit * disk anyway. */ static void ocfs2_xa_cleanup_value_truncate(struct ocfs2_xa_loc *loc, const char *what, unsigned int orig_clusters) { unsigned int new_clusters = ocfs2_xa_value_clusters(loc); char *nameval_buf = ocfs2_xa_offset_pointer(loc, le16_to_cpu(loc->xl_entry->xe_name_offset)); if (new_clusters < orig_clusters) { mlog(ML_ERROR, "Partial truncate while %s xattr %.*s. Leaking " "%u clusters and removing the entry\n", what, loc->xl_entry->xe_name_len, nameval_buf, orig_clusters - new_clusters); ocfs2_xa_remove_entry(loc); } else if (!orig_clusters) { mlog(ML_ERROR, "Unable to allocate an external value for xattr " "%.*s safely. Leaking %u clusters and removing the " "entry\n", loc->xl_entry->xe_name_len, nameval_buf, new_clusters - orig_clusters); ocfs2_xa_remove_entry(loc); } else if (new_clusters > orig_clusters) mlog(ML_ERROR, "Unable to grow xattr %.*s safely. %u new clusters " "have been added, but the value will not be " "modified\n", loc->xl_entry->xe_name_len, nameval_buf, new_clusters - orig_clusters); } static int ocfs2_xa_remove(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_set_ctxt *ctxt) { int rc = 0; unsigned int orig_clusters; if (!ocfs2_xattr_is_local(loc->xl_entry)) { orig_clusters = ocfs2_xa_value_clusters(loc); rc = ocfs2_xa_value_truncate(loc, 0, ctxt); if (rc) { mlog_errno(rc); /* * Since this is remove, we can return 0 if * ocfs2_xa_cleanup_value_truncate() is going to * wipe the entry anyway. So we check the * cluster count as well. */ if (orig_clusters != ocfs2_xa_value_clusters(loc)) rc = 0; ocfs2_xa_cleanup_value_truncate(loc, "removing", orig_clusters); goto out; } } ocfs2_xa_remove_entry(loc); out: return rc; } static void ocfs2_xa_install_value_root(struct ocfs2_xa_loc *loc) { int name_size = OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len); char *nameval_buf; nameval_buf = ocfs2_xa_offset_pointer(loc, le16_to_cpu(loc->xl_entry->xe_name_offset)); memcpy(nameval_buf + name_size, &def_xv, OCFS2_XATTR_ROOT_SIZE); } /* * Take an existing entry and make it ready for the new value. This * won't allocate space, but it may free space. It should be ready for * ocfs2_xa_prepare_entry() to finish the work. */ static int ocfs2_xa_reuse_entry(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_set_ctxt *ctxt) { int rc = 0; int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len); unsigned int orig_clusters; char *nameval_buf; int xe_local = ocfs2_xattr_is_local(loc->xl_entry); int xi_local = xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE; BUG_ON(OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len) != name_size); nameval_buf = ocfs2_xa_offset_pointer(loc, le16_to_cpu(loc->xl_entry->xe_name_offset)); if (xe_local) { memset(nameval_buf + name_size, 0, namevalue_size_xe(loc->xl_entry) - name_size); if (!xi_local) ocfs2_xa_install_value_root(loc); } else { orig_clusters = ocfs2_xa_value_clusters(loc); if (xi_local) { rc = ocfs2_xa_value_truncate(loc, 0, ctxt); if (rc < 0) mlog_errno(rc); else memset(nameval_buf + name_size, 0, namevalue_size_xe(loc->xl_entry) - name_size); } else if (le64_to_cpu(loc->xl_entry->xe_value_size) > xi->xi_value_len) { rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len, ctxt); if (rc < 0) mlog_errno(rc); } if (rc) { ocfs2_xa_cleanup_value_truncate(loc, "reusing", orig_clusters); goto out; } } loc->xl_entry->xe_value_size = cpu_to_le64(xi->xi_value_len); ocfs2_xattr_set_local(loc->xl_entry, xi_local); out: return rc; } /* * Prepares loc->xl_entry to receive the new xattr. 
This includes * properly setting up the name+value pair region. If loc->xl_entry * already exists, it will take care of modifying it appropriately. * * Note that this modifies the data. You did journal_access already, * right? */ static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_info *xi, u32 name_hash, struct ocfs2_xattr_set_ctxt *ctxt) { int rc = 0; unsigned int orig_clusters; __le64 orig_value_size = 0; rc = ocfs2_xa_check_space(loc, xi); if (rc) goto out; if (loc->xl_entry) { if (ocfs2_xa_can_reuse_entry(loc, xi)) { orig_value_size = loc->xl_entry->xe_value_size; rc = ocfs2_xa_reuse_entry(loc, xi, ctxt); if (rc) goto out; goto alloc_value; } if (!ocfs2_xattr_is_local(loc->xl_entry)) { orig_clusters = ocfs2_xa_value_clusters(loc); rc = ocfs2_xa_value_truncate(loc, 0, ctxt); if (rc) { mlog_errno(rc); ocfs2_xa_cleanup_value_truncate(loc, "overwriting", orig_clusters); goto out; } } ocfs2_xa_wipe_namevalue(loc); } else ocfs2_xa_add_entry(loc, name_hash); /* * If we get here, we have a blank entry. Fill it. We grow our * name+value pair back from the end. */ ocfs2_xa_add_namevalue(loc, xi); if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) ocfs2_xa_install_value_root(loc); alloc_value: if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) { orig_clusters = ocfs2_xa_value_clusters(loc); rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len, ctxt); if (rc < 0) { ctxt->set_abort = 1; ocfs2_xa_cleanup_value_truncate(loc, "growing", orig_clusters); /* * If we were growing an existing value, * ocfs2_xa_cleanup_value_truncate() won't remove * the entry. We need to restore the original value * size. */ if (loc->xl_entry) { BUG_ON(!orig_value_size); loc->xl_entry->xe_value_size = orig_value_size; } mlog_errno(rc); } } out: return rc; } /* * Store the value portion of the name+value pair. This will skip * values that are stored externally. Their tree roots were set up * by ocfs2_xa_prepare_entry(). */ static int ocfs2_xa_store_value(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_set_ctxt *ctxt) { int rc = 0; int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset); int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len); char *nameval_buf; struct ocfs2_xattr_value_buf vb; nameval_buf = ocfs2_xa_offset_pointer(loc, nameval_offset); if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) { ocfs2_xa_fill_value_buf(loc, &vb); rc = __ocfs2_xattr_set_value_outside(loc->xl_inode, ctxt->handle, &vb, xi->xi_value, xi->xi_value_len); } else memcpy(nameval_buf + name_size, xi->xi_value, xi->xi_value_len); return rc; } static int ocfs2_xa_set(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_set_ctxt *ctxt) { int ret; u32 name_hash = ocfs2_xattr_name_hash(loc->xl_inode, xi->xi_name, xi->xi_name_len); ret = ocfs2_xa_journal_access(ctxt->handle, loc, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } /* * From here on out, everything is going to modify the buffer a * little. Errors are going to leave the xattr header in a * sane state. Thus, even with errors we dirty the sucker. 
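 *
 * The flow below: a NULL xi_value means removal; otherwise
 * ocfs2_xa_prepare_entry() makes room (reusing, wiping or adding an
 * entry) and ocfs2_xa_store_value() copies the value in.  Once journal
 * access has succeeded, the loc is dirtied on both the success and the
 * error paths.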
*/ /* Don't worry, we are never called with !xi_value and !xl_entry */ if (!xi->xi_value) { ret = ocfs2_xa_remove(loc, ctxt); goto out_dirty; } ret = ocfs2_xa_prepare_entry(loc, xi, name_hash, ctxt); if (ret) { if (ret != -ENOSPC) mlog_errno(ret); goto out_dirty; } ret = ocfs2_xa_store_value(loc, xi, ctxt); if (ret) mlog_errno(ret); out_dirty: ocfs2_xa_journal_dirty(ctxt->handle, loc); out: return ret; } static void ocfs2_init_dinode_xa_loc(struct ocfs2_xa_loc *loc, struct inode *inode, struct buffer_head *bh, struct ocfs2_xattr_entry *entry) { struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data; BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_XATTR_FL)); loc->xl_inode = inode; loc->xl_ops = &ocfs2_xa_block_loc_ops; loc->xl_storage = bh; loc->xl_entry = entry; loc->xl_size = le16_to_cpu(di->i_xattr_inline_size); loc->xl_header = (struct ocfs2_xattr_header *)(bh->b_data + bh->b_size - loc->xl_size); } static void ocfs2_init_xattr_block_xa_loc(struct ocfs2_xa_loc *loc, struct inode *inode, struct buffer_head *bh, struct ocfs2_xattr_entry *entry) { struct ocfs2_xattr_block *xb = (struct ocfs2_xattr_block *)bh->b_data; BUG_ON(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED); loc->xl_inode = inode; loc->xl_ops = &ocfs2_xa_block_loc_ops; loc->xl_storage = bh; loc->xl_header = &(xb->xb_attrs.xb_header); loc->xl_entry = entry; loc->xl_size = bh->b_size - offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header); } static void ocfs2_init_xattr_bucket_xa_loc(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_bucket *bucket, struct ocfs2_xattr_entry *entry) { loc->xl_inode = bucket->bu_inode; loc->xl_ops = &ocfs2_xa_bucket_loc_ops; loc->xl_storage = bucket; loc->xl_header = bucket_xh(bucket); loc->xl_entry = entry; loc->xl_size = OCFS2_XATTR_BUCKET_SIZE; } /* * In xattr remove, if it is stored outside and refcounted, we may have * the chance to split the refcount tree. So need the allocators. 
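 *
 * Concretely, ocfs2_lock_xattr_remove_allocators() below only reserves
 * metadata when the value's first extent carries OCFS2_EXT_REFCOUNTED;
 * the extra journal credits and metadata blocks are sized by
 * ocfs2_refcounted_xattr_delete_need().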
*/ static int ocfs2_lock_xattr_remove_allocators(struct inode *inode, struct ocfs2_xattr_value_root *xv, struct ocfs2_caching_info *ref_ci, struct buffer_head *ref_root_bh, struct ocfs2_alloc_context **meta_ac, int *ref_credits) { int ret, meta_add = 0; u32 p_cluster, num_clusters; unsigned int ext_flags; *ref_credits = 0; ret = ocfs2_xattr_get_clusters(inode, 0, &p_cluster, &num_clusters, &xv->xr_list, &ext_flags); if (ret) { mlog_errno(ret); goto out; } if (!(ext_flags & OCFS2_EXT_REFCOUNTED)) goto out; ret = ocfs2_refcounted_xattr_delete_need(inode, ref_ci, ref_root_bh, xv, &meta_add, ref_credits); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb), meta_add, meta_ac); if (ret) mlog_errno(ret); out: return ret; } static int ocfs2_remove_value_outside(struct inode*inode, struct ocfs2_xattr_value_buf *vb, struct ocfs2_xattr_header *header, struct ocfs2_caching_info *ref_ci, struct buffer_head *ref_root_bh) { int ret = 0, i, ref_credits; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, }; void *val; ocfs2_init_dealloc_ctxt(&ctxt.dealloc); for (i = 0; i < le16_to_cpu(header->xh_count); i++) { struct ocfs2_xattr_entry *entry = &header->xh_entries[i]; if (ocfs2_xattr_is_local(entry)) continue; val = (void *)header + le16_to_cpu(entry->xe_name_offset); vb->vb_xv = (struct ocfs2_xattr_value_root *) (val + OCFS2_XATTR_SIZE(entry->xe_name_len)); ret = ocfs2_lock_xattr_remove_allocators(inode, vb->vb_xv, ref_ci, ref_root_bh, &ctxt.meta_ac, &ref_credits); ctxt.handle = ocfs2_start_trans(osb, ref_credits + ocfs2_remove_extent_credits(osb->sb)); if (IS_ERR(ctxt.handle)) { ret = PTR_ERR(ctxt.handle); mlog_errno(ret); break; } ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt); ocfs2_commit_trans(osb, ctxt.handle); if (ctxt.meta_ac) { ocfs2_free_alloc_context(ctxt.meta_ac); ctxt.meta_ac = NULL; } if (ret < 0) { mlog_errno(ret); break; } } if (ctxt.meta_ac) ocfs2_free_alloc_context(ctxt.meta_ac); ocfs2_schedule_truncate_log_flush(osb, 1); ocfs2_run_deallocs(osb, &ctxt.dealloc); return ret; } static int ocfs2_xattr_ibody_remove(struct inode *inode, struct buffer_head *di_bh, struct ocfs2_caching_info *ref_ci, struct buffer_head *ref_root_bh) { struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_xattr_header *header; int ret; struct ocfs2_xattr_value_buf vb = { .vb_bh = di_bh, .vb_access = ocfs2_journal_access_di, }; header = (struct ocfs2_xattr_header *) ((void *)di + inode->i_sb->s_blocksize - le16_to_cpu(di->i_xattr_inline_size)); ret = ocfs2_remove_value_outside(inode, &vb, header, ref_ci, ref_root_bh); return ret; } struct ocfs2_rm_xattr_bucket_para { struct ocfs2_caching_info *ref_ci; struct buffer_head *ref_root_bh; }; static int ocfs2_xattr_block_remove(struct inode *inode, struct buffer_head *blk_bh, struct ocfs2_caching_info *ref_ci, struct buffer_head *ref_root_bh) { struct ocfs2_xattr_block *xb; int ret = 0; struct ocfs2_xattr_value_buf vb = { .vb_bh = blk_bh, .vb_access = ocfs2_journal_access_xb, }; struct ocfs2_rm_xattr_bucket_para args = { .ref_ci = ref_ci, .ref_root_bh = ref_root_bh, }; xb = (struct ocfs2_xattr_block *)blk_bh->b_data; if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) { struct ocfs2_xattr_header *header = &(xb->xb_attrs.xb_header); ret = ocfs2_remove_value_outside(inode, &vb, header, ref_ci, ref_root_bh); } else ret = ocfs2_iterate_xattr_index_block(inode, blk_bh, ocfs2_rm_xattr_cluster, &args); return ret; } static int 
ocfs2_xattr_free_block(struct inode *inode, u64 block, struct ocfs2_caching_info *ref_ci, struct buffer_head *ref_root_bh) { struct inode *xb_alloc_inode; struct buffer_head *xb_alloc_bh = NULL; struct buffer_head *blk_bh = NULL; struct ocfs2_xattr_block *xb; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); handle_t *handle; int ret = 0; u64 blk, bg_blkno; u16 bit; ret = ocfs2_read_xattr_block(inode, block, &blk_bh); if (ret < 0) { mlog_errno(ret); goto out; } ret = ocfs2_xattr_block_remove(inode, blk_bh, ref_ci, ref_root_bh); if (ret < 0) { mlog_errno(ret); goto out; } xb = (struct ocfs2_xattr_block *)blk_bh->b_data; blk = le64_to_cpu(xb->xb_blkno); bit = le16_to_cpu(xb->xb_suballoc_bit); if (xb->xb_suballoc_loc) bg_blkno = le64_to_cpu(xb->xb_suballoc_loc); else bg_blkno = ocfs2_which_suballoc_group(blk, bit); xb_alloc_inode = ocfs2_get_system_file_inode(osb, EXTENT_ALLOC_SYSTEM_INODE, le16_to_cpu(xb->xb_suballoc_slot)); if (!xb_alloc_inode) { ret = -ENOMEM; mlog_errno(ret); goto out; } inode_lock(xb_alloc_inode); ret = ocfs2_inode_lock(xb_alloc_inode, &xb_alloc_bh, 1); if (ret < 0) { mlog_errno(ret); goto out_mutex; } handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out_unlock; } ret = ocfs2_free_suballoc_bits(handle, xb_alloc_inode, xb_alloc_bh, bit, bg_blkno, 1); if (ret < 0) mlog_errno(ret); ocfs2_commit_trans(osb, handle); out_unlock: ocfs2_inode_unlock(xb_alloc_inode, 1); brelse(xb_alloc_bh); out_mutex: inode_unlock(xb_alloc_inode); iput(xb_alloc_inode); out: brelse(blk_bh); return ret; } /* * ocfs2_xattr_remove() * * Free extended attribute resources associated with this inode. */ int ocfs2_xattr_remove(struct inode *inode, struct buffer_head *di_bh) { struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_refcount_tree *ref_tree = NULL; struct buffer_head *ref_root_bh = NULL; struct ocfs2_caching_info *ref_ci = NULL; handle_t *handle; int ret; if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb))) return 0; if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) return 0; if (ocfs2_is_refcount_inode(inode)) { ret = ocfs2_lock_refcount_tree(OCFS2_SB(inode->i_sb), le64_to_cpu(di->i_refcount_loc), 1, &ref_tree, &ref_root_bh); if (ret) { mlog_errno(ret); goto out; } ref_ci = &ref_tree->rf_ci; } if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) { ret = ocfs2_xattr_ibody_remove(inode, di_bh, ref_ci, ref_root_bh); if (ret < 0) { mlog_errno(ret); goto out; } } if (di->i_xattr_loc) { ret = ocfs2_xattr_free_block(inode, le64_to_cpu(di->i_xattr_loc), ref_ci, ref_root_bh); if (ret < 0) { mlog_errno(ret); goto out; } } handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)), OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out; } ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; } di->i_xattr_loc = 0; spin_lock(&oi->ip_lock); oi->ip_dyn_features &= ~(OCFS2_INLINE_XATTR_FL | OCFS2_HAS_XATTR_FL); di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); spin_unlock(&oi->ip_lock); ocfs2_update_inode_fsync_trans(handle, inode, 0); ocfs2_journal_dirty(handle, di_bh); out_commit: ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); out: if (ref_tree) ocfs2_unlock_refcount_tree(OCFS2_SB(inode->i_sb), ref_tree, 1); brelse(ref_root_bh); return ret; } static int ocfs2_xattr_has_space_inline(struct inode *inode, struct ocfs2_dinode *di) { struct 
ocfs2_inode_info *oi = OCFS2_I(inode); unsigned int xattrsize = OCFS2_SB(inode->i_sb)->s_xattr_inline_size; int free; if (xattrsize < OCFS2_MIN_XATTR_INLINE_SIZE) return 0; if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) { struct ocfs2_inline_data *idata = &di->id2.i_data; free = le16_to_cpu(idata->id_count) - le64_to_cpu(di->i_size); } else if (ocfs2_inode_is_fast_symlink(inode)) { free = ocfs2_fast_symlink_chars(inode->i_sb) - le64_to_cpu(di->i_size); } else { struct ocfs2_extent_list *el = &di->id2.i_list; free = (le16_to_cpu(el->l_count) - le16_to_cpu(el->l_next_free_rec)) * sizeof(struct ocfs2_extent_rec); } if (free >= xattrsize) return 1; return 0; } /* * ocfs2_xattr_ibody_find() * * Find extended attribute in inode block and * fill search info into struct ocfs2_xattr_search. */ static int ocfs2_xattr_ibody_find(struct inode *inode, int name_index, const char *name, struct ocfs2_xattr_search *xs) { struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data; int ret; int has_space = 0; if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE) return 0; if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) { down_read(&oi->ip_alloc_sem); has_space = ocfs2_xattr_has_space_inline(inode, di); up_read(&oi->ip_alloc_sem); if (!has_space) return 0; } xs->xattr_bh = xs->inode_bh; xs->end = (void *)di + inode->i_sb->s_blocksize; if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) xs->header = (struct ocfs2_xattr_header *) (xs->end - le16_to_cpu(di->i_xattr_inline_size)); else xs->header = (struct ocfs2_xattr_header *) (xs->end - OCFS2_SB(inode->i_sb)->s_xattr_inline_size); xs->base = (void *)xs->header; xs->here = xs->header->xh_entries; /* Find the named attribute. */ if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) { ret = ocfs2_xattr_find_entry(inode, name_index, name, xs); if (ret && ret != -ENODATA) return ret; xs->not_found = ret; } return 0; } static int ocfs2_xattr_ibody_init(struct inode *inode, struct buffer_head *di_bh, struct ocfs2_xattr_set_ctxt *ctxt) { int ret; struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); unsigned int xattrsize = osb->s_xattr_inline_size; if (!ocfs2_xattr_has_space_inline(inode, di)) { ret = -ENOSPC; goto out; } ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } /* * Adjust extent record count or inline data size * to reserve space for extended attribute. */ if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) { struct ocfs2_inline_data *idata = &di->id2.i_data; le16_add_cpu(&idata->id_count, -xattrsize); } else if (!(ocfs2_inode_is_fast_symlink(inode))) { struct ocfs2_extent_list *el = &di->id2.i_list; le16_add_cpu(&el->l_count, -(xattrsize / sizeof(struct ocfs2_extent_rec))); } di->i_xattr_inline_size = cpu_to_le16(xattrsize); spin_lock(&oi->ip_lock); oi->ip_dyn_features |= OCFS2_INLINE_XATTR_FL|OCFS2_HAS_XATTR_FL; di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); spin_unlock(&oi->ip_lock); ocfs2_journal_dirty(ctxt->handle, di_bh); out: return ret; } /* * ocfs2_xattr_ibody_set() * * Set, replace or remove an extended attribute into inode block. 
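 * The inline region is created lazily: if OCFS2_INLINE_XATTR_FL is not
 * set yet, ocfs2_xattr_ibody_set() first calls ocfs2_xattr_ibody_init()
 * to carve s_xattr_inline_size bytes out of the inline-data area or the
 * extent list.  Inodes on minimum-block-size (512 byte) volumes never
 * store xattrs inline and get -ENOSPC from this path.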
* */ static int ocfs2_xattr_ibody_set(struct inode *inode, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xs, struct ocfs2_xattr_set_ctxt *ctxt) { int ret; struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_xa_loc loc; if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE) return -ENOSPC; down_write(&oi->ip_alloc_sem); if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) { ret = ocfs2_xattr_ibody_init(inode, xs->inode_bh, ctxt); if (ret) { if (ret != -ENOSPC) mlog_errno(ret); goto out; } } ocfs2_init_dinode_xa_loc(&loc, inode, xs->inode_bh, xs->not_found ? NULL : xs->here); ret = ocfs2_xa_set(&loc, xi, ctxt); if (ret) { if (ret != -ENOSPC) mlog_errno(ret); goto out; } xs->here = loc.xl_entry; out: up_write(&oi->ip_alloc_sem); return ret; } /* * ocfs2_xattr_block_find() * * Find extended attribute in external block and * fill search info into struct ocfs2_xattr_search. */ static int ocfs2_xattr_block_find(struct inode *inode, int name_index, const char *name, struct ocfs2_xattr_search *xs) { struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data; struct buffer_head *blk_bh = NULL; struct ocfs2_xattr_block *xb; int ret = 0; if (!di->i_xattr_loc) return ret; ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc), &blk_bh); if (ret < 0) { mlog_errno(ret); return ret; } xs->xattr_bh = blk_bh; xb = (struct ocfs2_xattr_block *)blk_bh->b_data; if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) { xs->header = &xb->xb_attrs.xb_header; xs->base = (void *)xs->header; xs->end = (void *)(blk_bh->b_data) + blk_bh->b_size; xs->here = xs->header->xh_entries; ret = ocfs2_xattr_find_entry(inode, name_index, name, xs); } else ret = ocfs2_xattr_index_block_find(inode, blk_bh, name_index, name, xs); if (ret && ret != -ENODATA) { xs->xattr_bh = NULL; goto cleanup; } xs->not_found = ret; return 0; cleanup: brelse(blk_bh); return ret; } static int ocfs2_create_xattr_block(struct inode *inode, struct buffer_head *inode_bh, struct ocfs2_xattr_set_ctxt *ctxt, int indexed, struct buffer_head **ret_bh) { int ret; u16 suballoc_bit_start; u32 num_got; u64 suballoc_loc, first_blkno; struct ocfs2_dinode *di = (struct ocfs2_dinode *)inode_bh->b_data; struct buffer_head *new_bh = NULL; struct ocfs2_xattr_block *xblk; ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode), inode_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (ret < 0) { mlog_errno(ret); goto end; } ret = ocfs2_claim_metadata(ctxt->handle, ctxt->meta_ac, 1, &suballoc_loc, &suballoc_bit_start, &num_got, &first_blkno); if (ret < 0) { mlog_errno(ret); goto end; } new_bh = sb_getblk(inode->i_sb, first_blkno); if (!new_bh) { ret = -ENOMEM; mlog_errno(ret); goto end; } ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh); ret = ocfs2_journal_access_xb(ctxt->handle, INODE_CACHE(inode), new_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (ret < 0) { mlog_errno(ret); goto end; } /* Initialize ocfs2_xattr_block */ xblk = (struct ocfs2_xattr_block *)new_bh->b_data; memset(xblk, 0, inode->i_sb->s_blocksize); strcpy((void *)xblk, OCFS2_XATTR_BLOCK_SIGNATURE); xblk->xb_suballoc_slot = cpu_to_le16(ctxt->meta_ac->ac_alloc_slot); xblk->xb_suballoc_loc = cpu_to_le64(suballoc_loc); xblk->xb_suballoc_bit = cpu_to_le16(suballoc_bit_start); xblk->xb_fs_generation = cpu_to_le32(OCFS2_SB(inode->i_sb)->fs_generation); xblk->xb_blkno = cpu_to_le64(first_blkno); if (indexed) { struct ocfs2_xattr_tree_root *xr = &xblk->xb_attrs.xb_root; xr->xt_clusters = cpu_to_le32(1); xr->xt_last_eb_blk = 0; xr->xt_list.l_tree_depth = 0; xr->xt_list.l_count = 
cpu_to_le16( ocfs2_xattr_recs_per_xb(inode->i_sb)); xr->xt_list.l_next_free_rec = cpu_to_le16(1); xblk->xb_flags = cpu_to_le16(OCFS2_XATTR_INDEXED); } ocfs2_journal_dirty(ctxt->handle, new_bh); /* Add it to the inode */ di->i_xattr_loc = cpu_to_le64(first_blkno); spin_lock(&OCFS2_I(inode)->ip_lock); OCFS2_I(inode)->ip_dyn_features |= OCFS2_HAS_XATTR_FL; di->i_dyn_features = cpu_to_le16(OCFS2_I(inode)->ip_dyn_features); spin_unlock(&OCFS2_I(inode)->ip_lock); ocfs2_journal_dirty(ctxt->handle, inode_bh); *ret_bh = new_bh; new_bh = NULL; end: brelse(new_bh); return ret; } /* * ocfs2_xattr_block_set() * * Set, replace or remove an extended attribute into external block. * */ static int ocfs2_xattr_block_set(struct inode *inode, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xs, struct ocfs2_xattr_set_ctxt *ctxt) { struct buffer_head *new_bh = NULL; struct ocfs2_xattr_block *xblk = NULL; int ret; struct ocfs2_xa_loc loc; if (!xs->xattr_bh) { ret = ocfs2_create_xattr_block(inode, xs->inode_bh, ctxt, 0, &new_bh); if (ret) { mlog_errno(ret); goto end; } xs->xattr_bh = new_bh; xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data; xs->header = &xblk->xb_attrs.xb_header; xs->base = (void *)xs->header; xs->end = (void *)xblk + inode->i_sb->s_blocksize; xs->here = xs->header->xh_entries; } else xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data; if (!(le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)) { ocfs2_init_xattr_block_xa_loc(&loc, inode, xs->xattr_bh, xs->not_found ? NULL : xs->here); ret = ocfs2_xa_set(&loc, xi, ctxt); if (!ret) xs->here = loc.xl_entry; else if ((ret != -ENOSPC) || ctxt->set_abort) goto end; else { ret = ocfs2_xattr_create_index_block(inode, xs, ctxt); if (ret) goto end; } } if (le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED) ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs, ctxt); end: return ret; } /* Check whether the new xattr can be inserted into the inode. */ static int ocfs2_xattr_can_be_in_inode(struct inode *inode, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xs) { struct ocfs2_xattr_entry *last; int free, i; size_t min_offs = xs->end - xs->base; if (!xs->header) return 0; last = xs->header->xh_entries; for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) { size_t offs = le16_to_cpu(last->xe_name_offset); if (offs < min_offs) min_offs = offs; last += 1; } free = min_offs - ((void *)last - xs->base) - OCFS2_XATTR_HEADER_GAP; if (free < 0) return 0; BUG_ON(!xs->not_found); if (free >= (sizeof(struct ocfs2_xattr_entry) + namevalue_size_xi(xi))) return 1; return 0; } static int ocfs2_calc_xattr_set_need(struct inode *inode, struct ocfs2_dinode *di, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xis, struct ocfs2_xattr_search *xbs, int *clusters_need, int *meta_need, int *credits_need) { int ret = 0, old_in_xb = 0; int clusters_add = 0, meta_add = 0, credits = 0; struct buffer_head *bh = NULL; struct ocfs2_xattr_block *xb = NULL; struct ocfs2_xattr_entry *xe = NULL; struct ocfs2_xattr_value_root *xv = NULL; char *base = NULL; int name_offset, name_len = 0; u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb, xi->xi_value_len); u64 value_size; /* * Calculate the clusters we need to write. * No matter whether we replace an old one or add a new one, * we need this for writing. 
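 *
 * For instance, with 4K blocks and 4K clusters, a 64K value that has
 * to live outside the entry needs 16 new clusters, so 16 block credits
 * are added here just for writing the value data itself.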
*/ if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) credits += new_clusters * ocfs2_clusters_to_blocks(inode->i_sb, 1); if (xis->not_found && xbs->not_found) { credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb); if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) { clusters_add += new_clusters; credits += ocfs2_calc_extend_credits(inode->i_sb, &def_xv.xv.xr_list); } goto meta_guess; } if (!xis->not_found) { xe = xis->here; name_offset = le16_to_cpu(xe->xe_name_offset); name_len = OCFS2_XATTR_SIZE(xe->xe_name_len); base = xis->base; credits += OCFS2_INODE_UPDATE_CREDITS; } else { int i, block_off = 0; xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data; xe = xbs->here; name_offset = le16_to_cpu(xe->xe_name_offset); name_len = OCFS2_XATTR_SIZE(xe->xe_name_len); i = xbs->here - xbs->header->xh_entries; old_in_xb = 1; if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) { ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb, bucket_xh(xbs->bucket), i, &block_off, &name_offset); base = bucket_block(xbs->bucket, block_off); credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb); } else { base = xbs->base; credits += OCFS2_XATTR_BLOCK_UPDATE_CREDITS; } } /* * delete a xattr doesn't need metadata and cluster allocation. * so just calculate the credits and return. * * The credits for removing the value tree will be extended * by ocfs2_remove_extent itself. */ if (!xi->xi_value) { if (!ocfs2_xattr_is_local(xe)) credits += ocfs2_remove_extent_credits(inode->i_sb); goto out; } /* do cluster allocation guess first. */ value_size = le64_to_cpu(xe->xe_value_size); if (old_in_xb) { /* * In xattr set, we always try to set the xe in inode first, * so if it can be inserted into inode successfully, the old * one will be removed from the xattr block, and this xattr * will be inserted into inode as a new xattr in inode. */ if (ocfs2_xattr_can_be_in_inode(inode, xi, xis)) { clusters_add += new_clusters; credits += ocfs2_remove_extent_credits(inode->i_sb) + OCFS2_INODE_UPDATE_CREDITS; if (!ocfs2_xattr_is_local(xe)) credits += ocfs2_calc_extend_credits( inode->i_sb, &def_xv.xv.xr_list); goto out; } } if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) { /* the new values will be stored outside. */ u32 old_clusters = 0; if (!ocfs2_xattr_is_local(xe)) { old_clusters = ocfs2_clusters_for_bytes(inode->i_sb, value_size); xv = (struct ocfs2_xattr_value_root *) (base + name_offset + name_len); value_size = OCFS2_XATTR_ROOT_SIZE; } else xv = &def_xv.xv; if (old_clusters >= new_clusters) { credits += ocfs2_remove_extent_credits(inode->i_sb); goto out; } else { meta_add += ocfs2_extend_meta_needed(&xv->xr_list); clusters_add += new_clusters - old_clusters; credits += ocfs2_calc_extend_credits(inode->i_sb, &xv->xr_list); if (value_size >= OCFS2_XATTR_ROOT_SIZE) goto out; } } else { /* * Now the new value will be stored inside. So if the new * value is smaller than the size of value root or the old * value, we don't need any allocation, otherwise we have * to guess metadata allocation. */ if ((ocfs2_xattr_is_local(xe) && (value_size >= xi->xi_value_len)) || (!ocfs2_xattr_is_local(xe) && OCFS2_XATTR_ROOT_SIZE >= xi->xi_value_len)) goto out; } meta_guess: /* calculate metadata allocation. 
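 * Roughly: if an xattr block already exists, one extra cluster is
 * reserved for a new or extended bucket (two when the cluster size
 * equals the bucket size), plus b-tree extend costs when the block is
 * already indexed; with no xattr block yet, we charge the block-create
 * credits and whatever metadata a new value tree may need.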
*/ if (di->i_xattr_loc) { if (!xbs->xattr_bh) { ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc), &bh); if (ret) { mlog_errno(ret); goto out; } xb = (struct ocfs2_xattr_block *)bh->b_data; } else xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data; /* * If there is already an xattr tree, good, we can calculate * like other b-trees. Otherwise we may have the chance of * create a tree, the credit calculation is borrowed from * ocfs2_calc_extend_credits with root_el = NULL. And the * new tree will be cluster based, so no meta is needed. */ if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) { struct ocfs2_extent_list *el = &xb->xb_attrs.xb_root.xt_list; meta_add += ocfs2_extend_meta_needed(el); credits += ocfs2_calc_extend_credits(inode->i_sb, el); } else credits += OCFS2_SUBALLOC_ALLOC + 1; /* * This cluster will be used either for new bucket or for * new xattr block. * If the cluster size is the same as the bucket size, one * more is needed since we may need to extend the bucket * also. */ clusters_add += 1; credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb); if (OCFS2_XATTR_BUCKET_SIZE == OCFS2_SB(inode->i_sb)->s_clustersize) { credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb); clusters_add += 1; } } else { credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS; if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) { struct ocfs2_extent_list *el = &def_xv.xv.xr_list; meta_add += ocfs2_extend_meta_needed(el); credits += ocfs2_calc_extend_credits(inode->i_sb, el); } else { meta_add += 1; } } out: if (clusters_need) *clusters_need = clusters_add; if (meta_need) *meta_need = meta_add; if (credits_need) *credits_need = credits; brelse(bh); return ret; } static int ocfs2_init_xattr_set_ctxt(struct inode *inode, struct ocfs2_dinode *di, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xis, struct ocfs2_xattr_search *xbs, struct ocfs2_xattr_set_ctxt *ctxt, int extra_meta, int *credits) { int clusters_add, meta_add, ret; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); memset(ctxt, 0, sizeof(struct ocfs2_xattr_set_ctxt)); ocfs2_init_dealloc_ctxt(&ctxt->dealloc); ret = ocfs2_calc_xattr_set_need(inode, di, xi, xis, xbs, &clusters_add, &meta_add, credits); if (ret) { mlog_errno(ret); return ret; } meta_add += extra_meta; trace_ocfs2_init_xattr_set_ctxt(xi->xi_name, meta_add, clusters_add, *credits); if (meta_add) { ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add, &ctxt->meta_ac); if (ret) { mlog_errno(ret); goto out; } } if (clusters_add) { ret = ocfs2_reserve_clusters(osb, clusters_add, &ctxt->data_ac); if (ret) mlog_errno(ret); } out: if (ret) { if (ctxt->meta_ac) { ocfs2_free_alloc_context(ctxt->meta_ac); ctxt->meta_ac = NULL; } /* * We cannot have an error and a non null ctxt->data_ac. */ } return ret; } static int __ocfs2_xattr_set_handle(struct inode *inode, struct ocfs2_dinode *di, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xis, struct ocfs2_xattr_search *xbs, struct ocfs2_xattr_set_ctxt *ctxt) { int ret = 0, credits, old_found; if (!xi->xi_value) { /* Remove existing extended attribute */ if (!xis->not_found) ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt); else if (!xbs->not_found) ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt); } else { /* We always try to set extended attribute into inode first*/ ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt); if (!ret && !xbs->not_found) { /* * If succeed and that extended attribute existing in * external block, then we will remove it. 
*/ xi->xi_value = NULL; xi->xi_value_len = 0; old_found = xis->not_found; xis->not_found = -ENODATA; ret = ocfs2_calc_xattr_set_need(inode, di, xi, xis, xbs, NULL, NULL, &credits); xis->not_found = old_found; if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_extend_trans(ctxt->handle, credits); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt); } else if ((ret == -ENOSPC) && !ctxt->set_abort) { if (di->i_xattr_loc && !xbs->xattr_bh) { ret = ocfs2_xattr_block_find(inode, xi->xi_name_index, xi->xi_name, xbs); if (ret) goto out; old_found = xis->not_found; xis->not_found = -ENODATA; ret = ocfs2_calc_xattr_set_need(inode, di, xi, xis, xbs, NULL, NULL, &credits); xis->not_found = old_found; if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_extend_trans(ctxt->handle, credits); if (ret) { mlog_errno(ret); goto out; } } /* * If no space in inode, we will set extended attribute * into external block. */ ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt); if (ret) goto out; if (!xis->not_found) { /* * If succeed and that extended attribute * existing in inode, we will remove it. */ xi->xi_value = NULL; xi->xi_value_len = 0; xbs->not_found = -ENODATA; ret = ocfs2_calc_xattr_set_need(inode, di, xi, xis, xbs, NULL, NULL, &credits); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_extend_trans(ctxt->handle, credits); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt); } } } if (!ret) { /* Update inode ctime. */ ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode), xis->inode_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } inode_set_ctime_current(inode); di->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode)); di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode)); ocfs2_journal_dirty(ctxt->handle, xis->inode_bh); } out: return ret; } /* * This function only called duing creating inode * for init security/acl xattrs of the new inode. * All transanction credits have been reserved in mknod. */ int ocfs2_xattr_set_handle(handle_t *handle, struct inode *inode, struct buffer_head *di_bh, int name_index, const char *name, const void *value, size_t value_len, int flags, struct ocfs2_alloc_context *meta_ac, struct ocfs2_alloc_context *data_ac) { struct ocfs2_dinode *di; int ret; struct ocfs2_xattr_info xi = { .xi_name_index = name_index, .xi_name = name, .xi_name_len = strlen(name), .xi_value = value, .xi_value_len = value_len, }; struct ocfs2_xattr_search xis = { .not_found = -ENODATA, }; struct ocfs2_xattr_search xbs = { .not_found = -ENODATA, }; struct ocfs2_xattr_set_ctxt ctxt = { .handle = handle, .meta_ac = meta_ac, .data_ac = data_ac, }; if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb))) return -EOPNOTSUPP; /* * In extreme situation, may need xattr bucket when * block size is too small. And we have already reserved * the credits for bucket in mknod. 
*/ if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE) { xbs.bucket = ocfs2_xattr_bucket_new(inode); if (!xbs.bucket) { mlog_errno(-ENOMEM); return -ENOMEM; } } xis.inode_bh = xbs.inode_bh = di_bh; di = (struct ocfs2_dinode *)di_bh->b_data; down_write(&OCFS2_I(inode)->ip_xattr_sem); ret = ocfs2_xattr_ibody_find(inode, name_index, name, &xis); if (ret) goto cleanup; if (xis.not_found) { ret = ocfs2_xattr_block_find(inode, name_index, name, &xbs); if (ret) goto cleanup; } ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt); cleanup: up_write(&OCFS2_I(inode)->ip_xattr_sem); brelse(xbs.xattr_bh); ocfs2_xattr_bucket_free(xbs.bucket); return ret; } /* * ocfs2_xattr_set() * * Set, replace or remove an extended attribute for this inode. * value is NULL to remove an existing extended attribute, else either * create or replace an extended attribute. */ int ocfs2_xattr_set(struct inode *inode, int name_index, const char *name, const void *value, size_t value_len, int flags) { struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di; int ret, credits, had_lock, ref_meta = 0, ref_credits = 0; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct inode *tl_inode = osb->osb_tl_inode; struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, }; struct ocfs2_refcount_tree *ref_tree = NULL; struct ocfs2_lock_holder oh; struct ocfs2_xattr_info xi = { .xi_name_index = name_index, .xi_name = name, .xi_name_len = strlen(name), .xi_value = value, .xi_value_len = value_len, }; struct ocfs2_xattr_search xis = { .not_found = -ENODATA, }; struct ocfs2_xattr_search xbs = { .not_found = -ENODATA, }; if (!ocfs2_supports_xattr(osb)) return -EOPNOTSUPP; /* * Only xbs will be used on indexed trees. xis doesn't need a * bucket. */ xbs.bucket = ocfs2_xattr_bucket_new(inode); if (!xbs.bucket) { mlog_errno(-ENOMEM); return -ENOMEM; } had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 1, &oh); if (had_lock < 0) { ret = had_lock; mlog_errno(ret); goto cleanup_nolock; } xis.inode_bh = xbs.inode_bh = di_bh; di = (struct ocfs2_dinode *)di_bh->b_data; down_write(&OCFS2_I(inode)->ip_xattr_sem); /* * Scan inode and external block to find the same name * extended attribute and collect search information. */ ret = ocfs2_xattr_ibody_find(inode, name_index, name, &xis); if (ret) goto cleanup; if (xis.not_found) { ret = ocfs2_xattr_block_find(inode, name_index, name, &xbs); if (ret) goto cleanup; } if (xis.not_found && xbs.not_found) { ret = -ENODATA; if (flags & XATTR_REPLACE) goto cleanup; ret = 0; if (!value) goto cleanup; } else { ret = -EEXIST; if (flags & XATTR_CREATE) goto cleanup; } /* Check whether the value is refcounted and do some preparation. */ if (ocfs2_is_refcount_inode(inode) && (!xis.not_found || !xbs.not_found)) { ret = ocfs2_prepare_refcount_xattr(inode, di, &xi, &xis, &xbs, &ref_tree, &ref_meta, &ref_credits); if (ret) { mlog_errno(ret); goto cleanup; } } inode_lock(tl_inode); if (ocfs2_truncate_log_needs_flush(osb)) { ret = __ocfs2_flush_truncate_log(osb); if (ret < 0) { inode_unlock(tl_inode); mlog_errno(ret); goto cleanup; } } inode_unlock(tl_inode); ret = ocfs2_init_xattr_set_ctxt(inode, di, &xi, &xis, &xbs, &ctxt, ref_meta, &credits); if (ret) { mlog_errno(ret); goto cleanup; } /* we need to update inode's ctime field, so add credit for it. 
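 * The transaction started below must also cover any refcount tree
 * updates prepared above, hence credits + ref_credits.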
*/ credits += OCFS2_INODE_UPDATE_CREDITS; ctxt.handle = ocfs2_start_trans(osb, credits + ref_credits); if (IS_ERR(ctxt.handle)) { ret = PTR_ERR(ctxt.handle); mlog_errno(ret); goto out_free_ac; } ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt); ocfs2_update_inode_fsync_trans(ctxt.handle, inode, 0); ocfs2_commit_trans(osb, ctxt.handle); out_free_ac: if (ctxt.data_ac) ocfs2_free_alloc_context(ctxt.data_ac); if (ctxt.meta_ac) ocfs2_free_alloc_context(ctxt.meta_ac); if (ocfs2_dealloc_has_cluster(&ctxt.dealloc)) ocfs2_schedule_truncate_log_flush(osb, 1); ocfs2_run_deallocs(osb, &ctxt.dealloc); cleanup: if (ref_tree) ocfs2_unlock_refcount_tree(osb, ref_tree, 1); up_write(&OCFS2_I(inode)->ip_xattr_sem); if (!value && !ret) { ret = ocfs2_try_remove_refcount_tree(inode, di_bh); if (ret) mlog_errno(ret); } ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock); cleanup_nolock: brelse(di_bh); brelse(xbs.xattr_bh); ocfs2_xattr_bucket_free(xbs.bucket); return ret; } /* * Find the xattr extent rec which may contains name_hash. * e_cpos will be the first name hash of the xattr rec. * el must be the ocfs2_xattr_header.xb_attrs.xb_root.xt_list. */ static int ocfs2_xattr_get_rec(struct inode *inode, u32 name_hash, u64 *p_blkno, u32 *e_cpos, u32 *num_clusters, struct ocfs2_extent_list *el) { int ret = 0, i; struct buffer_head *eb_bh = NULL; struct ocfs2_extent_block *eb; struct ocfs2_extent_rec *rec = NULL; u64 e_blkno = 0; if (el->l_tree_depth) { ret = ocfs2_find_leaf(INODE_CACHE(inode), el, name_hash, &eb_bh); if (ret) { mlog_errno(ret); goto out; } eb = (struct ocfs2_extent_block *) eb_bh->b_data; el = &eb->h_list; if (el->l_tree_depth) { ret = ocfs2_error(inode->i_sb, "Inode %lu has non zero tree depth in xattr tree block %llu\n", inode->i_ino, (unsigned long long)eb_bh->b_blocknr); goto out; } } for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) { rec = &el->l_recs[i]; if (le32_to_cpu(rec->e_cpos) <= name_hash) { e_blkno = le64_to_cpu(rec->e_blkno); break; } } if (!e_blkno) { ret = ocfs2_error(inode->i_sb, "Inode %lu has bad extent record (%u, %u, 0) in xattr\n", inode->i_ino, le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec)); goto out; } *p_blkno = le64_to_cpu(rec->e_blkno); *num_clusters = le16_to_cpu(rec->e_leaf_clusters); if (e_cpos) *e_cpos = le32_to_cpu(rec->e_cpos); out: brelse(eb_bh); return ret; } typedef int (xattr_bucket_func)(struct inode *inode, struct ocfs2_xattr_bucket *bucket, void *para); static int ocfs2_find_xe_in_bucket(struct inode *inode, struct ocfs2_xattr_bucket *bucket, int name_index, const char *name, u32 name_hash, u16 *xe_index, int *found) { int i, ret = 0, cmp = 1, block_off, new_offset; struct ocfs2_xattr_header *xh = bucket_xh(bucket); size_t name_len = strlen(name); struct ocfs2_xattr_entry *xe = NULL; char *xe_name; /* * We don't use binary search in the bucket because there * may be multiple entries with the same name hash. 
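 * The entries are still sorted by xe_name_hash, though, so the linear
 * scan below skips smaller hashes, stops at the first larger one, and
 * only compares type, name length and name on an exact hash match.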
*/ for (i = 0; i < le16_to_cpu(xh->xh_count); i++) { xe = &xh->xh_entries[i]; if (name_hash > le32_to_cpu(xe->xe_name_hash)) continue; else if (name_hash < le32_to_cpu(xe->xe_name_hash)) break; cmp = name_index - ocfs2_xattr_get_type(xe); if (!cmp) cmp = name_len - xe->xe_name_len; if (cmp) continue; ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb, xh, i, &block_off, &new_offset); if (ret) { mlog_errno(ret); break; } xe_name = bucket_block(bucket, block_off) + new_offset; if (!memcmp(name, xe_name, name_len)) { *xe_index = i; *found = 1; ret = 0; break; } } return ret; } /* * Find the specified xattr entry in a series of buckets. * This series start from p_blkno and last for num_clusters. * The ocfs2_xattr_header.xh_num_buckets of the first bucket contains * the num of the valid buckets. * * Return the buffer_head this xattr should reside in. And if the xattr's * hash is in the gap of 2 buckets, return the lower bucket. */ static int ocfs2_xattr_bucket_find(struct inode *inode, int name_index, const char *name, u32 name_hash, u64 p_blkno, u32 first_hash, u32 num_clusters, struct ocfs2_xattr_search *xs) { int ret, found = 0; struct ocfs2_xattr_header *xh = NULL; struct ocfs2_xattr_entry *xe = NULL; u16 index = 0; u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); int low_bucket = 0, bucket, high_bucket; struct ocfs2_xattr_bucket *search; u64 blkno, lower_blkno = 0; search = ocfs2_xattr_bucket_new(inode); if (!search) { ret = -ENOMEM; mlog_errno(ret); goto out; } ret = ocfs2_read_xattr_bucket(search, p_blkno); if (ret) { mlog_errno(ret); goto out; } xh = bucket_xh(search); high_bucket = le16_to_cpu(xh->xh_num_buckets) - 1; while (low_bucket <= high_bucket) { ocfs2_xattr_bucket_relse(search); bucket = (low_bucket + high_bucket) / 2; blkno = p_blkno + bucket * blk_per_bucket; ret = ocfs2_read_xattr_bucket(search, blkno); if (ret) { mlog_errno(ret); goto out; } xh = bucket_xh(search); xe = &xh->xh_entries[0]; if (name_hash < le32_to_cpu(xe->xe_name_hash)) { high_bucket = bucket - 1; continue; } /* * Check whether the hash of the last entry in our * bucket is larger than the search one. for an empty * bucket, the last one is also the first one. */ if (xh->xh_count) xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1]; /* record lower_blkno which may be the insert place. */ lower_blkno = blkno; if (name_hash > le32_to_cpu(xe->xe_name_hash)) { low_bucket = bucket + 1; continue; } /* the searched xattr should reside in this bucket if exists. */ ret = ocfs2_find_xe_in_bucket(inode, search, name_index, name, name_hash, &index, &found); if (ret) { mlog_errno(ret); goto out; } break; } /* * Record the bucket we have found. * When the xattr's hash value is in the gap of 2 buckets, we will * always set it to the previous bucket. 
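 * lower_blkno was refreshed on every probe whose first hash did not
 * exceed the search hash, so it already names that previous bucket;
 * if the search never got past the first bucket, fall back to p_blkno.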
*/ if (!lower_blkno) lower_blkno = p_blkno; /* This should be in cache - we just read it during the search */ ret = ocfs2_read_xattr_bucket(xs->bucket, lower_blkno); if (ret) { mlog_errno(ret); goto out; } xs->header = bucket_xh(xs->bucket); xs->base = bucket_block(xs->bucket, 0); xs->end = xs->base + inode->i_sb->s_blocksize; if (found) { xs->here = &xs->header->xh_entries[index]; trace_ocfs2_xattr_bucket_find(OCFS2_I(inode)->ip_blkno, name, name_index, name_hash, (unsigned long long)bucket_blkno(xs->bucket), index); } else ret = -ENODATA; out: ocfs2_xattr_bucket_free(search); return ret; } static int ocfs2_xattr_index_block_find(struct inode *inode, struct buffer_head *root_bh, int name_index, const char *name, struct ocfs2_xattr_search *xs) { int ret; struct ocfs2_xattr_block *xb = (struct ocfs2_xattr_block *)root_bh->b_data; struct ocfs2_xattr_tree_root *xb_root = &xb->xb_attrs.xb_root; struct ocfs2_extent_list *el = &xb_root->xt_list; u64 p_blkno = 0; u32 first_hash, num_clusters = 0; u32 name_hash = ocfs2_xattr_name_hash(inode, name, strlen(name)); if (le16_to_cpu(el->l_next_free_rec) == 0) return -ENODATA; trace_ocfs2_xattr_index_block_find(OCFS2_I(inode)->ip_blkno, name, name_index, name_hash, (unsigned long long)root_bh->b_blocknr, -1); ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &first_hash, &num_clusters, el); if (ret) { mlog_errno(ret); goto out; } BUG_ON(p_blkno == 0 || num_clusters == 0 || first_hash > name_hash); trace_ocfs2_xattr_index_block_find_rec(OCFS2_I(inode)->ip_blkno, name, name_index, first_hash, (unsigned long long)p_blkno, num_clusters); ret = ocfs2_xattr_bucket_find(inode, name_index, name, name_hash, p_blkno, first_hash, num_clusters, xs); out: return ret; } static int ocfs2_iterate_xattr_buckets(struct inode *inode, u64 blkno, u32 clusters, xattr_bucket_func *func, void *para) { int i, ret = 0; u32 bpc = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)); u32 num_buckets = clusters * bpc; struct ocfs2_xattr_bucket *bucket; bucket = ocfs2_xattr_bucket_new(inode); if (!bucket) { mlog_errno(-ENOMEM); return -ENOMEM; } trace_ocfs2_iterate_xattr_buckets( (unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)blkno, clusters); for (i = 0; i < num_buckets; i++, blkno += bucket->bu_blocks) { ret = ocfs2_read_xattr_bucket(bucket, blkno); if (ret) { mlog_errno(ret); break; } /* * The real bucket num in this series of blocks is stored * in the 1st bucket. 
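 * Once the first bucket has been read, the conservative loop bound
 * (clusters * buckets-per-cluster) is replaced by that actual count.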
*/ if (i == 0) num_buckets = le16_to_cpu(bucket_xh(bucket)->xh_num_buckets); trace_ocfs2_iterate_xattr_bucket((unsigned long long)blkno, le32_to_cpu(bucket_xh(bucket)->xh_entries[0].xe_name_hash)); if (func) { ret = func(inode, bucket, para); if (ret && ret != -ERANGE) mlog_errno(ret); /* Fall through to bucket_relse() */ } ocfs2_xattr_bucket_relse(bucket); if (ret) break; } ocfs2_xattr_bucket_free(bucket); return ret; } struct ocfs2_xattr_tree_list { char *buffer; size_t buffer_size; size_t result; }; static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb, struct ocfs2_xattr_header *xh, int index, int *block_off, int *new_offset) { u16 name_offset; if (index < 0 || index >= le16_to_cpu(xh->xh_count)) return -EINVAL; name_offset = le16_to_cpu(xh->xh_entries[index].xe_name_offset); *block_off = name_offset >> sb->s_blocksize_bits; *new_offset = name_offset % sb->s_blocksize; return 0; } static int ocfs2_list_xattr_bucket(struct inode *inode, struct ocfs2_xattr_bucket *bucket, void *para) { int ret = 0, type; struct ocfs2_xattr_tree_list *xl = (struct ocfs2_xattr_tree_list *)para; int i, block_off, new_offset; const char *name; for (i = 0 ; i < le16_to_cpu(bucket_xh(bucket)->xh_count); i++) { struct ocfs2_xattr_entry *entry = &bucket_xh(bucket)->xh_entries[i]; type = ocfs2_xattr_get_type(entry); ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb, bucket_xh(bucket), i, &block_off, &new_offset); if (ret) break; name = (const char *)bucket_block(bucket, block_off) + new_offset; ret = ocfs2_xattr_list_entry(inode->i_sb, xl->buffer, xl->buffer_size, &xl->result, type, name, entry->xe_name_len); if (ret) break; } return ret; } static int ocfs2_iterate_xattr_index_block(struct inode *inode, struct buffer_head *blk_bh, xattr_tree_rec_func *rec_func, void *para) { struct ocfs2_xattr_block *xb = (struct ocfs2_xattr_block *)blk_bh->b_data; struct ocfs2_extent_list *el = &xb->xb_attrs.xb_root.xt_list; int ret = 0; u32 name_hash = UINT_MAX, e_cpos = 0, num_clusters = 0; u64 p_blkno = 0; if (!el->l_next_free_rec || !rec_func) return 0; while (name_hash > 0) { ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &e_cpos, &num_clusters, el); if (ret) { mlog_errno(ret); break; } ret = rec_func(inode, blk_bh, p_blkno, e_cpos, num_clusters, para); if (ret) { if (ret != -ERANGE) mlog_errno(ret); break; } if (e_cpos == 0) break; name_hash = e_cpos - 1; } return ret; } static int ocfs2_list_xattr_tree_rec(struct inode *inode, struct buffer_head *root_bh, u64 blkno, u32 cpos, u32 len, void *para) { return ocfs2_iterate_xattr_buckets(inode, blkno, len, ocfs2_list_xattr_bucket, para); } static int ocfs2_xattr_tree_list_index_block(struct inode *inode, struct buffer_head *blk_bh, char *buffer, size_t buffer_size) { int ret; struct ocfs2_xattr_tree_list xl = { .buffer = buffer, .buffer_size = buffer_size, .result = 0, }; ret = ocfs2_iterate_xattr_index_block(inode, blk_bh, ocfs2_list_xattr_tree_rec, &xl); if (ret) { mlog_errno(ret); goto out; } ret = xl.result; out: return ret; } static int cmp_xe(const void *a, const void *b) { const struct ocfs2_xattr_entry *l = a, *r = b; u32 l_hash = le32_to_cpu(l->xe_name_hash); u32 r_hash = le32_to_cpu(r->xe_name_hash); if (l_hash > r_hash) return 1; if (l_hash < r_hash) return -1; return 0; } /* * When the ocfs2_xattr_block is filled up, new bucket will be created * and all the xattr entries will be moved to the new bucket. * The header goes at the start of the bucket, and the names+values are * filled from the end. This is why *target starts as the last buffer. 
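 */

/*
 * A small self-contained sketch (illustrative name, not a kernel API)
 * of the offset translation ocfs2_xattr_bucket_get_name_value() does
 * above: a bucket spans several blocks, so an entry's xe_name_offset
 * is split into the block index within the bucket and the byte offset
 * inside that block.
 */
static inline void xattr_name_offset_split_sketch(unsigned int name_offset,
						  unsigned int blocksize_bits,
						  int *block_off,
						  int *off_in_block)
{
	*block_off = name_offset >> blocksize_bits;
	*off_in_block = name_offset % (1U << blocksize_bits);
}

/*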
* Note: we need to sort the entries since they are not saved in order * in the ocfs2_xattr_block. */ static void ocfs2_cp_xattr_block_to_bucket(struct inode *inode, struct buffer_head *xb_bh, struct ocfs2_xattr_bucket *bucket) { int i, blocksize = inode->i_sb->s_blocksize; int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb); u16 offset, size, off_change; struct ocfs2_xattr_entry *xe; struct ocfs2_xattr_block *xb = (struct ocfs2_xattr_block *)xb_bh->b_data; struct ocfs2_xattr_header *xb_xh = &xb->xb_attrs.xb_header; struct ocfs2_xattr_header *xh = bucket_xh(bucket); u16 count = le16_to_cpu(xb_xh->xh_count); char *src = xb_bh->b_data; char *target = bucket_block(bucket, blks - 1); trace_ocfs2_cp_xattr_block_to_bucket_begin( (unsigned long long)xb_bh->b_blocknr, (unsigned long long)bucket_blkno(bucket)); for (i = 0; i < blks; i++) memset(bucket_block(bucket, i), 0, blocksize); /* * Since the xe_name_offset is based on ocfs2_xattr_header, * there is a offset change corresponding to the change of * ocfs2_xattr_header's position. */ off_change = offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header); xe = &xb_xh->xh_entries[count - 1]; offset = le16_to_cpu(xe->xe_name_offset) + off_change; size = blocksize - offset; /* copy all the names and values. */ memcpy(target + offset, src + offset, size); /* Init new header now. */ xh->xh_count = xb_xh->xh_count; xh->xh_num_buckets = cpu_to_le16(1); xh->xh_name_value_len = cpu_to_le16(size); xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE - size); /* copy all the entries. */ target = bucket_block(bucket, 0); offset = offsetof(struct ocfs2_xattr_header, xh_entries); size = count * sizeof(struct ocfs2_xattr_entry); memcpy(target + offset, (char *)xb_xh + offset, size); /* Change the xe offset for all the xe because of the move. */ off_change = OCFS2_XATTR_BUCKET_SIZE - blocksize + offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header); for (i = 0; i < count; i++) le16_add_cpu(&xh->xh_entries[i].xe_name_offset, off_change); trace_ocfs2_cp_xattr_block_to_bucket_end(offset, size, off_change); sort(target + offset, count, sizeof(struct ocfs2_xattr_entry), cmp_xe, NULL); } /* * After we move xattr from block to index btree, we have to * update ocfs2_xattr_search to the new xe and base. * * When the entry is in xattr block, xattr_bh indicates the storage place. * While if the entry is in index b-tree, "bucket" indicates the * real place of the xattr. 
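 */

/*
 * Minimal sketch of the rebasing done by the function below, with
 * illustrative types and names: remember the entry's index in the old
 * header's array, then point at the same index in the relocated array.
 */
struct xattr_rebase_sketch_entry {
	unsigned int hash;
};

static inline struct xattr_rebase_sketch_entry *
xattr_rebase_sketch(struct xattr_rebase_sketch_entry *old_base,
		    struct xattr_rebase_sketch_entry *cur,
		    struct xattr_rebase_sketch_entry *new_base)
{
	return new_base + (cur - old_base);
}

/*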
*/ static void ocfs2_xattr_update_xattr_search(struct inode *inode, struct ocfs2_xattr_search *xs, struct buffer_head *old_bh) { char *buf = old_bh->b_data; struct ocfs2_xattr_block *old_xb = (struct ocfs2_xattr_block *)buf; struct ocfs2_xattr_header *old_xh = &old_xb->xb_attrs.xb_header; int i; xs->header = bucket_xh(xs->bucket); xs->base = bucket_block(xs->bucket, 0); xs->end = xs->base + inode->i_sb->s_blocksize; if (xs->not_found) return; i = xs->here - old_xh->xh_entries; xs->here = &xs->header->xh_entries[i]; } static int ocfs2_xattr_create_index_block(struct inode *inode, struct ocfs2_xattr_search *xs, struct ocfs2_xattr_set_ctxt *ctxt) { int ret; u32 bit_off, len; u64 blkno; handle_t *handle = ctxt->handle; struct ocfs2_inode_info *oi = OCFS2_I(inode); struct buffer_head *xb_bh = xs->xattr_bh; struct ocfs2_xattr_block *xb = (struct ocfs2_xattr_block *)xb_bh->b_data; struct ocfs2_xattr_tree_root *xr; u16 xb_flags = le16_to_cpu(xb->xb_flags); trace_ocfs2_xattr_create_index_block_begin( (unsigned long long)xb_bh->b_blocknr); BUG_ON(xb_flags & OCFS2_XATTR_INDEXED); BUG_ON(!xs->bucket); /* * XXX: * We can use this lock for now, and maybe move to a dedicated mutex * if performance becomes a problem later. */ down_write(&oi->ip_alloc_sem); ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), xb_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } ret = __ocfs2_claim_clusters(handle, ctxt->data_ac, 1, 1, &bit_off, &len); if (ret) { mlog_errno(ret); goto out; } /* * The bucket may spread in many blocks, and * we will only touch the 1st block and the last block * in the whole bucket(one for entry and one for data). */ blkno = ocfs2_clusters_to_blocks(inode->i_sb, bit_off); trace_ocfs2_xattr_create_index_block((unsigned long long)blkno); ret = ocfs2_init_xattr_bucket(xs->bucket, blkno, 1); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket, OCFS2_JOURNAL_ACCESS_CREATE); if (ret) { mlog_errno(ret); goto out; } ocfs2_cp_xattr_block_to_bucket(inode, xb_bh, xs->bucket); ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket); ocfs2_xattr_update_xattr_search(inode, xs, xb_bh); /* Change from ocfs2_xattr_header to ocfs2_xattr_tree_root */ memset(&xb->xb_attrs, 0, inode->i_sb->s_blocksize - offsetof(struct ocfs2_xattr_block, xb_attrs)); xr = &xb->xb_attrs.xb_root; xr->xt_clusters = cpu_to_le32(1); xr->xt_last_eb_blk = 0; xr->xt_list.l_tree_depth = 0; xr->xt_list.l_count = cpu_to_le16(ocfs2_xattr_recs_per_xb(inode->i_sb)); xr->xt_list.l_next_free_rec = cpu_to_le16(1); xr->xt_list.l_recs[0].e_cpos = 0; xr->xt_list.l_recs[0].e_blkno = cpu_to_le64(blkno); xr->xt_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1); xb->xb_flags = cpu_to_le16(xb_flags | OCFS2_XATTR_INDEXED); ocfs2_journal_dirty(handle, xb_bh); out: up_write(&oi->ip_alloc_sem); return ret; } static int cmp_xe_offset(const void *a, const void *b) { const struct ocfs2_xattr_entry *l = a, *r = b; u32 l_name_offset = le16_to_cpu(l->xe_name_offset); u32 r_name_offset = le16_to_cpu(r->xe_name_offset); if (l_name_offset < r_name_offset) return 1; if (l_name_offset > r_name_offset) return -1; return 0; } /* * defrag a xattr bucket if we find that the bucket has some * holes between name/value pairs. * We will move all the name/value pairs to the end of the bucket * so that we can spare some space for insertion. 
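 */

/*
 * Stand-alone sketch of the packing loop used by the function below
 * (illustrative names; the block-boundary adjustment the real code
 * performs is omitted): entries are visited from the highest name/value
 * offset downward and each name/value blob is slid toward the end of
 * the buffer, squeezing out the holes in between.
 */
struct xattr_pack_sketch_ent {
	unsigned int off;
	unsigned int len;
};

static inline void xattr_pack_to_end_sketch(char *buf, size_t bufsize,
					    struct xattr_pack_sketch_ent *ents,
					    int count)
{
	size_t end = bufsize;
	int i;

	/* ents[] is assumed to be sorted by .off, largest offset first */
	for (i = 0; i < count; i++) {
		if (end > ents[i].off + ents[i].len) {
			memmove(buf + end - ents[i].len,
				buf + ents[i].off, ents[i].len);
			ents[i].off = end - ents[i].len;
		}
		end -= ents[i].len;
	}
}

/*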
*/ static int ocfs2_defrag_xattr_bucket(struct inode *inode, handle_t *handle, struct ocfs2_xattr_bucket *bucket) { int ret, i; size_t end, offset, len; struct ocfs2_xattr_header *xh; char *entries, *buf, *bucket_buf = NULL; u64 blkno = bucket_blkno(bucket); u16 xh_free_start; size_t blocksize = inode->i_sb->s_blocksize; struct ocfs2_xattr_entry *xe; /* * In order to make the operation more efficient and generic, * we copy all the blocks into a contiguous memory and do the * defragment there, so if anything is error, we will not touch * the real block. */ bucket_buf = kmalloc(OCFS2_XATTR_BUCKET_SIZE, GFP_NOFS); if (!bucket_buf) { ret = -EIO; goto out; } buf = bucket_buf; for (i = 0; i < bucket->bu_blocks; i++, buf += blocksize) memcpy(buf, bucket_block(bucket, i), blocksize); ret = ocfs2_xattr_bucket_journal_access(handle, bucket, OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); goto out; } xh = (struct ocfs2_xattr_header *)bucket_buf; entries = (char *)xh->xh_entries; xh_free_start = le16_to_cpu(xh->xh_free_start); trace_ocfs2_defrag_xattr_bucket( (unsigned long long)blkno, le16_to_cpu(xh->xh_count), xh_free_start, le16_to_cpu(xh->xh_name_value_len)); /* * sort all the entries by their offset. * the largest will be the first, so that we can * move them to the end one by one. */ sort(entries, le16_to_cpu(xh->xh_count), sizeof(struct ocfs2_xattr_entry), cmp_xe_offset, NULL); /* Move all name/values to the end of the bucket. */ xe = xh->xh_entries; end = OCFS2_XATTR_BUCKET_SIZE; for (i = 0; i < le16_to_cpu(xh->xh_count); i++, xe++) { offset = le16_to_cpu(xe->xe_name_offset); len = namevalue_size_xe(xe); /* * We must make sure that the name/value pair * exist in the same block. So adjust end to * the previous block end if needed. */ if (((end - len) / blocksize != (end - 1) / blocksize)) end = end - end % blocksize; if (end > offset + len) { memmove(bucket_buf + end - len, bucket_buf + offset, len); xe->xe_name_offset = cpu_to_le16(end - len); } mlog_bug_on_msg(end < offset + len, "Defrag check failed for " "bucket %llu\n", (unsigned long long)blkno); end -= len; } mlog_bug_on_msg(xh_free_start > end, "Defrag check failed for " "bucket %llu\n", (unsigned long long)blkno); if (xh_free_start == end) goto out; memset(bucket_buf + xh_free_start, 0, end - xh_free_start); xh->xh_free_start = cpu_to_le16(end); /* sort the entries by their name_hash. */ sort(entries, le16_to_cpu(xh->xh_count), sizeof(struct ocfs2_xattr_entry), cmp_xe, NULL); buf = bucket_buf; for (i = 0; i < bucket->bu_blocks; i++, buf += blocksize) memcpy(bucket_block(bucket, i), buf, blocksize); ocfs2_xattr_bucket_journal_dirty(handle, bucket); out: kfree(bucket_buf); return ret; } /* * prev_blkno points to the start of an existing extent. new_blkno * points to a newly allocated extent. Because we know each of our * clusters contains more than bucket, we can easily split one cluster * at a bucket boundary. So we take the last cluster of the existing * extent and split it down the middle. We move the last half of the * buckets in the last cluster of the existing extent over to the new * extent. * * first_bh is the buffer at prev_blkno so we can update the existing * extent's bucket count. header_bh is the bucket were we were hoping * to insert our xattr. If the bucket move places the target in the new * extent, we'll update first_bh and header_bh after modifying the old * extent. * * first_hash will be set as the 1st xe's name_hash in the new extent. 
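 */

/*
 * Sketch of the relocation arithmetic the function below relies on
 * (illustrative name; u64 is the kernel type already used in this
 * file): a bucket that sat inside the moved range keeps its distance
 * from the start of that range, so its new location is the same
 * distance from the start of the new extent.
 */
static inline u64 xattr_moved_blkno_sketch(u64 old_blkno, u64 move_start,
					   u64 new_start)
{
	if (old_blkno < move_start)
		return old_blkno;	/* bucket was not part of the move */
	return new_start + (old_blkno - move_start);
}

/*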
*/ static int ocfs2_mv_xattr_bucket_cross_cluster(struct inode *inode, handle_t *handle, struct ocfs2_xattr_bucket *first, struct ocfs2_xattr_bucket *target, u64 new_blkno, u32 num_clusters, u32 *first_hash) { int ret; struct super_block *sb = inode->i_sb; int blks_per_bucket = ocfs2_blocks_per_xattr_bucket(sb); int num_buckets = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(sb)); int to_move = num_buckets / 2; u64 src_blkno; u64 last_cluster_blkno = bucket_blkno(first) + ((num_clusters - 1) * ocfs2_clusters_to_blocks(sb, 1)); BUG_ON(le16_to_cpu(bucket_xh(first)->xh_num_buckets) < num_buckets); BUG_ON(OCFS2_XATTR_BUCKET_SIZE == OCFS2_SB(sb)->s_clustersize); trace_ocfs2_mv_xattr_bucket_cross_cluster( (unsigned long long)last_cluster_blkno, (unsigned long long)new_blkno); ret = ocfs2_mv_xattr_buckets(inode, handle, bucket_blkno(first), last_cluster_blkno, new_blkno, to_move, first_hash); if (ret) { mlog_errno(ret); goto out; } /* This is the first bucket that got moved */ src_blkno = last_cluster_blkno + (to_move * blks_per_bucket); /* * If the target bucket was part of the moved buckets, we need to * update first and target. */ if (bucket_blkno(target) >= src_blkno) { /* Find the block for the new target bucket */ src_blkno = new_blkno + (bucket_blkno(target) - src_blkno); ocfs2_xattr_bucket_relse(first); ocfs2_xattr_bucket_relse(target); /* * These shouldn't fail - the buffers are in the * journal from ocfs2_cp_xattr_bucket(). */ ret = ocfs2_read_xattr_bucket(first, new_blkno); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_read_xattr_bucket(target, src_blkno); if (ret) mlog_errno(ret); } out: return ret; } /* * Find the suitable pos when we divide a bucket into 2. * We have to make sure the xattrs with the same hash value exist * in the same bucket. * * If this ocfs2_xattr_header covers more than one hash value, find a * place where the hash value changes. Try to find the most even split. * The most common case is that all entries have different hash values, * and the first check we make will find a place to split. */ static int ocfs2_xattr_find_divide_pos(struct ocfs2_xattr_header *xh) { struct ocfs2_xattr_entry *entries = xh->xh_entries; int count = le16_to_cpu(xh->xh_count); int delta, middle = count / 2; /* * We start at the middle. Each step gets farther away in both * directions. We therefore hit the change in hash value * nearest to the middle. Note that this loop does not execute for * count < 2. */ for (delta = 0; delta < middle; delta++) { /* Let's check delta earlier than middle */ if (cmp_xe(&entries[middle - delta - 1], &entries[middle - delta])) return middle - delta; /* For even counts, don't walk off the end */ if ((middle + delta + 1) == count) continue; /* Now try delta past middle */ if (cmp_xe(&entries[middle + delta], &entries[middle + delta + 1])) return middle + delta + 1; } /* Every entry had the same hash */ return count; } /* * Move some xattrs in old bucket(blk) to new bucket(new_blk). * first_hash will record the 1st hash of the new bucket. * * Normally half of the xattrs will be moved. But we have to make * sure that the xattrs with the same hash value are stored in the * same bucket. If all the xattrs in this bucket have the same hash * value, the new bucket will be initialized as an empty one and the * first_hash will be initialized as (hash_value+1). 
*/ static int ocfs2_divide_xattr_bucket(struct inode *inode, handle_t *handle, u64 blk, u64 new_blk, u32 *first_hash, int new_bucket_head) { int ret, i; int count, start, len, name_value_len = 0, name_offset = 0; struct ocfs2_xattr_bucket *s_bucket = NULL, *t_bucket = NULL; struct ocfs2_xattr_header *xh; struct ocfs2_xattr_entry *xe; int blocksize = inode->i_sb->s_blocksize; trace_ocfs2_divide_xattr_bucket_begin((unsigned long long)blk, (unsigned long long)new_blk); s_bucket = ocfs2_xattr_bucket_new(inode); t_bucket = ocfs2_xattr_bucket_new(inode); if (!s_bucket || !t_bucket) { ret = -ENOMEM; mlog_errno(ret); goto out; } ret = ocfs2_read_xattr_bucket(s_bucket, blk); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_xattr_bucket_journal_access(handle, s_bucket, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } /* * Even if !new_bucket_head, we're overwriting t_bucket. Thus, * there's no need to read it. */ ret = ocfs2_init_xattr_bucket(t_bucket, new_blk, new_bucket_head); if (ret) { mlog_errno(ret); goto out; } /* * Hey, if we're overwriting t_bucket, what difference does * ACCESS_CREATE vs ACCESS_WRITE make? See the comment in the * same part of ocfs2_cp_xattr_bucket(). */ ret = ocfs2_xattr_bucket_journal_access(handle, t_bucket, new_bucket_head ? OCFS2_JOURNAL_ACCESS_CREATE : OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } xh = bucket_xh(s_bucket); count = le16_to_cpu(xh->xh_count); start = ocfs2_xattr_find_divide_pos(xh); if (start == count) { xe = &xh->xh_entries[start-1]; /* * initialized a new empty bucket here. * The hash value is set as one larger than * that of the last entry in the previous bucket. */ for (i = 0; i < t_bucket->bu_blocks; i++) memset(bucket_block(t_bucket, i), 0, blocksize); xh = bucket_xh(t_bucket); xh->xh_free_start = cpu_to_le16(blocksize); xh->xh_entries[0].xe_name_hash = xe->xe_name_hash; le32_add_cpu(&xh->xh_entries[0].xe_name_hash, 1); goto set_num_buckets; } /* copy the whole bucket to the new first. */ ocfs2_xattr_bucket_copy_data(t_bucket, s_bucket); /* update the new bucket. */ xh = bucket_xh(t_bucket); /* * Calculate the total name/value len and xh_free_start for * the old bucket first. */ name_offset = OCFS2_XATTR_BUCKET_SIZE; name_value_len = 0; for (i = 0; i < start; i++) { xe = &xh->xh_entries[i]; name_value_len += namevalue_size_xe(xe); if (le16_to_cpu(xe->xe_name_offset) < name_offset) name_offset = le16_to_cpu(xe->xe_name_offset); } /* * Now begin the modification to the new bucket. * * In the new bucket, We just move the xattr entry to the beginning * and don't touch the name/value. So there will be some holes in the * bucket, and they will be removed when ocfs2_defrag_xattr_bucket is * called. */ xe = &xh->xh_entries[start]; len = sizeof(struct ocfs2_xattr_entry) * (count - start); trace_ocfs2_divide_xattr_bucket_move(len, (int)((char *)xe - (char *)xh), (int)((char *)xh->xh_entries - (char *)xh)); memmove((char *)xh->xh_entries, (char *)xe, len); xe = &xh->xh_entries[count - start]; len = sizeof(struct ocfs2_xattr_entry) * start; memset((char *)xe, 0, len); le16_add_cpu(&xh->xh_count, -start); le16_add_cpu(&xh->xh_name_value_len, -name_value_len); /* Calculate xh_free_start for the new bucket. */ xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE); for (i = 0; i < le16_to_cpu(xh->xh_count); i++) { xe = &xh->xh_entries[i]; if (le16_to_cpu(xe->xe_name_offset) < le16_to_cpu(xh->xh_free_start)) xh->xh_free_start = xe->xe_name_offset; } set_num_buckets: /* set xh->xh_num_buckets for the new xh. 
*/ if (new_bucket_head) xh->xh_num_buckets = cpu_to_le16(1); else xh->xh_num_buckets = 0; ocfs2_xattr_bucket_journal_dirty(handle, t_bucket); /* store the first_hash of the new bucket. */ if (first_hash) *first_hash = le32_to_cpu(xh->xh_entries[0].xe_name_hash); /* * Now only update the 1st block of the old bucket. If we * just added a new empty bucket, there is no need to modify * it. */ if (start == count) goto out; xh = bucket_xh(s_bucket); memset(&xh->xh_entries[start], 0, sizeof(struct ocfs2_xattr_entry) * (count - start)); xh->xh_count = cpu_to_le16(start); xh->xh_free_start = cpu_to_le16(name_offset); xh->xh_name_value_len = cpu_to_le16(name_value_len); ocfs2_xattr_bucket_journal_dirty(handle, s_bucket); out: ocfs2_xattr_bucket_free(s_bucket); ocfs2_xattr_bucket_free(t_bucket); return ret; } /* * Copy xattr from one bucket to another bucket. * * The caller must make sure that the journal transaction * has enough space for journaling. */ static int ocfs2_cp_xattr_bucket(struct inode *inode, handle_t *handle, u64 s_blkno, u64 t_blkno, int t_is_new) { int ret; struct ocfs2_xattr_bucket *s_bucket = NULL, *t_bucket = NULL; BUG_ON(s_blkno == t_blkno); trace_ocfs2_cp_xattr_bucket((unsigned long long)s_blkno, (unsigned long long)t_blkno, t_is_new); s_bucket = ocfs2_xattr_bucket_new(inode); t_bucket = ocfs2_xattr_bucket_new(inode); if (!s_bucket || !t_bucket) { ret = -ENOMEM; mlog_errno(ret); goto out; } ret = ocfs2_read_xattr_bucket(s_bucket, s_blkno); if (ret) goto out; /* * Even if !t_is_new, we're overwriting t_bucket. Thus, * there's no need to read it. */ ret = ocfs2_init_xattr_bucket(t_bucket, t_blkno, t_is_new); if (ret) goto out; /* * Hey, if we're overwriting t_bucket, what difference does * ACCESS_CREATE vs ACCESS_WRITE make? Well, if we allocated a new * cluster to fill, we came here from * ocfs2_mv_xattr_buckets(), and it is really new - * ACCESS_CREATE is required. But we also might have moved data * out of t_bucket before extending back into it. * ocfs2_add_new_xattr_bucket() can do this - its call to * ocfs2_add_new_xattr_cluster() may have created a new extent * and copied out the end of the old extent. Then it re-extends * the old extent back to create space for new xattrs. That's * how we get here, and the bucket isn't really new. */ ret = ocfs2_xattr_bucket_journal_access(handle, t_bucket, t_is_new ? OCFS2_JOURNAL_ACCESS_CREATE : OCFS2_JOURNAL_ACCESS_WRITE); if (ret) goto out; ocfs2_xattr_bucket_copy_data(t_bucket, s_bucket); ocfs2_xattr_bucket_journal_dirty(handle, t_bucket); out: ocfs2_xattr_bucket_free(t_bucket); ocfs2_xattr_bucket_free(s_bucket); return ret; } /* * src_blk points to the start of an existing extent. last_blk points to * last cluster in that extent. to_blk points to a newly allocated * extent. We copy the buckets from the cluster at last_blk to the new * extent. If start_bucket is non-zero, we skip that many buckets before * we start copying. The new extent's xh_num_buckets gets set to the * number of buckets we copied. The old extent's xh_num_buckets shrinks * by the same amount. 
*/ static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle, u64 src_blk, u64 last_blk, u64 to_blk, unsigned int start_bucket, u32 *first_hash) { int i, ret, credits; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); int blks_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); int num_buckets = ocfs2_xattr_buckets_per_cluster(osb); struct ocfs2_xattr_bucket *old_first, *new_first; trace_ocfs2_mv_xattr_buckets((unsigned long long)last_blk, (unsigned long long)to_blk); BUG_ON(start_bucket >= num_buckets); if (start_bucket) { num_buckets -= start_bucket; last_blk += (start_bucket * blks_per_bucket); } /* The first bucket of the original extent */ old_first = ocfs2_xattr_bucket_new(inode); /* The first bucket of the new extent */ new_first = ocfs2_xattr_bucket_new(inode); if (!old_first || !new_first) { ret = -ENOMEM; mlog_errno(ret); goto out; } ret = ocfs2_read_xattr_bucket(old_first, src_blk); if (ret) { mlog_errno(ret); goto out; } /* * We need to update the first bucket of the old extent and all * the buckets going to the new extent. */ credits = ((num_buckets + 1) * blks_per_bucket); ret = ocfs2_extend_trans(handle, credits); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_xattr_bucket_journal_access(handle, old_first, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } for (i = 0; i < num_buckets; i++) { ret = ocfs2_cp_xattr_bucket(inode, handle, last_blk + (i * blks_per_bucket), to_blk + (i * blks_per_bucket), 1); if (ret) { mlog_errno(ret); goto out; } } /* * Get the new bucket ready before we dirty anything * (This actually shouldn't fail, because we already dirtied * it once in ocfs2_cp_xattr_bucket()). */ ret = ocfs2_read_xattr_bucket(new_first, to_blk); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_xattr_bucket_journal_access(handle, new_first, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } /* Now update the headers */ le16_add_cpu(&bucket_xh(old_first)->xh_num_buckets, -num_buckets); ocfs2_xattr_bucket_journal_dirty(handle, old_first); bucket_xh(new_first)->xh_num_buckets = cpu_to_le16(num_buckets); ocfs2_xattr_bucket_journal_dirty(handle, new_first); if (first_hash) *first_hash = le32_to_cpu(bucket_xh(new_first)->xh_entries[0].xe_name_hash); out: ocfs2_xattr_bucket_free(new_first); ocfs2_xattr_bucket_free(old_first); return ret; } /* * Move some xattrs in this cluster to the new cluster. * This function should only be called when bucket size == cluster size. * Otherwise ocfs2_mv_xattr_bucket_cross_cluster should be used instead. */ static int ocfs2_divide_xattr_cluster(struct inode *inode, handle_t *handle, u64 prev_blk, u64 new_blk, u32 *first_hash) { u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); int ret, credits = 2 * blk_per_bucket; BUG_ON(OCFS2_XATTR_BUCKET_SIZE < OCFS2_SB(inode->i_sb)->s_clustersize); ret = ocfs2_extend_trans(handle, credits); if (ret) { mlog_errno(ret); return ret; } /* Move half of the xattr in start_blk to the next bucket. */ return ocfs2_divide_xattr_bucket(inode, handle, prev_blk, new_blk, first_hash, 1); } /* * Move some xattrs from the old cluster to the new one since they are not * contiguous in ocfs2 xattr tree. * * new_blk starts a new separate cluster, and we will move some xattrs from * prev_blk to it. v_start will be set as the first name hash value in this * new cluster so that it can be used as e_cpos during tree insertion and * don't collide with our original b-tree operations. 
first_bh and header_bh * will also be updated since they will be used in ocfs2_extend_xattr_bucket * to extend the insert bucket. * * The problem is how much xattr should we move to the new one and when should * we update first_bh and header_bh? * 1. If cluster size > bucket size, that means the previous cluster has more * than 1 bucket, so just move half nums of bucket into the new cluster and * update the first_bh and header_bh if the insert bucket has been moved * to the new cluster. * 2. If cluster_size == bucket_size: * a) If the previous extent rec has more than one cluster and the insert * place isn't in the last cluster, copy the entire last cluster to the * new one. This time, we don't need to update the first_bh and header_bh * since they will not be moved into the new cluster. * b) Otherwise, move the bottom half of the xattrs in the last cluster into * the new one. And we set the extend flag to zero if the insert place is * moved into the new allocated cluster since no extend is needed. */ static int ocfs2_adjust_xattr_cross_cluster(struct inode *inode, handle_t *handle, struct ocfs2_xattr_bucket *first, struct ocfs2_xattr_bucket *target, u64 new_blk, u32 prev_clusters, u32 *v_start, int *extend) { int ret; trace_ocfs2_adjust_xattr_cross_cluster( (unsigned long long)bucket_blkno(first), (unsigned long long)new_blk, prev_clusters); if (ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)) > 1) { ret = ocfs2_mv_xattr_bucket_cross_cluster(inode, handle, first, target, new_blk, prev_clusters, v_start); if (ret) mlog_errno(ret); } else { /* The start of the last cluster in the first extent */ u64 last_blk = bucket_blkno(first) + ((prev_clusters - 1) * ocfs2_clusters_to_blocks(inode->i_sb, 1)); if (prev_clusters > 1 && bucket_blkno(target) != last_blk) { ret = ocfs2_mv_xattr_buckets(inode, handle, bucket_blkno(first), last_blk, new_blk, 0, v_start); if (ret) mlog_errno(ret); } else { ret = ocfs2_divide_xattr_cluster(inode, handle, last_blk, new_blk, v_start); if (ret) mlog_errno(ret); if ((bucket_blkno(target) == last_blk) && extend) *extend = 0; } } return ret; } /* * Add a new cluster for xattr storage. * * If the new cluster is contiguous with the previous one, it will be * appended to the same extent record, and num_clusters will be updated. * If not, we will insert a new extent for it and move some xattrs in * the last cluster into the new allocated one. * We also need to limit the maximum size of a btree leaf, otherwise we'll * lose the benefits of hashing because we'll have to search large leaves. * So now the maximum size is OCFS2_MAX_XATTR_TREE_LEAF_SIZE(or clustersize, * if it's bigger). * * first_bh is the first block of the previous extent rec and header_bh * indicates the bucket we will insert the new xattrs. They will be updated * when the header_bh is moved into the new cluster. 
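 */

/*
 * Sketch of the decision the function below makes for a freshly
 * allocated cluster (illustrative name; u32/u64 are the kernel types
 * already used here): the cluster is appended to the existing extent
 * record only if it is physically contiguous with it and the grown
 * extent still fits within the maximum xattr tree leaf size; otherwise
 * it becomes a separate extent and some xattrs are moved over to it.
 */
static inline int xattr_can_append_cluster_sketch(u64 ext_start_blkno,
						  u32 ext_clusters,
						  u32 blocks_per_cluster,
						  u64 new_blkno,
						  u32 new_clusters,
						  u32 clustersize_bits,
						  u64 max_leaf_bytes)
{
	int contiguous = ext_start_blkno +
			 (u64)ext_clusters * blocks_per_cluster == new_blkno;
	int fits = ((u64)(ext_clusters + new_clusters)
		    << clustersize_bits) <= max_leaf_bytes;

	return contiguous && fits;
}

/*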
*/ static int ocfs2_add_new_xattr_cluster(struct inode *inode, struct buffer_head *root_bh, struct ocfs2_xattr_bucket *first, struct ocfs2_xattr_bucket *target, u32 *num_clusters, u32 prev_cpos, int *extend, struct ocfs2_xattr_set_ctxt *ctxt) { int ret; u16 bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); u32 prev_clusters = *num_clusters; u32 clusters_to_add = 1, bit_off, num_bits, v_start = 0; u64 block; handle_t *handle = ctxt->handle; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_extent_tree et; trace_ocfs2_add_new_xattr_cluster_begin( (unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)bucket_blkno(first), prev_cpos, prev_clusters); ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh); ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); goto leave; } ret = __ocfs2_claim_clusters(handle, ctxt->data_ac, 1, clusters_to_add, &bit_off, &num_bits); if (ret < 0) { if (ret != -ENOSPC) mlog_errno(ret); goto leave; } BUG_ON(num_bits > clusters_to_add); block = ocfs2_clusters_to_blocks(osb->sb, bit_off); trace_ocfs2_add_new_xattr_cluster((unsigned long long)block, num_bits); if (bucket_blkno(first) + (prev_clusters * bpc) == block && (prev_clusters + num_bits) << osb->s_clustersize_bits <= OCFS2_MAX_XATTR_TREE_LEAF_SIZE) { /* * If this cluster is contiguous with the old one and * adding this new cluster, we don't surpass the limit of * OCFS2_MAX_XATTR_TREE_LEAF_SIZE, cool. We will let it be * initialized and used like other buckets in the previous * cluster. * So add it as a contiguous one. The caller will handle * its init process. */ v_start = prev_cpos + prev_clusters; *num_clusters = prev_clusters + num_bits; } else { ret = ocfs2_adjust_xattr_cross_cluster(inode, handle, first, target, block, prev_clusters, &v_start, extend); if (ret) { mlog_errno(ret); goto leave; } } trace_ocfs2_add_new_xattr_cluster_insert((unsigned long long)block, v_start, num_bits); ret = ocfs2_insert_extent(handle, &et, v_start, block, num_bits, 0, ctxt->meta_ac); if (ret < 0) { mlog_errno(ret); goto leave; } ocfs2_journal_dirty(handle, root_bh); leave: return ret; } /* * We are given an extent. 'first' is the bucket at the very front of * the extent. The extent has space for an additional bucket past * bucket_xh(first)->xh_num_buckets. 'target_blkno' is the block number * of the target bucket. We wish to shift every bucket past the target * down one, filling in that additional space. When we get back to the * target, we split the target between itself and the now-empty bucket * at target+1 (aka, target_blkno + blks_per_bucket). */ static int ocfs2_extend_xattr_bucket(struct inode *inode, handle_t *handle, struct ocfs2_xattr_bucket *first, u64 target_blk, u32 num_clusters) { int ret, credits; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); u64 end_blk; u16 new_bucket = le16_to_cpu(bucket_xh(first)->xh_num_buckets); trace_ocfs2_extend_xattr_bucket((unsigned long long)target_blk, (unsigned long long)bucket_blkno(first), num_clusters, new_bucket); /* The extent must have room for an additional bucket */ BUG_ON(new_bucket >= (num_clusters * ocfs2_xattr_buckets_per_cluster(osb))); /* end_blk points to the last existing bucket */ end_blk = bucket_blkno(first) + ((new_bucket - 1) * blk_per_bucket); /* * end_blk is the start of the last existing bucket. 
* Thus, (end_blk - target_blk) covers the target bucket and * every bucket after it up to, but not including, the last * existing bucket. Then we add the last existing bucket, the * new bucket, and the first bucket (3 * blk_per_bucket). */ credits = (end_blk - target_blk) + (3 * blk_per_bucket); ret = ocfs2_extend_trans(handle, credits); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_xattr_bucket_journal_access(handle, first, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } while (end_blk != target_blk) { ret = ocfs2_cp_xattr_bucket(inode, handle, end_blk, end_blk + blk_per_bucket, 0); if (ret) goto out; end_blk -= blk_per_bucket; } /* Move half of the xattr in target_blkno to the next bucket. */ ret = ocfs2_divide_xattr_bucket(inode, handle, target_blk, target_blk + blk_per_bucket, NULL, 0); le16_add_cpu(&bucket_xh(first)->xh_num_buckets, 1); ocfs2_xattr_bucket_journal_dirty(handle, first); out: return ret; } /* * Add new xattr bucket in an extent record and adjust the buckets * accordingly. xb_bh is the ocfs2_xattr_block, and target is the * bucket we want to insert into. * * In the easy case, we will move all the buckets after target down by * one. Half of target's xattrs will be moved to the next bucket. * * If current cluster is full, we'll allocate a new one. This may not * be contiguous. The underlying calls will make sure that there is * space for the insert, shifting buckets around if necessary. * 'target' may be moved by those calls. */ static int ocfs2_add_new_xattr_bucket(struct inode *inode, struct buffer_head *xb_bh, struct ocfs2_xattr_bucket *target, struct ocfs2_xattr_set_ctxt *ctxt) { struct ocfs2_xattr_block *xb = (struct ocfs2_xattr_block *)xb_bh->b_data; struct ocfs2_xattr_tree_root *xb_root = &xb->xb_attrs.xb_root; struct ocfs2_extent_list *el = &xb_root->xt_list; u32 name_hash = le32_to_cpu(bucket_xh(target)->xh_entries[0].xe_name_hash); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); int ret, num_buckets, extend = 1; u64 p_blkno; u32 e_cpos, num_clusters; /* The bucket at the front of the extent */ struct ocfs2_xattr_bucket *first; trace_ocfs2_add_new_xattr_bucket( (unsigned long long)bucket_blkno(target)); /* The first bucket of the original extent */ first = ocfs2_xattr_bucket_new(inode); if (!first) { ret = -ENOMEM; mlog_errno(ret); goto out; } ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &e_cpos, &num_clusters, el); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_read_xattr_bucket(first, p_blkno); if (ret) { mlog_errno(ret); goto out; } num_buckets = ocfs2_xattr_buckets_per_cluster(osb) * num_clusters; if (num_buckets == le16_to_cpu(bucket_xh(first)->xh_num_buckets)) { /* * This can move first+target if the target bucket moves * to the new extent. */ ret = ocfs2_add_new_xattr_cluster(inode, xb_bh, first, target, &num_clusters, e_cpos, &extend, ctxt); if (ret) { mlog_errno(ret); goto out; } } if (extend) { ret = ocfs2_extend_xattr_bucket(inode, ctxt->handle, first, bucket_blkno(target), num_clusters); if (ret) mlog_errno(ret); } out: ocfs2_xattr_bucket_free(first); return ret; } /* * Truncate the specified xe_off entry in xattr bucket. * bucket is indicated by header_bh and len is the new length. * Both the ocfs2_xattr_value_root and the entry will be updated here. * * Copy the new updated xe and xe_value_root to new_xe and new_xv if needed. 
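 */

/*
 * Plain-array sketch of the shift performed by ocfs2_extend_xattr_bucket()
 * above (illustrative name): every bucket past the target is copied one
 * slot toward the end, starting from the last one, so the slot right
 * after the target can then be overwritten when the target bucket is
 * split in two.
 */
static inline void xattr_open_gap_sketch(int *slots, int last, int target)
{
	int cur;

	for (cur = last; cur != target; cur--)
		slots[cur + 1] = slots[cur];
}

/*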
*/ static int ocfs2_xattr_bucket_value_truncate(struct inode *inode, struct ocfs2_xattr_bucket *bucket, int xe_off, int len, struct ocfs2_xattr_set_ctxt *ctxt) { int ret, offset; u64 value_blk; struct ocfs2_xattr_entry *xe; struct ocfs2_xattr_header *xh = bucket_xh(bucket); size_t blocksize = inode->i_sb->s_blocksize; struct ocfs2_xattr_value_buf vb = { .vb_access = ocfs2_journal_access, }; xe = &xh->xh_entries[xe_off]; BUG_ON(!xe || ocfs2_xattr_is_local(xe)); offset = le16_to_cpu(xe->xe_name_offset) + OCFS2_XATTR_SIZE(xe->xe_name_len); value_blk = offset / blocksize; /* We don't allow ocfs2_xattr_value to be stored in different block. */ BUG_ON(value_blk != (offset + OCFS2_XATTR_ROOT_SIZE - 1) / blocksize); vb.vb_bh = bucket->bu_bhs[value_blk]; BUG_ON(!vb.vb_bh); vb.vb_xv = (struct ocfs2_xattr_value_root *) (vb.vb_bh->b_data + offset % blocksize); /* * From here on out we have to dirty the bucket. The generic * value calls only modify one of the bucket's bhs, but we need * to send the bucket at once. So if they error, they *could* have * modified something. We have to assume they did, and dirty * the whole bucket. This leaves us in a consistent state. */ trace_ocfs2_xattr_bucket_value_truncate( (unsigned long long)bucket_blkno(bucket), xe_off, len); ret = ocfs2_xattr_value_truncate(inode, &vb, len, ctxt); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_xattr_bucket_journal_access(ctxt->handle, bucket, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } xe->xe_value_size = cpu_to_le64(len); ocfs2_xattr_bucket_journal_dirty(ctxt->handle, bucket); out: return ret; } static int ocfs2_rm_xattr_cluster(struct inode *inode, struct buffer_head *root_bh, u64 blkno, u32 cpos, u32 len, void *para) { int ret; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct inode *tl_inode = osb->osb_tl_inode; handle_t *handle; struct ocfs2_xattr_block *xb = (struct ocfs2_xattr_block *)root_bh->b_data; struct ocfs2_alloc_context *meta_ac = NULL; struct ocfs2_cached_dealloc_ctxt dealloc; struct ocfs2_extent_tree et; ret = ocfs2_iterate_xattr_buckets(inode, blkno, len, ocfs2_delete_xattr_in_bucket, para); if (ret) { mlog_errno(ret); return ret; } ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh); ocfs2_init_dealloc_ctxt(&dealloc); trace_ocfs2_rm_xattr_cluster( (unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)blkno, cpos, len); ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode), blkno, len); ret = ocfs2_lock_allocators(inode, &et, 0, 1, NULL, &meta_ac); if (ret) { mlog_errno(ret); return ret; } inode_lock(tl_inode); if (ocfs2_truncate_log_needs_flush(osb)) { ret = __ocfs2_flush_truncate_log(osb); if (ret < 0) { mlog_errno(ret); goto out; } } handle = ocfs2_start_trans(osb, ocfs2_remove_extent_credits(osb->sb)); if (IS_ERR(handle)) { ret = -ENOMEM; mlog_errno(ret); goto out; } ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; } ret = ocfs2_remove_extent(handle, &et, cpos, len, meta_ac, &dealloc); if (ret) { mlog_errno(ret); goto out_commit; } le32_add_cpu(&xb->xb_attrs.xb_root.xt_clusters, -len); ocfs2_journal_dirty(handle, root_bh); ret = ocfs2_truncate_log_append(osb, handle, blkno, len); if (ret) mlog_errno(ret); ocfs2_update_inode_fsync_trans(handle, inode, 0); out_commit: ocfs2_commit_trans(osb, handle); out: ocfs2_schedule_truncate_log_flush(osb, 1); inode_unlock(tl_inode); if (meta_ac) ocfs2_free_alloc_context(meta_ac); ocfs2_run_deallocs(osb, &dealloc); 
return ret; } /* * check whether the xattr bucket is filled up with the same hash value. * If we want to insert the xattr with the same hash, return -ENOSPC. * If we want to insert a xattr with different hash value, go ahead * and ocfs2_divide_xattr_bucket will handle this. */ static int ocfs2_check_xattr_bucket_collision(struct inode *inode, struct ocfs2_xattr_bucket *bucket, const char *name) { struct ocfs2_xattr_header *xh = bucket_xh(bucket); u32 name_hash = ocfs2_xattr_name_hash(inode, name, strlen(name)); if (name_hash != le32_to_cpu(xh->xh_entries[0].xe_name_hash)) return 0; if (xh->xh_entries[le16_to_cpu(xh->xh_count) - 1].xe_name_hash == xh->xh_entries[0].xe_name_hash) { mlog(ML_ERROR, "Too much hash collision in xattr bucket %llu, " "hash = %u\n", (unsigned long long)bucket_blkno(bucket), le32_to_cpu(xh->xh_entries[0].xe_name_hash)); return -ENOSPC; } return 0; } /* * Try to set the entry in the current bucket. If we fail, the caller * will handle getting us another bucket. */ static int ocfs2_xattr_set_entry_bucket(struct inode *inode, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xs, struct ocfs2_xattr_set_ctxt *ctxt) { int ret; struct ocfs2_xa_loc loc; trace_ocfs2_xattr_set_entry_bucket(xi->xi_name); ocfs2_init_xattr_bucket_xa_loc(&loc, xs->bucket, xs->not_found ? NULL : xs->here); ret = ocfs2_xa_set(&loc, xi, ctxt); if (!ret) { xs->here = loc.xl_entry; goto out; } if (ret != -ENOSPC) { mlog_errno(ret); goto out; } /* Ok, we need space. Let's try defragmenting the bucket. */ ret = ocfs2_defrag_xattr_bucket(inode, ctxt->handle, xs->bucket); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_xa_set(&loc, xi, ctxt); if (!ret) { xs->here = loc.xl_entry; goto out; } if (ret != -ENOSPC) mlog_errno(ret); out: return ret; } static int ocfs2_xattr_set_entry_index_block(struct inode *inode, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xs, struct ocfs2_xattr_set_ctxt *ctxt) { int ret; trace_ocfs2_xattr_set_entry_index_block(xi->xi_name); ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt); if (!ret) goto out; if (ret != -ENOSPC) { mlog_errno(ret); goto out; } /* Ack, need more space. Let's try to get another bucket! */ /* * We do not allow for overlapping ranges between buckets. And * the maximum number of collisions we will allow for then is * one bucket's worth, so check it here whether we need to * add a new bucket for the insert. */ ret = ocfs2_check_xattr_bucket_collision(inode, xs->bucket, xi->xi_name); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_add_new_xattr_bucket(inode, xs->xattr_bh, xs->bucket, ctxt); if (ret) { mlog_errno(ret); goto out; } /* * ocfs2_add_new_xattr_bucket() will have updated * xs->bucket if it moved, but it will not have updated * any of the other search fields. Thus, we drop it and * re-search. Everything should be cached, so it'll be * quick. 
*/ ocfs2_xattr_bucket_relse(xs->bucket); ret = ocfs2_xattr_index_block_find(inode, xs->xattr_bh, xi->xi_name_index, xi->xi_name, xs); if (ret && ret != -ENODATA) goto out; xs->not_found = ret; /* Ok, we have a new bucket, let's try again */ ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt); if (ret && (ret != -ENOSPC)) mlog_errno(ret); out: return ret; } static int ocfs2_delete_xattr_in_bucket(struct inode *inode, struct ocfs2_xattr_bucket *bucket, void *para) { int ret = 0, ref_credits; struct ocfs2_xattr_header *xh = bucket_xh(bucket); u16 i; struct ocfs2_xattr_entry *xe; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_xattr_set_ctxt ctxt = {NULL, NULL,}; int credits = ocfs2_remove_extent_credits(osb->sb) + ocfs2_blocks_per_xattr_bucket(inode->i_sb); struct ocfs2_xattr_value_root *xv; struct ocfs2_rm_xattr_bucket_para *args = (struct ocfs2_rm_xattr_bucket_para *)para; ocfs2_init_dealloc_ctxt(&ctxt.dealloc); for (i = 0; i < le16_to_cpu(xh->xh_count); i++) { xe = &xh->xh_entries[i]; if (ocfs2_xattr_is_local(xe)) continue; ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket, i, &xv, NULL); if (ret) { mlog_errno(ret); break; } ret = ocfs2_lock_xattr_remove_allocators(inode, xv, args->ref_ci, args->ref_root_bh, &ctxt.meta_ac, &ref_credits); ctxt.handle = ocfs2_start_trans(osb, credits + ref_credits); if (IS_ERR(ctxt.handle)) { ret = PTR_ERR(ctxt.handle); mlog_errno(ret); break; } ret = ocfs2_xattr_bucket_value_truncate(inode, bucket, i, 0, &ctxt); ocfs2_commit_trans(osb, ctxt.handle); if (ctxt.meta_ac) { ocfs2_free_alloc_context(ctxt.meta_ac); ctxt.meta_ac = NULL; } if (ret) { mlog_errno(ret); break; } } if (ctxt.meta_ac) ocfs2_free_alloc_context(ctxt.meta_ac); ocfs2_schedule_truncate_log_flush(osb, 1); ocfs2_run_deallocs(osb, &ctxt.dealloc); return ret; } /* * Whenever we modify a xattr value root in the bucket(e.g, CoW * or change the extent record flag), we need to recalculate * the metaecc for the whole bucket. So it is done here. * * Note: * We have to give the extra credits for the caller. */ static int ocfs2_xattr_bucket_post_refcount(struct inode *inode, handle_t *handle, void *para) { int ret; struct ocfs2_xattr_bucket *bucket = (struct ocfs2_xattr_bucket *)para; ret = ocfs2_xattr_bucket_journal_access(handle, bucket, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); return ret; } ocfs2_xattr_bucket_journal_dirty(handle, bucket); return 0; } /* * Special action we need if the xattr value is refcounted. * * 1. If the xattr is refcounted, lock the tree. * 2. CoW the xattr if we are setting the new value and the value * will be stored outside. * 3. In other case, decrease_refcount will work for us, so just * lock the refcount tree, calculate the meta and credits is OK. * * We have to do CoW before ocfs2_init_xattr_set_ctxt since * currently CoW is a completed transaction, while this function * will also lock the allocators and let us deadlock. So we will * CoW the whole xattr value. 
*/ static int ocfs2_prepare_refcount_xattr(struct inode *inode, struct ocfs2_dinode *di, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xis, struct ocfs2_xattr_search *xbs, struct ocfs2_refcount_tree **ref_tree, int *meta_add, int *credits) { int ret = 0; struct ocfs2_xattr_block *xb; struct ocfs2_xattr_entry *xe; char *base; u32 p_cluster, num_clusters; unsigned int ext_flags; int name_offset, name_len; struct ocfs2_xattr_value_buf vb; struct ocfs2_xattr_bucket *bucket = NULL; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_post_refcount refcount; struct ocfs2_post_refcount *p = NULL; struct buffer_head *ref_root_bh = NULL; if (!xis->not_found) { xe = xis->here; name_offset = le16_to_cpu(xe->xe_name_offset); name_len = OCFS2_XATTR_SIZE(xe->xe_name_len); base = xis->base; vb.vb_bh = xis->inode_bh; vb.vb_access = ocfs2_journal_access_di; } else { int i, block_off = 0; xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data; xe = xbs->here; name_offset = le16_to_cpu(xe->xe_name_offset); name_len = OCFS2_XATTR_SIZE(xe->xe_name_len); i = xbs->here - xbs->header->xh_entries; if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) { ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb, bucket_xh(xbs->bucket), i, &block_off, &name_offset); if (ret) { mlog_errno(ret); goto out; } base = bucket_block(xbs->bucket, block_off); vb.vb_bh = xbs->bucket->bu_bhs[block_off]; vb.vb_access = ocfs2_journal_access; if (ocfs2_meta_ecc(osb)) { /*create parameters for ocfs2_post_refcount. */ bucket = xbs->bucket; refcount.credits = bucket->bu_blocks; refcount.para = bucket; refcount.func = ocfs2_xattr_bucket_post_refcount; p = &refcount; } } else { base = xbs->base; vb.vb_bh = xbs->xattr_bh; vb.vb_access = ocfs2_journal_access_xb; } } if (ocfs2_xattr_is_local(xe)) goto out; vb.vb_xv = (struct ocfs2_xattr_value_root *) (base + name_offset + name_len); ret = ocfs2_xattr_get_clusters(inode, 0, &p_cluster, &num_clusters, &vb.vb_xv->xr_list, &ext_flags); if (ret) { mlog_errno(ret); goto out; } /* * We just need to check the 1st extent record, since we always * CoW the whole xattr. So there shouldn't be a xattr with * some REFCOUNT extent recs after the 1st one. */ if (!(ext_flags & OCFS2_EXT_REFCOUNTED)) goto out; ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc), 1, ref_tree, &ref_root_bh); if (ret) { mlog_errno(ret); goto out; } /* * If we are deleting the xattr or the new size will be stored inside, * cool, leave it there, the xattr truncate process will remove them * for us(it still needs the refcount tree lock and the meta, credits). * And the worse case is that every cluster truncate will split the * refcount tree, and make the original extent become 3. So we will need * 2 * cluster more extent recs at most. */ if (!xi->xi_value || xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE) { ret = ocfs2_refcounted_xattr_delete_need(inode, &(*ref_tree)->rf_ci, ref_root_bh, vb.vb_xv, meta_add, credits); if (ret) mlog_errno(ret); goto out; } ret = ocfs2_refcount_cow_xattr(inode, di, &vb, *ref_tree, ref_root_bh, 0, le32_to_cpu(vb.vb_xv->xr_clusters), p); if (ret) mlog_errno(ret); out: brelse(ref_root_bh); return ret; } /* * Add the REFCOUNTED flags for all the extent rec in ocfs2_xattr_value_root. * The physical clusters will be added to refcount tree. 
*/ static int ocfs2_xattr_value_attach_refcount(struct inode *inode, struct ocfs2_xattr_value_root *xv, struct ocfs2_extent_tree *value_et, struct ocfs2_caching_info *ref_ci, struct buffer_head *ref_root_bh, struct ocfs2_cached_dealloc_ctxt *dealloc, struct ocfs2_post_refcount *refcount) { int ret = 0; u32 clusters = le32_to_cpu(xv->xr_clusters); u32 cpos, p_cluster, num_clusters; struct ocfs2_extent_list *el = &xv->xr_list; unsigned int ext_flags; cpos = 0; while (cpos < clusters) { ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster, &num_clusters, el, &ext_flags); if (ret) { mlog_errno(ret); break; } cpos += num_clusters; if ((ext_flags & OCFS2_EXT_REFCOUNTED)) continue; BUG_ON(!p_cluster); ret = ocfs2_add_refcount_flag(inode, value_et, ref_ci, ref_root_bh, cpos - num_clusters, p_cluster, num_clusters, dealloc, refcount); if (ret) { mlog_errno(ret); break; } } return ret; } /* * Given a normal ocfs2_xattr_header, refcount all the entries which * have value stored outside. * Used for xattrs stored in inode and ocfs2_xattr_block. */ static int ocfs2_xattr_attach_refcount_normal(struct inode *inode, struct ocfs2_xattr_value_buf *vb, struct ocfs2_xattr_header *header, struct ocfs2_caching_info *ref_ci, struct buffer_head *ref_root_bh, struct ocfs2_cached_dealloc_ctxt *dealloc) { struct ocfs2_xattr_entry *xe; struct ocfs2_xattr_value_root *xv; struct ocfs2_extent_tree et; int i, ret = 0; for (i = 0; i < le16_to_cpu(header->xh_count); i++) { xe = &header->xh_entries[i]; if (ocfs2_xattr_is_local(xe)) continue; xv = (struct ocfs2_xattr_value_root *)((void *)header + le16_to_cpu(xe->xe_name_offset) + OCFS2_XATTR_SIZE(xe->xe_name_len)); vb->vb_xv = xv; ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb); ret = ocfs2_xattr_value_attach_refcount(inode, xv, &et, ref_ci, ref_root_bh, dealloc, NULL); if (ret) { mlog_errno(ret); break; } } return ret; } static int ocfs2_xattr_inline_attach_refcount(struct inode *inode, struct buffer_head *fe_bh, struct ocfs2_caching_info *ref_ci, struct buffer_head *ref_root_bh, struct ocfs2_cached_dealloc_ctxt *dealloc) { struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data; struct ocfs2_xattr_header *header = (struct ocfs2_xattr_header *) (fe_bh->b_data + inode->i_sb->s_blocksize - le16_to_cpu(di->i_xattr_inline_size)); struct ocfs2_xattr_value_buf vb = { .vb_bh = fe_bh, .vb_access = ocfs2_journal_access_di, }; return ocfs2_xattr_attach_refcount_normal(inode, &vb, header, ref_ci, ref_root_bh, dealloc); } struct ocfs2_xattr_tree_value_refcount_para { struct ocfs2_caching_info *ref_ci; struct buffer_head *ref_root_bh; struct ocfs2_cached_dealloc_ctxt *dealloc; }; static int ocfs2_get_xattr_tree_value_root(struct super_block *sb, struct ocfs2_xattr_bucket *bucket, int offset, struct ocfs2_xattr_value_root **xv, struct buffer_head **bh) { int ret, block_off, name_offset; struct ocfs2_xattr_header *xh = bucket_xh(bucket); struct ocfs2_xattr_entry *xe = &xh->xh_entries[offset]; void *base; ret = ocfs2_xattr_bucket_get_name_value(sb, bucket_xh(bucket), offset, &block_off, &name_offset); if (ret) { mlog_errno(ret); goto out; } base = bucket_block(bucket, block_off); *xv = (struct ocfs2_xattr_value_root *)(base + name_offset + OCFS2_XATTR_SIZE(xe->xe_name_len)); if (bh) *bh = bucket->bu_bhs[block_off]; out: return ret; } /* * For a given xattr bucket, refcount all the entries which * have value stored outside. 
*/ static int ocfs2_xattr_bucket_value_refcount(struct inode *inode, struct ocfs2_xattr_bucket *bucket, void *para) { int i, ret = 0; struct ocfs2_extent_tree et; struct ocfs2_xattr_tree_value_refcount_para *ref = (struct ocfs2_xattr_tree_value_refcount_para *)para; struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)bucket->bu_bhs[0]->b_data; struct ocfs2_xattr_entry *xe; struct ocfs2_xattr_value_buf vb = { .vb_access = ocfs2_journal_access, }; struct ocfs2_post_refcount refcount = { .credits = bucket->bu_blocks, .para = bucket, .func = ocfs2_xattr_bucket_post_refcount, }; struct ocfs2_post_refcount *p = NULL; /* We only need post_refcount if we support metaecc. */ if (ocfs2_meta_ecc(OCFS2_SB(inode->i_sb))) p = &refcount; trace_ocfs2_xattr_bucket_value_refcount( (unsigned long long)bucket_blkno(bucket), le16_to_cpu(xh->xh_count)); for (i = 0; i < le16_to_cpu(xh->xh_count); i++) { xe = &xh->xh_entries[i]; if (ocfs2_xattr_is_local(xe)) continue; ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket, i, &vb.vb_xv, &vb.vb_bh); if (ret) { mlog_errno(ret); break; } ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), &vb); ret = ocfs2_xattr_value_attach_refcount(inode, vb.vb_xv, &et, ref->ref_ci, ref->ref_root_bh, ref->dealloc, p); if (ret) { mlog_errno(ret); break; } } return ret; } static int ocfs2_refcount_xattr_tree_rec(struct inode *inode, struct buffer_head *root_bh, u64 blkno, u32 cpos, u32 len, void *para) { return ocfs2_iterate_xattr_buckets(inode, blkno, len, ocfs2_xattr_bucket_value_refcount, para); } static int ocfs2_xattr_block_attach_refcount(struct inode *inode, struct buffer_head *blk_bh, struct ocfs2_caching_info *ref_ci, struct buffer_head *ref_root_bh, struct ocfs2_cached_dealloc_ctxt *dealloc) { int ret = 0; struct ocfs2_xattr_block *xb = (struct ocfs2_xattr_block *)blk_bh->b_data; if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) { struct ocfs2_xattr_header *header = &xb->xb_attrs.xb_header; struct ocfs2_xattr_value_buf vb = { .vb_bh = blk_bh, .vb_access = ocfs2_journal_access_xb, }; ret = ocfs2_xattr_attach_refcount_normal(inode, &vb, header, ref_ci, ref_root_bh, dealloc); } else { struct ocfs2_xattr_tree_value_refcount_para para = { .ref_ci = ref_ci, .ref_root_bh = ref_root_bh, .dealloc = dealloc, }; ret = ocfs2_iterate_xattr_index_block(inode, blk_bh, ocfs2_refcount_xattr_tree_rec, &para); } return ret; } int ocfs2_xattr_attach_refcount_tree(struct inode *inode, struct buffer_head *fe_bh, struct ocfs2_caching_info *ref_ci, struct buffer_head *ref_root_bh, struct ocfs2_cached_dealloc_ctxt *dealloc) { int ret = 0; struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data; struct buffer_head *blk_bh = NULL; if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) { ret = ocfs2_xattr_inline_attach_refcount(inode, fe_bh, ref_ci, ref_root_bh, dealloc); if (ret) { mlog_errno(ret); goto out; } } if (!di->i_xattr_loc) goto out; ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc), &blk_bh); if (ret < 0) { mlog_errno(ret); goto out; } ret = ocfs2_xattr_block_attach_refcount(inode, blk_bh, ref_ci, ref_root_bh, dealloc); if (ret) mlog_errno(ret); brelse(blk_bh); out: return ret; } typedef int (should_xattr_reflinked)(struct ocfs2_xattr_entry *xe); /* * Store the information we need in xattr reflink. * old_bh and new_bh are inode bh for the old and new inode. 
*/ struct ocfs2_xattr_reflink { struct inode *old_inode; struct inode *new_inode; struct buffer_head *old_bh; struct buffer_head *new_bh; struct ocfs2_caching_info *ref_ci; struct buffer_head *ref_root_bh; struct ocfs2_cached_dealloc_ctxt *dealloc; should_xattr_reflinked *xattr_reflinked; }; /* * Given a xattr header and xe offset, * return the proper xv and the corresponding bh. * xattr in inode, block and xattr tree have different implementations. */ typedef int (get_xattr_value_root)(struct super_block *sb, struct buffer_head *bh, struct ocfs2_xattr_header *xh, int offset, struct ocfs2_xattr_value_root **xv, struct buffer_head **ret_bh, void *para); /* * Calculate all the xattr value root metadata stored in this xattr header and * credits we need if we create them from the scratch. * We use get_xattr_value_root so that all types of xattr container can use it. */ static int ocfs2_value_metas_in_xattr_header(struct super_block *sb, struct buffer_head *bh, struct ocfs2_xattr_header *xh, int *metas, int *credits, int *num_recs, get_xattr_value_root *func, void *para) { int i, ret = 0; struct ocfs2_xattr_value_root *xv; struct ocfs2_xattr_entry *xe; for (i = 0; i < le16_to_cpu(xh->xh_count); i++) { xe = &xh->xh_entries[i]; if (ocfs2_xattr_is_local(xe)) continue; ret = func(sb, bh, xh, i, &xv, NULL, para); if (ret) { mlog_errno(ret); break; } *metas += le16_to_cpu(xv->xr_list.l_tree_depth) * le16_to_cpu(xv->xr_list.l_next_free_rec); *credits += ocfs2_calc_extend_credits(sb, &def_xv.xv.xr_list); /* * If the value is a tree with depth > 1, We don't go deep * to the extent block, so just calculate a maximum record num. */ if (!xv->xr_list.l_tree_depth) *num_recs += le16_to_cpu(xv->xr_list.l_next_free_rec); else *num_recs += ocfs2_clusters_for_bytes(sb, XATTR_SIZE_MAX); } return ret; } /* Used by xattr inode and block to return the right xv and buffer_head. */ static int ocfs2_get_xattr_value_root(struct super_block *sb, struct buffer_head *bh, struct ocfs2_xattr_header *xh, int offset, struct ocfs2_xattr_value_root **xv, struct buffer_head **ret_bh, void *para) { struct ocfs2_xattr_entry *xe = &xh->xh_entries[offset]; *xv = (struct ocfs2_xattr_value_root *)((void *)xh + le16_to_cpu(xe->xe_name_offset) + OCFS2_XATTR_SIZE(xe->xe_name_len)); if (ret_bh) *ret_bh = bh; return 0; } /* * Lock the meta_ac and calculate how much credits we need for reflink xattrs. * It is only used for inline xattr and xattr block. */ static int ocfs2_reflink_lock_xattr_allocators(struct ocfs2_super *osb, struct ocfs2_xattr_header *xh, struct buffer_head *ref_root_bh, int *credits, struct ocfs2_alloc_context **meta_ac) { int ret, meta_add = 0, num_recs = 0; struct ocfs2_refcount_block *rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; *credits = 0; ret = ocfs2_value_metas_in_xattr_header(osb->sb, NULL, xh, &meta_add, credits, &num_recs, ocfs2_get_xattr_value_root, NULL); if (ret) { mlog_errno(ret); goto out; } /* * We need to add/modify num_recs in refcount tree, so just calculate * an approximate number we need for refcount tree change. * Sometimes we need to split the tree, and after split, half recs * will be moved to the new block, and a new block can only provide * half number of recs. So we multiple new blocks by 2. 
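 * For example (with purely illustrative numbers): if the header references
 * num_recs = 100 value extent records and ocfs2_refcount_recs_per_rb() is 50,
 * the calculation below budgets (100 / 50) * 2 = 4 extra refcount blocks plus
 * the matching journal credits.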
*/ num_recs = num_recs / ocfs2_refcount_recs_per_rb(osb->sb) * 2; meta_add += num_recs; *credits += num_recs + num_recs * OCFS2_EXPAND_REFCOUNT_TREE_CREDITS; if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) *credits += le16_to_cpu(rb->rf_list.l_tree_depth) * le16_to_cpu(rb->rf_list.l_next_free_rec) + 1; else *credits += 1; ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add, meta_ac); if (ret) mlog_errno(ret); out: return ret; } /* * Given a xattr header, reflink all the xattrs in this container. * It can be used for inode, block and bucket. * * NOTE: * Before we call this function, the caller has memcpy the xattr in * old_xh to the new_xh. * * If args.xattr_reflinked is set, call it to decide whether the xe should * be reflinked or not. If not, remove it from the new xattr header. */ static int ocfs2_reflink_xattr_header(handle_t *handle, struct ocfs2_xattr_reflink *args, struct buffer_head *old_bh, struct ocfs2_xattr_header *xh, struct buffer_head *new_bh, struct ocfs2_xattr_header *new_xh, struct ocfs2_xattr_value_buf *vb, struct ocfs2_alloc_context *meta_ac, get_xattr_value_root *func, void *para) { int ret = 0, i, j; struct super_block *sb = args->old_inode->i_sb; struct buffer_head *value_bh; struct ocfs2_xattr_entry *xe, *last; struct ocfs2_xattr_value_root *xv, *new_xv; struct ocfs2_extent_tree data_et; u32 clusters, cpos, p_cluster, num_clusters; unsigned int ext_flags = 0; trace_ocfs2_reflink_xattr_header((unsigned long long)old_bh->b_blocknr, le16_to_cpu(xh->xh_count)); last = &new_xh->xh_entries[le16_to_cpu(new_xh->xh_count)]; for (i = 0, j = 0; i < le16_to_cpu(xh->xh_count); i++, j++) { xe = &xh->xh_entries[i]; if (args->xattr_reflinked && !args->xattr_reflinked(xe)) { xe = &new_xh->xh_entries[j]; le16_add_cpu(&new_xh->xh_count, -1); if (new_xh->xh_count) { memmove(xe, xe + 1, (void *)last - (void *)xe); memset(last, 0, sizeof(struct ocfs2_xattr_entry)); } /* * We don't want j to increase in the next round since * it is already moved ahead. */ j--; continue; } if (ocfs2_xattr_is_local(xe)) continue; ret = func(sb, old_bh, xh, i, &xv, NULL, para); if (ret) { mlog_errno(ret); break; } ret = func(sb, new_bh, new_xh, j, &new_xv, &value_bh, para); if (ret) { mlog_errno(ret); break; } /* * For the xattr which has l_tree_depth = 0, all the extent * recs have already be copied to the new xh with the * propriate OCFS2_EXT_REFCOUNTED flag we just need to * increase the refount count int the refcount tree. * * For the xattr which has l_tree_depth > 0, we need * to initialize it to the empty default value root, * and then insert the extents one by one. 
*/ if (xv->xr_list.l_tree_depth) { memcpy(new_xv, &def_xv, OCFS2_XATTR_ROOT_SIZE); vb->vb_xv = new_xv; vb->vb_bh = value_bh; ocfs2_init_xattr_value_extent_tree(&data_et, INODE_CACHE(args->new_inode), vb); } clusters = le32_to_cpu(xv->xr_clusters); cpos = 0; while (cpos < clusters) { ret = ocfs2_xattr_get_clusters(args->old_inode, cpos, &p_cluster, &num_clusters, &xv->xr_list, &ext_flags); if (ret) { mlog_errno(ret); goto out; } BUG_ON(!p_cluster); if (xv->xr_list.l_tree_depth) { ret = ocfs2_insert_extent(handle, &data_et, cpos, ocfs2_clusters_to_blocks( args->old_inode->i_sb, p_cluster), num_clusters, ext_flags, meta_ac); if (ret) { mlog_errno(ret); goto out; } } ret = ocfs2_increase_refcount(handle, args->ref_ci, args->ref_root_bh, p_cluster, num_clusters, meta_ac, args->dealloc); if (ret) { mlog_errno(ret); goto out; } cpos += num_clusters; } } out: return ret; } static int ocfs2_reflink_xattr_inline(struct ocfs2_xattr_reflink *args) { int ret = 0, credits = 0; handle_t *handle; struct ocfs2_super *osb = OCFS2_SB(args->old_inode->i_sb); struct ocfs2_dinode *di = (struct ocfs2_dinode *)args->old_bh->b_data; int inline_size = le16_to_cpu(di->i_xattr_inline_size); int header_off = osb->sb->s_blocksize - inline_size; struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *) (args->old_bh->b_data + header_off); struct ocfs2_xattr_header *new_xh = (struct ocfs2_xattr_header *) (args->new_bh->b_data + header_off); struct ocfs2_alloc_context *meta_ac = NULL; struct ocfs2_inode_info *new_oi; struct ocfs2_dinode *new_di; struct ocfs2_xattr_value_buf vb = { .vb_bh = args->new_bh, .vb_access = ocfs2_journal_access_di, }; ret = ocfs2_reflink_lock_xattr_allocators(osb, xh, args->ref_root_bh, &credits, &meta_ac); if (ret) { mlog_errno(ret); goto out; } handle = ocfs2_start_trans(osb, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out; } ret = ocfs2_journal_access_di(handle, INODE_CACHE(args->new_inode), args->new_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; } memcpy(args->new_bh->b_data + header_off, args->old_bh->b_data + header_off, inline_size); new_di = (struct ocfs2_dinode *)args->new_bh->b_data; new_di->i_xattr_inline_size = cpu_to_le16(inline_size); ret = ocfs2_reflink_xattr_header(handle, args, args->old_bh, xh, args->new_bh, new_xh, &vb, meta_ac, ocfs2_get_xattr_value_root, NULL); if (ret) { mlog_errno(ret); goto out_commit; } new_oi = OCFS2_I(args->new_inode); spin_lock(&new_oi->ip_lock); new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL | OCFS2_INLINE_XATTR_FL; new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features); spin_unlock(&new_oi->ip_lock); ocfs2_journal_dirty(handle, args->new_bh); out_commit: ocfs2_commit_trans(osb, handle); out: if (meta_ac) ocfs2_free_alloc_context(meta_ac); return ret; } static int ocfs2_create_empty_xattr_block(struct inode *inode, struct buffer_head *fe_bh, struct buffer_head **ret_bh, int indexed) { int ret; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_xattr_set_ctxt ctxt; memset(&ctxt, 0, sizeof(ctxt)); ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &ctxt.meta_ac); if (ret < 0) { mlog_errno(ret); return ret; } ctxt.handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS); if (IS_ERR(ctxt.handle)) { ret = PTR_ERR(ctxt.handle); mlog_errno(ret); goto out; } trace_ocfs2_create_empty_xattr_block( (unsigned long long)fe_bh->b_blocknr, indexed); ret = ocfs2_create_xattr_block(inode, fe_bh, &ctxt, indexed, ret_bh); if (ret) mlog_errno(ret); ocfs2_commit_trans(osb, 
ctxt.handle); out: ocfs2_free_alloc_context(ctxt.meta_ac); return ret; } static int ocfs2_reflink_xattr_block(struct ocfs2_xattr_reflink *args, struct buffer_head *blk_bh, struct buffer_head *new_blk_bh) { int ret = 0, credits = 0; handle_t *handle; struct ocfs2_inode_info *new_oi = OCFS2_I(args->new_inode); struct ocfs2_dinode *new_di; struct ocfs2_super *osb = OCFS2_SB(args->new_inode->i_sb); int header_off = offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header); struct ocfs2_xattr_block *xb = (struct ocfs2_xattr_block *)blk_bh->b_data; struct ocfs2_xattr_header *xh = &xb->xb_attrs.xb_header; struct ocfs2_xattr_block *new_xb = (struct ocfs2_xattr_block *)new_blk_bh->b_data; struct ocfs2_xattr_header *new_xh = &new_xb->xb_attrs.xb_header; struct ocfs2_alloc_context *meta_ac; struct ocfs2_xattr_value_buf vb = { .vb_bh = new_blk_bh, .vb_access = ocfs2_journal_access_xb, }; ret = ocfs2_reflink_lock_xattr_allocators(osb, xh, args->ref_root_bh, &credits, &meta_ac); if (ret) { mlog_errno(ret); return ret; } /* One more credits in case we need to add xattr flags in new inode. */ handle = ocfs2_start_trans(osb, credits + 1); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out; } if (!(new_oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) { ret = ocfs2_journal_access_di(handle, INODE_CACHE(args->new_inode), args->new_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; } } ret = ocfs2_journal_access_xb(handle, INODE_CACHE(args->new_inode), new_blk_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; } memcpy(new_blk_bh->b_data + header_off, blk_bh->b_data + header_off, osb->sb->s_blocksize - header_off); ret = ocfs2_reflink_xattr_header(handle, args, blk_bh, xh, new_blk_bh, new_xh, &vb, meta_ac, ocfs2_get_xattr_value_root, NULL); if (ret) { mlog_errno(ret); goto out_commit; } ocfs2_journal_dirty(handle, new_blk_bh); if (!(new_oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) { new_di = (struct ocfs2_dinode *)args->new_bh->b_data; spin_lock(&new_oi->ip_lock); new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL; new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features); spin_unlock(&new_oi->ip_lock); ocfs2_journal_dirty(handle, args->new_bh); } out_commit: ocfs2_commit_trans(osb, handle); out: ocfs2_free_alloc_context(meta_ac); return ret; } struct ocfs2_reflink_xattr_tree_args { struct ocfs2_xattr_reflink *reflink; struct buffer_head *old_blk_bh; struct buffer_head *new_blk_bh; struct ocfs2_xattr_bucket *old_bucket; struct ocfs2_xattr_bucket *new_bucket; }; /* * NOTE: * We have to handle the case that both old bucket and new bucket * will call this function to get the right ret_bh. * So The caller must give us the right bh. 
*/ static int ocfs2_get_reflink_xattr_value_root(struct super_block *sb, struct buffer_head *bh, struct ocfs2_xattr_header *xh, int offset, struct ocfs2_xattr_value_root **xv, struct buffer_head **ret_bh, void *para) { struct ocfs2_reflink_xattr_tree_args *args = (struct ocfs2_reflink_xattr_tree_args *)para; struct ocfs2_xattr_bucket *bucket; if (bh == args->old_bucket->bu_bhs[0]) bucket = args->old_bucket; else bucket = args->new_bucket; return ocfs2_get_xattr_tree_value_root(sb, bucket, offset, xv, ret_bh); } struct ocfs2_value_tree_metas { int num_metas; int credits; int num_recs; }; static int ocfs2_value_tree_metas_in_bucket(struct super_block *sb, struct buffer_head *bh, struct ocfs2_xattr_header *xh, int offset, struct ocfs2_xattr_value_root **xv, struct buffer_head **ret_bh, void *para) { struct ocfs2_xattr_bucket *bucket = (struct ocfs2_xattr_bucket *)para; return ocfs2_get_xattr_tree_value_root(sb, bucket, offset, xv, ret_bh); } static int ocfs2_calc_value_tree_metas(struct inode *inode, struct ocfs2_xattr_bucket *bucket, void *para) { struct ocfs2_value_tree_metas *metas = (struct ocfs2_value_tree_metas *)para; struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)bucket->bu_bhs[0]->b_data; /* Add the credits for this bucket first. */ metas->credits += bucket->bu_blocks; return ocfs2_value_metas_in_xattr_header(inode->i_sb, bucket->bu_bhs[0], xh, &metas->num_metas, &metas->credits, &metas->num_recs, ocfs2_value_tree_metas_in_bucket, bucket); } /* * Given a xattr extent rec starting from blkno and having len clusters, * iterate all the buckets calculate how much metadata we need for reflinking * all the ocfs2_xattr_value_root and lock the allocators accordingly. */ static int ocfs2_lock_reflink_xattr_rec_allocators( struct ocfs2_reflink_xattr_tree_args *args, struct ocfs2_extent_tree *xt_et, u64 blkno, u32 len, int *credits, struct ocfs2_alloc_context **meta_ac, struct ocfs2_alloc_context **data_ac) { int ret, num_free_extents; struct ocfs2_value_tree_metas metas; struct ocfs2_super *osb = OCFS2_SB(args->reflink->old_inode->i_sb); struct ocfs2_refcount_block *rb; memset(&metas, 0, sizeof(metas)); ret = ocfs2_iterate_xattr_buckets(args->reflink->old_inode, blkno, len, ocfs2_calc_value_tree_metas, &metas); if (ret) { mlog_errno(ret); goto out; } *credits = metas.credits; /* * Calculate we need for refcount tree change. * * We need to add/modify num_recs in refcount tree, so just calculate * an approximate number we need for refcount tree change. * Sometimes we need to split the tree, and after split, half recs * will be moved to the new block, and a new block can only provide * half number of recs. So we multiple new blocks by 2. * In the end, we have to add credits for modifying the already * existed refcount block. */ rb = (struct ocfs2_refcount_block *)args->reflink->ref_root_bh->b_data; metas.num_recs = (metas.num_recs + ocfs2_refcount_recs_per_rb(osb->sb) - 1) / ocfs2_refcount_recs_per_rb(osb->sb) * 2; metas.num_metas += metas.num_recs; *credits += metas.num_recs + metas.num_recs * OCFS2_EXPAND_REFCOUNT_TREE_CREDITS; if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) *credits += le16_to_cpu(rb->rf_list.l_tree_depth) * le16_to_cpu(rb->rf_list.l_next_free_rec) + 1; else *credits += 1; /* count in the xattr tree change. 
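 * The xattr tree itself may need to grow to take the new extent records, so
 * reserve extra metadata when the tree is short on free extents and add the
 * credits for the extent insert.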
*/ num_free_extents = ocfs2_num_free_extents(xt_et); if (num_free_extents < 0) { ret = num_free_extents; mlog_errno(ret); goto out; } if (num_free_extents < len) metas.num_metas += ocfs2_extend_meta_needed(xt_et->et_root_el); *credits += ocfs2_calc_extend_credits(osb->sb, xt_et->et_root_el); if (metas.num_metas) { ret = ocfs2_reserve_new_metadata_blocks(osb, metas.num_metas, meta_ac); if (ret) { mlog_errno(ret); goto out; } } if (len) { ret = ocfs2_reserve_clusters(osb, len, data_ac); if (ret) mlog_errno(ret); } out: if (ret) { if (*meta_ac) { ocfs2_free_alloc_context(*meta_ac); *meta_ac = NULL; } } return ret; } static int ocfs2_reflink_xattr_bucket(handle_t *handle, u64 blkno, u64 new_blkno, u32 clusters, u32 *cpos, int num_buckets, struct ocfs2_alloc_context *meta_ac, struct ocfs2_alloc_context *data_ac, struct ocfs2_reflink_xattr_tree_args *args) { int i, j, ret = 0; struct super_block *sb = args->reflink->old_inode->i_sb; int bpb = args->old_bucket->bu_blocks; struct ocfs2_xattr_value_buf vb = { .vb_access = ocfs2_journal_access, }; for (i = 0; i < num_buckets; i++, blkno += bpb, new_blkno += bpb) { ret = ocfs2_read_xattr_bucket(args->old_bucket, blkno); if (ret) { mlog_errno(ret); break; } ret = ocfs2_init_xattr_bucket(args->new_bucket, new_blkno, 1); if (ret) { mlog_errno(ret); break; } ret = ocfs2_xattr_bucket_journal_access(handle, args->new_bucket, OCFS2_JOURNAL_ACCESS_CREATE); if (ret) { mlog_errno(ret); break; } for (j = 0; j < bpb; j++) memcpy(bucket_block(args->new_bucket, j), bucket_block(args->old_bucket, j), sb->s_blocksize); /* * Record the start cpos so that we can use it to initialize * our xattr tree we also set the xh_num_bucket for the new * bucket. */ if (i == 0) { *cpos = le32_to_cpu(bucket_xh(args->new_bucket)-> xh_entries[0].xe_name_hash); bucket_xh(args->new_bucket)->xh_num_buckets = cpu_to_le16(num_buckets); } ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket); ret = ocfs2_reflink_xattr_header(handle, args->reflink, args->old_bucket->bu_bhs[0], bucket_xh(args->old_bucket), args->new_bucket->bu_bhs[0], bucket_xh(args->new_bucket), &vb, meta_ac, ocfs2_get_reflink_xattr_value_root, args); if (ret) { mlog_errno(ret); break; } /* * Re-access and dirty the bucket to calculate metaecc. * Because we may extend the transaction in reflink_xattr_header * which will let the already accessed block gone. 
*/ ret = ocfs2_xattr_bucket_journal_access(handle, args->new_bucket, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); break; } ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket); ocfs2_xattr_bucket_relse(args->old_bucket); ocfs2_xattr_bucket_relse(args->new_bucket); } ocfs2_xattr_bucket_relse(args->old_bucket); ocfs2_xattr_bucket_relse(args->new_bucket); return ret; } static int ocfs2_reflink_xattr_buckets(handle_t *handle, struct inode *inode, struct ocfs2_reflink_xattr_tree_args *args, struct ocfs2_extent_tree *et, struct ocfs2_alloc_context *meta_ac, struct ocfs2_alloc_context *data_ac, u64 blkno, u32 cpos, u32 len) { int ret, first_inserted = 0; u32 p_cluster, num_clusters, reflink_cpos = 0; u64 new_blkno; unsigned int num_buckets, reflink_buckets; unsigned int bpc = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)); ret = ocfs2_read_xattr_bucket(args->old_bucket, blkno); if (ret) { mlog_errno(ret); goto out; } num_buckets = le16_to_cpu(bucket_xh(args->old_bucket)->xh_num_buckets); ocfs2_xattr_bucket_relse(args->old_bucket); while (len && num_buckets) { ret = ocfs2_claim_clusters(handle, data_ac, 1, &p_cluster, &num_clusters); if (ret) { mlog_errno(ret); goto out; } new_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster); reflink_buckets = min(num_buckets, bpc * num_clusters); ret = ocfs2_reflink_xattr_bucket(handle, blkno, new_blkno, num_clusters, &reflink_cpos, reflink_buckets, meta_ac, data_ac, args); if (ret) { mlog_errno(ret); goto out; } /* * For the 1st allocated cluster, we make it use the same cpos * so that the xattr tree looks the same as the original one * in the most case. */ if (!first_inserted) { reflink_cpos = cpos; first_inserted = 1; } ret = ocfs2_insert_extent(handle, et, reflink_cpos, new_blkno, num_clusters, 0, meta_ac); if (ret) mlog_errno(ret); trace_ocfs2_reflink_xattr_buckets((unsigned long long)new_blkno, num_clusters, reflink_cpos); len -= num_clusters; blkno += ocfs2_clusters_to_blocks(inode->i_sb, num_clusters); num_buckets -= reflink_buckets; } out: return ret; } /* * Create the same xattr extent record in the new inode's xattr tree. */ static int ocfs2_reflink_xattr_rec(struct inode *inode, struct buffer_head *root_bh, u64 blkno, u32 cpos, u32 len, void *para) { int ret, credits = 0; handle_t *handle; struct ocfs2_reflink_xattr_tree_args *args = (struct ocfs2_reflink_xattr_tree_args *)para; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_alloc_context *meta_ac = NULL; struct ocfs2_alloc_context *data_ac = NULL; struct ocfs2_extent_tree et; trace_ocfs2_reflink_xattr_rec((unsigned long long)blkno, len); ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(args->reflink->new_inode), args->new_blk_bh); ret = ocfs2_lock_reflink_xattr_rec_allocators(args, &et, blkno, len, &credits, &meta_ac, &data_ac); if (ret) { mlog_errno(ret); goto out; } handle = ocfs2_start_trans(osb, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out; } ret = ocfs2_reflink_xattr_buckets(handle, inode, args, &et, meta_ac, data_ac, blkno, cpos, len); if (ret) mlog_errno(ret); ocfs2_commit_trans(osb, handle); out: if (meta_ac) ocfs2_free_alloc_context(meta_ac); if (data_ac) ocfs2_free_alloc_context(data_ac); return ret; } /* * Create reflinked xattr buckets. * We will add bucket one by one, and refcount all the xattrs in the bucket * if they are stored outside. 
*/ static int ocfs2_reflink_xattr_tree(struct ocfs2_xattr_reflink *args, struct buffer_head *blk_bh, struct buffer_head *new_blk_bh) { int ret; struct ocfs2_reflink_xattr_tree_args para; memset(&para, 0, sizeof(para)); para.reflink = args; para.old_blk_bh = blk_bh; para.new_blk_bh = new_blk_bh; para.old_bucket = ocfs2_xattr_bucket_new(args->old_inode); if (!para.old_bucket) { mlog_errno(-ENOMEM); return -ENOMEM; } para.new_bucket = ocfs2_xattr_bucket_new(args->new_inode); if (!para.new_bucket) { ret = -ENOMEM; mlog_errno(ret); goto out; } ret = ocfs2_iterate_xattr_index_block(args->old_inode, blk_bh, ocfs2_reflink_xattr_rec, &para); if (ret) mlog_errno(ret); out: ocfs2_xattr_bucket_free(para.old_bucket); ocfs2_xattr_bucket_free(para.new_bucket); return ret; } static int ocfs2_reflink_xattr_in_block(struct ocfs2_xattr_reflink *args, struct buffer_head *blk_bh) { int ret, indexed = 0; struct buffer_head *new_blk_bh = NULL; struct ocfs2_xattr_block *xb = (struct ocfs2_xattr_block *)blk_bh->b_data; if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) indexed = 1; ret = ocfs2_create_empty_xattr_block(args->new_inode, args->new_bh, &new_blk_bh, indexed); if (ret) { mlog_errno(ret); goto out; } if (!indexed) ret = ocfs2_reflink_xattr_block(args, blk_bh, new_blk_bh); else ret = ocfs2_reflink_xattr_tree(args, blk_bh, new_blk_bh); if (ret) mlog_errno(ret); out: brelse(new_blk_bh); return ret; } static int ocfs2_reflink_xattr_no_security(struct ocfs2_xattr_entry *xe) { int type = ocfs2_xattr_get_type(xe); return type != OCFS2_XATTR_INDEX_SECURITY && type != OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS && type != OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT; } int ocfs2_reflink_xattrs(struct inode *old_inode, struct buffer_head *old_bh, struct inode *new_inode, struct buffer_head *new_bh, bool preserve_security) { int ret; struct ocfs2_xattr_reflink args; struct ocfs2_inode_info *oi = OCFS2_I(old_inode); struct ocfs2_dinode *di = (struct ocfs2_dinode *)old_bh->b_data; struct buffer_head *blk_bh = NULL; struct ocfs2_cached_dealloc_ctxt dealloc; struct ocfs2_refcount_tree *ref_tree; struct buffer_head *ref_root_bh = NULL; ret = ocfs2_lock_refcount_tree(OCFS2_SB(old_inode->i_sb), le64_to_cpu(di->i_refcount_loc), 1, &ref_tree, &ref_root_bh); if (ret) { mlog_errno(ret); goto out; } ocfs2_init_dealloc_ctxt(&dealloc); args.old_inode = old_inode; args.new_inode = new_inode; args.old_bh = old_bh; args.new_bh = new_bh; args.ref_ci = &ref_tree->rf_ci; args.ref_root_bh = ref_root_bh; args.dealloc = &dealloc; if (preserve_security) args.xattr_reflinked = NULL; else args.xattr_reflinked = ocfs2_reflink_xattr_no_security; if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) { ret = ocfs2_reflink_xattr_inline(&args); if (ret) { mlog_errno(ret); goto out_unlock; } } if (!di->i_xattr_loc) goto out_unlock; ret = ocfs2_read_xattr_block(old_inode, le64_to_cpu(di->i_xattr_loc), &blk_bh); if (ret < 0) { mlog_errno(ret); goto out_unlock; } ret = ocfs2_reflink_xattr_in_block(&args, blk_bh); if (ret) mlog_errno(ret); brelse(blk_bh); out_unlock: ocfs2_unlock_refcount_tree(OCFS2_SB(old_inode->i_sb), ref_tree, 1); brelse(ref_root_bh); if (ocfs2_dealloc_has_cluster(&dealloc)) { ocfs2_schedule_truncate_log_flush(OCFS2_SB(old_inode->i_sb), 1); ocfs2_run_deallocs(OCFS2_SB(old_inode->i_sb), &dealloc); } out: return ret; } /* * Initialize security and acl for a already created inode. * Used for reflink a non-preserve-security file. * * It uses common api like ocfs2_xattr_set, so the caller * must not hold any lock expect i_rwsem. 
*/ int ocfs2_init_security_and_acl(struct inode *dir, struct inode *inode, const struct qstr *qstr) { int ret = 0; struct buffer_head *dir_bh = NULL; ret = ocfs2_init_security_get(inode, dir, qstr, NULL); if (ret) { mlog_errno(ret); goto leave; } ret = ocfs2_inode_lock(dir, &dir_bh, 0); if (ret) { mlog_errno(ret); goto leave; } ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL); if (ret) mlog_errno(ret); ocfs2_inode_unlock(dir, 0); brelse(dir_bh); leave: return ret; } /* * 'security' attributes support */ static int ocfs2_xattr_security_get(const struct xattr_handler *handler, struct dentry *unused, struct inode *inode, const char *name, void *buffer, size_t size) { return ocfs2_xattr_get(inode, OCFS2_XATTR_INDEX_SECURITY, name, buffer, size); } static int ocfs2_xattr_security_set(const struct xattr_handler *handler, struct mnt_idmap *idmap, struct dentry *unused, struct inode *inode, const char *name, const void *value, size_t size, int flags) { return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY, name, value, size, flags); } static int ocfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array, void *fs_info) { struct ocfs2_security_xattr_info *si = fs_info; const struct xattr *xattr; int err = 0; if (si) { si->value = kmemdup(xattr_array->value, xattr_array->value_len, GFP_KERNEL); if (!si->value) return -ENOMEM; si->name = xattr_array->name; si->value_len = xattr_array->value_len; return 0; } for (xattr = xattr_array; xattr->name != NULL; xattr++) { err = ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY, xattr->name, xattr->value, xattr->value_len, XATTR_CREATE); if (err) break; } return err; } int ocfs2_init_security_get(struct inode *inode, struct inode *dir, const struct qstr *qstr, struct ocfs2_security_xattr_info *si) { int ret; /* check whether ocfs2 support feature xattr */ if (!ocfs2_supports_xattr(OCFS2_SB(dir->i_sb))) return -EOPNOTSUPP; if (si) { ret = security_inode_init_security(inode, dir, qstr, &ocfs2_initxattrs, si); /* * security_inode_init_security() does not return -EOPNOTSUPP, * we have to check the xattr ourselves. 
*/ if (!ret && !si->name) si->enable = 0; return ret; } return security_inode_init_security(inode, dir, qstr, &ocfs2_initxattrs, NULL); } int ocfs2_init_security_set(handle_t *handle, struct inode *inode, struct buffer_head *di_bh, struct ocfs2_security_xattr_info *si, struct ocfs2_alloc_context *xattr_ac, struct ocfs2_alloc_context *data_ac) { return ocfs2_xattr_set_handle(handle, inode, di_bh, OCFS2_XATTR_INDEX_SECURITY, si->name, si->value, si->value_len, 0, xattr_ac, data_ac); } const struct xattr_handler ocfs2_xattr_security_handler = { .prefix = XATTR_SECURITY_PREFIX, .get = ocfs2_xattr_security_get, .set = ocfs2_xattr_security_set, }; /* * 'trusted' attributes support */ static int ocfs2_xattr_trusted_get(const struct xattr_handler *handler, struct dentry *unused, struct inode *inode, const char *name, void *buffer, size_t size) { return ocfs2_xattr_get(inode, OCFS2_XATTR_INDEX_TRUSTED, name, buffer, size); } static int ocfs2_xattr_trusted_set(const struct xattr_handler *handler, struct mnt_idmap *idmap, struct dentry *unused, struct inode *inode, const char *name, const void *value, size_t size, int flags) { return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_TRUSTED, name, value, size, flags); } const struct xattr_handler ocfs2_xattr_trusted_handler = { .prefix = XATTR_TRUSTED_PREFIX, .get = ocfs2_xattr_trusted_get, .set = ocfs2_xattr_trusted_set, }; /* * 'user' attributes support */ static int ocfs2_xattr_user_get(const struct xattr_handler *handler, struct dentry *unused, struct inode *inode, const char *name, void *buffer, size_t size) { struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR) return -EOPNOTSUPP; return ocfs2_xattr_get(inode, OCFS2_XATTR_INDEX_USER, name, buffer, size); } static int ocfs2_xattr_user_set(const struct xattr_handler *handler, struct mnt_idmap *idmap, struct dentry *unused, struct inode *inode, const char *name, const void *value, size_t size, int flags) { struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR) return -EOPNOTSUPP; return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_USER, name, value, size, flags); } const struct xattr_handler ocfs2_xattr_user_handler = { .prefix = XATTR_USER_PREFIX, .get = ocfs2_xattr_user_get, .set = ocfs2_xattr_user_set, };
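/*
 * Illustrative sketch, not part of the kernel sources: the 'user' xattr
 * handler registered above is reached through the generic VFS xattr system
 * calls. Assuming an ocfs2 filesystem mounted at the hypothetical path
 * /mnt/ocfs2 (and not mounted with "nouser_xattr"), a userspace program such
 * as the one below ends up in ocfs2_xattr_user_set() and
 * ocfs2_xattr_user_get() for attribute names carrying the "user." prefix.
 */
#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/mnt/ocfs2/example.txt";	/* hypothetical test file */
	char buf[64];
	ssize_t len;

	/* Create the attribute; XATTR_CREATE makes this fail if it already exists. */
	if (setxattr(path, "user.comment", "hello", strlen("hello"), XATTR_CREATE)) {
		perror("setxattr");
		return 1;
	}

	/* Read the value back through the same handler. */
	len = getxattr(path, "user.comment", buf, sizeof(buf) - 1);
	if (len < 0) {
		perror("getxattr");
		return 1;
	}
	buf[len] = '\0';
	printf("user.comment = %s\n", buf);
	return 0;
}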
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) International Business Machines Corp., 2006 * * Author: Artem Bityutskiy (Битюцкий Артём) */ /* * UBI attaching sub-system. * * This sub-system is responsible for attaching MTD devices and it also * implements flash media scanning. * * The attaching information is represented by a &struct ubi_attach_info * object. Information about volumes is represented by &struct ubi_ainf_volume * objects which are kept in volume RB-tree with root at the @volumes field. * The RB-tree is indexed by the volume ID. * * Logical eraseblocks are represented by &struct ubi_ainf_peb objects. These * objects are kept in per-volume RB-trees with the root at the corresponding * &struct ubi_ainf_volume object. To put it differently, we keep an RB-tree of * per-volume objects and each of these objects is the root of an RB-tree of * per-LEB objects. * * Corrupted physical eraseblocks are put to the @corr list, free physical * eraseblocks are put to the @free list and the physical eraseblocks to be * erased are put to the @erase list. * * About corruptions * ~~~~~~~~~~~~~~~~~ * * UBI protects EC and VID headers with CRC-32 checksums, so it can detect * whether the headers are corrupted or not. Sometimes UBI also protects the * data with CRC-32, e.g., when it executes the atomic LEB change operation, or * when it moves the contents of a PEB for wear-leveling purposes. * * UBI tries to distinguish between 2 types of corruptions. * * 1. Corruptions caused by power cuts. These are expected corruptions and UBI * tries to handle them gracefully, without printing too many warnings and * error messages. The idea is that we do not lose important data in these * cases - we may lose only the data which were being written to the media just * before the power cut happened, and the upper layers (e.g., UBIFS) are * supposed to handle such data losses (e.g., by using the FS journal).
* * When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like * the reason is a power cut, UBI puts this PEB to the @erase list, and all * PEBs in the @erase list are scheduled for erasure later. * * 2. Unexpected corruptions which are not caused by power cuts. During * attaching, such PEBs are put to the @corr list and UBI preserves them. * Obviously, this lessens the amount of available PEBs, and if at some point * UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs * about such PEBs every time the MTD device is attached. * * However, it is difficult to reliably distinguish between these types of * corruptions and UBI's strategy is as follows (in case of attaching by * scanning). UBI assumes corruption type 2 if the VID header is corrupted and * the data area does not contain all 0xFFs, and there were no bit-flips or * integrity errors (e.g., ECC errors in case of NAND) while reading the data * area. Otherwise UBI assumes corruption type 1. So the decision criteria * are as follows. * o If the data area contains only 0xFFs, there are no data, and it is safe * to just erase this PEB - this is corruption type 1. * o If the data area has bit-flips or data integrity errors (ECC errors on * NAND), it is probably a PEB which was being erased when power cut * happened, so this is corruption type 1. However, this is just a guess, * which might be wrong. * o Otherwise this is corruption type 2. */ #include <linux/err.h> #include <linux/slab.h> #include <linux/crc32.h> #include <linux/math64.h> #include <linux/random.h> #include "ubi.h" static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai); #define AV_FIND BIT(0) #define AV_ADD BIT(1) #define AV_FIND_OR_ADD (AV_FIND | AV_ADD) /** * find_or_add_av - internal function to find a volume, add a volume or do * both (find and add if missing). * @ai: attaching information * @vol_id: the requested volume ID * @flags: a combination of the %AV_FIND and %AV_ADD flags describing the * expected operation. If only %AV_ADD is set, -EEXIST is returned * if the volume already exists. If only %AV_FIND is set, NULL is * returned if the volume does not exist. And if both flags are * set, the helper first tries to find an existing volume, and if * it does not exist it creates a new one. * @created: in value used to inform the caller whether it"s a newly created * volume or not. * * This function returns a pointer to a volume description or an ERR_PTR if * the operation failed. It can also return NULL if only %AV_FIND is set and * the volume does not exist. 
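 * For example (illustrative): find_or_add_av(ai, vol_id, AV_FIND, &created)
 * behaves as a pure lookup and may return NULL, a call with only AV_ADD fails
 * with ERR_PTR(-EEXIST) when the volume is already known, and AV_FIND_OR_ADD
 * creates the volume only when the lookup misses.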
*/ static struct ubi_ainf_volume *find_or_add_av(struct ubi_attach_info *ai, int vol_id, unsigned int flags, bool *created) { struct ubi_ainf_volume *av; struct rb_node **p = &ai->volumes.rb_node, *parent = NULL; /* Walk the volume RB-tree to look if this volume is already present */ while (*p) { parent = *p; av = rb_entry(parent, struct ubi_ainf_volume, rb); if (vol_id == av->vol_id) { *created = false; if (!(flags & AV_FIND)) return ERR_PTR(-EEXIST); return av; } if (vol_id > av->vol_id) p = &(*p)->rb_left; else p = &(*p)->rb_right; } if (!(flags & AV_ADD)) return NULL; /* The volume is absent - add it */ av = kzalloc(sizeof(*av), GFP_KERNEL); if (!av) return ERR_PTR(-ENOMEM); av->vol_id = vol_id; if (vol_id > ai->highest_vol_id) ai->highest_vol_id = vol_id; rb_link_node(&av->rb, parent, p); rb_insert_color(&av->rb, &ai->volumes); ai->vols_found += 1; *created = true; dbg_bld("added volume %d", vol_id); return av; } /** * ubi_find_or_add_av - search for a volume in the attaching information and * add one if it does not exist. * @ai: attaching information * @vol_id: the requested volume ID * @created: whether the volume has been created or not * * This function returns a pointer to the new volume description or an * ERR_PTR if the operation failed. */ static struct ubi_ainf_volume *ubi_find_or_add_av(struct ubi_attach_info *ai, int vol_id, bool *created) { return find_or_add_av(ai, vol_id, AV_FIND_OR_ADD, created); } /** * ubi_alloc_aeb - allocate an aeb element * @ai: attaching information * @pnum: physical eraseblock number * @ec: erase counter of the physical eraseblock * * Allocate an aeb object and initialize the pnum and ec information. * vol_id and lnum are set to UBI_UNKNOWN, and the other fields are * initialized to zero. * Note that the element is not added in any list or RB tree. */ struct ubi_ainf_peb *ubi_alloc_aeb(struct ubi_attach_info *ai, int pnum, int ec) { struct ubi_ainf_peb *aeb; aeb = kmem_cache_zalloc(ai->aeb_slab_cache, GFP_KERNEL); if (!aeb) return NULL; aeb->pnum = pnum; aeb->ec = ec; aeb->vol_id = UBI_UNKNOWN; aeb->lnum = UBI_UNKNOWN; return aeb; } /** * ubi_free_aeb - free an aeb element * @ai: attaching information * @aeb: the element to free * * Free an aeb object. The caller must have removed the element from any list * or RB tree. */ void ubi_free_aeb(struct ubi_attach_info *ai, struct ubi_ainf_peb *aeb) { kmem_cache_free(ai->aeb_slab_cache, aeb); } /** * add_to_list - add physical eraseblock to a list. * @ai: attaching information * @pnum: physical eraseblock number to add * @vol_id: the last used volume id for the PEB * @lnum: the last used LEB number for the PEB * @ec: erase counter of the physical eraseblock * @to_head: if not zero, add to the head of the list * @list: the list to add to * * This function allocates a 'struct ubi_ainf_peb' object for physical * eraseblock @pnum and adds it to the "free", "erase", or "alien" lists. * It stores the @lnum and @vol_id alongside, which can both be * %UBI_UNKNOWN if they are not available, not readable, or not assigned. * If @to_head is not zero, PEB will be added to the head of the list, which * basically means it will be processed first later. E.g., we add corrupted * PEBs (corrupted due to power cuts) to the head of the erase list to make * sure we erase them first and get rid of corruptions ASAP. This function * returns zero in case of success and a negative error code in case of * failure. 
*/ static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id, int lnum, int ec, int to_head, struct list_head *list) { struct ubi_ainf_peb *aeb; if (list == &ai->free) { dbg_bld("add to free: PEB %d, EC %d", pnum, ec); } else if (list == &ai->erase) { dbg_bld("add to erase: PEB %d, EC %d", pnum, ec); } else if (list == &ai->alien) { dbg_bld("add to alien: PEB %d, EC %d", pnum, ec); ai->alien_peb_count += 1; } else BUG(); aeb = ubi_alloc_aeb(ai, pnum, ec); if (!aeb) return -ENOMEM; aeb->vol_id = vol_id; aeb->lnum = lnum; if (to_head) list_add(&aeb->u.list, list); else list_add_tail(&aeb->u.list, list); return 0; } /** * add_corrupted - add a corrupted physical eraseblock. * @ai: attaching information * @pnum: physical eraseblock number to add * @ec: erase counter of the physical eraseblock * * This function allocates a 'struct ubi_ainf_peb' object for a corrupted * physical eraseblock @pnum and adds it to the 'corr' list. The corruption * was presumably not caused by a power cut. Returns zero in case of success * and a negative error code in case of failure. */ static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec) { struct ubi_ainf_peb *aeb; dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec); aeb = ubi_alloc_aeb(ai, pnum, ec); if (!aeb) return -ENOMEM; ai->corr_peb_count += 1; list_add(&aeb->u.list, &ai->corr); return 0; } /** * add_fastmap - add a Fastmap related physical eraseblock. * @ai: attaching information * @pnum: physical eraseblock number the VID header came from * @vid_hdr: the volume identifier header * @ec: erase counter of the physical eraseblock * * This function allocates a 'struct ubi_ainf_peb' object for a Fastamp * physical eraseblock @pnum and adds it to the 'fastmap' list. * Such blocks can be Fastmap super and data blocks from both the most * recent Fastmap we're attaching from or from old Fastmaps which will * be erased. */ static int add_fastmap(struct ubi_attach_info *ai, int pnum, struct ubi_vid_hdr *vid_hdr, int ec) { struct ubi_ainf_peb *aeb; aeb = ubi_alloc_aeb(ai, pnum, ec); if (!aeb) return -ENOMEM; aeb->vol_id = be32_to_cpu(vid_hdr->vol_id); aeb->sqnum = be64_to_cpu(vid_hdr->sqnum); list_add(&aeb->u.list, &ai->fastmap); dbg_bld("add to fastmap list: PEB %d, vol_id %d, sqnum: %llu", pnum, aeb->vol_id, aeb->sqnum); return 0; } /** * validate_vid_hdr - check volume identifier header. * @ubi: UBI device description object * @vid_hdr: the volume identifier header to check * @av: information about the volume this logical eraseblock belongs to * @pnum: physical eraseblock number the VID header came from * * This function checks that data stored in @vid_hdr is consistent. Returns * non-zero if an inconsistency was found and zero if not. * * Note, UBI does sanity check of everything it reads from the flash media. * Most of the checks are done in the I/O sub-system. Here we check that the * information in the VID header is consistent to the information in other VID * headers of the same volume. */ static int validate_vid_hdr(const struct ubi_device *ubi, const struct ubi_vid_hdr *vid_hdr, const struct ubi_ainf_volume *av, int pnum) { int vol_type = vid_hdr->vol_type; int vol_id = be32_to_cpu(vid_hdr->vol_id); int used_ebs = be32_to_cpu(vid_hdr->used_ebs); int data_pad = be32_to_cpu(vid_hdr->data_pad); if (av->leb_count != 0) { int av_vol_type; /* * This is not the first logical eraseblock belonging to this * volume. Ensure that the data in its VID header is consistent * to the data in previous logical eraseblock headers. 
*/ if (vol_id != av->vol_id) { ubi_err(ubi, "inconsistent vol_id"); goto bad; } if (av->vol_type == UBI_STATIC_VOLUME) av_vol_type = UBI_VID_STATIC; else av_vol_type = UBI_VID_DYNAMIC; if (vol_type != av_vol_type) { ubi_err(ubi, "inconsistent vol_type"); goto bad; } if (used_ebs != av->used_ebs) { ubi_err(ubi, "inconsistent used_ebs"); goto bad; } if (data_pad != av->data_pad) { ubi_err(ubi, "inconsistent data_pad"); goto bad; } } return 0; bad: ubi_err(ubi, "inconsistent VID header at PEB %d", pnum); ubi_dump_vid_hdr(vid_hdr); ubi_dump_av(av); return -EINVAL; } /** * add_volume - add volume to the attaching information. * @ai: attaching information * @vol_id: ID of the volume to add * @pnum: physical eraseblock number * @vid_hdr: volume identifier header * * If the volume corresponding to the @vid_hdr logical eraseblock is already * present in the attaching information, this function does nothing. Otherwise * it adds corresponding volume to the attaching information. Returns a pointer * to the allocated "av" object in case of success and a negative error code in * case of failure. */ static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai, int vol_id, int pnum, const struct ubi_vid_hdr *vid_hdr) { struct ubi_ainf_volume *av; bool created; ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id)); av = ubi_find_or_add_av(ai, vol_id, &created); if (IS_ERR(av) || !created) return av; av->used_ebs = be32_to_cpu(vid_hdr->used_ebs); av->data_pad = be32_to_cpu(vid_hdr->data_pad); av->compat = vid_hdr->compat; av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; return av; } /** * ubi_compare_lebs - find out which logical eraseblock is newer. * @ubi: UBI device description object * @aeb: first logical eraseblock to compare * @pnum: physical eraseblock number of the second logical eraseblock to * compare * @vid_hdr: volume identifier header of the second logical eraseblock * * This function compares 2 copies of a LEB and informs which one is newer. In * case of success this function returns a positive value, in case of failure, a * negative error code is returned. The success return codes use the following * bits: * o bit 0 is cleared: the first PEB (described by @aeb) is newer than the * second PEB (described by @pnum and @vid_hdr); * o bit 0 is set: the second PEB is newer; * o bit 1 is cleared: no bit-flips were detected in the newer LEB; * o bit 1 is set: bit-flips were detected in the newer LEB; * o bit 2 is cleared: the older LEB is not corrupted; * o bit 2 is set: the older LEB is corrupted. */ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb, int pnum, const struct ubi_vid_hdr *vid_hdr) { int len, err, second_is_newer, bitflips = 0, corrupted = 0; uint32_t data_crc, crc; struct ubi_vid_io_buf *vidb = NULL; unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum); if (sqnum2 == aeb->sqnum) { /* * This must be a really ancient UBI image which has been * created before sequence numbers support has been added. At * that times we used 32-bit LEB versions stored in logical * eraseblocks. That was before UBI got into mainline. We do not * support these images anymore. Well, those images still work, * but only if no unclean reboots happened. */ ubi_err(ubi, "unsupported on-flash UBI format"); return -EINVAL; } /* Obviously the LEB with lower sequence counter is older */ second_is_newer = (sqnum2 > aeb->sqnum); /* * Now we know which copy is newer. 
If the copy flag of the PEB with * newer version is not set, then we just return, otherwise we have to * check data CRC. For the second PEB we already have the VID header, * for the first one - we'll need to re-read it from flash. * * Note: this may be optimized so that we wouldn't read twice. */ if (second_is_newer) { if (!vid_hdr->copy_flag) { /* It is not a copy, so it is newer */ dbg_bld("second PEB %d is newer, copy_flag is unset", pnum); return 1; } } else { if (!aeb->copy_flag) { /* It is not a copy, so it is newer */ dbg_bld("first PEB %d is newer, copy_flag is unset", pnum); return bitflips << 1; } vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL); if (!vidb) return -ENOMEM; pnum = aeb->pnum; err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0); if (err) { if (err == UBI_IO_BITFLIPS) bitflips = 1; else { ubi_err(ubi, "VID of PEB %d header is bad, but it was OK earlier, err %d", pnum, err); if (err > 0) err = -EIO; goto out_free_vidh; } } vid_hdr = ubi_get_vid_hdr(vidb); } /* Read the data of the copy and check the CRC */ len = be32_to_cpu(vid_hdr->data_size); mutex_lock(&ubi->buf_mutex); err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len); if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) goto out_unlock; data_crc = be32_to_cpu(vid_hdr->data_crc); crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len); if (crc != data_crc) { dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x", pnum, crc, data_crc); corrupted = 1; bitflips = 0; second_is_newer = !second_is_newer; } else { dbg_bld("PEB %d CRC is OK", pnum); bitflips |= !!err; } mutex_unlock(&ubi->buf_mutex); ubi_free_vid_buf(vidb); if (second_is_newer) dbg_bld("second PEB %d is newer, copy_flag is set", pnum); else dbg_bld("first PEB %d is newer, copy_flag is set", pnum); return second_is_newer | (bitflips << 1) | (corrupted << 2); out_unlock: mutex_unlock(&ubi->buf_mutex); out_free_vidh: ubi_free_vid_buf(vidb); return err; } /** * ubi_add_to_av - add used physical eraseblock to the attaching information. * @ubi: UBI device description object * @ai: attaching information * @pnum: the physical eraseblock number * @ec: erase counter * @vid_hdr: the volume identifier header * @bitflips: if bit-flips were detected when this physical eraseblock was read * * This function adds information about a used physical eraseblock to the * 'used' tree of the corresponding volume. The function is rather complex * because it has to handle cases when this is not the first physical * eraseblock belonging to the same logical eraseblock, and the newer one has * to be picked, while the older one has to be dropped. This function returns * zero in case of success and a negative error code in case of failure. */ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum, int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips) { int err, vol_id, lnum; unsigned long long sqnum; struct ubi_ainf_volume *av; struct ubi_ainf_peb *aeb; struct rb_node **p, *parent = NULL; vol_id = be32_to_cpu(vid_hdr->vol_id); lnum = be32_to_cpu(vid_hdr->lnum); sqnum = be64_to_cpu(vid_hdr->sqnum); dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d", pnum, vol_id, lnum, ec, sqnum, bitflips); av = add_volume(ai, vol_id, pnum, vid_hdr); if (IS_ERR(av)) return PTR_ERR(av); if (ai->max_sqnum < sqnum) ai->max_sqnum = sqnum; /* * Walk the RB-tree of logical eraseblocks of volume @vol_id to look * if this is the first instance of this logical eraseblock or not. 
*/ p = &av->root.rb_node; while (*p) { int cmp_res; parent = *p; aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb); if (lnum != aeb->lnum) { if (lnum < aeb->lnum) p = &(*p)->rb_left; else p = &(*p)->rb_right; continue; } /* * There is already a physical eraseblock describing the same * logical eraseblock present. */ dbg_bld("this LEB already exists: PEB %d, sqnum %llu, EC %d", aeb->pnum, aeb->sqnum, aeb->ec); /* * Make sure that the logical eraseblocks have different * sequence numbers. Otherwise the image is bad. * * However, if the sequence number is zero, we assume it must * be an ancient UBI image from the era when UBI did not have * sequence numbers. We still can attach these images, unless * there is a need to distinguish between old and new * eraseblocks, in which case we'll refuse the image in * 'ubi_compare_lebs()'. In other words, we attach old clean * images, but refuse attaching old images with duplicated * logical eraseblocks because there was an unclean reboot. */ if (aeb->sqnum == sqnum && sqnum != 0) { ubi_err(ubi, "two LEBs with same sequence number %llu", sqnum); ubi_dump_aeb(aeb, 0); ubi_dump_vid_hdr(vid_hdr); return -EINVAL; } /* * Now we have to drop the older one and preserve the newer * one. */ cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr); if (cmp_res < 0) return cmp_res; if (cmp_res & 1) { /* * This logical eraseblock is newer than the one * found earlier. */ err = validate_vid_hdr(ubi, vid_hdr, av, pnum); if (err) return err; err = add_to_list(ai, aeb->pnum, aeb->vol_id, aeb->lnum, aeb->ec, cmp_res & 4, &ai->erase); if (err) return err; aeb->ec = ec; aeb->pnum = pnum; aeb->vol_id = vol_id; aeb->lnum = lnum; aeb->scrub = ((cmp_res & 2) || bitflips); aeb->copy_flag = vid_hdr->copy_flag; aeb->sqnum = sqnum; if (av->highest_lnum == lnum) av->last_data_size = be32_to_cpu(vid_hdr->data_size); return 0; } else { /* * This logical eraseblock is older than the one found * previously. */ return add_to_list(ai, pnum, vol_id, lnum, ec, cmp_res & 4, &ai->erase); } } /* * We've met this logical eraseblock for the first time, add it to the * attaching information. */ err = validate_vid_hdr(ubi, vid_hdr, av, pnum); if (err) return err; aeb = ubi_alloc_aeb(ai, pnum, ec); if (!aeb) return -ENOMEM; aeb->vol_id = vol_id; aeb->lnum = lnum; aeb->scrub = bitflips; aeb->copy_flag = vid_hdr->copy_flag; aeb->sqnum = sqnum; if (av->highest_lnum <= lnum) { av->highest_lnum = lnum; av->last_data_size = be32_to_cpu(vid_hdr->data_size); } av->leb_count += 1; rb_link_node(&aeb->u.rb, parent, p); rb_insert_color(&aeb->u.rb, &av->root); return 0; } /** * ubi_add_av - add volume to the attaching information. * @ai: attaching information * @vol_id: the requested volume ID * * This function returns a pointer to the new volume description or an * ERR_PTR if the operation failed. */ struct ubi_ainf_volume *ubi_add_av(struct ubi_attach_info *ai, int vol_id) { bool created; return find_or_add_av(ai, vol_id, AV_ADD, &created); } /** * ubi_find_av - find volume in the attaching information. * @ai: attaching information * @vol_id: the requested volume ID * * This function returns a pointer to the volume description or %NULL if there * are no data about this volume in the attaching information. 
*/ struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai, int vol_id) { bool created; return find_or_add_av((struct ubi_attach_info *)ai, vol_id, AV_FIND, &created); } static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av, struct list_head *list); /** * ubi_remove_av - delete attaching information about a volume. * @ai: attaching information * @av: the volume attaching information to delete */ void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av) { dbg_bld("remove attaching information about volume %d", av->vol_id); rb_erase(&av->rb, &ai->volumes); destroy_av(ai, av, &ai->erase); ai->vols_found -= 1; } /** * early_erase_peb - erase a physical eraseblock. * @ubi: UBI device description object * @ai: attaching information * @pnum: physical eraseblock number to erase; * @ec: erase counter value to write (%UBI_UNKNOWN if it is unknown) * * This function erases physical eraseblock 'pnum', and writes the erase * counter header to it. This function should only be used on UBI device * initialization stages, when the EBA sub-system had not been yet initialized. * This function returns zero in case of success and a negative error code in * case of failure. */ static int early_erase_peb(struct ubi_device *ubi, const struct ubi_attach_info *ai, int pnum, int ec) { int err; struct ubi_ec_hdr *ec_hdr; if ((long long)ec >= UBI_MAX_ERASECOUNTER) { /* * Erase counter overflow. Upgrade UBI and use 64-bit * erase counters internally. */ ubi_err(ubi, "erase counter overflow at PEB %d, EC %d", pnum, ec); return -EINVAL; } ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); if (!ec_hdr) return -ENOMEM; ec_hdr->ec = cpu_to_be64(ec); err = ubi_io_sync_erase(ubi, pnum, 0); if (err < 0) goto out_free; err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr); out_free: kfree(ec_hdr); return err; } /** * ubi_early_get_peb - get a free physical eraseblock. * @ubi: UBI device description object * @ai: attaching information * * This function returns a free physical eraseblock. It is supposed to be * called on the UBI initialization stages when the wear-leveling sub-system is * not initialized yet. This function picks a physical eraseblocks from one of * the lists, writes the EC header if it is needed, and removes it from the * list. * * This function returns a pointer to the "aeb" of the found free PEB in case * of success and an error code in case of failure. */ struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi, struct ubi_attach_info *ai) { int err = 0; struct ubi_ainf_peb *aeb, *tmp_aeb; if (!list_empty(&ai->free)) { aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list); list_del(&aeb->u.list); dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec); return aeb; } /* * We try to erase the first physical eraseblock from the erase list * and pick it if we succeed, or try to erase the next one if not. And * so forth. We don't want to take care about bad eraseblocks here - * they'll be handled later. */ list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) { if (aeb->ec == UBI_UNKNOWN) aeb->ec = ai->mean_ec; err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1); if (err) continue; aeb->ec += 1; list_del(&aeb->u.list); dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec); return aeb; } ubi_err(ubi, "no free eraseblocks"); return ERR_PTR(-ENOSPC); } /** * check_corruption - check the data area of PEB. 
* @ubi: UBI device description object * @vid_hdr: the (corrupted) VID header of this PEB * @pnum: the physical eraseblock number to check * * This is a helper function which is used to distinguish between VID header * corruptions caused by power cuts and other reasons. If the PEB contains only * 0xFF bytes in the data area, the VID header is most probably corrupted * because of a power cut (%0 is returned in this case). Otherwise, it was * probably corrupted for some other reasons (%1 is returned in this case). A * negative error code is returned if a read error occurred. * * If the corruption reason was a power cut, UBI can safely erase this PEB. * Otherwise, it should preserve it to avoid possibly destroying important * information. */ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr, int pnum) { int err; mutex_lock(&ubi->buf_mutex); memset(ubi->peb_buf, 0x00, ubi->leb_size); err = ubi_io_read(ubi, ubi->peb_buf, pnum, ubi->leb_start, ubi->leb_size); if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) { /* * Bit-flips or integrity errors while reading the data area. * It is difficult to say for sure what type of corruption is * this, but presumably a power cut happened while this PEB was * erased, so it became unstable and corrupted, and should be * erased. */ err = 0; goto out_unlock; } if (err) goto out_unlock; if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size)) goto out_unlock; ubi_err(ubi, "PEB %d contains corrupted VID header, and the data does not contain all 0xFF", pnum); ubi_err(ubi, "this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection"); ubi_dump_vid_hdr(vid_hdr); pr_err("hexdump of PEB %d offset %d, length %d", pnum, ubi->leb_start, ubi->leb_size); ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, ubi->peb_buf, ubi->leb_size, 1); err = 1; out_unlock: mutex_unlock(&ubi->buf_mutex); return err; } static bool vol_ignored(int vol_id) { switch (vol_id) { case UBI_LAYOUT_VOLUME_ID: return true; } #ifdef CONFIG_MTD_UBI_FASTMAP return ubi_is_fm_vol(vol_id); #else return false; #endif } /** * scan_peb - scan and process UBI headers of a PEB. * @ubi: UBI device description object * @ai: attaching information * @pnum: the physical eraseblock number * @fast: true if we're scanning for a Fastmap * * This function reads UBI headers of PEB @pnum, checks them, and adds * information about this PEB to the corresponding list or RB-tree in the * "attaching info" structure. Returns zero if the physical eraseblock was * successfully handled and a negative error code in case of failure. 
*/ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum, bool fast) { struct ubi_ec_hdr *ech = ai->ech; struct ubi_vid_io_buf *vidb = ai->vidb; struct ubi_vid_hdr *vidh = ubi_get_vid_hdr(vidb); long long ec; int err, bitflips = 0, vol_id = -1, ec_err = 0; dbg_bld("scan PEB %d", pnum); /* Skip bad physical eraseblocks */ err = ubi_io_is_bad(ubi, pnum); if (err < 0) return err; else if (err) { ai->bad_peb_count += 1; return 0; } err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); if (err < 0) return err; switch (err) { case 0: break; case UBI_IO_BITFLIPS: bitflips = 1; break; case UBI_IO_FF: ai->empty_peb_count += 1; return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN, UBI_UNKNOWN, 0, &ai->erase); case UBI_IO_FF_BITFLIPS: ai->empty_peb_count += 1; return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN, UBI_UNKNOWN, 1, &ai->erase); case UBI_IO_BAD_HDR_EBADMSG: case UBI_IO_BAD_HDR: /* * We have to also look at the VID header, possibly it is not * corrupted. Set %bitflips flag in order to make this PEB be * moved and EC be re-created. */ ec_err = err; ec = UBI_UNKNOWN; bitflips = 1; break; default: ubi_err(ubi, "'ubi_io_read_ec_hdr()' returned unknown code %d", err); return -EINVAL; } if (!ec_err) { int image_seq; /* Make sure UBI version is OK */ if (ech->version != UBI_VERSION) { ubi_err(ubi, "this UBI version is %d, image version is %d", UBI_VERSION, (int)ech->version); return -EINVAL; } ec = be64_to_cpu(ech->ec); if (ec > UBI_MAX_ERASECOUNTER) { /* * Erase counter overflow. The EC headers have 64 bits * reserved, but we anyway make use of only 31 bit * values, as this seems to be enough for any existing * flash. Upgrade UBI and use 64-bit erase counters * internally. */ ubi_err(ubi, "erase counter overflow, max is %d", UBI_MAX_ERASECOUNTER); ubi_dump_ec_hdr(ech); return -EINVAL; } /* * Make sure that all PEBs have the same image sequence number. * This allows us to detect situations when users flash UBI * images incorrectly, so that the flash has the new UBI image * and leftovers from the old one. This feature was added * relatively recently, and the sequence number was always * zero, because old UBI implementations always set it to zero. * For this reasons, we do not panic if some PEBs have zero * sequence number, while other PEBs have non-zero sequence * number. */ image_seq = be32_to_cpu(ech->image_seq); if (!ubi->image_seq) ubi->image_seq = image_seq; if (image_seq && ubi->image_seq != image_seq) { ubi_err(ubi, "bad image sequence number %d in PEB %d, expected %d", image_seq, pnum, ubi->image_seq); ubi_dump_ec_hdr(ech); return -EINVAL; } } /* OK, we've done with the EC header, let's look at the VID header */ err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0); if (err < 0) return err; switch (err) { case 0: break; case UBI_IO_BITFLIPS: bitflips = 1; break; case UBI_IO_BAD_HDR_EBADMSG: if (ec_err == UBI_IO_BAD_HDR_EBADMSG) /* * Both EC and VID headers are corrupted and were read * with data integrity error, probably this is a bad * PEB, bit it is not marked as bad yet. This may also * be a result of power cut during erasure. */ ai->maybe_bad_peb_count += 1; fallthrough; case UBI_IO_BAD_HDR: /* * If we're facing a bad VID header we have to drop *all* * Fastmap data structures we find. The most recent Fastmap * could be bad and therefore there is a chance that we attach * from an old one. On a fine MTD stack a PEB must not render * bad all of a sudden, but the reality is different. 
* So, let's be paranoid and help finding the root cause by * falling back to scanning mode instead of attaching with a * bad EBA table and cause data corruption which is hard to * analyze. */ if (fast) ai->force_full_scan = 1; if (ec_err) /* * Both headers are corrupted. There is a possibility * that this a valid UBI PEB which has corresponding * LEB, but the headers are corrupted. However, it is * impossible to distinguish it from a PEB which just * contains garbage because of a power cut during erase * operation. So we just schedule this PEB for erasure. * * Besides, in case of NOR flash, we deliberately * corrupt both headers because NOR flash erasure is * slow and can start from the end. */ err = 0; else /* * The EC was OK, but the VID header is corrupted. We * have to check what is in the data area. */ err = check_corruption(ubi, vidh, pnum); if (err < 0) return err; else if (!err) /* This corruption is caused by a power cut */ err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN, ec, 1, &ai->erase); else /* This is an unexpected corruption */ err = add_corrupted(ai, pnum, ec); if (err) return err; goto adjust_mean_ec; case UBI_IO_FF_BITFLIPS: err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN, ec, 1, &ai->erase); if (err) return err; goto adjust_mean_ec; case UBI_IO_FF: if (ec_err || bitflips) err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN, ec, 1, &ai->erase); else err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN, ec, 0, &ai->free); if (err) return err; goto adjust_mean_ec; default: ubi_err(ubi, "'ubi_io_read_vid_hdr()' returned unknown code %d", err); return -EINVAL; } vol_id = be32_to_cpu(vidh->vol_id); if (vol_id > UBI_MAX_VOLUMES && !vol_ignored(vol_id)) { int lnum = be32_to_cpu(vidh->lnum); /* Unsupported internal volume */ switch (vidh->compat) { case UBI_COMPAT_DELETE: ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it", vol_id, lnum); err = add_to_list(ai, pnum, vol_id, lnum, ec, 1, &ai->erase); if (err) return err; return 0; case UBI_COMPAT_RO: ubi_msg(ubi, "read-only compatible internal volume %d:%d found, switch to read-only mode", vol_id, lnum); ubi->ro_mode = 1; break; case UBI_COMPAT_PRESERVE: ubi_msg(ubi, "\"preserve\" compatible internal volume %d:%d found", vol_id, lnum); err = add_to_list(ai, pnum, vol_id, lnum, ec, 0, &ai->alien); if (err) return err; return 0; case UBI_COMPAT_REJECT: ubi_err(ubi, "incompatible internal volume %d:%d found", vol_id, lnum); return -EINVAL; } } if (ec_err) ubi_warn(ubi, "valid VID header but corrupted EC header at PEB %d", pnum); if (ubi_is_fm_vol(vol_id)) err = add_fastmap(ai, pnum, vidh, ec); else err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips); if (err) return err; adjust_mean_ec: if (!ec_err) { ai->ec_sum += ec; ai->ec_count += 1; if (ec > ai->max_ec) ai->max_ec = ec; if (ec < ai->min_ec) ai->min_ec = ec; } return 0; } /** * late_analysis - analyze the overall situation with PEB. * @ubi: UBI device description object * @ai: attaching information * * This is a helper function which takes a look what PEBs we have after we * gather information about all of them ("ai" is compete). It decides whether * the flash is empty and should be formatted of whether there are too many * corrupted PEBs and we should not attach this MTD device. Returns zero if we * should proceed with attaching the MTD device, and %-EINVAL if we should not. 
*/ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai) { struct ubi_ainf_peb *aeb; int max_corr, peb_count; peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count; max_corr = peb_count / 20 ?: 8; /* * Few corrupted PEBs is not a problem and may be just a result of * unclean reboots. However, many of them may indicate some problems * with the flash HW or driver. */ if (ai->corr_peb_count) { ubi_err(ubi, "%d PEBs are corrupted and preserved", ai->corr_peb_count); pr_err("Corrupted PEBs are:"); list_for_each_entry(aeb, &ai->corr, u.list) pr_cont(" %d", aeb->pnum); pr_cont("\n"); /* * If too many PEBs are corrupted, we refuse attaching, * otherwise, only print a warning. */ if (ai->corr_peb_count >= max_corr) { ubi_err(ubi, "too many corrupted PEBs, refusing"); return -EINVAL; } } if (ai->empty_peb_count + ai->maybe_bad_peb_count == peb_count) { /* * All PEBs are empty, or almost all - a couple PEBs look like * they may be bad PEBs which were not marked as bad yet. * * This piece of code basically tries to distinguish between * the following situations: * * 1. Flash is empty, but there are few bad PEBs, which are not * marked as bad so far, and which were read with error. We * want to go ahead and format this flash. While formatting, * the faulty PEBs will probably be marked as bad. * * 2. Flash contains non-UBI data and we do not want to format * it and destroy possibly important information. */ if (ai->maybe_bad_peb_count <= 2) { ai->is_empty = 1; ubi_msg(ubi, "empty MTD device detected"); get_random_bytes(&ubi->image_seq, sizeof(ubi->image_seq)); } else { ubi_err(ubi, "MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it"); return -EINVAL; } } return 0; } /** * destroy_av - free volume attaching information. * @av: volume attaching information * @ai: attaching information * @list: put the aeb elements in there if !NULL, otherwise free them * * This function destroys the volume attaching information. */ static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av, struct list_head *list) { struct ubi_ainf_peb *aeb; struct rb_node *this = av->root.rb_node; while (this) { if (this->rb_left) this = this->rb_left; else if (this->rb_right) this = this->rb_right; else { aeb = rb_entry(this, struct ubi_ainf_peb, u.rb); this = rb_parent(this); if (this) { if (this->rb_left == &aeb->u.rb) this->rb_left = NULL; else this->rb_right = NULL; } if (list) list_add_tail(&aeb->u.list, list); else ubi_free_aeb(ai, aeb); } } kfree(av); } /** * destroy_ai - destroy attaching information. 
* @ai: attaching information */ static void destroy_ai(struct ubi_attach_info *ai) { struct ubi_ainf_peb *aeb, *aeb_tmp; struct ubi_ainf_volume *av; struct rb_node *rb; list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) { list_del(&aeb->u.list); ubi_free_aeb(ai, aeb); } list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) { list_del(&aeb->u.list); ubi_free_aeb(ai, aeb); } list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) { list_del(&aeb->u.list); ubi_free_aeb(ai, aeb); } list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) { list_del(&aeb->u.list); ubi_free_aeb(ai, aeb); } list_for_each_entry_safe(aeb, aeb_tmp, &ai->fastmap, u.list) { list_del(&aeb->u.list); ubi_free_aeb(ai, aeb); } /* Destroy the volume RB-tree */ rb = ai->volumes.rb_node; while (rb) { if (rb->rb_left) rb = rb->rb_left; else if (rb->rb_right) rb = rb->rb_right; else { av = rb_entry(rb, struct ubi_ainf_volume, rb); rb = rb_parent(rb); if (rb) { if (rb->rb_left == &av->rb) rb->rb_left = NULL; else rb->rb_right = NULL; } destroy_av(ai, av, NULL); } } kmem_cache_destroy(ai->aeb_slab_cache); kfree(ai); } /** * scan_all - scan entire MTD device. * @ubi: UBI device description object * @ai: attach info object * @start: start scanning at this PEB * * This function does full scanning of an MTD device and returns complete * information about it in form of a "struct ubi_attach_info" object. In case * of failure, an error code is returned. */ static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai, int start) { int err, pnum; struct rb_node *rb1, *rb2; struct ubi_ainf_volume *av; struct ubi_ainf_peb *aeb; err = -ENOMEM; ai->ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); if (!ai->ech) return err; ai->vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL); if (!ai->vidb) goto out_ech; for (pnum = start; pnum < ubi->peb_count; pnum++) { cond_resched(); dbg_gen("process PEB %d", pnum); err = scan_peb(ubi, ai, pnum, false); if (err < 0) goto out_vidh; } ubi_msg(ubi, "scanning is finished"); /* Calculate mean erase counter */ if (ai->ec_count) ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count); err = late_analysis(ubi, ai); if (err) goto out_vidh; /* * In case of unknown erase counter we use the mean erase counter * value. */ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) if (aeb->ec == UBI_UNKNOWN) aeb->ec = ai->mean_ec; } list_for_each_entry(aeb, &ai->free, u.list) { if (aeb->ec == UBI_UNKNOWN) aeb->ec = ai->mean_ec; } list_for_each_entry(aeb, &ai->corr, u.list) if (aeb->ec == UBI_UNKNOWN) aeb->ec = ai->mean_ec; list_for_each_entry(aeb, &ai->erase, u.list) if (aeb->ec == UBI_UNKNOWN) aeb->ec = ai->mean_ec; err = self_check_ai(ubi, ai); if (err) goto out_vidh; ubi_free_vid_buf(ai->vidb); kfree(ai->ech); return 0; out_vidh: ubi_free_vid_buf(ai->vidb); out_ech: kfree(ai->ech); return err; } static struct ubi_attach_info *alloc_ai(const char *slab_name) { struct ubi_attach_info *ai; ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL); if (!ai) return ai; INIT_LIST_HEAD(&ai->corr); INIT_LIST_HEAD(&ai->free); INIT_LIST_HEAD(&ai->erase); INIT_LIST_HEAD(&ai->alien); INIT_LIST_HEAD(&ai->fastmap); ai->volumes = RB_ROOT; ai->aeb_slab_cache = kmem_cache_create(slab_name, sizeof(struct ubi_ainf_peb), 0, 0, NULL); if (!ai->aeb_slab_cache) { kfree(ai); ai = NULL; } return ai; } #ifdef CONFIG_MTD_UBI_FASTMAP /** * scan_fast - try to find a fastmap and attach from it. 
* @ubi: UBI device description object * @ai: attach info object * * Returns 0 on success, negative return values indicate an internal * error. * UBI_NO_FASTMAP denotes that no fastmap was found. * UBI_BAD_FASTMAP denotes that the found fastmap was invalid. */ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai) { int err, pnum; struct ubi_attach_info *scan_ai; err = -ENOMEM; scan_ai = alloc_ai("ubi_aeb_slab_cache_fastmap"); if (!scan_ai) goto out; scan_ai->ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); if (!scan_ai->ech) goto out_ai; scan_ai->vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL); if (!scan_ai->vidb) goto out_ech; for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) { cond_resched(); dbg_gen("process PEB %d", pnum); err = scan_peb(ubi, scan_ai, pnum, true); if (err < 0) goto out_vidh; } ubi_free_vid_buf(scan_ai->vidb); kfree(scan_ai->ech); if (scan_ai->force_full_scan) err = UBI_NO_FASTMAP; else err = ubi_scan_fastmap(ubi, *ai, scan_ai); if (err) { /* * Didn't attach via fastmap, do a full scan but reuse what * we've aready scanned. */ destroy_ai(*ai); *ai = scan_ai; } else destroy_ai(scan_ai); return err; out_vidh: ubi_free_vid_buf(scan_ai->vidb); out_ech: kfree(scan_ai->ech); out_ai: destroy_ai(scan_ai); out: return err; } #endif /** * ubi_attach - attach an MTD device. * @ubi: UBI device descriptor * @force_scan: if set to non-zero attach by scanning * * This function returns zero in case of success and a negative error code in * case of failure. */ int ubi_attach(struct ubi_device *ubi, int force_scan) { int err; struct ubi_attach_info *ai; ai = alloc_ai("ubi_aeb_slab_cache"); if (!ai) return -ENOMEM; #ifdef CONFIG_MTD_UBI_FASTMAP /* On small flash devices we disable fastmap in any case. */ if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) { ubi->fm_disabled = 1; force_scan = 1; } if (force_scan) err = scan_all(ubi, ai, 0); else { err = scan_fast(ubi, &ai); if (err > 0 || mtd_is_eccerr(err)) { if (err != UBI_NO_FASTMAP) { destroy_ai(ai); ai = alloc_ai("ubi_aeb_slab_cache"); if (!ai) return -ENOMEM; err = scan_all(ubi, ai, 0); } else { err = scan_all(ubi, ai, UBI_FM_MAX_START); } } } #else err = scan_all(ubi, ai, 0); #endif if (err) goto out_ai; ubi->bad_peb_count = ai->bad_peb_count; ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count; ubi->corr_peb_count = ai->corr_peb_count; ubi->max_ec = ai->max_ec; ubi->mean_ec = ai->mean_ec; dbg_gen("max. sequence number: %llu", ai->max_sqnum); err = ubi_read_volume_table(ubi, ai); if (err) goto out_ai; err = ubi_wl_init(ubi, ai); if (err) goto out_vtbl; err = ubi_eba_init(ubi, ai); if (err) goto out_wl; #ifdef CONFIG_MTD_UBI_FASTMAP if (ubi->fm && ubi_dbg_chk_fastmap(ubi)) { struct ubi_attach_info *scan_ai; scan_ai = alloc_ai("ubi_aeb_slab_cache_dbg_chk_fastmap"); if (!scan_ai) { err = -ENOMEM; goto out_wl; } err = scan_all(ubi, scan_ai, 0); if (err) { destroy_ai(scan_ai); goto out_wl; } err = self_check_eba(ubi, ai, scan_ai); destroy_ai(scan_ai); if (err) goto out_wl; } #endif destroy_ai(ai); return 0; out_wl: ubi_wl_close(ubi); out_vtbl: ubi_free_all_volumes(ubi); vfree(ubi->vtbl); out_ai: destroy_ai(ai); return err; } /** * self_check_ai - check the attaching information. * @ubi: UBI device description object * @ai: attaching information * * This function returns zero if the attaching information is all right, and a * negative error code if not or if an error occurred. 
*/ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai) { struct ubi_vid_io_buf *vidb = ai->vidb; struct ubi_vid_hdr *vidh = ubi_get_vid_hdr(vidb); int pnum, err, vols_found = 0; struct rb_node *rb1, *rb2; struct ubi_ainf_volume *av; struct ubi_ainf_peb *aeb, *last_aeb; uint8_t *buf; if (!ubi_dbg_chk_gen(ubi)) return 0; /* * At first, check that attaching information is OK. */ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { int leb_count = 0; cond_resched(); vols_found += 1; if (ai->is_empty) { ubi_err(ubi, "bad is_empty flag"); goto bad_av; } if (av->vol_id < 0 || av->highest_lnum < 0 || av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 || av->data_pad < 0 || av->last_data_size < 0) { ubi_err(ubi, "negative values"); goto bad_av; } if (av->vol_id >= UBI_MAX_VOLUMES && av->vol_id < UBI_INTERNAL_VOL_START) { ubi_err(ubi, "bad vol_id"); goto bad_av; } if (av->vol_id > ai->highest_vol_id) { ubi_err(ubi, "highest_vol_id is %d, but vol_id %d is there", ai->highest_vol_id, av->vol_id); goto out; } if (av->vol_type != UBI_DYNAMIC_VOLUME && av->vol_type != UBI_STATIC_VOLUME) { ubi_err(ubi, "bad vol_type"); goto bad_av; } if (av->data_pad > ubi->leb_size / 2) { ubi_err(ubi, "bad data_pad"); goto bad_av; } last_aeb = NULL; ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) { cond_resched(); last_aeb = aeb; leb_count += 1; if (aeb->pnum < 0 || aeb->ec < 0) { ubi_err(ubi, "negative values"); goto bad_aeb; } if (aeb->ec < ai->min_ec) { ubi_err(ubi, "bad ai->min_ec (%d), %d found", ai->min_ec, aeb->ec); goto bad_aeb; } if (aeb->ec > ai->max_ec) { ubi_err(ubi, "bad ai->max_ec (%d), %d found", ai->max_ec, aeb->ec); goto bad_aeb; } if (aeb->pnum >= ubi->peb_count) { ubi_err(ubi, "too high PEB number %d, total PEBs %d", aeb->pnum, ubi->peb_count); goto bad_aeb; } if (av->vol_type == UBI_STATIC_VOLUME) { if (aeb->lnum >= av->used_ebs) { ubi_err(ubi, "bad lnum or used_ebs"); goto bad_aeb; } } else { if (av->used_ebs != 0) { ubi_err(ubi, "non-zero used_ebs"); goto bad_aeb; } } if (aeb->lnum > av->highest_lnum) { ubi_err(ubi, "incorrect highest_lnum or lnum"); goto bad_aeb; } } if (av->leb_count != leb_count) { ubi_err(ubi, "bad leb_count, %d objects in the tree", leb_count); goto bad_av; } if (!last_aeb) continue; aeb = last_aeb; if (aeb->lnum != av->highest_lnum) { ubi_err(ubi, "bad highest_lnum"); goto bad_aeb; } } if (vols_found != ai->vols_found) { ubi_err(ubi, "bad ai->vols_found %d, should be %d", ai->vols_found, vols_found); goto out; } /* Check that attaching information is correct */ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { last_aeb = NULL; ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) { int vol_type; cond_resched(); last_aeb = aeb; err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidb, 1); if (err && err != UBI_IO_BITFLIPS) { ubi_err(ubi, "VID header is not OK (%d)", err); if (err > 0) err = -EIO; return err; } vol_type = vidh->vol_type == UBI_VID_DYNAMIC ? 
UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; if (av->vol_type != vol_type) { ubi_err(ubi, "bad vol_type"); goto bad_vid_hdr; } if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) { ubi_err(ubi, "bad sqnum %llu", aeb->sqnum); goto bad_vid_hdr; } if (av->vol_id != be32_to_cpu(vidh->vol_id)) { ubi_err(ubi, "bad vol_id %d", av->vol_id); goto bad_vid_hdr; } if (av->compat != vidh->compat) { ubi_err(ubi, "bad compat %d", vidh->compat); goto bad_vid_hdr; } if (aeb->lnum != be32_to_cpu(vidh->lnum)) { ubi_err(ubi, "bad lnum %d", aeb->lnum); goto bad_vid_hdr; } if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) { ubi_err(ubi, "bad used_ebs %d", av->used_ebs); goto bad_vid_hdr; } if (av->data_pad != be32_to_cpu(vidh->data_pad)) { ubi_err(ubi, "bad data_pad %d", av->data_pad); goto bad_vid_hdr; } } if (!last_aeb) continue; if (av->highest_lnum != be32_to_cpu(vidh->lnum)) { ubi_err(ubi, "bad highest_lnum %d", av->highest_lnum); goto bad_vid_hdr; } if (av->last_data_size != be32_to_cpu(vidh->data_size)) { ubi_err(ubi, "bad last_data_size %d", av->last_data_size); goto bad_vid_hdr; } } /* * Make sure that all the physical eraseblocks are in one of the lists * or trees. */ buf = kzalloc(ubi->peb_count, GFP_KERNEL); if (!buf) return -ENOMEM; for (pnum = 0; pnum < ubi->peb_count; pnum++) { err = ubi_io_is_bad(ubi, pnum); if (err < 0) { kfree(buf); return err; } else if (err) buf[pnum] = 1; } ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) buf[aeb->pnum] = 1; list_for_each_entry(aeb, &ai->free, u.list) buf[aeb->pnum] = 1; list_for_each_entry(aeb, &ai->corr, u.list) buf[aeb->pnum] = 1; list_for_each_entry(aeb, &ai->erase, u.list) buf[aeb->pnum] = 1; list_for_each_entry(aeb, &ai->alien, u.list) buf[aeb->pnum] = 1; err = 0; for (pnum = 0; pnum < ubi->peb_count; pnum++) if (!buf[pnum]) { ubi_err(ubi, "PEB %d is not referred", pnum); err = 1; } kfree(buf); if (err) goto out; return 0; bad_aeb: ubi_err(ubi, "bad attaching information about LEB %d", aeb->lnum); ubi_dump_aeb(aeb, 0); ubi_dump_av(av); goto out; bad_av: ubi_err(ubi, "bad attaching information about volume %d", av->vol_id); ubi_dump_av(av); goto out; bad_vid_hdr: ubi_err(ubi, "bad attaching information about volume %d", av->vol_id); ubi_dump_av(av); ubi_dump_vid_hdr(vidh); out: dump_stack(); return -EINVAL; }
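/*
 * Editor's illustrative sketch (not part of the kernel source): the
 * ubi_compare_lebs() kerneldoc above encodes its verdict in the low three
 * bits of a non-negative return value, and ubi_add_to_av() tests those bits
 * with "cmp_res & 1", "& 2" and "& 4". A hypothetical stand-alone decoder
 * could look like the code below; the struct and function names are
 * assumptions made for this example only.
 */
#include <stdio.h>
#include <stdbool.h>

struct leb_cmp_verdict {
	bool second_is_newer;	/* bit 0: the second PEB holds the newer copy */
	bool newer_has_bitflips;	/* bit 1: bit-flips were seen in the newer LEB */
	bool older_is_corrupted;	/* bit 2: the older LEB failed its data CRC */
};

static struct leb_cmp_verdict decode_cmp_result(int res)
{
	struct leb_cmp_verdict v = {
		.second_is_newer    = res & 1,
		.newer_has_bitflips = res & 2,
		.older_is_corrupted = res & 4,
	};
	return v;
}

int main(void)
{
	/* Example: 0b101 means "second is newer, no bit-flips, older corrupted". */
	struct leb_cmp_verdict v = decode_cmp_result(5);

	printf("second newer: %d, bitflips: %d, older corrupted: %d\n",
	       v.second_is_newer, v.newer_has_bitflips, v.older_is_corrupted);
	return 0;
}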
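/*
 * Editor's illustrative sketch (not part of the kernel source): the
 * "About corruptions" comment and check_corruption() above classify a PEB
 * with a bad VID header as a power-cut corruption when its data area is
 * still all 0xFF or could not be read cleanly, and as an unexpected
 * corruption (to be preserved on @corr) otherwise. The stand-alone helpers
 * below mirror that decision in plain C; the function names are assumptions
 * for this example, not kernel APIs.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Return true if every byte in the buffer is still in the erased (0xFF) state. */
static bool data_area_is_erased(const uint8_t *buf, size_t len)
{
	for (size_t i = 0; i < len; i++)
		if (buf[i] != 0xFF)
			return false;
	return true;
}

/*
 * Classify a PEB whose VID header failed its CRC check:
 * returns 0 for "power cut, just erase it" and 1 for "preserve it".
 * @read_had_errors stands in for bit-flips/ECC errors seen while reading.
 */
static int classify_corruption(const uint8_t *data, size_t len, bool read_had_errors)
{
	if (read_had_errors)
		return 0;	/* probably an interrupted erasure - schedule for erase */
	if (data_area_is_erased(data, len))
		return 0;	/* no payload was ever written - safe to erase */
	return 1;		/* real data under a bad header - keep it */
}

int main(void)
{
	uint8_t erased[4]  = { 0xFF, 0xFF, 0xFF, 0xFF };
	uint8_t written[4] = { 0xFF, 0x12, 0x34, 0xFF };

	printf("erased area  -> %d (0 = erase)\n",
	       classify_corruption(erased, sizeof(erased), false));
	printf("written area -> %d (1 = preserve)\n",
	       classify_corruption(written, sizeof(written), false));
	return 0;
}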
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2007 Patrick McHardy <kaber@trash.net>
 *
 * The code this is based on carried the following copyright notice:
 * ---
 * (C) Copyright 2001-2006
 * Alex Zeffertt, Cambridge Broadband Ltd, ajz@cambridgebroadband.com
 * Re-worked by Ben Greear <greearb@candelatech.com>
 * ---
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/rculist.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/if_link.h>
#include <linux/if_macvlan.h>
#include <linux/hash.h>
#include <linux/workqueue.h>
#include <net/rtnetlink.h>
#include <net/xfrm.h>
#include <linux/netpoll.h>
#include <linux/phy.h>

#define MACVLAN_HASH_BITS	8
#define MACVLAN_HASH_SIZE	(1<<MACVLAN_HASH_BITS)
#define MACVLAN_DEFAULT_BC_QUEUE_LEN	1000

#define MACVLAN_F_PASSTHRU	1
#define MACVLAN_F_ADDRCHANGE	2

struct macvlan_port {
	struct net_device	*dev;
	struct hlist_head	vlan_hash[MACVLAN_HASH_SIZE];
	struct list_head	vlans;
	struct sk_buff_head	bc_queue;
	struct work_struct	bc_work;
	u32			bc_queue_len_used;
	int			bc_cutoff;
	u32			flags;
	int			count;
	struct hlist_head	vlan_source_hash[MACVLAN_HASH_SIZE];
	DECLARE_BITMAP(bc_filter, MACVLAN_MC_FILTER_SZ);
	DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
	unsigned char		perm_addr[ETH_ALEN];
};

struct macvlan_source_entry {
	struct hlist_node	hlist;
	struct macvlan_dev	*vlan;
	unsigned char		addr[6+2] __aligned(sizeof(u16));
	struct rcu_head		rcu;
};

struct macvlan_skb_cb {
	const
struct macvlan_dev *src; }; #define MACVLAN_SKB_CB(__skb) ((struct macvlan_skb_cb *)&((__skb)->cb[0])) static void macvlan_port_destroy(struct net_device *dev); static void update_port_bc_queue_len(struct macvlan_port *port); static inline bool macvlan_passthru(const struct macvlan_port *port) { return port->flags & MACVLAN_F_PASSTHRU; } static inline void macvlan_set_passthru(struct macvlan_port *port) { port->flags |= MACVLAN_F_PASSTHRU; } static inline bool macvlan_addr_change(const struct macvlan_port *port) { return port->flags & MACVLAN_F_ADDRCHANGE; } static inline void macvlan_set_addr_change(struct macvlan_port *port) { port->flags |= MACVLAN_F_ADDRCHANGE; } static inline void macvlan_clear_addr_change(struct macvlan_port *port) { port->flags &= ~MACVLAN_F_ADDRCHANGE; } /* Hash Ethernet address */ static u32 macvlan_eth_hash(const unsigned char *addr) { u64 value = get_unaligned((u64 *)addr); /* only want 6 bytes */ #ifdef __BIG_ENDIAN value >>= 16; #else value <<= 16; #endif return hash_64(value, MACVLAN_HASH_BITS); } static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev) { return rcu_dereference(dev->rx_handler_data); } static struct macvlan_port *macvlan_port_get_rtnl(const struct net_device *dev) { return rtnl_dereference(dev->rx_handler_data); } static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port, const unsigned char *addr) { struct macvlan_dev *vlan; u32 idx = macvlan_eth_hash(addr); hlist_for_each_entry_rcu(vlan, &port->vlan_hash[idx], hlist, lockdep_rtnl_is_held()) { if (ether_addr_equal_64bits(vlan->dev->dev_addr, addr)) return vlan; } return NULL; } static struct macvlan_source_entry *macvlan_hash_lookup_source( const struct macvlan_dev *vlan, const unsigned char *addr) { struct macvlan_source_entry *entry; u32 idx = macvlan_eth_hash(addr); struct hlist_head *h = &vlan->port->vlan_source_hash[idx]; hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) { if (ether_addr_equal_64bits(entry->addr, addr) && entry->vlan == vlan) return entry; } return NULL; } static int macvlan_hash_add_source(struct macvlan_dev *vlan, const unsigned char *addr) { struct macvlan_port *port = vlan->port; struct macvlan_source_entry *entry; struct hlist_head *h; entry = macvlan_hash_lookup_source(vlan, addr); if (entry) return 0; entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; ether_addr_copy(entry->addr, addr); entry->vlan = vlan; h = &port->vlan_source_hash[macvlan_eth_hash(addr)]; hlist_add_head_rcu(&entry->hlist, h); vlan->macaddr_count++; return 0; } static void macvlan_hash_add(struct macvlan_dev *vlan) { struct macvlan_port *port = vlan->port; const unsigned char *addr = vlan->dev->dev_addr; u32 idx = macvlan_eth_hash(addr); hlist_add_head_rcu(&vlan->hlist, &port->vlan_hash[idx]); } static void macvlan_hash_del_source(struct macvlan_source_entry *entry) { hlist_del_rcu(&entry->hlist); kfree_rcu(entry, rcu); } static void macvlan_hash_del(struct macvlan_dev *vlan, bool sync) { hlist_del_rcu(&vlan->hlist); if (sync) synchronize_rcu(); } static void macvlan_hash_change_addr(struct macvlan_dev *vlan, const unsigned char *addr) { macvlan_hash_del(vlan, true); /* Now that we are unhashed it is safe to change the device * address without confusing packet delivery. 
*/ eth_hw_addr_set(vlan->dev, addr); macvlan_hash_add(vlan); } static bool macvlan_addr_busy(const struct macvlan_port *port, const unsigned char *addr) { /* Test to see if the specified address is * currently in use by the underlying device or * another macvlan. */ if (!macvlan_passthru(port) && !macvlan_addr_change(port) && ether_addr_equal_64bits(port->dev->dev_addr, addr)) return true; if (macvlan_hash_lookup(port, addr)) return true; return false; } static int macvlan_broadcast_one(struct sk_buff *skb, const struct macvlan_dev *vlan, const struct ethhdr *eth, bool local) { struct net_device *dev = vlan->dev; if (local) return __dev_forward_skb(dev, skb); skb->dev = dev; if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast)) skb->pkt_type = PACKET_BROADCAST; else skb->pkt_type = PACKET_MULTICAST; return 0; } static u32 macvlan_hash_mix(const struct macvlan_dev *vlan) { return (u32)(((unsigned long)vlan) >> L1_CACHE_SHIFT); } static unsigned int mc_hash(const struct macvlan_dev *vlan, const unsigned char *addr) { u32 val = __get_unaligned_cpu32(addr + 2); val ^= macvlan_hash_mix(vlan); return hash_32(val, MACVLAN_MC_FILTER_BITS); } static void macvlan_broadcast(struct sk_buff *skb, const struct macvlan_port *port, struct net_device *src, enum macvlan_mode mode) { const struct ethhdr *eth = eth_hdr(skb); const struct macvlan_dev *vlan; struct sk_buff *nskb; unsigned int i; int err; unsigned int hash; if (skb->protocol == htons(ETH_P_PAUSE)) return; hash_for_each_rcu(port->vlan_hash, i, vlan, hlist) { if (vlan->dev == src || !(vlan->mode & mode)) continue; hash = mc_hash(vlan, eth->h_dest); if (!test_bit(hash, vlan->mc_filter)) continue; err = NET_RX_DROP; nskb = skb_clone(skb, GFP_ATOMIC); if (likely(nskb)) err = macvlan_broadcast_one(nskb, vlan, eth, mode == MACVLAN_MODE_BRIDGE) ?: netif_rx(nskb); macvlan_count_rx(vlan, skb->len + ETH_HLEN, err == NET_RX_SUCCESS, true); } } static void macvlan_multicast_rx(const struct macvlan_port *port, const struct macvlan_dev *src, struct sk_buff *skb) { if (!src) /* frame comes from an external address */ macvlan_broadcast(skb, port, NULL, MACVLAN_MODE_PRIVATE | MACVLAN_MODE_VEPA | MACVLAN_MODE_PASSTHRU| MACVLAN_MODE_BRIDGE); else if (src->mode == MACVLAN_MODE_VEPA) /* flood to everyone except source */ macvlan_broadcast(skb, port, src->dev, MACVLAN_MODE_VEPA | MACVLAN_MODE_BRIDGE); else /* * flood only to VEPA ports, bridge ports * already saw the frame on the way out. 
*/ macvlan_broadcast(skb, port, src->dev, MACVLAN_MODE_VEPA); } static void macvlan_process_broadcast(struct work_struct *w) { struct macvlan_port *port = container_of(w, struct macvlan_port, bc_work); struct sk_buff *skb; struct sk_buff_head list; __skb_queue_head_init(&list); spin_lock_bh(&port->bc_queue.lock); skb_queue_splice_tail_init(&port->bc_queue, &list); spin_unlock_bh(&port->bc_queue.lock); while ((skb = __skb_dequeue(&list))) { const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src; rcu_read_lock(); macvlan_multicast_rx(port, src, skb); rcu_read_unlock(); if (src) dev_put(src->dev); consume_skb(skb); cond_resched(); } } static void macvlan_broadcast_enqueue(struct macvlan_port *port, const struct macvlan_dev *src, struct sk_buff *skb) { struct sk_buff *nskb; int err = -ENOMEM; nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) goto err; MACVLAN_SKB_CB(nskb)->src = src; spin_lock(&port->bc_queue.lock); if (skb_queue_len(&port->bc_queue) < port->bc_queue_len_used) { if (src) dev_hold(src->dev); __skb_queue_tail(&port->bc_queue, nskb); err = 0; } spin_unlock(&port->bc_queue.lock); queue_work(system_unbound_wq, &port->bc_work); if (err) goto free_nskb; return; free_nskb: kfree_skb(nskb); err: dev_core_stats_rx_dropped_inc(skb->dev); } static void macvlan_flush_sources(struct macvlan_port *port, struct macvlan_dev *vlan) { struct macvlan_source_entry *entry; struct hlist_node *next; int i; hash_for_each_safe(port->vlan_source_hash, i, next, entry, hlist) if (entry->vlan == vlan) macvlan_hash_del_source(entry); vlan->macaddr_count = 0; } static void macvlan_forward_source_one(struct sk_buff *skb, struct macvlan_dev *vlan) { struct sk_buff *nskb; struct net_device *dev; int len; int ret; dev = vlan->dev; if (unlikely(!(dev->flags & IFF_UP))) return; nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) return; len = nskb->len + ETH_HLEN; nskb->dev = dev; if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, dev->dev_addr)) nskb->pkt_type = PACKET_HOST; ret = __netif_rx(nskb); macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false); } static bool macvlan_forward_source(struct sk_buff *skb, struct macvlan_port *port, const unsigned char *addr) { struct macvlan_source_entry *entry; u32 idx = macvlan_eth_hash(addr); struct hlist_head *h = &port->vlan_source_hash[idx]; bool consume = false; hlist_for_each_entry_rcu(entry, h, hlist) { if (ether_addr_equal_64bits(entry->addr, addr)) { if (entry->vlan->flags & MACVLAN_FLAG_NODST) consume = true; macvlan_forward_source_one(skb, entry->vlan); } } return consume; } /* called under rcu_read_lock() from netif_receive_skb */ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) { struct macvlan_port *port; struct sk_buff *skb = *pskb; const struct ethhdr *eth = eth_hdr(skb); const struct macvlan_dev *vlan; const struct macvlan_dev *src; struct net_device *dev; unsigned int len = 0; int ret; rx_handler_result_t handle_res; /* Packets from dev_loopback_xmit() do not have L2 header, bail out */ if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) return RX_HANDLER_PASS; port = macvlan_port_get_rcu(skb->dev); if (is_multicast_ether_addr(eth->h_dest)) { unsigned int hash; skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN); if (!skb) return RX_HANDLER_CONSUMED; *pskb = skb; eth = eth_hdr(skb); if (macvlan_forward_source(skb, port, eth->h_source)) { kfree_skb(skb); return RX_HANDLER_CONSUMED; } src = macvlan_hash_lookup(port, eth->h_source); if (src && src->mode != MACVLAN_MODE_VEPA && src->mode != MACVLAN_MODE_BRIDGE) { /* forward to 
original port. */ vlan = src; ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?: __netif_rx(skb); handle_res = RX_HANDLER_CONSUMED; goto out; } hash = mc_hash(NULL, eth->h_dest); if (test_bit(hash, port->bc_filter)) macvlan_broadcast_enqueue(port, src, skb); else if (test_bit(hash, port->mc_filter)) macvlan_multicast_rx(port, src, skb); return RX_HANDLER_PASS; } if (macvlan_forward_source(skb, port, eth->h_source)) { kfree_skb(skb); return RX_HANDLER_CONSUMED; } if (macvlan_passthru(port)) vlan = list_first_or_null_rcu(&port->vlans, struct macvlan_dev, list); else vlan = macvlan_hash_lookup(port, eth->h_dest); if (!vlan || vlan->mode == MACVLAN_MODE_SOURCE) return RX_HANDLER_PASS; dev = vlan->dev; if (unlikely(!(dev->flags & IFF_UP))) { kfree_skb(skb); return RX_HANDLER_CONSUMED; } len = skb->len + ETH_HLEN; skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) { ret = NET_RX_DROP; handle_res = RX_HANDLER_CONSUMED; goto out; } *pskb = skb; skb->dev = dev; skb->pkt_type = PACKET_HOST; ret = NET_RX_SUCCESS; handle_res = RX_HANDLER_ANOTHER; out: macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false); return handle_res; } static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) { const struct macvlan_dev *vlan = netdev_priv(dev); const struct macvlan_port *port = vlan->port; const struct macvlan_dev *dest; if (vlan->mode == MACVLAN_MODE_BRIDGE) { const struct ethhdr *eth = skb_eth_hdr(skb); /* send to other bridge ports directly */ if (is_multicast_ether_addr(eth->h_dest)) { skb_reset_mac_header(skb); macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE); goto xmit_world; } dest = macvlan_hash_lookup(port, eth->h_dest); if (dest && dest->mode == MACVLAN_MODE_BRIDGE) { /* send to lowerdev first for its network taps */ dev_forward_skb(vlan->lowerdev, skb); return NET_XMIT_SUCCESS; } } xmit_world: skb->dev = vlan->lowerdev; return dev_queue_xmit_accel(skb, netdev_get_sb_channel(dev) ? dev : NULL); } static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb) { #ifdef CONFIG_NET_POLL_CONTROLLER return netpoll_send_skb(vlan->netpoll, skb); #else BUG(); return NETDEV_TX_OK; #endif } static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); unsigned int len = skb->len; int ret; if (unlikely(netpoll_tx_running(dev))) return macvlan_netpoll_send_skb(vlan, skb); ret = macvlan_queue_xmit(skb, dev); if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { struct vlan_pcpu_stats *pcpu_stats; pcpu_stats = this_cpu_ptr(vlan->pcpu_stats); u64_stats_update_begin(&pcpu_stats->syncp); u64_stats_inc(&pcpu_stats->tx_packets); u64_stats_add(&pcpu_stats->tx_bytes, len); u64_stats_update_end(&pcpu_stats->syncp); } else { this_cpu_inc(vlan->pcpu_stats->tx_dropped); } return ret; } static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len) { const struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; return dev_hard_header(skb, lowerdev, type, daddr, saddr ? 
: dev->dev_addr, len); } static const struct header_ops macvlan_hard_header_ops = { .create = macvlan_hard_header, .parse = eth_header_parse, .cache = eth_header_cache, .cache_update = eth_header_cache_update, .parse_protocol = eth_header_parse_protocol, }; static int macvlan_open(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; int err; if (macvlan_passthru(vlan->port)) { if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) { err = dev_set_promiscuity(lowerdev, 1); if (err < 0) goto out; } goto hash_add; } err = -EADDRINUSE; if (macvlan_addr_busy(vlan->port, dev->dev_addr)) goto out; /* Attempt to populate accel_priv which is used to offload the L2 * forwarding requests for unicast packets. */ if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) vlan->accel_priv = lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev); /* If earlier attempt to offload failed, or accel_priv is not * populated we must add the unicast address to the lower device. */ if (IS_ERR_OR_NULL(vlan->accel_priv)) { vlan->accel_priv = NULL; err = dev_uc_add(lowerdev, dev->dev_addr); if (err < 0) goto out; } if (dev->flags & IFF_ALLMULTI) { err = dev_set_allmulti(lowerdev, 1); if (err < 0) goto del_unicast; } if (dev->flags & IFF_PROMISC) { err = dev_set_promiscuity(lowerdev, 1); if (err < 0) goto clear_multi; } hash_add: macvlan_hash_add(vlan); return 0; clear_multi: if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(lowerdev, -1); del_unicast: if (vlan->accel_priv) { lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev, vlan->accel_priv); vlan->accel_priv = NULL; } else { dev_uc_del(lowerdev, dev->dev_addr); } out: return err; } static int macvlan_stop(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; if (vlan->accel_priv) { lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev, vlan->accel_priv); vlan->accel_priv = NULL; } dev_uc_unsync(lowerdev, dev); dev_mc_unsync(lowerdev, dev); if (macvlan_passthru(vlan->port)) { if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) dev_set_promiscuity(lowerdev, -1); goto hash_del; } if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(lowerdev, -1); if (dev->flags & IFF_PROMISC) dev_set_promiscuity(lowerdev, -1); dev_uc_del(lowerdev, dev->dev_addr); hash_del: macvlan_hash_del(vlan, !dev->dismantle); return 0; } static int macvlan_sync_address(struct net_device *dev, const unsigned char *addr) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; struct macvlan_port *port = vlan->port; int err; if (!(dev->flags & IFF_UP)) { /* Just copy in the new address */ eth_hw_addr_set(dev, addr); } else { /* Rehash and update the device filters */ if (macvlan_addr_busy(vlan->port, addr)) return -EADDRINUSE; if (!macvlan_passthru(port)) { err = dev_uc_add(lowerdev, addr); if (err) return err; dev_uc_del(lowerdev, dev->dev_addr); } macvlan_hash_change_addr(vlan, addr); } if (macvlan_passthru(port) && !macvlan_addr_change(port)) { /* Since addr_change isn't set, we are here due to lower * device change. Save the lower-dev address so we can * restore it later. 
*/ ether_addr_copy(vlan->port->perm_addr, lowerdev->dev_addr); } macvlan_clear_addr_change(port); return 0; } static int macvlan_set_mac_address(struct net_device *dev, void *p) { struct macvlan_dev *vlan = netdev_priv(dev); struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; /* If the addresses are the same, this is a no-op */ if (ether_addr_equal(dev->dev_addr, addr->sa_data)) return 0; if (vlan->mode == MACVLAN_MODE_PASSTHRU) { macvlan_set_addr_change(vlan->port); return dev_set_mac_address(vlan->lowerdev, addr, NULL); } if (macvlan_addr_busy(vlan->port, addr->sa_data)) return -EADDRINUSE; return macvlan_sync_address(dev, addr->sa_data); } static void macvlan_change_rx_flags(struct net_device *dev, int change) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; if (dev->flags & IFF_UP) { if (change & IFF_ALLMULTI) dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); if (!macvlan_passthru(vlan->port) && change & IFF_PROMISC) dev_set_promiscuity(lowerdev, dev->flags & IFF_PROMISC ? 1 : -1); } } static void macvlan_compute_filter(unsigned long *mc_filter, struct net_device *dev, struct macvlan_dev *vlan, int cutoff) { if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { bitmap_fill(mc_filter, MACVLAN_MC_FILTER_SZ); } else { DECLARE_BITMAP(filter, MACVLAN_MC_FILTER_SZ); struct netdev_hw_addr *ha; bitmap_zero(filter, MACVLAN_MC_FILTER_SZ); netdev_for_each_mc_addr(ha, dev) { if (!vlan && ha->synced <= cutoff) continue; __set_bit(mc_hash(vlan, ha->addr), filter); } __set_bit(mc_hash(vlan, dev->broadcast), filter); bitmap_copy(mc_filter, filter, MACVLAN_MC_FILTER_SZ); } } static void macvlan_recompute_bc_filter(struct macvlan_dev *vlan) { if (vlan->port->bc_cutoff < 0) { bitmap_zero(vlan->port->bc_filter, MACVLAN_MC_FILTER_SZ); return; } macvlan_compute_filter(vlan->port->bc_filter, vlan->lowerdev, NULL, vlan->port->bc_cutoff); } static void macvlan_set_mac_lists(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); macvlan_compute_filter(vlan->mc_filter, dev, vlan, 0); dev_uc_sync(vlan->lowerdev, dev); dev_mc_sync(vlan->lowerdev, dev); /* This is slightly inaccurate as we're including the subscription * list of vlan->lowerdev too. * * Bug alert: This only works if everyone has the same broadcast * address as lowerdev. As soon as someone changes theirs this * will break. * * However, this is already broken as when you change your broadcast * address we don't get called. * * The solution is to maintain a list of broadcast addresses like * we do for uc/mc, if you care. 
*/ macvlan_compute_filter(vlan->port->mc_filter, vlan->lowerdev, NULL, 0); macvlan_recompute_bc_filter(vlan); } static void update_port_bc_cutoff(struct macvlan_dev *vlan, int cutoff) { if (vlan->port->bc_cutoff == cutoff) return; vlan->port->bc_cutoff = cutoff; macvlan_recompute_bc_filter(vlan); } static int macvlan_change_mtu(struct net_device *dev, int new_mtu) { struct macvlan_dev *vlan = netdev_priv(dev); if (vlan->lowerdev->mtu < new_mtu) return -EINVAL; WRITE_ONCE(dev->mtu, new_mtu); return 0; } static int macvlan_hwtstamp_get(struct net_device *dev, struct kernel_hwtstamp_config *cfg) { struct net_device *real_dev = macvlan_dev_real_dev(dev); return generic_hwtstamp_get_lower(real_dev, cfg); } static int macvlan_hwtstamp_set(struct net_device *dev, struct kernel_hwtstamp_config *cfg, struct netlink_ext_ack *extack) { struct net_device *real_dev = macvlan_dev_real_dev(dev); if (!net_eq(dev_net(dev), &init_net)) return -EOPNOTSUPP; return generic_hwtstamp_set_lower(real_dev, cfg, extack); } /* * macvlan network devices have devices nesting below it and are a special * "super class" of normal network devices; split their locks off into a * separate class since they always nest. */ static struct lock_class_key macvlan_netdev_addr_lock_key; #define ALWAYS_ON_OFFLOADS \ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \ NETIF_F_GSO_ROBUST | NETIF_F_GSO_ENCAP_ALL) #define ALWAYS_ON_FEATURES ALWAYS_ON_OFFLOADS #define MACVLAN_FEATURES \ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_LRO | \ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) #define MACVLAN_STATE_MASK \ ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) static void macvlan_set_lockdep_class(struct net_device *dev) { netdev_lockdep_set_classes(dev); lockdep_set_class(&dev->addr_list_lock, &macvlan_netdev_addr_lock_key); } static int macvlan_init(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; struct macvlan_port *port = vlan->port; dev->state = (dev->state & ~MACVLAN_STATE_MASK) | (lowerdev->state & MACVLAN_STATE_MASK); dev->features = lowerdev->features & MACVLAN_FEATURES; dev->features |= ALWAYS_ON_FEATURES; dev->hw_features |= NETIF_F_LRO; dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES; dev->vlan_features |= ALWAYS_ON_OFFLOADS; dev->hw_enc_features |= dev->features; dev->lltx = true; netif_inherit_tso_max(dev, lowerdev); dev->hard_header_len = lowerdev->hard_header_len; macvlan_set_lockdep_class(dev); vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats); if (!vlan->pcpu_stats) return -ENOMEM; port->count += 1; /* Get macvlan's reference to lowerdev */ netdev_hold(lowerdev, &vlan->dev_tracker, GFP_KERNEL); return 0; } static void macvlan_uninit(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); struct macvlan_port *port = vlan->port; free_percpu(vlan->pcpu_stats); macvlan_flush_sources(port, vlan); port->count -= 1; if (!port->count) macvlan_port_destroy(port->dev); } static void macvlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct macvlan_dev *vlan = netdev_priv(dev); if (vlan->pcpu_stats) { struct vlan_pcpu_stats *p; u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes; u32 rx_errors = 0, tx_dropped = 0; unsigned int start; int i; for_each_possible_cpu(i) { p = per_cpu_ptr(vlan->pcpu_stats, i); do { start = 
u64_stats_fetch_begin(&p->syncp); rx_packets = u64_stats_read(&p->rx_packets); rx_bytes = u64_stats_read(&p->rx_bytes); rx_multicast = u64_stats_read(&p->rx_multicast); tx_packets = u64_stats_read(&p->tx_packets); tx_bytes = u64_stats_read(&p->tx_bytes); } while (u64_stats_fetch_retry(&p->syncp, start)); stats->rx_packets += rx_packets; stats->rx_bytes += rx_bytes; stats->multicast += rx_multicast; stats->tx_packets += tx_packets; stats->tx_bytes += tx_bytes; /* rx_errors & tx_dropped are u32, updated * without syncp protection. */ rx_errors += READ_ONCE(p->rx_errors); tx_dropped += READ_ONCE(p->tx_dropped); } stats->rx_errors = rx_errors; stats->rx_dropped = rx_errors; stats->tx_dropped = tx_dropped; } } static int macvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; return vlan_vid_add(lowerdev, proto, vid); } static int macvlan_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; vlan_vid_del(lowerdev, proto, vid); return 0; } static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, u16 flags, bool *notified, struct netlink_ext_ack *extack) { struct macvlan_dev *vlan = netdev_priv(dev); int err = -EINVAL; /* Support unicast filter only on passthru devices. * Multicast filter should be allowed on all devices. */ if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr)) return -EOPNOTSUPP; if (flags & NLM_F_REPLACE) return -EOPNOTSUPP; if (is_unicast_ether_addr(addr)) err = dev_uc_add_excl(dev, addr); else if (is_multicast_ether_addr(addr)) err = dev_mc_add_excl(dev, addr); return err; } static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, bool *notified, struct netlink_ext_ack *extack) { struct macvlan_dev *vlan = netdev_priv(dev); int err = -EINVAL; /* Support unicast filter only on passthru devices. * Multicast filter should be allowed on all devices. 
*/ if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr)) return -EOPNOTSUPP; if (is_unicast_ether_addr(addr)) err = dev_uc_del(dev, addr); else if (is_multicast_ether_addr(addr)) err = dev_mc_del(dev, addr); return err; } static void macvlan_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { strscpy(drvinfo->driver, "macvlan", sizeof(drvinfo->driver)); strscpy(drvinfo->version, "0.1", sizeof(drvinfo->version)); } static int macvlan_ethtool_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { const struct macvlan_dev *vlan = netdev_priv(dev); return __ethtool_get_link_ksettings(vlan->lowerdev, cmd); } static int macvlan_ethtool_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info) { struct net_device *real_dev = macvlan_dev_real_dev(dev); return ethtool_get_ts_info_by_layer(real_dev, info); } static netdev_features_t macvlan_fix_features(struct net_device *dev, netdev_features_t features) { struct macvlan_dev *vlan = netdev_priv(dev); netdev_features_t lowerdev_features = vlan->lowerdev->features; netdev_features_t mask; features |= NETIF_F_ALL_FOR_ALL; features &= (vlan->set_features | ~MACVLAN_FEATURES); mask = features; lowerdev_features &= (features | ~NETIF_F_LRO); features = netdev_increment_features(lowerdev_features, features, mask); features |= ALWAYS_ON_FEATURES; features &= (ALWAYS_ON_FEATURES | MACVLAN_FEATURES); return features; } #ifdef CONFIG_NET_POLL_CONTROLLER static void macvlan_dev_poll_controller(struct net_device *dev) { return; } static int macvlan_dev_netpoll_setup(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *real_dev = vlan->lowerdev; struct netpoll *netpoll; int err; netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL); err = -ENOMEM; if (!netpoll) goto out; err = __netpoll_setup(netpoll, real_dev); if (err) { kfree(netpoll); goto out; } vlan->netpoll = netpoll; out: return err; } static void macvlan_dev_netpoll_cleanup(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); struct netpoll *netpoll = vlan->netpoll; if (!netpoll) return; vlan->netpoll = NULL; __netpoll_free(netpoll); } #endif /* CONFIG_NET_POLL_CONTROLLER */ static int macvlan_dev_get_iflink(const struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); return READ_ONCE(vlan->lowerdev->ifindex); } static const struct ethtool_ops macvlan_ethtool_ops = { .get_link = ethtool_op_get_link, .get_link_ksettings = macvlan_ethtool_get_link_ksettings, .get_drvinfo = macvlan_ethtool_get_drvinfo, .get_ts_info = macvlan_ethtool_get_ts_info, }; static const struct net_device_ops macvlan_netdev_ops = { .ndo_init = macvlan_init, .ndo_uninit = macvlan_uninit, .ndo_open = macvlan_open, .ndo_stop = macvlan_stop, .ndo_start_xmit = macvlan_start_xmit, .ndo_change_mtu = macvlan_change_mtu, .ndo_fix_features = macvlan_fix_features, .ndo_change_rx_flags = macvlan_change_rx_flags, .ndo_set_mac_address = macvlan_set_mac_address, .ndo_set_rx_mode = macvlan_set_mac_lists, .ndo_get_stats64 = macvlan_dev_get_stats64, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_add_vid = macvlan_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = macvlan_vlan_rx_kill_vid, .ndo_fdb_add = macvlan_fdb_add, .ndo_fdb_del = macvlan_fdb_del, .ndo_fdb_dump = ndo_dflt_fdb_dump, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = macvlan_dev_poll_controller, .ndo_netpoll_setup = macvlan_dev_netpoll_setup, .ndo_netpoll_cleanup = macvlan_dev_netpoll_cleanup, #endif .ndo_get_iflink = 
macvlan_dev_get_iflink, .ndo_features_check = passthru_features_check, .ndo_hwtstamp_get = macvlan_hwtstamp_get, .ndo_hwtstamp_set = macvlan_hwtstamp_set, }; static void macvlan_dev_free(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); /* Get rid of the macvlan's reference to lowerdev */ netdev_put(vlan->lowerdev, &vlan->dev_tracker); } void macvlan_common_setup(struct net_device *dev) { ether_setup(dev); /* ether_setup() has set dev->min_mtu to ETH_MIN_MTU. */ dev->max_mtu = ETH_MAX_MTU; dev->priv_flags &= ~IFF_TX_SKB_SHARING; netif_keep_dst(dev); dev->priv_flags |= IFF_UNICAST_FLT; dev->change_proto_down = true; dev->netdev_ops = &macvlan_netdev_ops; dev->needs_free_netdev = true; dev->priv_destructor = macvlan_dev_free; dev->header_ops = &macvlan_hard_header_ops; dev->ethtool_ops = &macvlan_ethtool_ops; } EXPORT_SYMBOL_GPL(macvlan_common_setup); static void macvlan_setup(struct net_device *dev) { macvlan_common_setup(dev); dev->priv_flags |= IFF_NO_QUEUE; } static int macvlan_port_create(struct net_device *dev) { struct macvlan_port *port; unsigned int i; int err; if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) return -EINVAL; if (netdev_is_rx_handler_busy(dev)) return -EBUSY; port = kzalloc(sizeof(*port), GFP_KERNEL); if (port == NULL) return -ENOMEM; port->dev = dev; ether_addr_copy(port->perm_addr, dev->dev_addr); INIT_LIST_HEAD(&port->vlans); for (i = 0; i < MACVLAN_HASH_SIZE; i++) INIT_HLIST_HEAD(&port->vlan_hash[i]); for (i = 0; i < MACVLAN_HASH_SIZE; i++) INIT_HLIST_HEAD(&port->vlan_source_hash[i]); port->bc_queue_len_used = 0; port->bc_cutoff = 1; skb_queue_head_init(&port->bc_queue); INIT_WORK(&port->bc_work, macvlan_process_broadcast); err = netdev_rx_handler_register(dev, macvlan_handle_frame, port); if (err) kfree(port); else dev->priv_flags |= IFF_MACVLAN_PORT; return err; } static void macvlan_port_destroy(struct net_device *dev) { struct macvlan_port *port = macvlan_port_get_rtnl(dev); struct sk_buff *skb; dev->priv_flags &= ~IFF_MACVLAN_PORT; netdev_rx_handler_unregister(dev); /* After this point, no packet can schedule bc_work anymore, * but we need to cancel it and purge left skbs if any. */ cancel_work_sync(&port->bc_work); while ((skb = __skb_dequeue(&port->bc_queue))) { const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src; if (src) dev_put(src->dev); kfree_skb(skb); } /* If the lower device address has been changed by passthru * macvlan, put it back. 
*/ if (macvlan_passthru(port) && !ether_addr_equal(port->dev->dev_addr, port->perm_addr)) { struct sockaddr sa; sa.sa_family = port->dev->type; memcpy(&sa.sa_data, port->perm_addr, port->dev->addr_len); dev_set_mac_address(port->dev, &sa, NULL); } kfree(port); } static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { struct nlattr *nla, *head; int rem, len; if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) return -EINVAL; if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) return -EADDRNOTAVAIL; } if (!data) return 0; if (data[IFLA_MACVLAN_FLAGS] && nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~(MACVLAN_FLAG_NOPROMISC | MACVLAN_FLAG_NODST)) return -EINVAL; if (data[IFLA_MACVLAN_MODE]) { switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) { case MACVLAN_MODE_PRIVATE: case MACVLAN_MODE_VEPA: case MACVLAN_MODE_BRIDGE: case MACVLAN_MODE_PASSTHRU: case MACVLAN_MODE_SOURCE: break; default: return -EINVAL; } } if (data[IFLA_MACVLAN_MACADDR_MODE]) { switch (nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE])) { case MACVLAN_MACADDR_ADD: case MACVLAN_MACADDR_DEL: case MACVLAN_MACADDR_FLUSH: case MACVLAN_MACADDR_SET: break; default: return -EINVAL; } } if (data[IFLA_MACVLAN_MACADDR]) { if (nla_len(data[IFLA_MACVLAN_MACADDR]) != ETH_ALEN) return -EINVAL; if (!is_valid_ether_addr(nla_data(data[IFLA_MACVLAN_MACADDR]))) return -EADDRNOTAVAIL; } if (data[IFLA_MACVLAN_MACADDR_DATA]) { head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]); len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]); nla_for_each_attr(nla, head, len, rem) { if (nla_type(nla) != IFLA_MACVLAN_MACADDR || nla_len(nla) != ETH_ALEN) return -EINVAL; if (!is_valid_ether_addr(nla_data(nla))) return -EADDRNOTAVAIL; } } if (data[IFLA_MACVLAN_MACADDR_COUNT]) return -EINVAL; return 0; } /* * reconfigure list of remote source mac address * (only for macvlan devices in source mode) * Note regarding alignment: all netlink data is aligned to 4 Byte, which * suffices for both ether_addr_copy and ether_addr_equal_64bits usage. 
*/ static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode, struct nlattr *data[]) { char *addr = NULL; int ret, rem, len; struct nlattr *nla, *head; struct macvlan_source_entry *entry; if (data[IFLA_MACVLAN_MACADDR]) addr = nla_data(data[IFLA_MACVLAN_MACADDR]); if (mode == MACVLAN_MACADDR_ADD) { if (!addr) return -EINVAL; return macvlan_hash_add_source(vlan, addr); } else if (mode == MACVLAN_MACADDR_DEL) { if (!addr) return -EINVAL; entry = macvlan_hash_lookup_source(vlan, addr); if (entry) { macvlan_hash_del_source(entry); vlan->macaddr_count--; } } else if (mode == MACVLAN_MACADDR_FLUSH) { macvlan_flush_sources(vlan->port, vlan); } else if (mode == MACVLAN_MACADDR_SET) { macvlan_flush_sources(vlan->port, vlan); if (addr) { ret = macvlan_hash_add_source(vlan, addr); if (ret) return ret; } if (!data[IFLA_MACVLAN_MACADDR_DATA]) return 0; head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]); len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]); nla_for_each_attr(nla, head, len, rem) { addr = nla_data(nla); ret = macvlan_hash_add_source(vlan, addr); if (ret) return ret; } } else { return -EINVAL; } return 0; } int macvlan_common_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { struct macvlan_dev *vlan = netdev_priv(dev); struct macvlan_port *port; struct net_device *lowerdev; int err; int macmode; bool create = false; if (!tb[IFLA_LINK]) return -EINVAL; lowerdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); if (lowerdev == NULL) return -ENODEV; /* When creating macvlans or macvtaps on top of other macvlans - use * the real device as the lowerdev. */ if (netif_is_macvlan(lowerdev)) lowerdev = macvlan_dev_real_dev(lowerdev); if (!tb[IFLA_MTU]) dev->mtu = lowerdev->mtu; else if (dev->mtu > lowerdev->mtu) return -EINVAL; /* MTU range: 68 - lowerdev->max_mtu */ dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = lowerdev->max_mtu; if (!tb[IFLA_ADDRESS]) eth_hw_addr_random(dev); if (!netif_is_macvlan_port(lowerdev)) { err = macvlan_port_create(lowerdev); if (err < 0) return err; create = true; } port = macvlan_port_get_rtnl(lowerdev); /* Only 1 macvlan device can be created in passthru mode */ if (macvlan_passthru(port)) { /* The macvlan port must be not created this time, * still goto destroy_macvlan_port for readability. 
*/ err = -EINVAL; goto destroy_macvlan_port; } vlan->lowerdev = lowerdev; vlan->dev = dev; vlan->port = port; vlan->set_features = MACVLAN_FEATURES; vlan->mode = MACVLAN_MODE_VEPA; if (data && data[IFLA_MACVLAN_MODE]) vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); if (data && data[IFLA_MACVLAN_FLAGS]) vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); if (vlan->mode == MACVLAN_MODE_PASSTHRU) { if (port->count) { err = -EINVAL; goto destroy_macvlan_port; } macvlan_set_passthru(port); eth_hw_addr_inherit(dev, lowerdev); } if (data && data[IFLA_MACVLAN_MACADDR_MODE]) { if (vlan->mode != MACVLAN_MODE_SOURCE) { err = -EINVAL; goto destroy_macvlan_port; } macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]); err = macvlan_changelink_sources(vlan, macmode, data); if (err) goto destroy_macvlan_port; } vlan->bc_queue_len_req = MACVLAN_DEFAULT_BC_QUEUE_LEN; if (data && data[IFLA_MACVLAN_BC_QUEUE_LEN]) vlan->bc_queue_len_req = nla_get_u32(data[IFLA_MACVLAN_BC_QUEUE_LEN]); if (data && data[IFLA_MACVLAN_BC_CUTOFF]) update_port_bc_cutoff( vlan, nla_get_s32(data[IFLA_MACVLAN_BC_CUTOFF])); err = register_netdevice(dev); if (err < 0) goto destroy_macvlan_port; dev->priv_flags |= IFF_MACVLAN; err = netdev_upper_dev_link(lowerdev, dev, extack); if (err) goto unregister_netdev; list_add_tail_rcu(&vlan->list, &port->vlans); update_port_bc_queue_len(vlan->port); netif_stacked_transfer_operstate(lowerdev, dev); linkwatch_fire_event(dev); return 0; unregister_netdev: /* macvlan_uninit would free the macvlan port */ unregister_netdevice(dev); return err; destroy_macvlan_port: /* the macvlan port may be freed by macvlan_uninit when fail to register. * so we destroy the macvlan port only when it's valid. */ if (create && macvlan_port_get_rtnl(lowerdev)) { macvlan_flush_sources(port, vlan); macvlan_port_destroy(port->dev); } return err; } EXPORT_SYMBOL_GPL(macvlan_common_newlink); static int macvlan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { return macvlan_common_newlink(src_net, dev, tb, data, extack); } void macvlan_dellink(struct net_device *dev, struct list_head *head) { struct macvlan_dev *vlan = netdev_priv(dev); if (vlan->mode == MACVLAN_MODE_SOURCE) macvlan_flush_sources(vlan->port, vlan); list_del_rcu(&vlan->list); update_port_bc_queue_len(vlan->port); unregister_netdevice_queue(dev, head); netdev_upper_dev_unlink(vlan->lowerdev, dev); } EXPORT_SYMBOL_GPL(macvlan_dellink); static int macvlan_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { struct macvlan_dev *vlan = netdev_priv(dev); enum macvlan_mode mode; bool set_mode = false; enum macvlan_macaddr_mode macmode; int ret; /* Validate mode, but don't set yet: setting flags may fail. 
*/ if (data && data[IFLA_MACVLAN_MODE]) { set_mode = true; mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); /* Passthrough mode can't be set or cleared dynamically */ if ((mode == MACVLAN_MODE_PASSTHRU) != (vlan->mode == MACVLAN_MODE_PASSTHRU)) return -EINVAL; if (vlan->mode == MACVLAN_MODE_SOURCE && vlan->mode != mode) macvlan_flush_sources(vlan->port, vlan); } if (data && data[IFLA_MACVLAN_FLAGS]) { __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC; if (macvlan_passthru(vlan->port) && promisc) { int err; if (flags & MACVLAN_FLAG_NOPROMISC) err = dev_set_promiscuity(vlan->lowerdev, -1); else err = dev_set_promiscuity(vlan->lowerdev, 1); if (err < 0) return err; } vlan->flags = flags; } if (data && data[IFLA_MACVLAN_BC_QUEUE_LEN]) { vlan->bc_queue_len_req = nla_get_u32(data[IFLA_MACVLAN_BC_QUEUE_LEN]); update_port_bc_queue_len(vlan->port); } if (data && data[IFLA_MACVLAN_BC_CUTOFF]) update_port_bc_cutoff( vlan, nla_get_s32(data[IFLA_MACVLAN_BC_CUTOFF])); if (set_mode) vlan->mode = mode; if (data && data[IFLA_MACVLAN_MACADDR_MODE]) { if (vlan->mode != MACVLAN_MODE_SOURCE) return -EINVAL; macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]); ret = macvlan_changelink_sources(vlan, macmode, data); if (ret) return ret; } return 0; } static size_t macvlan_get_size_mac(const struct macvlan_dev *vlan) { if (vlan->macaddr_count == 0) return 0; return nla_total_size(0) /* IFLA_MACVLAN_MACADDR_DATA */ + vlan->macaddr_count * nla_total_size(sizeof(u8) * ETH_ALEN); } static size_t macvlan_get_size(const struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); return (0 + nla_total_size(4) /* IFLA_MACVLAN_MODE */ + nla_total_size(2) /* IFLA_MACVLAN_FLAGS */ + nla_total_size(4) /* IFLA_MACVLAN_MACADDR_COUNT */ + macvlan_get_size_mac(vlan) /* IFLA_MACVLAN_MACADDR */ + nla_total_size(4) /* IFLA_MACVLAN_BC_QUEUE_LEN */ + nla_total_size(4) /* IFLA_MACVLAN_BC_QUEUE_LEN_USED */ ); } static int macvlan_fill_info_macaddr(struct sk_buff *skb, const struct macvlan_dev *vlan, const int i) { struct hlist_head *h = &vlan->port->vlan_source_hash[i]; struct macvlan_source_entry *entry; hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) { if (entry->vlan != vlan) continue; if (nla_put(skb, IFLA_MACVLAN_MACADDR, ETH_ALEN, entry->addr)) return 1; } return 0; } static int macvlan_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); struct macvlan_port *port = vlan->port; int i; struct nlattr *nest; if (nla_put_u32(skb, IFLA_MACVLAN_MODE, vlan->mode)) goto nla_put_failure; if (nla_put_u16(skb, IFLA_MACVLAN_FLAGS, vlan->flags)) goto nla_put_failure; if (nla_put_u32(skb, IFLA_MACVLAN_MACADDR_COUNT, vlan->macaddr_count)) goto nla_put_failure; if (vlan->macaddr_count > 0) { nest = nla_nest_start_noflag(skb, IFLA_MACVLAN_MACADDR_DATA); if (nest == NULL) goto nla_put_failure; for (i = 0; i < MACVLAN_HASH_SIZE; i++) { if (macvlan_fill_info_macaddr(skb, vlan, i)) goto nla_put_failure; } nla_nest_end(skb, nest); } if (nla_put_u32(skb, IFLA_MACVLAN_BC_QUEUE_LEN, vlan->bc_queue_len_req)) goto nla_put_failure; if (nla_put_u32(skb, IFLA_MACVLAN_BC_QUEUE_LEN_USED, port->bc_queue_len_used)) goto nla_put_failure; if (port->bc_cutoff != 1 && nla_put_s32(skb, IFLA_MACVLAN_BC_CUTOFF, port->bc_cutoff)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = { [IFLA_MACVLAN_MODE] = { .type = NLA_U32 }, 
[IFLA_MACVLAN_FLAGS] = { .type = NLA_U16 }, [IFLA_MACVLAN_MACADDR_MODE] = { .type = NLA_U32 }, [IFLA_MACVLAN_MACADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, [IFLA_MACVLAN_MACADDR_DATA] = { .type = NLA_NESTED }, [IFLA_MACVLAN_MACADDR_COUNT] = { .type = NLA_U32 }, [IFLA_MACVLAN_BC_QUEUE_LEN] = { .type = NLA_U32 }, [IFLA_MACVLAN_BC_QUEUE_LEN_USED] = { .type = NLA_REJECT }, [IFLA_MACVLAN_BC_CUTOFF] = { .type = NLA_S32 }, }; int macvlan_link_register(struct rtnl_link_ops *ops) { /* common fields */ ops->validate = macvlan_validate; ops->maxtype = IFLA_MACVLAN_MAX; ops->policy = macvlan_policy; ops->changelink = macvlan_changelink; ops->get_size = macvlan_get_size; ops->fill_info = macvlan_fill_info; return rtnl_link_register(ops); }; EXPORT_SYMBOL_GPL(macvlan_link_register); static struct net *macvlan_get_link_net(const struct net_device *dev) { return dev_net(macvlan_dev_real_dev(dev)); } static struct rtnl_link_ops macvlan_link_ops = { .kind = "macvlan", .setup = macvlan_setup, .newlink = macvlan_newlink, .dellink = macvlan_dellink, .get_link_net = macvlan_get_link_net, .priv_size = sizeof(struct macvlan_dev), }; static void update_port_bc_queue_len(struct macvlan_port *port) { u32 max_bc_queue_len_req = 0; struct macvlan_dev *vlan; list_for_each_entry(vlan, &port->vlans, list) { if (vlan->bc_queue_len_req > max_bc_queue_len_req) max_bc_queue_len_req = vlan->bc_queue_len_req; } port->bc_queue_len_used = max_bc_queue_len_req; } static int macvlan_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct macvlan_dev *vlan, *next; struct macvlan_port *port; LIST_HEAD(list_kill); if (!netif_is_macvlan_port(dev)) return NOTIFY_DONE; port = macvlan_port_get_rtnl(dev); switch (event) { case NETDEV_UP: case NETDEV_DOWN: case NETDEV_CHANGE: list_for_each_entry(vlan, &port->vlans, list) netif_stacked_transfer_operstate(vlan->lowerdev, vlan->dev); break; case NETDEV_FEAT_CHANGE: list_for_each_entry(vlan, &port->vlans, list) { netif_inherit_tso_max(vlan->dev, dev); netdev_update_features(vlan->dev); } break; case NETDEV_CHANGEMTU: list_for_each_entry(vlan, &port->vlans, list) { if (vlan->dev->mtu <= dev->mtu) continue; dev_set_mtu(vlan->dev, dev->mtu); } break; case NETDEV_CHANGEADDR: if (!macvlan_passthru(port)) return NOTIFY_DONE; vlan = list_first_entry_or_null(&port->vlans, struct macvlan_dev, list); if (vlan && macvlan_sync_address(vlan->dev, dev->dev_addr)) return NOTIFY_BAD; break; case NETDEV_UNREGISTER: /* twiddle thumbs on netns device moves */ if (dev->reg_state != NETREG_UNREGISTERING) break; list_for_each_entry_safe(vlan, next, &port->vlans, list) vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill); unregister_netdevice_many(&list_kill); break; case NETDEV_PRE_TYPE_CHANGE: /* Forbid underlying device to change its type. 
*/ return NOTIFY_BAD; case NETDEV_NOTIFY_PEERS: case NETDEV_BONDING_FAILOVER: case NETDEV_RESEND_IGMP: /* Propagate to all vlans */ list_for_each_entry(vlan, &port->vlans, list) call_netdevice_notifiers(event, vlan->dev); } return NOTIFY_DONE; } static struct notifier_block macvlan_notifier_block __read_mostly = { .notifier_call = macvlan_device_event, }; static int __init macvlan_init_module(void) { int err; register_netdevice_notifier(&macvlan_notifier_block); err = macvlan_link_register(&macvlan_link_ops); if (err < 0) goto err1; return 0; err1: unregister_netdevice_notifier(&macvlan_notifier_block); return err; } static void __exit macvlan_cleanup_module(void) { rtnl_link_unregister(&macvlan_link_ops); unregister_netdevice_notifier(&macvlan_notifier_block); } module_init(macvlan_init_module); module_exit(macvlan_cleanup_module); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); MODULE_DESCRIPTION("Driver for MAC address based VLANs"); MODULE_ALIAS_RTNL_LINK("macvlan");
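The macvlan RX path above keys all unicast delivery off macvlan_eth_hash(), which folds the six MAC address bytes into a 64-bit value and reduces it to a MACVLAN_HASH_BITS-wide bucket index with hash_64(). Below is a small standalone sketch of that bucketing idea; the hash_64() stand-in (same golden-ratio multiplier as <linux/hash.h>) and the example MAC are illustrative assumptions, not driver code.

/* Standalone userspace sketch only -- not part of the driver. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MACVLAN_HASH_BITS 8

static uint32_t hash_64(uint64_t val, unsigned int bits)
{
	/* multiplicative (Fibonacci) hashing: keep the top @bits bits */
	return (uint32_t)((val * 0x61C8864680B583EBULL) >> (64 - bits));
}

static uint32_t mac_bucket(const unsigned char *addr)
{
	uint64_t value = 0;

	memcpy(&value, addr, 6);	/* only the 6 MAC bytes contribute */
	return hash_64(value, MACVLAN_HASH_BITS);
}

int main(void)
{
	const unsigned char mac[6] = { 0x02, 0x42, 0xac, 0x11, 0x00, 0x02 };

	printf("MAC hashes to bucket %u of %u\n",
	       mac_bucket(mac), 1u << MACVLAN_HASH_BITS);
	return 0;
}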
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_IO_READ_H
#define _BCACHEFS_IO_READ_H

#include "bkey_buf.h"
#include "reflink.h"

struct bch_read_bio {
	struct bch_fs		*c;
	u64			start_time;
	u64			submit_time;

	/*
	 * Reads will often have to be split, and if the extent being read from
	 * was checksummed or compressed we'll also have to allocate bounce
	 * buffers and copy the data back into the original bio.
	 *
	 * If we didn't have to split, we have to save and restore the original
	 * bi_end_io - @split below indicates which:
	 */
	union {
		struct bch_read_bio	*parent;
		bio_end_io_t		*end_io;
	};

	/*
	 * Saved copy of bio->bi_iter, from submission time - allows us to
	 * resubmit on IO error, and also to copy data back to the original bio
	 * when we're bouncing:
	 */
	struct bvec_iter	bvec_iter;

	unsigned		offset_into_extent;

	u16			flags;
	union {
		struct {
			u16	promote:1,
				bounce:1,
				split:1,
				have_ioref:1,
				narrow_crcs:1,
				hole:1,
				saw_error:1,
				retry:2,
				context:2;
		};
		u16		_state;
	};

	struct extent_ptr_decoded pick;

	/*
	 * pos we read from - different from data_pos for indirect extents:
	 */
	u32			subvol;
	struct bpos		read_pos;

	/*
	 * start pos of data we read (may not be pos of data we want) - for
	 * promote, narrow extents paths:
	 */
	enum btree_id		data_btree;
	struct bpos		data_pos;
	struct bversion		version;

	struct bch_io_opts	opts;

	struct work_struct	work;

	struct bio		bio;
};

#define to_rbio(_bio)	container_of((_bio), struct bch_read_bio, bio)

struct bch_devs_mask;
struct cache_promote_op;
struct extent_ptr_decoded;

static inline int bch2_read_indirect_extent(struct btree_trans *trans,
					    enum btree_id *data_btree,
					    s64 *offset_into_extent,
					    struct bkey_buf *extent)
{
	if (extent->k->k.type != KEY_TYPE_reflink_p)
		return 0;

	*data_btree = BTREE_ID_reflink;

	struct btree_iter iter;
	struct bkey_s_c k = bch2_lookup_indirect_extent(trans, &iter,
							offset_into_extent,
							bkey_i_to_s_c_reflink_p(extent->k),
							true, 0);
	int ret = bkey_err(k);
	if (ret)
		return ret;

	if (bkey_deleted(k.k)) {
		bch2_trans_iter_exit(trans, &iter);
		return -BCH_ERR_missing_indirect_extent;
	}

	bch2_bkey_buf_reassemble(extent, trans->c, k);
	bch2_trans_iter_exit(trans, &iter);
	return 0;
}

#define BCH_READ_FLAGS()	\
	x(retry_if_stale)	\
	x(may_promote)		\
	x(user_mapped)		\
	x(data_update)		\
	x(last_fragment)	\
	x(must_bounce)		\
	x(must_clone)		\
	x(in_retry)

enum __bch_read_flags {
#define x(n)	__BCH_READ_##n,
	BCH_READ_FLAGS()
#undef x
};

enum bch_read_flags {
#define x(n)	BCH_READ_##n = BIT(__BCH_READ_##n),
	BCH_READ_FLAGS()
#undef x
};

int __bch2_read_extent(struct btree_trans *, struct bch_read_bio *,
		       struct bvec_iter, struct bpos, enum btree_id,
		       struct bkey_s_c, unsigned,
		       struct bch_io_failures *, unsigned, int);

static inline void bch2_read_extent(struct btree_trans *trans,
				    struct bch_read_bio *rbio, struct bpos read_pos,
				    enum btree_id data_btree, struct bkey_s_c k,
				    unsigned offset_into_extent, unsigned flags)
{
	__bch2_read_extent(trans, rbio,
rbio->bio.bi_iter, read_pos, data_btree, k, offset_into_extent, NULL, flags, -1); } void __bch2_read(struct bch_fs *, struct bch_read_bio *, struct bvec_iter, subvol_inum, struct bch_io_failures *, unsigned flags); static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, subvol_inum inum) { struct bch_io_failures failed = { .nr = 0 }; BUG_ON(rbio->_state); rbio->subvol = inum.subvol; __bch2_read(c, rbio, rbio->bio.bi_iter, inum, &failed, BCH_READ_retry_if_stale| BCH_READ_may_promote| BCH_READ_user_mapped); } static inline struct bch_read_bio *rbio_init_fragment(struct bio *bio, struct bch_read_bio *orig) { struct bch_read_bio *rbio = to_rbio(bio); rbio->c = orig->c; rbio->_state = 0; rbio->split = true; rbio->parent = orig; rbio->opts = orig->opts; return rbio; } static inline struct bch_read_bio *rbio_init(struct bio *bio, struct bch_fs *c, struct bch_io_opts opts, bio_end_io_t end_io) { struct bch_read_bio *rbio = to_rbio(bio); rbio->start_time = local_clock(); rbio->c = c; rbio->_state = 0; rbio->opts = opts; rbio->bio.bi_end_io = end_io; return rbio; } void bch2_fs_io_read_exit(struct bch_fs *); int bch2_fs_io_read_init(struct bch_fs *); #endif /* _BCACHEFS_IO_READ_H */
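For orientation, here is a minimal sketch, under stated assumptions, of how a caller might combine the helpers declared in this header: the bio has to be wrapped in a struct bch_read_bio (to_rbio() is just container_of()), rbio_init() fills in the fs pointer, options and completion hook, and bch2_read() submits with the default retry/promote/user-mapped flags. The example_* names are hypothetical; this is not code taken from bcachefs.

/* Hypothetical caller, for illustration only: assumes @bio was allocated
 * with a struct bch_read_bio embedded around it, as to_rbio() requires. */
static void example_read_endio(struct bio *bio)
{
	struct bch_read_bio *rbio = to_rbio(bio);

	/* completion handling would go here (rbio->saw_error, etc.) */
	bio_put(bio);
}

static void example_submit_read(struct bch_fs *c, struct bio *bio,
				subvol_inum inum, struct bch_io_opts opts)
{
	struct bch_read_bio *rbio = rbio_init(bio, c, opts, example_read_endio);

	/* bch2_read() copies inum.subvol into the rbio and submits it with
	 * BCH_READ_retry_if_stale|BCH_READ_may_promote|BCH_READ_user_mapped */
	bch2_read(c, rbio, inum);
}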
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PATH_H
#define _LINUX_PATH_H

struct dentry;
struct vfsmount;

struct path {
	struct vfsmount *mnt;
	struct dentry *dentry;
} __randomize_layout;

extern void path_get(const struct path *);
extern void path_put(const struct path *);

static inline int path_equal(const struct path *path1, const struct path *path2)
{
	return path1->mnt == path2->mnt && path1->dentry == path2->dentry;
}

/*
 * Cleanup macro for use with __free(path_put). Avoids dereference and
 * copying @path unlike DEFINE_FREE(). path_put() will handle the empty
 * path correctly just ensure @path is initialized:
 *
 * struct path path __free(path_put) = {};
 */
#define __free_path_put path_put

#endif  /* _LINUX_PATH_H */
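A short usage sketch of the declarations above: with __free(path_put), each reference is dropped automatically on every return path, which is exactly the pattern the header comment documents. The helper name is hypothetical, and kern_path()/LOOKUP_FOLLOW are assumed from <linux/namei.h>, __free() from <linux/cleanup.h>.

/* Hypothetical example, not from the kernel tree: check whether two path
 * names resolve to the same (mnt, dentry) pair. */
#include <linux/types.h>
#include <linux/cleanup.h>
#include <linux/namei.h>
#include <linux/path.h>

static int example_paths_alias(const char *name1, const char *name2, bool *same)
{
	struct path p1 __free(path_put) = {};
	struct path p2 __free(path_put) = {};
	int err;

	err = kern_path(name1, LOOKUP_FOLLOW, &p1);
	if (err)
		return err;

	err = kern_path(name2, LOOKUP_FOLLOW, &p2);
	if (err)
		return err;		/* p1 is put automatically */

	*same = path_equal(&p1, &p2);
	return 0;			/* both paths are put on return */
}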
23 14 22 26 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 /* * linux/fs/nls/nls_cp869.c * * Charset cp869 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0386, 0x0000, 0x00b7, 0x00ac, 0x00a6, 0x2018, 0x2019, 0x0388, 0x2015, 0x0389, /* 0x90*/ 0x038a, 0x03aa, 0x038c, 0x0000, 0x0000, 0x038e, 0x03ab, 0x00a9, 0x038f, 0x00b2, 0x00b3, 0x03ac, 0x00a3, 0x03ad, 0x03ae, 0x03af, /* 0xa0*/ 0x03ca, 0x0390, 0x03cc, 0x03cd, 0x0391, 0x0392, 0x0393, 0x0394, 0x0395, 0x0396, 0x0397, 0x00bd, 0x0398, 0x0399, 0x00ab, 0x00bb, /* 0xb0*/ 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x039a, 0x039b, 0x039c, 0x039d, 0x2563, 0x2551, 0x2557, 0x255d, 0x039e, 0x039f, 0x2510, /* 0xc0*/ 0x2514, 0x2534, 0x252c, 0x251c, 0x2500, 0x253c, 0x03a0, 0x03a1, 0x255a, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256c, 0x03a3, /* 0xd0*/ 0x03a4, 0x03a5, 0x03a6, 0x03a7, 0x03a8, 0x03a9, 0x03b1, 0x03b2, 0x03b3, 0x2518, 0x250c, 0x2588, 0x2584, 0x03b4, 0x03b5, 0x2580, /* 0xe0*/ 0x03b6, 0x03b7, 0x03b8, 0x03b9, 
0x03ba, 0x03bb, 0x03bc, 0x03bd, 0x03be, 0x03bf, 0x03c0, 0x03c1, 0x03c3, 0x03c2, 0x03c4, 0x0384, /* 0xf0*/ 0x00ad, 0x00b1, 0x03c5, 0x03c6, 0x03c7, 0x00a7, 0x03c8, 0x0385, 0x00b0, 0x00a8, 0x03c9, 0x03cb, 0x03b0, 0x03ce, 0x25a0, 0x00a0, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xff, 0x00, 0x00, 0x9c, 0x00, 0x00, 0x8a, 0xf5, /* 0xa0-0xa7 */ 0xf9, 0x97, 0x00, 0xae, 0x89, 0xf0, 0x00, 0x00, /* 0xa8-0xaf */ 0xf8, 0xf1, 0x99, 0x9a, 0x00, 0x00, 0x00, 0x88, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0xaf, 0x00, 0xab, 0x00, 0x00, /* 0xb8-0xbf */ }; static const unsigned char page03[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0xef, 0xf7, 0x86, 0x00, /* 0x80-0x87 */ 0x8d, 0x8f, 0x90, 0x00, 0x92, 0x00, 0x95, 0x98, /* 0x88-0x8f */ 0xa1, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, /* 0x90-0x97 */ 0xac, 0xad, 0xb5, 0xb6, 0xb7, 0xb8, 0xbd, 0xbe, /* 0x98-0x9f */ 0xc6, 0xc7, 0x00, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, /* 0xa0-0xa7 */ 0xd4, 0xd5, 0x91, 0x96, 0x9b, 0x9d, 0x9e, 0x9f, /* 0xa8-0xaf */ 0xfc, 0xd6, 0xd7, 0xd8, 0xdd, 0xde, 0xe0, 0xe1, /* 0xb0-0xb7 */ 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, /* 0xb8-0xbf */ 0xea, 0xeb, 0xed, 0xec, 0xee, 0xf2, 0xf3, 0xf4, /* 0xc0-0xc7 */ 0xf6, 0xfa, 0xa0, 0xfb, 0xa2, 0xa3, 0xfd, 0x00, /* 0xc8-0xcf */ }; static const unsigned 
char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x00, 0x00, /* 0x10-0x17 */ 0x8b, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ }; static const unsigned char page25[256] = { 0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0xcd, 0xba, 0x00, 0x00, 0xc9, 0x00, 0x00, 0xbb, /* 0x50-0x57 */ 0x00, 0x00, 0xc8, 0x00, 0x00, 0xbc, 0x00, 0x00, /* 0x58-0x5f */ 0xcc, 0x00, 0x00, 0xb9, 0x00, 0x00, 0xcb, 0x00, /* 0x60-0x67 */ 0x00, 0xca, 0x00, 0x00, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ }; static const unsigned char *const page_uni2charset[256] = { page00, NULL, NULL, page03, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, NULL, NULL, NULL, NULL, page25, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9b, 0x00, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x9d, 0x8e, 0x9e, /* 0x88-0x8f */ 0x9f, 0xa0, 0xa2, 0x00, 0x00, 0xa3, 0xfb, 0x97, /* 0x90-0x97 */ 0xfd, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xd6, 0xd7, 0xd8, 0xdd, /* 0xa0-0xa7 */ 0xde, 0xe0, 0xe1, 0xab, 0xe2, 0xe3, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xe4, 0xe5, 0xe6, /* 0xb0-0xb7 */ 0xe7, 0xb9, 0xba, 0xbb, 0xbc, 0xe8, 0xe9, 0xbf, /* 
0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xea, 0xeb, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xec, /* 0xc8-0xcf */ 0xee, 0xf2, 0xf3, 0xf4, 0xf6, 0xfa, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0x00, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x00, 0x00, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x86, 0x9c, 0x8d, 0x8f, 0x90, /* 0x98-0x9f */ 0x91, 0xa1, 0x92, 0x95, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xa4, 0xa5, /* 0xd0-0xd7 */ 0xa6, 0xd9, 0xda, 0xdb, 0xdc, 0xa7, 0xa8, 0xdf, /* 0xd8-0xdf */ 0xa9, 0xaa, 0xac, 0xad, 0xb5, 0xb6, 0xb7, 0xb8, /* 0xe0-0xe7 */ 0xbd, 0xbe, 0xc6, 0xc7, 0xcf, 0xcf, 0xd0, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xd1, 0xd2, 0xd3, 0xf5, 0xd4, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xd5, 0x96, 0xfc, 0x98, 0xfe, 0xff, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "cp869", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, }; static int __init init_nls_cp869(void) { return register_nls(&table); } static void __exit exit_nls_cp869(void) { unregister_nls(&table); } module_init(init_nls_cp869) module_exit(exit_nls_cp869) MODULE_DESCRIPTION("NLS Codepage 869 (Greek)"); 
MODULE_LICENSE("Dual BSD/GPL");
446 446 86 162 216 163 211 30 6 30 30 30 30 30 28 30 4 2 2 594 14 683 51 48 65 172 19 107 105 62 62 54 54 21 2 1 18 10 5 2 4 9 4 21 25 25 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 // SPDX-License-Identifier: GPL-2.0 /* * High-level sync()-related operations */ #include <linux/blkdev.h> #include <linux/kernel.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/namei.h> #include <linux/sched.h> #include <linux/writeback.h> #include <linux/syscalls.h> #include <linux/linkage.h> #include <linux/pagemap.h> #include <linux/quotaops.h> #include <linux/backing-dev.h> #include "internal.h" #define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \ SYNC_FILE_RANGE_WAIT_AFTER) /* * Write out and wait upon all dirty data associated with this * superblock. Filesystem data as well as the underlying block * device. Takes the superblock lock. */ int sync_filesystem(struct super_block *sb) { int ret = 0; /* * We need to be protected against the filesystem going from * r/o to r/w or vice versa. */ WARN_ON(!rwsem_is_locked(&sb->s_umount)); /* * No point in syncing out anything if the filesystem is read-only. */ if (sb_rdonly(sb)) return 0; /* * Do the filesystem syncing work. For simple filesystems * writeback_inodes_sb(sb) just dirties buffers with inodes so we have * to submit I/O for these buffers via sync_blockdev(). This also * speeds up the wait == 1 case since in that case write_inode() * methods call sync_dirty_buffer() and thus effectively write one block * at a time. 
*/ writeback_inodes_sb(sb, WB_REASON_SYNC); if (sb->s_op->sync_fs) { ret = sb->s_op->sync_fs(sb, 0); if (ret) return ret; } ret = sync_blockdev_nowait(sb->s_bdev); if (ret) return ret; sync_inodes_sb(sb); if (sb->s_op->sync_fs) { ret = sb->s_op->sync_fs(sb, 1); if (ret) return ret; } return sync_blockdev(sb->s_bdev); } EXPORT_SYMBOL(sync_filesystem); static void sync_inodes_one_sb(struct super_block *sb, void *arg) { if (!sb_rdonly(sb)) sync_inodes_sb(sb); } static void sync_fs_one_sb(struct super_block *sb, void *arg) { if (!sb_rdonly(sb) && !(sb->s_iflags & SB_I_SKIP_SYNC) && sb->s_op->sync_fs) sb->s_op->sync_fs(sb, *(int *)arg); } /* * Sync everything. We start by waking flusher threads so that most of * writeback runs on all devices in parallel. Then we sync all inodes reliably * which effectively also waits for all flusher threads to finish doing * writeback. At this point all data is on disk so metadata should be stable * and we tell filesystems to sync their metadata via ->sync_fs() calls. * Finally, we writeout all block devices because some filesystems (e.g. ext2) * just write metadata (such as inodes or bitmaps) to block device page cache * and do not sync it on their own in ->sync_fs(). */ void ksys_sync(void) { int nowait = 0, wait = 1; wakeup_flusher_threads(WB_REASON_SYNC); iterate_supers(sync_inodes_one_sb, NULL); iterate_supers(sync_fs_one_sb, &nowait); iterate_supers(sync_fs_one_sb, &wait); sync_bdevs(false); sync_bdevs(true); if (unlikely(laptop_mode)) laptop_sync_completion(); } SYSCALL_DEFINE0(sync) { ksys_sync(); return 0; } static void do_sync_work(struct work_struct *work) { int nowait = 0; /* * Sync twice to reduce the possibility we skipped some inodes / pages * because they were temporarily locked */ iterate_supers(sync_inodes_one_sb, &nowait); iterate_supers(sync_fs_one_sb, &nowait); sync_bdevs(false); iterate_supers(sync_inodes_one_sb, &nowait); iterate_supers(sync_fs_one_sb, &nowait); sync_bdevs(false); printk("Emergency Sync complete\n"); kfree(work); } void emergency_sync(void) { struct work_struct *work; work = kmalloc(sizeof(*work), GFP_ATOMIC); if (work) { INIT_WORK(work, do_sync_work); schedule_work(work); } } /* * sync a single super */ SYSCALL_DEFINE1(syncfs, int, fd) { CLASS(fd, f)(fd); struct super_block *sb; int ret, ret2; if (fd_empty(f)) return -EBADF; sb = fd_file(f)->f_path.dentry->d_sb; down_read(&sb->s_umount); ret = sync_filesystem(sb); up_read(&sb->s_umount); ret2 = errseq_check_and_advance(&sb->s_wb_err, &fd_file(f)->f_sb_err); return ret ? ret : ret2; } /** * vfs_fsync_range - helper to sync a range of data & metadata to disk * @file: file to sync * @start: offset in bytes of the beginning of data range to sync * @end: offset in bytes of the end of data range (inclusive) * @datasync: perform only datasync * * Write back data in range @start..@end and metadata for @file to disk. If * @datasync is set only metadata needed to access modified file data is * written. */ int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; if (!file->f_op->fsync) return -EINVAL; if (!datasync && (inode->i_state & I_DIRTY_TIME)) mark_inode_dirty_sync(inode); return file->f_op->fsync(file, start, end, datasync); } EXPORT_SYMBOL(vfs_fsync_range); /** * vfs_fsync - perform a fsync or fdatasync on a file * @file: file to sync * @datasync: only perform a fdatasync operation * * Write back data and metadata for @file to disk. 
If @datasync is * set only metadata needed to access modified file data is written. */ int vfs_fsync(struct file *file, int datasync) { return vfs_fsync_range(file, 0, LLONG_MAX, datasync); } EXPORT_SYMBOL(vfs_fsync); static int do_fsync(unsigned int fd, int datasync) { CLASS(fd, f)(fd); if (fd_empty(f)) return -EBADF; return vfs_fsync(fd_file(f), datasync); } SYSCALL_DEFINE1(fsync, unsigned int, fd) { return do_fsync(fd, 0); } SYSCALL_DEFINE1(fdatasync, unsigned int, fd) { return do_fsync(fd, 1); } int sync_file_range(struct file *file, loff_t offset, loff_t nbytes, unsigned int flags) { int ret; struct address_space *mapping; loff_t endbyte; /* inclusive */ umode_t i_mode; ret = -EINVAL; if (flags & ~VALID_FLAGS) goto out; endbyte = offset + nbytes; if ((s64)offset < 0) goto out; if ((s64)endbyte < 0) goto out; if (endbyte < offset) goto out; if (sizeof(pgoff_t) == 4) { if (offset >= (0x100000000ULL << PAGE_SHIFT)) { /* * The range starts outside a 32 bit machine's * pagecache addressing capabilities. Let it "succeed" */ ret = 0; goto out; } if (endbyte >= (0x100000000ULL << PAGE_SHIFT)) { /* * Out to EOF */ nbytes = 0; } } if (nbytes == 0) endbyte = LLONG_MAX; else endbyte--; /* inclusive */ i_mode = file_inode(file)->i_mode; ret = -ESPIPE; if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) && !S_ISLNK(i_mode)) goto out; mapping = file->f_mapping; ret = 0; if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) { ret = file_fdatawait_range(file, offset, endbyte); if (ret < 0) goto out; } if (flags & SYNC_FILE_RANGE_WRITE) { int sync_mode = WB_SYNC_NONE; if ((flags & SYNC_FILE_RANGE_WRITE_AND_WAIT) == SYNC_FILE_RANGE_WRITE_AND_WAIT) sync_mode = WB_SYNC_ALL; ret = __filemap_fdatawrite_range(mapping, offset, endbyte, sync_mode); if (ret < 0) goto out; } if (flags & SYNC_FILE_RANGE_WAIT_AFTER) ret = file_fdatawait_range(file, offset, endbyte); out: return ret; } /* * ksys_sync_file_range() permits finely controlled syncing over a segment of * a file in the range offset .. (offset+nbytes-1) inclusive. If nbytes is * zero then ksys_sync_file_range() will operate from offset out to EOF. * * The flag bits are: * * SYNC_FILE_RANGE_WAIT_BEFORE: wait upon writeout of all pages in the range * before performing the write. * * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the * range which are not presently under writeback. Note that this may block for * significant periods due to exhaustion of disk request structures. * * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range * after performing the write. * * Useful combinations of the flag bits are: * * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE: ensures that all pages * in the range which were dirty on entry to ksys_sync_file_range() are placed * under writeout. This is a start-write-for-data-integrity operation. * * SYNC_FILE_RANGE_WRITE: start writeout of all dirty pages in the range which * are not presently under writeout. This is an asynchronous flush-to-disk * operation. Not suitable for data integrity operations. * * SYNC_FILE_RANGE_WAIT_BEFORE (or SYNC_FILE_RANGE_WAIT_AFTER): wait for * completion of writeout of all pages in the range. This will be used after an * earlier SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE operation to wait * for that operation to complete and to return the result. * * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER * (a.k.a. SYNC_FILE_RANGE_WRITE_AND_WAIT): * a traditional sync() operation. 
This is a write-for-data-integrity operation * which will ensure that all pages in the range which were dirty on entry to * ksys_sync_file_range() are written to disk. It should be noted that disk * caches are not flushed by this call, so there are no guarantees here that the * data will be available on disk after a crash. * * * SYNC_FILE_RANGE_WAIT_BEFORE and SYNC_FILE_RANGE_WAIT_AFTER will detect any * I/O errors or ENOSPC conditions and will return those to the caller, after * clearing the EIO and ENOSPC flags in the address_space. * * It should be noted that none of these operations write out the file's * metadata. So unless the application is strictly performing overwrites of * already-instantiated disk blocks, there are no guarantees here that the data * will be available after a crash. */ int ksys_sync_file_range(int fd, loff_t offset, loff_t nbytes, unsigned int flags) { CLASS(fd, f)(fd); if (fd_empty(f)) return -EBADF; return sync_file_range(fd_file(f), offset, nbytes, flags); } SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes, unsigned int, flags) { return ksys_sync_file_range(fd, offset, nbytes, flags); } #if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_SYNC_FILE_RANGE) COMPAT_SYSCALL_DEFINE6(sync_file_range, int, fd, compat_arg_u64_dual(offset), compat_arg_u64_dual(nbytes), unsigned int, flags) { return ksys_sync_file_range(fd, compat_arg_u64_glue(offset), compat_arg_u64_glue(nbytes), flags); } #endif /* It would be nice if people remember that not all the world's an i386 when they introduce new system calls */ SYSCALL_DEFINE4(sync_file_range2, int, fd, unsigned int, flags, loff_t, offset, loff_t, nbytes) { return ksys_sync_file_range(fd, offset, nbytes, flags); }
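/*
 * A hedged user-space sketch of the flag combinations documented above: start
 * asynchronous writeout of a dirty range, do other work, then wait for that
 * writeout with the WRITE_AND_WAIT combination. As the comment above
 * stresses, this syncs neither file metadata nor the disk write cache, so an
 * fsync() or fdatasync() is still needed for durability. The file name and
 * range are arbitrary example values.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("example.dat", O_WRONLY | O_CREAT, 0644);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* ... write() roughly 1 MiB of data at offset 0 ... */

        /* Kick off writeout of the dirty pages in the range; do not wait. */
        if (sync_file_range(fd, 0, 1 << 20, SYNC_FILE_RANGE_WRITE))
                perror("sync_file_range(WRITE)");

        /* ... overlap other work with the in-flight I/O ... */

        /* Wait for that writeout to complete and surface any I/O error. */
        if (sync_file_range(fd, 0, 1 << 20,
                            SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE |
                            SYNC_FILE_RANGE_WAIT_AFTER))
                perror("sync_file_range(WRITE_AND_WAIT)");

        close(fd);
        return 0;
}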
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 IBM Corporation
 *
 * Author:
 * Mimi Zohar <zohar@us.ibm.com>
 */

#include <linux/xattr.h>
#include <linux/evm.h>

int posix_xattr_acl(const char *xattr)
{
        int xattr_len = strlen(xattr);

        if ((strlen(XATTR_NAME_POSIX_ACL_ACCESS) == xattr_len)
             && (strncmp(XATTR_NAME_POSIX_ACL_ACCESS, xattr, xattr_len) == 0))
                return 1;
        if ((strlen(XATTR_NAME_POSIX_ACL_DEFAULT) == xattr_len)
             && (strncmp(XATTR_NAME_POSIX_ACL_DEFAULT, xattr, xattr_len) == 0))
                return 1;
        return 0;
}
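/*
 * The strlen() + strncmp() pair above amounts to an exact string match
 * against the two POSIX ACL xattr names. A hedged user-space equivalent,
 * with the literal names written out (the expansions of
 * XATTR_NAME_POSIX_ACL_ACCESS and XATTR_NAME_POSIX_ACL_DEFAULT), could look
 * like this:
 */
#include <string.h>

static int is_posix_acl_xattr(const char *name)
{
        return strcmp(name, "system.posix_acl_access") == 0 ||
               strcmp(name, "system.posix_acl_default") == 0;
}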
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2012 VMware, Inc. All rights reserved.
 */

#include <linux/types.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <net/sock.h>
#include <net/vsock_addr.h>

void vsock_addr_init(struct sockaddr_vm *addr, u32 cid, u32 port)
{
        memset(addr, 0, sizeof(*addr));
        addr->svm_family = AF_VSOCK;
        addr->svm_cid = cid;
        addr->svm_port = port;
}
EXPORT_SYMBOL_GPL(vsock_addr_init);

int vsock_addr_validate(const struct sockaddr_vm *addr)
{
        __u8 svm_valid_flags = VMADDR_FLAG_TO_HOST;

        if (!addr)
                return -EFAULT;

        if (addr->svm_family != AF_VSOCK)
                return -EAFNOSUPPORT;

        if (addr->svm_flags & ~svm_valid_flags)
                return -EINVAL;

        return 0;
}
EXPORT_SYMBOL_GPL(vsock_addr_validate);

bool vsock_addr_bound(const struct sockaddr_vm *addr)
{
        return addr->svm_port != VMADDR_PORT_ANY;
}
EXPORT_SYMBOL_GPL(vsock_addr_bound);

void vsock_addr_unbind(struct sockaddr_vm *addr)
{
        vsock_addr_init(addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
}
EXPORT_SYMBOL_GPL(vsock_addr_unbind);

bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,
                            const struct sockaddr_vm *other)
{
        return addr->svm_cid == other->svm_cid &&
               addr->svm_port == other->svm_port;
}
EXPORT_SYMBOL_GPL(vsock_addr_equals_addr);

int vsock_addr_cast(const struct sockaddr *addr,
                    size_t len, struct sockaddr_vm **out_addr)
{
        if (len < sizeof(**out_addr))
                return -EFAULT;

        *out_addr = (struct sockaddr_vm *)addr;

        return vsock_addr_validate(*out_addr);
}
EXPORT_SYMBOL_GPL(vsock_addr_cast);
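/*
 * A hedged user-space sketch of the address layout those helpers validate:
 * zero the sockaddr_vm, set family/CID/port the same way vsock_addr_init()
 * does, then bind and listen on an AF_VSOCK socket. The port number is an
 * arbitrary example; struct sockaddr_vm and the VMADDR_* constants come from
 * <linux/vm_sockets.h>.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/vm_sockets.h>

int main(void)
{
        struct sockaddr_vm addr;
        int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

        if (fd < 0) {
                perror("socket(AF_VSOCK)");
                return 1;
        }

        memset(&addr, 0, sizeof(addr));         /* same zeroing vsock_addr_init() does */
        addr.svm_family = AF_VSOCK;
        addr.svm_cid = VMADDR_CID_ANY;          /* bound to any CID */
        addr.svm_port = 1234;                   /* arbitrary example port */

        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
            listen(fd, 1) < 0) {
                perror("bind/listen");
                close(fd);
                return 1;
        }

        close(fd);
        return 0;
}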
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

/*
 * This is an implementation of the notion of "decrement a
 * reference count, and return locked if it decremented to zero".
 *
 * NOTE NOTE NOTE! This is _not_ equivalent to
 *
 *      if (atomic_dec_and_test(&atomic)) {
 *              spin_lock(&lock);
 *              return 1;
 *      }
 *      return 0;
 *
 * because the spin-lock and the decrement must be
 * "atomic".
 */
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
        /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
        if (atomic_add_unless(atomic, -1, 1))
                return 0;

        /* Otherwise do it the slow way */
        spin_lock(lock);
        if (atomic_dec_and_test(atomic))
                return 1;
        spin_unlock(lock);
        return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock);

int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
                                 unsigned long *flags)
{
        /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
        if (atomic_add_unless(atomic, -1, 1))
                return 0;

        /* Otherwise do it the slow way */
        spin_lock_irqsave(lock, *flags);
        if (atomic_dec_and_test(atomic))
                return 1;
        spin_unlock_irqrestore(lock, *flags);
        return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);

int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock)
{
        /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
        if (atomic_add_unless(atomic, -1, 1))
                return 0;

        /* Otherwise do it the slow way */
        raw_spin_lock(lock);
        if (atomic_dec_and_test(atomic))
                return 1;
        raw_spin_unlock(lock);
        return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_raw_lock);

int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
                                     unsigned long *flags)
{
        /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
        if (atomic_add_unless(atomic, -1, 1))
                return 0;

        /* Otherwise do it the slow way */
        raw_spin_lock_irqsave(lock, *flags);
        if (atomic_dec_and_test(atomic))
                return 1;
        raw_spin_unlock_irqrestore(lock, *flags);
        return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_raw_lock_irqsave);
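/*
 * A hedged sketch of the canonical caller pattern these helpers exist for:
 * drop a reference and, only when it drops to zero, take the lock that
 * protects the object's place in a lookup list before unlinking and freeing
 * it. The demo_obj structure and the list/lock names are hypothetical, not
 * part of this file.
 */
#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_obj {
        atomic_t refcount;
        struct list_head node;
};

static DEFINE_SPINLOCK(demo_list_lock);         /* protects the list 'node' is on */

static void demo_obj_put(struct demo_obj *obj)
{
        /*
         * Returns with demo_list_lock held only when the count reached zero,
         * so no other CPU can look the object up while it is being torn down.
         */
        if (atomic_dec_and_lock(&obj->refcount, &demo_list_lock)) {
                list_del(&obj->node);
                spin_unlock(&demo_list_lock);
                kfree(obj);
        }
}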
914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */ #ifndef _UVERBS_IOCTL_ #define _UVERBS_IOCTL_ #include <rdma/uverbs_types.h> #include <linux/uaccess.h> #include <rdma/rdma_user_ioctl.h> #include <rdma/ib_user_ioctl_verbs.h> #include <rdma/ib_user_ioctl_cmds.h> /* * ======================================= * Verbs action specifications * ======================================= */ enum uverbs_attr_type { UVERBS_ATTR_TYPE_NA, UVERBS_ATTR_TYPE_PTR_IN, UVERBS_ATTR_TYPE_PTR_OUT, UVERBS_ATTR_TYPE_IDR, UVERBS_ATTR_TYPE_FD, UVERBS_ATTR_TYPE_RAW_FD, UVERBS_ATTR_TYPE_ENUM_IN, UVERBS_ATTR_TYPE_IDRS_ARRAY, }; enum uverbs_obj_access { UVERBS_ACCESS_READ, UVERBS_ACCESS_WRITE, UVERBS_ACCESS_NEW, UVERBS_ACCESS_DESTROY }; /* Specification of a single attribute inside the ioctl message */ /* good size 16 */ struct uverbs_attr_spec { u8 type; /* * Support extending attributes by length. Allow the user to provide * more bytes than ptr.len, but check that everything after is zero'd * by the user. */ u8 zero_trailing:1; /* * Valid only for PTR_IN. Allocate and copy the data inside * the parser */ u8 alloc_and_copy:1; u8 mandatory:1; /* True if this is from UVERBS_ATTR_UHW */ u8 is_udata:1; union { struct { /* Current known size to kernel */ u16 len; /* User isn't allowed to provide something < min_len */ u16 min_len; } ptr; struct { /* * higher bits mean the namespace and lower bits mean * the type id within the namespace. */ u16 obj_type; u8 access; } obj; struct { u8 num_elems; } enum_def; } u; /* This weird split lets us remove some padding */ union { struct { /* * The enum attribute can select one of the attributes * contained in the ids array. Currently only PTR_IN * attributes are supported in the ids array. */ const struct uverbs_attr_spec *ids; } enum_def; struct { /* * higher bits mean the namespace and lower bits mean * the type id within the namespace. */ u16 obj_type; u16 min_len; u16 max_len; u8 access; } objs_arr; } u2; }; /* * Information about the API is loaded into a radix tree. For IOCTL we start * with a tuple of: * object_id, attr_id, method_id * * Which is a 48 bit value, with most of the bits guaranteed to be zero. Based * on the current kernel support this is compressed into 16 bit key for the * radix tree. Since this compression is entirely internal to the kernel the * below limits can be revised if the kernel gains additional data. * * With 64 leafs per node this is a 3 level radix tree. * * The tree encodes multiple types, and uses a scheme where OBJ_ID,0,0 returns * the object slot, and OBJ_ID,METH_ID,0 and returns the method slot. * * This also encodes the tables for the write() and write() extended commands * using the coding * OBJ_ID,UVERBS_API_METHOD_IS_WRITE,command # * OBJ_ID,UVERBS_API_METHOD_IS_WRITE_EX,command_ex # * ie the WRITE path is treated as a special method type in the ioctl * framework. 
*/ enum uapi_radix_data { UVERBS_API_NS_FLAG = 1U << UVERBS_ID_NS_SHIFT, UVERBS_API_ATTR_KEY_BITS = 6, UVERBS_API_ATTR_KEY_MASK = GENMASK(UVERBS_API_ATTR_KEY_BITS - 1, 0), UVERBS_API_ATTR_BKEY_LEN = (1 << UVERBS_API_ATTR_KEY_BITS) - 1, UVERBS_API_WRITE_KEY_NUM = 1 << UVERBS_API_ATTR_KEY_BITS, UVERBS_API_METHOD_KEY_BITS = 5, UVERBS_API_METHOD_KEY_SHIFT = UVERBS_API_ATTR_KEY_BITS, UVERBS_API_METHOD_KEY_NUM_CORE = 22, UVERBS_API_METHOD_IS_WRITE = 30 << UVERBS_API_METHOD_KEY_SHIFT, UVERBS_API_METHOD_IS_WRITE_EX = 31 << UVERBS_API_METHOD_KEY_SHIFT, UVERBS_API_METHOD_KEY_NUM_DRIVER = (UVERBS_API_METHOD_IS_WRITE >> UVERBS_API_METHOD_KEY_SHIFT) - UVERBS_API_METHOD_KEY_NUM_CORE, UVERBS_API_METHOD_KEY_MASK = GENMASK( UVERBS_API_METHOD_KEY_BITS + UVERBS_API_METHOD_KEY_SHIFT - 1, UVERBS_API_METHOD_KEY_SHIFT), UVERBS_API_OBJ_KEY_BITS = 5, UVERBS_API_OBJ_KEY_SHIFT = UVERBS_API_METHOD_KEY_BITS + UVERBS_API_METHOD_KEY_SHIFT, UVERBS_API_OBJ_KEY_NUM_CORE = 20, UVERBS_API_OBJ_KEY_NUM_DRIVER = (1 << UVERBS_API_OBJ_KEY_BITS) - UVERBS_API_OBJ_KEY_NUM_CORE, UVERBS_API_OBJ_KEY_MASK = GENMASK(31, UVERBS_API_OBJ_KEY_SHIFT), /* This id guaranteed to not exist in the radix tree */ UVERBS_API_KEY_ERR = 0xFFFFFFFF, }; static inline __attribute_const__ u32 uapi_key_obj(u32 id) { if (id & UVERBS_API_NS_FLAG) { id &= ~UVERBS_API_NS_FLAG; if (id >= UVERBS_API_OBJ_KEY_NUM_DRIVER) return UVERBS_API_KEY_ERR; id = id + UVERBS_API_OBJ_KEY_NUM_CORE; } else { if (id >= UVERBS_API_OBJ_KEY_NUM_CORE) return UVERBS_API_KEY_ERR; } return id << UVERBS_API_OBJ_KEY_SHIFT; } static inline __attribute_const__ bool uapi_key_is_object(u32 key) { return (key & ~UVERBS_API_OBJ_KEY_MASK) == 0; } static inline __attribute_const__ u32 uapi_key_ioctl_method(u32 id) { if (id & UVERBS_API_NS_FLAG) { id &= ~UVERBS_API_NS_FLAG; if (id >= UVERBS_API_METHOD_KEY_NUM_DRIVER) return UVERBS_API_KEY_ERR; id = id + UVERBS_API_METHOD_KEY_NUM_CORE; } else { id++; if (id >= UVERBS_API_METHOD_KEY_NUM_CORE) return UVERBS_API_KEY_ERR; } return id << UVERBS_API_METHOD_KEY_SHIFT; } static inline __attribute_const__ u32 uapi_key_write_method(u32 id) { if (id >= UVERBS_API_WRITE_KEY_NUM) return UVERBS_API_KEY_ERR; return UVERBS_API_METHOD_IS_WRITE | id; } static inline __attribute_const__ u32 uapi_key_write_ex_method(u32 id) { if (id >= UVERBS_API_WRITE_KEY_NUM) return UVERBS_API_KEY_ERR; return UVERBS_API_METHOD_IS_WRITE_EX | id; } static inline __attribute_const__ u32 uapi_key_attr_to_ioctl_method(u32 attr_key) { return attr_key & (UVERBS_API_OBJ_KEY_MASK | UVERBS_API_METHOD_KEY_MASK); } static inline __attribute_const__ bool uapi_key_is_ioctl_method(u32 key) { unsigned int method = key & UVERBS_API_METHOD_KEY_MASK; return method != 0 && method < UVERBS_API_METHOD_IS_WRITE && (key & UVERBS_API_ATTR_KEY_MASK) == 0; } static inline __attribute_const__ bool uapi_key_is_write_method(u32 key) { return (key & UVERBS_API_METHOD_KEY_MASK) == UVERBS_API_METHOD_IS_WRITE; } static inline __attribute_const__ bool uapi_key_is_write_ex_method(u32 key) { return (key & UVERBS_API_METHOD_KEY_MASK) == UVERBS_API_METHOD_IS_WRITE_EX; } static inline __attribute_const__ u32 uapi_key_attrs_start(u32 ioctl_method_key) { /* 0 is the method slot itself */ return ioctl_method_key + 1; } static inline __attribute_const__ u32 uapi_key_attr(u32 id) { /* * The attr is designed to fit in the typical single radix tree node * of 64 entries. 
Since allmost all methods have driver attributes we * organize things so that the driver and core attributes interleave to * reduce the length of the attributes array in typical cases. */ if (id & UVERBS_API_NS_FLAG) { id &= ~UVERBS_API_NS_FLAG; id++; if (id >= 1 << (UVERBS_API_ATTR_KEY_BITS - 1)) return UVERBS_API_KEY_ERR; id = (id << 1) | 0; } else { if (id >= 1 << (UVERBS_API_ATTR_KEY_BITS - 1)) return UVERBS_API_KEY_ERR; id = (id << 1) | 1; } return id; } /* Only true for ioctl methods */ static inline __attribute_const__ bool uapi_key_is_attr(u32 key) { unsigned int method = key & UVERBS_API_METHOD_KEY_MASK; return method != 0 && method < UVERBS_API_METHOD_IS_WRITE && (key & UVERBS_API_ATTR_KEY_MASK) != 0; } /* * This returns a value in the range [0 to UVERBS_API_ATTR_BKEY_LEN), * basically it undoes the reservation of 0 in the ID numbering. attr_key * must already be masked with UVERBS_API_ATTR_KEY_MASK, or be the output of * uapi_key_attr(). */ static inline __attribute_const__ u32 uapi_bkey_attr(u32 attr_key) { return attr_key - 1; } static inline __attribute_const__ u32 uapi_bkey_to_key_attr(u32 attr_bkey) { return attr_bkey + 1; } /* * ======================================= * Verbs definitions * ======================================= */ struct uverbs_attr_def { u16 id; struct uverbs_attr_spec attr; }; struct uverbs_method_def { u16 id; /* Combination of bits from enum UVERBS_ACTION_FLAG_XXXX */ u32 flags; size_t num_attrs; const struct uverbs_attr_def * const (*attrs)[]; int (*handler)(struct uverbs_attr_bundle *attrs); }; struct uverbs_object_def { u16 id; const struct uverbs_obj_type *type_attrs; size_t num_methods; const struct uverbs_method_def * const (*methods)[]; }; enum uapi_definition_kind { UAPI_DEF_END = 0, UAPI_DEF_OBJECT_START, UAPI_DEF_WRITE, UAPI_DEF_CHAIN_OBJ_TREE, UAPI_DEF_CHAIN, UAPI_DEF_IS_SUPPORTED_FUNC, UAPI_DEF_IS_SUPPORTED_DEV_FN, }; enum uapi_definition_scope { UAPI_SCOPE_OBJECT = 1, UAPI_SCOPE_METHOD = 2, }; struct uapi_definition { u8 kind; u8 scope; union { struct { u16 object_id; } object_start; struct { u16 command_num; u8 is_ex:1; u8 has_udata:1; u8 has_resp:1; u8 req_size; u8 resp_size; } write; }; union { bool (*func_is_supported)(struct ib_device *device); int (*func_write)(struct uverbs_attr_bundle *attrs); const struct uapi_definition *chain; const struct uverbs_object_def *chain_obj_tree; size_t needs_fn_offset; }; }; /* Define things connected to object_id */ #define DECLARE_UVERBS_OBJECT(_object_id, ...) \ { \ .kind = UAPI_DEF_OBJECT_START, \ .object_start = { .object_id = _object_id }, \ }, \ ##__VA_ARGS__ /* Use in a var_args of DECLARE_UVERBS_OBJECT */ #define DECLARE_UVERBS_WRITE(_command_num, _func, _cmd_desc, ...) \ { \ .kind = UAPI_DEF_WRITE, \ .scope = UAPI_SCOPE_OBJECT, \ .write = { .is_ex = 0, .command_num = _command_num }, \ .func_write = _func, \ _cmd_desc, \ }, \ ##__VA_ARGS__ /* Use in a var_args of DECLARE_UVERBS_OBJECT */ #define DECLARE_UVERBS_WRITE_EX(_command_num, _func, _cmd_desc, ...) \ { \ .kind = UAPI_DEF_WRITE, \ .scope = UAPI_SCOPE_OBJECT, \ .write = { .is_ex = 1, .command_num = _command_num }, \ .func_write = _func, \ _cmd_desc, \ }, \ ##__VA_ARGS__ /* * Object is only supported if the function pointer named ibdev_fn in struct * ib_device is not NULL. 
*/ #define UAPI_DEF_OBJ_NEEDS_FN(ibdev_fn) \ { \ .kind = UAPI_DEF_IS_SUPPORTED_DEV_FN, \ .scope = UAPI_SCOPE_OBJECT, \ .needs_fn_offset = \ offsetof(struct ib_device_ops, ibdev_fn) + \ BUILD_BUG_ON_ZERO(sizeof_field(struct ib_device_ops, \ ibdev_fn) != \ sizeof(void *)), \ } /* * Method is only supported if the function pointer named ibdev_fn in struct * ib_device is not NULL. */ #define UAPI_DEF_METHOD_NEEDS_FN(ibdev_fn) \ { \ .kind = UAPI_DEF_IS_SUPPORTED_DEV_FN, \ .scope = UAPI_SCOPE_METHOD, \ .needs_fn_offset = \ offsetof(struct ib_device_ops, ibdev_fn) + \ BUILD_BUG_ON_ZERO(sizeof_field(struct ib_device_ops, \ ibdev_fn) != \ sizeof(void *)), \ } /* Call a function to determine if the entire object is supported or not */ #define UAPI_DEF_IS_OBJ_SUPPORTED(_func) \ { \ .kind = UAPI_DEF_IS_SUPPORTED_FUNC, \ .scope = UAPI_SCOPE_OBJECT, .func_is_supported = _func, \ } /* Include another struct uapi_definition in this one */ #define UAPI_DEF_CHAIN(_def_var) \ { \ .kind = UAPI_DEF_CHAIN, .chain = _def_var, \ } /* Temporary until the tree base description is replaced */ #define UAPI_DEF_CHAIN_OBJ_TREE(_object_enum, _object_ptr, ...) \ { \ .kind = UAPI_DEF_CHAIN_OBJ_TREE, \ .object_start = { .object_id = _object_enum }, \ .chain_obj_tree = _object_ptr, \ }, \ ##__VA_ARGS__ #define UAPI_DEF_CHAIN_OBJ_TREE_NAMED(_object_enum, ...) \ UAPI_DEF_CHAIN_OBJ_TREE(_object_enum, \ PTR_IF(IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS), \ &UVERBS_OBJECT(_object_enum)), \ ##__VA_ARGS__) /* * ======================================= * Attribute Specifications * ======================================= */ #define UVERBS_ATTR_SIZE(_min_len, _len) \ .u.ptr.min_len = _min_len, .u.ptr.len = _len #define UVERBS_ATTR_NO_DATA() UVERBS_ATTR_SIZE(0, 0) /* * Specifies a uapi structure that cannot be extended. The user must always * supply the whole structure and nothing more. The structure must be declared * in a header under include/uapi/rdma. */ #define UVERBS_ATTR_TYPE(_type) \ .u.ptr.min_len = sizeof(_type), .u.ptr.len = sizeof(_type) /* * Specifies a uapi structure where the user must provide at least up to * member 'last'. Anything after last and up until the end of the structure * can be non-zero, anything longer than the end of the structure must be * zero. The structure must be declared in a header under include/uapi/rdma. */ #define UVERBS_ATTR_STRUCT(_type, _last) \ .zero_trailing = 1, \ UVERBS_ATTR_SIZE(offsetofend(_type, _last), sizeof(_type)) /* * Specifies at least min_len bytes must be passed in, but the amount can be * larger, up to the protocol maximum size. No check for zeroing is done. */ #define UVERBS_ATTR_MIN_SIZE(_min_len) UVERBS_ATTR_SIZE(_min_len, USHRT_MAX) /* Must be used in the '...' of any UVERBS_ATTR */ #define UA_ALLOC_AND_COPY .alloc_and_copy = 1 #define UA_MANDATORY .mandatory = 1 #define UA_OPTIONAL .mandatory = 0 /* * min_len must be bigger than 0 and _max_len must be smaller than 4095. Only * READ\WRITE accesses are supported. */ #define UVERBS_ATTR_IDRS_ARR(_attr_id, _idr_type, _access, _min_len, _max_len, \ ...) 
\ (&(const struct uverbs_attr_def){ \ .id = (_attr_id) + \ BUILD_BUG_ON_ZERO((_min_len) == 0 || \ (_max_len) > \ PAGE_SIZE / sizeof(void *) || \ (_min_len) > (_max_len) || \ (_access) == UVERBS_ACCESS_NEW || \ (_access) == UVERBS_ACCESS_DESTROY), \ .attr = { .type = UVERBS_ATTR_TYPE_IDRS_ARRAY, \ .u2.objs_arr.obj_type = _idr_type, \ .u2.objs_arr.access = _access, \ .u2.objs_arr.min_len = _min_len, \ .u2.objs_arr.max_len = _max_len, \ __VA_ARGS__ } }) /* * Only for use with UVERBS_ATTR_IDR, allows any uobject type to be accepted, * the user must validate the type of the uobject instead. */ #define UVERBS_IDR_ANY_OBJECT 0xFFFF #define UVERBS_ATTR_IDR(_attr_id, _idr_type, _access, ...) \ (&(const struct uverbs_attr_def){ \ .id = _attr_id, \ .attr = { .type = UVERBS_ATTR_TYPE_IDR, \ .u.obj.obj_type = _idr_type, \ .u.obj.access = _access, \ __VA_ARGS__ } }) #define UVERBS_ATTR_FD(_attr_id, _fd_type, _access, ...) \ (&(const struct uverbs_attr_def){ \ .id = (_attr_id) + \ BUILD_BUG_ON_ZERO((_access) != UVERBS_ACCESS_NEW && \ (_access) != UVERBS_ACCESS_READ), \ .attr = { .type = UVERBS_ATTR_TYPE_FD, \ .u.obj.obj_type = _fd_type, \ .u.obj.access = _access, \ __VA_ARGS__ } }) #define UVERBS_ATTR_RAW_FD(_attr_id, ...) \ (&(const struct uverbs_attr_def){ \ .id = (_attr_id), \ .attr = { .type = UVERBS_ATTR_TYPE_RAW_FD, __VA_ARGS__ } }) #define UVERBS_ATTR_PTR_IN(_attr_id, _type, ...) \ (&(const struct uverbs_attr_def){ \ .id = _attr_id, \ .attr = { .type = UVERBS_ATTR_TYPE_PTR_IN, \ _type, \ __VA_ARGS__ } }) #define UVERBS_ATTR_PTR_OUT(_attr_id, _type, ...) \ (&(const struct uverbs_attr_def){ \ .id = _attr_id, \ .attr = { .type = UVERBS_ATTR_TYPE_PTR_OUT, \ _type, \ __VA_ARGS__ } }) /* _enum_arry should be a 'static const union uverbs_attr_spec[]' */ #define UVERBS_ATTR_ENUM_IN(_attr_id, _enum_arr, ...) \ (&(const struct uverbs_attr_def){ \ .id = _attr_id, \ .attr = { .type = UVERBS_ATTR_TYPE_ENUM_IN, \ .u2.enum_def.ids = _enum_arr, \ .u.enum_def.num_elems = ARRAY_SIZE(_enum_arr), \ __VA_ARGS__ }, \ }) /* An input value that is a member in the enum _enum_type. */ #define UVERBS_ATTR_CONST_IN(_attr_id, _enum_type, ...) \ UVERBS_ATTR_PTR_IN( \ _attr_id, \ UVERBS_ATTR_SIZE( \ sizeof(u64) + BUILD_BUG_ON_ZERO(!sizeof(_enum_type)), \ sizeof(u64)), \ __VA_ARGS__) /* * An input value that is a bitwise combination of values of _enum_type. * This permits the flag value to be passed as either a u32 or u64, it must * be retrieved via uverbs_get_flag(). */ #define UVERBS_ATTR_FLAGS_IN(_attr_id, _enum_type, ...) \ UVERBS_ATTR_PTR_IN( \ _attr_id, \ UVERBS_ATTR_SIZE(sizeof(u32) + BUILD_BUG_ON_ZERO( \ !sizeof(_enum_type *)), \ sizeof(u64)), \ __VA_ARGS__) /* * This spec is used in order to pass information to the hardware driver in a * legacy way. Every verb that could get driver specific data should get this * spec. */ #define UVERBS_ATTR_UHW() \ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_UHW_IN, \ UVERBS_ATTR_MIN_SIZE(0), \ UA_OPTIONAL, \ .is_udata = 1), \ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_UHW_OUT, \ UVERBS_ATTR_MIN_SIZE(0), \ UA_OPTIONAL, \ .is_udata = 1) /* ================================================= * Parsing infrastructure * ================================================= */ struct uverbs_ptr_attr { /* * If UVERBS_ATTR_SPEC_F_ALLOC_AND_COPY is set then the 'ptr' is * used. 
*/ union { void *ptr; u64 data; }; u16 len; u16 uattr_idx; u8 enum_id; }; struct uverbs_obj_attr { struct ib_uobject *uobject; const struct uverbs_api_attr *attr_elm; }; struct uverbs_objs_arr_attr { struct ib_uobject **uobjects; u16 len; }; struct uverbs_attr { union { struct uverbs_ptr_attr ptr_attr; struct uverbs_obj_attr obj_attr; struct uverbs_objs_arr_attr objs_arr_attr; }; }; struct uverbs_attr_bundle { struct_group_tagged(uverbs_attr_bundle_hdr, hdr, struct ib_udata driver_udata; struct ib_udata ucore; struct ib_uverbs_file *ufile; struct ib_ucontext *context; struct ib_uobject *uobject; DECLARE_BITMAP(attr_present, UVERBS_API_ATTR_BKEY_LEN); ); struct uverbs_attr attrs[]; }; static inline bool uverbs_attr_is_valid(const struct uverbs_attr_bundle *attrs_bundle, unsigned int idx) { return test_bit(uapi_bkey_attr(uapi_key_attr(idx)), attrs_bundle->attr_present); } /** * rdma_udata_to_drv_context - Helper macro to get the driver's context out of * ib_udata which is embedded in uverbs_attr_bundle. * * If udata is not NULL this cannot fail. Otherwise a NULL udata will result * in a NULL ucontext pointer, as a safety precaution. Callers should be using * 'udata' to determine if the driver call is in user or kernel mode, not * 'ucontext'. * */ static inline struct uverbs_attr_bundle * rdma_udata_to_uverbs_attr_bundle(struct ib_udata *udata) { return container_of(udata, struct uverbs_attr_bundle, driver_udata); } #define rdma_udata_to_drv_context(udata, drv_dev_struct, member) \ (udata ? container_of(rdma_udata_to_uverbs_attr_bundle(udata)->context, \ drv_dev_struct, member) : (drv_dev_struct *)NULL) #define IS_UVERBS_COPY_ERR(_ret) ((_ret) && (_ret) != -ENOENT) static inline const struct uverbs_attr *uverbs_attr_get(const struct uverbs_attr_bundle *attrs_bundle, u16 idx) { if (!uverbs_attr_is_valid(attrs_bundle, idx)) return ERR_PTR(-ENOENT); return &attrs_bundle->attrs[uapi_bkey_attr(uapi_key_attr(idx))]; } static inline int uverbs_attr_get_enum_id(const struct uverbs_attr_bundle *attrs_bundle, u16 idx) { const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx); if (IS_ERR(attr)) return PTR_ERR(attr); return attr->ptr_attr.enum_id; } static inline void *uverbs_attr_get_obj(const struct uverbs_attr_bundle *attrs_bundle, u16 idx) { const struct uverbs_attr *attr; attr = uverbs_attr_get(attrs_bundle, idx); if (IS_ERR(attr)) return ERR_CAST(attr); return attr->obj_attr.uobject->object; } static inline struct ib_uobject *uverbs_attr_get_uobject(const struct uverbs_attr_bundle *attrs_bundle, u16 idx) { const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx); if (IS_ERR(attr)) return ERR_CAST(attr); return attr->obj_attr.uobject; } static inline int uverbs_attr_get_len(const struct uverbs_attr_bundle *attrs_bundle, u16 idx) { const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx); if (IS_ERR(attr)) return PTR_ERR(attr); return attr->ptr_attr.len; } void uverbs_finalize_uobj_create(const struct uverbs_attr_bundle *attrs_bundle, u16 idx); /* * uverbs_attr_ptr_get_array_size() - Get array size pointer by a ptr * attribute. 
* @attrs: The attribute bundle * @idx: The ID of the attribute * @elem_size: The size of the element in the array */ static inline int uverbs_attr_ptr_get_array_size(struct uverbs_attr_bundle *attrs, u16 idx, size_t elem_size) { int size = uverbs_attr_get_len(attrs, idx); if (size < 0) return size; if (size % elem_size) return -EINVAL; return size / elem_size; } /** * uverbs_attr_get_uobjs_arr() - Provides array's properties for attribute for * UVERBS_ATTR_TYPE_IDRS_ARRAY. * @arr: Returned pointer to array of pointers for uobjects or NULL if * the attribute isn't provided. * * Return: The array length or 0 if no attribute was provided. */ static inline int uverbs_attr_get_uobjs_arr( const struct uverbs_attr_bundle *attrs_bundle, u16 attr_idx, struct ib_uobject ***arr) { const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, attr_idx); if (IS_ERR(attr)) { *arr = NULL; return 0; } *arr = attr->objs_arr_attr.uobjects; return attr->objs_arr_attr.len; } static inline bool uverbs_attr_ptr_is_inline(const struct uverbs_attr *attr) { return attr->ptr_attr.len <= sizeof(attr->ptr_attr.data); } static inline void *uverbs_attr_get_alloced_ptr( const struct uverbs_attr_bundle *attrs_bundle, u16 idx) { const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx); if (IS_ERR(attr)) return (void *)attr; return uverbs_attr_ptr_is_inline(attr) ? (void *)&attr->ptr_attr.data : attr->ptr_attr.ptr; } static inline int _uverbs_copy_from(void *to, const struct uverbs_attr_bundle *attrs_bundle, size_t idx, size_t size) { const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx); if (IS_ERR(attr)) return PTR_ERR(attr); /* * Validation ensures attr->ptr_attr.len >= size. If the caller is * using UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO then it must call * uverbs_copy_from_or_zero. 
*/ if (unlikely(size < attr->ptr_attr.len)) return -EINVAL; if (uverbs_attr_ptr_is_inline(attr)) memcpy(to, &attr->ptr_attr.data, attr->ptr_attr.len); else if (copy_from_user(to, u64_to_user_ptr(attr->ptr_attr.data), attr->ptr_attr.len)) return -EFAULT; return 0; } static inline int _uverbs_copy_from_or_zero(void *to, const struct uverbs_attr_bundle *attrs_bundle, size_t idx, size_t size) { const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx); size_t min_size; if (IS_ERR(attr)) return PTR_ERR(attr); min_size = min_t(size_t, size, attr->ptr_attr.len); if (uverbs_attr_ptr_is_inline(attr)) memcpy(to, &attr->ptr_attr.data, min_size); else if (copy_from_user(to, u64_to_user_ptr(attr->ptr_attr.data), min_size)) return -EFAULT; if (size > min_size) memset(to + min_size, 0, size - min_size); return 0; } #define uverbs_copy_from(to, attrs_bundle, idx) \ _uverbs_copy_from(to, attrs_bundle, idx, sizeof(*to)) #define uverbs_copy_from_or_zero(to, attrs_bundle, idx) \ _uverbs_copy_from_or_zero(to, attrs_bundle, idx, sizeof(*to)) static inline struct ib_ucontext * ib_uverbs_get_ucontext(const struct uverbs_attr_bundle *attrs) { return ib_uverbs_get_ucontext_file(attrs->ufile); } #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) int uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle, size_t idx, u64 allowed_bits); int uverbs_get_flags32(u32 *to, const struct uverbs_attr_bundle *attrs_bundle, size_t idx, u64 allowed_bits); int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle, size_t idx, const void *from, size_t size); __malloc void *_uverbs_alloc(struct uverbs_attr_bundle *bundle, size_t size, gfp_t flags); static inline __malloc void *uverbs_alloc(struct uverbs_attr_bundle *bundle, size_t size) { return _uverbs_alloc(bundle, size, GFP_KERNEL); } static inline __malloc void *uverbs_zalloc(struct uverbs_attr_bundle *bundle, size_t size) { return _uverbs_alloc(bundle, size, GFP_KERNEL | __GFP_ZERO); } static inline __malloc void *uverbs_kcalloc(struct uverbs_attr_bundle *bundle, size_t n, size_t size) { size_t bytes; if (unlikely(check_mul_overflow(n, size, &bytes))) return ERR_PTR(-EOVERFLOW); return uverbs_zalloc(bundle, bytes); } int _uverbs_get_const_signed(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, size_t idx, s64 lower_bound, u64 upper_bound, s64 *def_val); int _uverbs_get_const_unsigned(u64 *to, const struct uverbs_attr_bundle *attrs_bundle, size_t idx, u64 upper_bound, u64 *def_val); int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle, size_t idx, const void *from, size_t size); #else static inline int uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle, size_t idx, u64 allowed_bits) { return -EINVAL; } static inline int uverbs_get_flags32(u32 *to, const struct uverbs_attr_bundle *attrs_bundle, size_t idx, u64 allowed_bits) { return -EINVAL; } static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle, size_t idx, const void *from, size_t size) { return -EINVAL; } static inline __malloc void *uverbs_alloc(struct uverbs_attr_bundle *bundle, size_t size) { return ERR_PTR(-EINVAL); } static inline __malloc void *uverbs_zalloc(struct uverbs_attr_bundle *bundle, size_t size) { return ERR_PTR(-EINVAL); } static inline int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, size_t idx, s64 lower_bound, u64 upper_bound, s64 *def_val) { return -EINVAL; } static inline int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle, size_t idx, const void 
*from, size_t size) { return -EINVAL; } static inline int _uverbs_get_const_signed(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, size_t idx, s64 lower_bound, u64 upper_bound, s64 *def_val) { return -EINVAL; } static inline int _uverbs_get_const_unsigned(u64 *to, const struct uverbs_attr_bundle *attrs_bundle, size_t idx, u64 upper_bound, u64 *def_val) { return -EINVAL; } #endif #define uverbs_get_const_signed(_to, _attrs_bundle, _idx) \ ({ \ s64 _val; \ int _ret = \ _uverbs_get_const_signed(&_val, _attrs_bundle, _idx, \ type_min(typeof(*(_to))), \ type_max(typeof(*(_to))), NULL); \ (*(_to)) = _val; \ _ret; \ }) #define uverbs_get_const_unsigned(_to, _attrs_bundle, _idx) \ ({ \ u64 _val; \ int _ret = \ _uverbs_get_const_unsigned(&_val, _attrs_bundle, _idx, \ type_max(typeof(*(_to))), NULL); \ (*(_to)) = _val; \ _ret; \ }) #define uverbs_get_const_default_signed(_to, _attrs_bundle, _idx, _default) \ ({ \ s64 _val; \ s64 _def_val = _default; \ int _ret = \ _uverbs_get_const_signed(&_val, _attrs_bundle, _idx, \ type_min(typeof(*(_to))), \ type_max(typeof(*(_to))), &_def_val); \ (*(_to)) = _val; \ _ret; \ }) #define uverbs_get_const_default_unsigned(_to, _attrs_bundle, _idx, _default) \ ({ \ u64 _val; \ u64 _def_val = _default; \ int _ret = \ _uverbs_get_const_unsigned(&_val, _attrs_bundle, _idx, \ type_max(typeof(*(_to))), &_def_val); \ (*(_to)) = _val; \ _ret; \ }) #define uverbs_get_const(_to, _attrs_bundle, _idx) \ (is_signed_type(typeof(*(_to))) ? \ uverbs_get_const_signed(_to, _attrs_bundle, _idx) : \ uverbs_get_const_unsigned(_to, _attrs_bundle, _idx)) \ #define uverbs_get_const_default(_to, _attrs_bundle, _idx, _default) \ (is_signed_type(typeof(*(_to))) ? \ uverbs_get_const_default_signed(_to, _attrs_bundle, _idx, \ _default) : \ uverbs_get_const_default_unsigned(_to, _attrs_bundle, _idx, \ _default)) static inline int uverbs_get_raw_fd(int *to, const struct uverbs_attr_bundle *attrs_bundle, size_t idx) { return uverbs_get_const_signed(to, attrs_bundle, idx); } #endif
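/*
 * A hedged user-space sketch of the radix key packing described above, for
 * core (non-driver) IDs only: the attribute id lives in bits [5:0], the
 * method id in bits [10:6] and the object id in bits [31:11]. It simply
 * mirrors the arithmetic of uapi_key_obj(), uapi_key_ioctl_method() and
 * uapi_key_attr(); the driver-namespace branch and the range checks are
 * omitted for brevity.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_ATTR_KEY_BITS      6
#define DEMO_METHOD_KEY_SHIFT   DEMO_ATTR_KEY_BITS              /* 6  */
#define DEMO_OBJ_KEY_SHIFT      (5 + DEMO_METHOD_KEY_SHIFT)     /* 11 */

static uint32_t demo_key(uint32_t obj_id, uint32_t method_id, uint32_t attr_id)
{
        uint32_t obj  = obj_id << DEMO_OBJ_KEY_SHIFT;
        uint32_t meth = (method_id + 1) << DEMO_METHOD_KEY_SHIFT; /* method field 0 is the object slot */
        uint32_t attr = (attr_id << 1) | 1;                       /* core attrs land on the odd values  */

        return obj | meth | attr;
}

int main(void)
{
        /* e.g. object 3, method 2, attribute 0 packs into one 32-bit key */
        printf("key = %#010x\n", demo_key(3, 2, 0));
        return 0;
}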
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
 *
 * NOHZ implementation for low and high resolution timers
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 */
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/nmi.h>
#include <linux/profile.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/sched/stat.h>
#include <linux/sched/nohz.h>
#include <linux/sched/loadavg.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/context_tracking.h>
#include <linux/mm.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

#include <trace/events/timer.h>

/*
 * Per-CPU nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}

/*
 * The time when the last jiffy update happened. Write access must hold
 * jiffies_lock and jiffies_seq. tick_nohz_next_event() needs to get a
 * consistent view of jiffies and last_jiffies_update.
 */
static ktime_t last_jiffies_update;

/*
 * Must be called with interrupts disabled !
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 1;
	ktime_t delta, nextp;

	/*
	 * 64-bit can do a quick check without holding the jiffies lock and
	 * without looking at the sequence count. The smp_load_acquire()
	 * pairs with the update done later in this function.
	 *
	 * 32-bit cannot do that because the store of 'tick_next_period'
	 * consists of two 32-bit stores, and the first store could be
	 * moved by the CPU to a random point in the future.
	 */
	if (IS_ENABLED(CONFIG_64BIT)) {
		if (ktime_before(now, smp_load_acquire(&tick_next_period)))
			return;
	} else {
		unsigned int seq;

		/*
		 * Avoid contention on 'jiffies_lock' and protect the quick
		 * check with the sequence count.
		 */
		do {
			seq = read_seqcount_begin(&jiffies_seq);
			nextp = tick_next_period;
		} while (read_seqcount_retry(&jiffies_seq, seq));

		if (ktime_before(now, nextp))
			return;
	}

	/* Quick check failed, i.e. update is required. */
	raw_spin_lock(&jiffies_lock);
	/*
	 * Re-evaluate with the lock held. Another CPU might have done the
	 * update already.
	 */
	if (ktime_before(now, tick_next_period)) {
		raw_spin_unlock(&jiffies_lock);
		return;
	}

	write_seqcount_begin(&jiffies_seq);

	delta = ktime_sub(now, tick_next_period);
	if (unlikely(delta >= TICK_NSEC)) {
		/* Slow path for long idle sleep times */
		s64 incr = TICK_NSEC;

		ticks += ktime_divns(delta, incr);

		last_jiffies_update = ktime_add_ns(last_jiffies_update,
						   incr * ticks);
	} else {
		last_jiffies_update = ktime_add_ns(last_jiffies_update,
						   TICK_NSEC);
	}

	/* Advance jiffies to complete the 'jiffies_seq' protected job */
	jiffies_64 += ticks;

	/* Keep the tick_next_period variable up to date */
	nextp = ktime_add_ns(last_jiffies_update, TICK_NSEC);

	if (IS_ENABLED(CONFIG_64BIT)) {
		/*
		 * Pairs with smp_load_acquire() in the lockless quick
		 * check above, and ensures that the update to 'jiffies_64' is
		 * not reordered vs. the store to 'tick_next_period', neither
		 * by the compiler nor by the CPU.
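		 *
		 * [Illustrative note, not part of tick-sched.c: a minimal sketch
		 *  of the acquire/release pairing the lockless quick check relies
		 *  on. The names 'shared_data' and 'published' are generic
		 *  placeholders, not kernel symbols.
		 *
		 *	// updater, under jiffies_lock:
		 *	shared_data = new_value;		// e.g. jiffies_64 += ticks
		 *	smp_store_release(&published, next);	// publish only afterwards
		 *
		 *	// lockless reader:
		 *	if (ktime_before(now, smp_load_acquire(&published)))
		 *		return;		// seeing the new 'published' value
		 *				// guarantees 'shared_data' is visible
		 *
		 *  A reader that still observes the old 'published' value falls
		 *  through, takes jiffies_lock and re-checks under the lock.]
		 *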
*/ smp_store_release(&tick_next_period, nextp); } else { /* * A plain store is good enough on 32-bit, as the quick check * above is protected by the sequence count. */ tick_next_period = nextp; } /* * Release the sequence count. calc_global_load() below is not * protected by it, but 'jiffies_lock' needs to be held to prevent * concurrent invocations. */ write_seqcount_end(&jiffies_seq); calc_global_load(); raw_spin_unlock(&jiffies_lock); update_wall_time(); } /* * Initialize and return retrieve the jiffies update. */ static ktime_t tick_init_jiffy_update(void) { ktime_t period; raw_spin_lock(&jiffies_lock); write_seqcount_begin(&jiffies_seq); /* Have we started the jiffies update yet ? */ if (last_jiffies_update == 0) { u32 rem; /* * Ensure that the tick is aligned to a multiple of * TICK_NSEC. */ div_u64_rem(tick_next_period, TICK_NSEC, &rem); if (rem) tick_next_period += TICK_NSEC - rem; last_jiffies_update = tick_next_period; } period = last_jiffies_update; write_seqcount_end(&jiffies_seq); raw_spin_unlock(&jiffies_lock); return period; } static inline int tick_sched_flag_test(struct tick_sched *ts, unsigned long flag) { return !!(ts->flags & flag); } static inline void tick_sched_flag_set(struct tick_sched *ts, unsigned long flag) { lockdep_assert_irqs_disabled(); ts->flags |= flag; } static inline void tick_sched_flag_clear(struct tick_sched *ts, unsigned long flag) { lockdep_assert_irqs_disabled(); ts->flags &= ~flag; } #define MAX_STALLED_JIFFIES 5 static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now) { int tick_cpu, cpu = smp_processor_id(); /* * Check if the do_timer duty was dropped. We don't care about * concurrency: This happens only when the CPU in charge went * into a long sleep. If two CPUs happen to assign themselves to * this duty, then the jiffies update is still serialized by * 'jiffies_lock'. * * If nohz_full is enabled, this should not happen because the * 'tick_do_timer_cpu' CPU never relinquishes. */ tick_cpu = READ_ONCE(tick_do_timer_cpu); if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && unlikely(tick_cpu == TICK_DO_TIMER_NONE)) { #ifdef CONFIG_NO_HZ_FULL WARN_ON_ONCE(tick_nohz_full_running); #endif WRITE_ONCE(tick_do_timer_cpu, cpu); tick_cpu = cpu; } /* Check if jiffies need an update */ if (tick_cpu == cpu) tick_do_update_jiffies64(now); /* * If the jiffies update stalled for too long (timekeeper in stop_machine() * or VMEXIT'ed for several msecs), force an update. */ if (ts->last_tick_jiffies != jiffies) { ts->stalled_jiffies = 0; ts->last_tick_jiffies = READ_ONCE(jiffies); } else { if (++ts->stalled_jiffies == MAX_STALLED_JIFFIES) { tick_do_update_jiffies64(now); ts->stalled_jiffies = 0; ts->last_tick_jiffies = READ_ONCE(jiffies); } } if (tick_sched_flag_test(ts, TS_FLAG_INIDLE)) ts->got_idle_tick = 1; } static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs) { /* * When we are idle and the tick is stopped, we have to touch * the watchdog as we might not schedule for a really long * time. This happens on completely idle SMP systems while * waiting on the login prompt. We also increment the "start of * idle" jiffy stamp so the idle accounting adjustment we do * when we go busy again does not account too many ticks. */ if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { touch_softlockup_watchdog_sched(); if (is_idle_task(current)) ts->idle_jiffies++; /* * In case the current tick fired too early past its expected * expiration, make sure we don't bypass the next clock reprogramming * to the same deadline. 
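 *
 * [Illustrative note, not part of tick-sched.c: in tick_sched_do_timer()
 *  above, a CPU only forces the jiffies update itself after observing
 *  MAX_STALLED_JIFFIES (5) of its own ticks with no jiffies progress.
 *  Assuming HZ=250, i.e. TICK_NSEC = 4,000,000 ns, that tolerates roughly
 *  5 * 4 ms = 20 ms of a stalled timekeeper (stop_machine(), or a vCPU
 *  preempted by its host) before another ticking CPU steps in.]
 *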
*/ ts->next_tick = 0; } update_process_times(user_mode(regs)); profile_tick(CPU_PROFILING); } /* * We rearm the timer until we get disabled by the idle code. * Called with interrupts disabled. */ static enum hrtimer_restart tick_nohz_handler(struct hrtimer *timer) { struct tick_sched *ts = container_of(timer, struct tick_sched, sched_timer); struct pt_regs *regs = get_irq_regs(); ktime_t now = ktime_get(); tick_sched_do_timer(ts, now); /* * Do not call when we are not in IRQ context and have * no valid 'regs' pointer */ if (regs) tick_sched_handle(ts, regs); else ts->next_tick = 0; /* * In dynticks mode, tick reprogram is deferred: * - to the idle task if in dynticks-idle * - to IRQ exit if in full-dynticks. */ if (unlikely(tick_sched_flag_test(ts, TS_FLAG_STOPPED))) return HRTIMER_NORESTART; hrtimer_forward(timer, now, TICK_NSEC); return HRTIMER_RESTART; } #ifdef CONFIG_NO_HZ_FULL cpumask_var_t tick_nohz_full_mask; EXPORT_SYMBOL_GPL(tick_nohz_full_mask); bool tick_nohz_full_running; EXPORT_SYMBOL_GPL(tick_nohz_full_running); static atomic_t tick_dep_mask; static bool check_tick_dependency(atomic_t *dep) { int val = atomic_read(dep); if (val & TICK_DEP_MASK_POSIX_TIMER) { trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER); return true; } if (val & TICK_DEP_MASK_PERF_EVENTS) { trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS); return true; } if (val & TICK_DEP_MASK_SCHED) { trace_tick_stop(0, TICK_DEP_MASK_SCHED); return true; } if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) { trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE); return true; } if (val & TICK_DEP_MASK_RCU) { trace_tick_stop(0, TICK_DEP_MASK_RCU); return true; } if (val & TICK_DEP_MASK_RCU_EXP) { trace_tick_stop(0, TICK_DEP_MASK_RCU_EXP); return true; } return false; } static bool can_stop_full_tick(int cpu, struct tick_sched *ts) { lockdep_assert_irqs_disabled(); if (unlikely(!cpu_online(cpu))) return false; if (check_tick_dependency(&tick_dep_mask)) return false; if (check_tick_dependency(&ts->tick_dep_mask)) return false; if (check_tick_dependency(&current->tick_dep_mask)) return false; if (check_tick_dependency(&current->signal->tick_dep_mask)) return false; return true; } static void nohz_full_kick_func(struct irq_work *work) { /* Empty, the tick restart happens on tick_nohz_irq_exit() */ } static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = IRQ_WORK_INIT_HARD(nohz_full_kick_func); /* * Kick this CPU if it's full dynticks in order to force it to * re-evaluate its dependency on the tick and restart it if necessary. * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(), * is NMI safe. */ static void tick_nohz_full_kick(void) { if (!tick_nohz_full_cpu(smp_processor_id())) return; irq_work_queue(this_cpu_ptr(&nohz_full_kick_work)); } /* * Kick the CPU if it's full dynticks in order to force it to * re-evaluate its dependency on the tick and restart it if necessary. */ void tick_nohz_full_kick_cpu(int cpu) { if (!tick_nohz_full_cpu(cpu)) return; irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu); } static void tick_nohz_kick_task(struct task_struct *tsk) { int cpu; /* * If the task is not running, run_posix_cpu_timers() * has nothing to elapse, and an IPI can then be optimized out. 
* * activate_task() STORE p->tick_dep_mask * STORE p->on_rq * __schedule() (switch to task 'p') smp_mb() (atomic_fetch_or()) * LOCK rq->lock LOAD p->on_rq * smp_mb__after_spin_lock() * tick_nohz_task_switch() * LOAD p->tick_dep_mask * * XXX given a task picks up the dependency on schedule(), should we * only care about tasks that are currently on the CPU instead of all * that are on the runqueue? * * That is, does this want to be: task_on_cpu() / task_curr()? */ if (!sched_task_on_rq(tsk)) return; /* * If the task concurrently migrates to another CPU, * we guarantee it sees the new tick dependency upon * schedule. * * set_task_cpu(p, cpu); * STORE p->cpu = @cpu * __schedule() (switch to task 'p') * LOCK rq->lock * smp_mb__after_spin_lock() STORE p->tick_dep_mask * tick_nohz_task_switch() smp_mb() (atomic_fetch_or()) * LOAD p->tick_dep_mask LOAD p->cpu */ cpu = task_cpu(tsk); preempt_disable(); if (cpu_online(cpu)) tick_nohz_full_kick_cpu(cpu); preempt_enable(); } /* * Kick all full dynticks CPUs in order to force these to re-evaluate * their dependency on the tick and restart it if necessary. */ static void tick_nohz_full_kick_all(void) { int cpu; if (!tick_nohz_full_running) return; preempt_disable(); for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask) tick_nohz_full_kick_cpu(cpu); preempt_enable(); } static void tick_nohz_dep_set_all(atomic_t *dep, enum tick_dep_bits bit) { int prev; prev = atomic_fetch_or(BIT(bit), dep); if (!prev) tick_nohz_full_kick_all(); } /* * Set a global tick dependency. Used by perf events that rely on freq and * unstable clocks. */ void tick_nohz_dep_set(enum tick_dep_bits bit) { tick_nohz_dep_set_all(&tick_dep_mask, bit); } void tick_nohz_dep_clear(enum tick_dep_bits bit) { atomic_andnot(BIT(bit), &tick_dep_mask); } /* * Set per-CPU tick dependency. Used by scheduler and perf events in order to * manage event-throttling. */ void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { int prev; struct tick_sched *ts; ts = per_cpu_ptr(&tick_cpu_sched, cpu); prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask); if (!prev) { preempt_disable(); /* Perf needs local kick that is NMI safe */ if (cpu == smp_processor_id()) { tick_nohz_full_kick(); } else { /* Remote IRQ work not NMI-safe */ if (!WARN_ON_ONCE(in_nmi())) tick_nohz_full_kick_cpu(cpu); } preempt_enable(); } } EXPORT_SYMBOL_GPL(tick_nohz_dep_set_cpu); void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu); atomic_andnot(BIT(bit), &ts->tick_dep_mask); } EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu); /* * Set a per-task tick dependency. RCU needs this. Also posix CPU timers * in order to elapse per task timers. */ void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit) { if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask)) tick_nohz_kick_task(tsk); } EXPORT_SYMBOL_GPL(tick_nohz_dep_set_task); void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit) { atomic_andnot(BIT(bit), &tsk->tick_dep_mask); } EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_task); /* * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to elapse * per process timers. 
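 *
 * [Illustrative note, not part of tick-sched.c: a sketch of how a subsystem
 *  uses the dependency API above to keep the tick alive on a nohz_full CPU
 *  while periodic processing is required. TICK_DEP_BIT_PERF_EVENTS is used
 *  as an example bit; the calling code around it is hypothetical.
 *
 *	// periodic work (e.g. event unthrottling) is pending on @cpu:
 *	tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
 *	...
 *	// no periodic work left:
 *	tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
 *
 *  Setting a bit that was previously clear kicks the target CPU so it
 *  re-evaluates (and if needed restarts) its tick; clearing is a plain
 *  atomic andnot, and the tick may stop again at the next re-evaluation.]
 *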
*/ void tick_nohz_dep_set_signal(struct task_struct *tsk, enum tick_dep_bits bit) { int prev; struct signal_struct *sig = tsk->signal; prev = atomic_fetch_or(BIT(bit), &sig->tick_dep_mask); if (!prev) { struct task_struct *t; lockdep_assert_held(&tsk->sighand->siglock); __for_each_thread(sig, t) tick_nohz_kick_task(t); } } void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit) { atomic_andnot(BIT(bit), &sig->tick_dep_mask); } /* * Re-evaluate the need for the tick as we switch the current task. * It might need the tick due to per task/process properties: * perf events, posix CPU timers, ... */ void __tick_nohz_task_switch(void) { struct tick_sched *ts; if (!tick_nohz_full_cpu(smp_processor_id())) return; ts = this_cpu_ptr(&tick_cpu_sched); if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { if (atomic_read(&current->tick_dep_mask) || atomic_read(&current->signal->tick_dep_mask)) tick_nohz_full_kick(); } } /* Get the boot-time nohz CPU list from the kernel parameters. */ void __init tick_nohz_full_setup(cpumask_var_t cpumask) { alloc_bootmem_cpumask_var(&tick_nohz_full_mask); cpumask_copy(tick_nohz_full_mask, cpumask); tick_nohz_full_running = true; } bool tick_nohz_cpu_hotpluggable(unsigned int cpu) { /* * The 'tick_do_timer_cpu' CPU handles housekeeping duty (unbound * timers, workqueues, timekeeping, ...) on behalf of full dynticks * CPUs. It must remain online when nohz full is enabled. */ if (tick_nohz_full_running && READ_ONCE(tick_do_timer_cpu) == cpu) return false; return true; } static int tick_nohz_cpu_down(unsigned int cpu) { return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY; } void __init tick_nohz_init(void) { int cpu, ret; if (!tick_nohz_full_running) return; /* * Full dynticks uses IRQ work to drive the tick rescheduling on safe * locking contexts. But then we need IRQ work to raise its own * interrupts to avoid circular dependency on the tick. */ if (!arch_irq_work_has_interrupt()) { pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support IRQ work self-IPIs\n"); cpumask_clear(tick_nohz_full_mask); tick_nohz_full_running = false; return; } if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) && !IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) { cpu = smp_processor_id(); if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) { pr_warn("NO_HZ: Clearing %d from nohz_full range " "for timekeeping\n", cpu); cpumask_clear_cpu(cpu, tick_nohz_full_mask); } } for_each_cpu(cpu, tick_nohz_full_mask) ct_cpu_track_user(cpu); ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "kernel/nohz:predown", NULL, tick_nohz_cpu_down); WARN_ON(ret < 0); pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n", cpumask_pr_args(tick_nohz_full_mask)); } #endif /* #ifdef CONFIG_NO_HZ_FULL */ /* * NOHZ - aka dynamic tick functionality */ #ifdef CONFIG_NO_HZ_COMMON /* * NO HZ enabled ? 
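 *
 * [Illustrative note, not part of tick-sched.c: the full-dynticks cpumask
 *  handed to tick_nohz_full_setup() above comes from the "nohz_full="
 *  boot parameter, whereas the "nohz=" parameter parsed just below only
 *  enables or disables dyntick-idle. For example, on an 8-CPU machine
 *
 *	nohz_full=1-7
 *
 *  keeps CPUs 1-7 tickless while they run a single task and leaves CPU 0
 *  as the housekeeping/timekeeping CPU, which (as tick_nohz_cpu_hotpluggable()
 *  above enforces for the do_timer CPU) must then remain online.]
 *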
*/ bool tick_nohz_enabled __read_mostly = true; unsigned long tick_nohz_active __read_mostly; /* * Enable / Disable tickless mode */ static int __init setup_tick_nohz(char *str) { return (kstrtobool(str, &tick_nohz_enabled) == 0); } __setup("nohz=", setup_tick_nohz); bool tick_nohz_tick_stopped(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); return tick_sched_flag_test(ts, TS_FLAG_STOPPED); } bool tick_nohz_tick_stopped_cpu(int cpu) { struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu); return tick_sched_flag_test(ts, TS_FLAG_STOPPED); } /** * tick_nohz_update_jiffies - update jiffies when idle was interrupted * @now: current ktime_t * * Called from interrupt entry when the CPU was idle * * In case the sched_tick was stopped on this CPU, we have to check if jiffies * must be updated. Otherwise an interrupt handler could use a stale jiffy * value. We do this unconditionally on any CPU, as we don't know whether the * CPU, which has the update task assigned, is in a long sleep. */ static void tick_nohz_update_jiffies(ktime_t now) { unsigned long flags; __this_cpu_write(tick_cpu_sched.idle_waketime, now); local_irq_save(flags); tick_do_update_jiffies64(now); local_irq_restore(flags); touch_softlockup_watchdog_sched(); } static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now) { ktime_t delta; if (WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE))) return; delta = ktime_sub(now, ts->idle_entrytime); write_seqcount_begin(&ts->idle_sleeptime_seq); if (nr_iowait_cpu(smp_processor_id()) > 0) ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta); else ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); ts->idle_entrytime = now; tick_sched_flag_clear(ts, TS_FLAG_IDLE_ACTIVE); write_seqcount_end(&ts->idle_sleeptime_seq); sched_clock_idle_wakeup_event(); } static void tick_nohz_start_idle(struct tick_sched *ts) { write_seqcount_begin(&ts->idle_sleeptime_seq); ts->idle_entrytime = ktime_get(); tick_sched_flag_set(ts, TS_FLAG_IDLE_ACTIVE); write_seqcount_end(&ts->idle_sleeptime_seq); sched_clock_idle_sleep_event(); } static u64 get_cpu_sleep_time_us(struct tick_sched *ts, ktime_t *sleeptime, bool compute_delta, u64 *last_update_time) { ktime_t now, idle; unsigned int seq; if (!tick_nohz_active) return -1; now = ktime_get(); if (last_update_time) *last_update_time = ktime_to_us(now); do { seq = read_seqcount_begin(&ts->idle_sleeptime_seq); if (tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE) && compute_delta) { ktime_t delta = ktime_sub(now, ts->idle_entrytime); idle = ktime_add(*sleeptime, delta); } else { idle = *sleeptime; } } while (read_seqcount_retry(&ts->idle_sleeptime_seq, seq)); return ktime_to_us(idle); } /** * get_cpu_idle_time_us - get the total idle time of a CPU * @cpu: CPU number to query * @last_update_time: variable to store update time in. Do not update * counters if NULL. * * Return the cumulative idle time (since boot) for a given * CPU, in microseconds. Note that this is partially broken due to * the counter of iowait tasks that can be remotely updated without * any synchronization. Therefore it is possible to observe backward * values within two consecutive reads. * * This time is measured via accounting rather than sampling, * and is as accurate as ktime_get() is. 
* * Return: -1 if NOHZ is not enabled, else total idle time of the @cpu */ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time) { struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); return get_cpu_sleep_time_us(ts, &ts->idle_sleeptime, !nr_iowait_cpu(cpu), last_update_time); } EXPORT_SYMBOL_GPL(get_cpu_idle_time_us); /** * get_cpu_iowait_time_us - get the total iowait time of a CPU * @cpu: CPU number to query * @last_update_time: variable to store update time in. Do not update * counters if NULL. * * Return the cumulative iowait time (since boot) for a given * CPU, in microseconds. Note this is partially broken due to * the counter of iowait tasks that can be remotely updated without * any synchronization. Therefore it is possible to observe backward * values within two consecutive reads. * * This time is measured via accounting rather than sampling, * and is as accurate as ktime_get() is. * * Return: -1 if NOHZ is not enabled, else total iowait time of @cpu */ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time) { struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); return get_cpu_sleep_time_us(ts, &ts->iowait_sleeptime, nr_iowait_cpu(cpu), last_update_time); } EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us); static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) { hrtimer_cancel(&ts->sched_timer); hrtimer_set_expires(&ts->sched_timer, ts->last_tick); /* Forward the time to expire in the future */ hrtimer_forward(&ts->sched_timer, now, TICK_NSEC); if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) { hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD); } else { tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); } /* * Reset to make sure the next tick stop doesn't get fooled by past * cached clock deadline. */ ts->next_tick = 0; } static inline bool local_timer_softirq_pending(void) { return local_timers_pending() & BIT(TIMER_SOFTIRQ); } /* * Read jiffies and the time when jiffies were updated last */ u64 get_jiffies_update(unsigned long *basej) { unsigned long basejiff; unsigned int seq; u64 basemono; do { seq = read_seqcount_begin(&jiffies_seq); basemono = last_jiffies_update; basejiff = jiffies; } while (read_seqcount_retry(&jiffies_seq, seq)); *basej = basejiff; return basemono; } /** * tick_nohz_next_event() - return the clock monotonic based next event * @ts: pointer to tick_sched struct * @cpu: CPU number * * Return: * *%0 - When the next event is a maximum of TICK_NSEC in the future * and the tick is not stopped yet * *%next_event - Next event based on clock monotonic */ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu) { u64 basemono, next_tick, delta, expires; unsigned long basejiff; int tick_cpu; basemono = get_jiffies_update(&basejiff); ts->last_jiffies = basejiff; ts->timer_expires_base = basemono; /* * Keep the periodic tick, when RCU, architecture or irq_work * requests it. * Aside of that, check whether the local timer softirq is * pending. If so, its a bad idea to call get_next_timer_interrupt(), * because there is an already expired timer, so it will request * immediate expiry, which rearms the hardware timer with a * minimal delta, which brings us back to this place * immediately. Lather, rinse and repeat... */ if (rcu_needs_cpu() || arch_needs_cpu() || irq_work_needs_cpu() || local_timer_softirq_pending()) { next_tick = basemono + TICK_NSEC; } else { /* * Get the next pending timer. If high resolution * timers are enabled this only takes the timer wheel * timers into account. 
If high resolution timers are * disabled this also looks at the next expiring * hrtimer. */ next_tick = get_next_timer_interrupt(basejiff, basemono); ts->next_timer = next_tick; } /* Make sure next_tick is never before basemono! */ if (WARN_ON_ONCE(basemono > next_tick)) next_tick = basemono; /* * If the tick is due in the next period, keep it ticking or * force prod the timer. */ delta = next_tick - basemono; if (delta <= (u64)TICK_NSEC) { /* * We've not stopped the tick yet, and there's a timer in the * next period, so no point in stopping it either, bail. */ if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { ts->timer_expires = 0; goto out; } } /* * If this CPU is the one which had the do_timer() duty last, we limit * the sleep time to the timekeeping 'max_deferment' value. * Otherwise we can sleep as long as we want. */ delta = timekeeping_max_deferment(); tick_cpu = READ_ONCE(tick_do_timer_cpu); if (tick_cpu != cpu && (tick_cpu != TICK_DO_TIMER_NONE || !tick_sched_flag_test(ts, TS_FLAG_DO_TIMER_LAST))) delta = KTIME_MAX; /* Calculate the next expiry time */ if (delta < (KTIME_MAX - basemono)) expires = basemono + delta; else expires = KTIME_MAX; ts->timer_expires = min_t(u64, expires, next_tick); out: return ts->timer_expires; } static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu) { struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); unsigned long basejiff = ts->last_jiffies; u64 basemono = ts->timer_expires_base; bool timer_idle = tick_sched_flag_test(ts, TS_FLAG_STOPPED); int tick_cpu; u64 expires; /* Make sure we won't be trying to stop it twice in a row. */ ts->timer_expires_base = 0; /* * Now the tick should be stopped definitely - so the timer base needs * to be marked idle as well to not miss a newly queued timer. */ expires = timer_base_try_to_set_idle(basejiff, basemono, &timer_idle); if (expires > ts->timer_expires) { /* * This path could only happen when the first timer was removed * between calculating the possible sleep length and now (when * high resolution mode is not active, timer could also be a * hrtimer). * * We have to stick to the original calculated expiry value to * not stop the tick for too long with a shallow C-state (which * was programmed by cpuidle because of an early next expiration * value). */ expires = ts->timer_expires; } /* If the timer base is not idle, retain the not yet stopped tick. */ if (!timer_idle) return; /* * If this CPU is the one which updates jiffies, then give up * the assignment and let it be taken by the CPU which runs * the tick timer next, which might be this CPU as well. If we * don't drop this here, the jiffies might be stale and * do_timer() never gets invoked. Keep track of the fact that it * was the one which had the do_timer() duty last. 
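 *
 * [Illustrative note, not part of tick-sched.c: a worked example of the
 *  decision made in tick_nohz_next_event() above, assuming HZ=1000 so that
 *  TICK_NSEC = 1,000,000 ns. If the next timer is due 800,000 ns after
 *  'basemono', then delta <= TICK_NSEC and a still-running tick is simply
 *  kept (ts->timer_expires = 0). If it is due 50 ms out, the tick can be
 *  stopped: a CPU that neither holds nor last held the do_timer() duty may
 *  sleep the full 50 ms, while the timekeeping CPU additionally caps its
 *  sleep at timekeeping_max_deferment().]
 *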
*/ tick_cpu = READ_ONCE(tick_do_timer_cpu); if (tick_cpu == cpu) { WRITE_ONCE(tick_do_timer_cpu, TICK_DO_TIMER_NONE); tick_sched_flag_set(ts, TS_FLAG_DO_TIMER_LAST); } else if (tick_cpu != TICK_DO_TIMER_NONE) { tick_sched_flag_clear(ts, TS_FLAG_DO_TIMER_LAST); } /* Skip reprogram of event if it's not changed */ if (tick_sched_flag_test(ts, TS_FLAG_STOPPED) && (expires == ts->next_tick)) { /* Sanity check: make sure clockevent is actually programmed */ if (expires == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer)) return; WARN_ONCE(1, "basemono: %llu ts->next_tick: %llu dev->next_event: %llu " "timer->active: %d timer->expires: %llu\n", basemono, ts->next_tick, dev->next_event, hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer)); } /* * tick_nohz_stop_tick() can be called several times before * tick_nohz_restart_sched_tick() is called. This happens when * interrupts arrive which do not cause a reschedule. In the first * call we save the current tick time, so we can restart the * scheduler tick in tick_nohz_restart_sched_tick(). */ if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { calc_load_nohz_start(); quiet_vmstat(); ts->last_tick = hrtimer_get_expires(&ts->sched_timer); tick_sched_flag_set(ts, TS_FLAG_STOPPED); trace_tick_stop(1, TICK_DEP_MASK_NONE); } ts->next_tick = expires; /* * If the expiration time == KTIME_MAX, then we simply stop * the tick timer. */ if (unlikely(expires == KTIME_MAX)) { if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) hrtimer_cancel(&ts->sched_timer); else tick_program_event(KTIME_MAX, 1); return; } if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) { hrtimer_start(&ts->sched_timer, expires, HRTIMER_MODE_ABS_PINNED_HARD); } else { hrtimer_set_expires(&ts->sched_timer, expires); tick_program_event(expires, 1); } } static void tick_nohz_retain_tick(struct tick_sched *ts) { ts->timer_expires_base = 0; } #ifdef CONFIG_NO_HZ_FULL static void tick_nohz_full_stop_tick(struct tick_sched *ts, int cpu) { if (tick_nohz_next_event(ts, cpu)) tick_nohz_stop_tick(ts, cpu); else tick_nohz_retain_tick(ts); } #endif /* CONFIG_NO_HZ_FULL */ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now) { /* Update jiffies first */ tick_do_update_jiffies64(now); /* * Clear the timer idle flag, so we avoid IPIs on remote queueing and * the clock forward checks in the enqueue path: */ timer_clear_idle(); calc_load_nohz_stop(); touch_softlockup_watchdog_sched(); /* Cancel the scheduled timer and restore the tick: */ tick_sched_flag_clear(ts, TS_FLAG_STOPPED); tick_nohz_restart(ts, now); } static void __tick_nohz_full_update_tick(struct tick_sched *ts, ktime_t now) { #ifdef CONFIG_NO_HZ_FULL int cpu = smp_processor_id(); if (can_stop_full_tick(cpu, ts)) tick_nohz_full_stop_tick(ts, cpu); else if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) tick_nohz_restart_sched_tick(ts, now); #endif } static void tick_nohz_full_update_tick(struct tick_sched *ts) { if (!tick_nohz_full_cpu(smp_processor_id())) return; if (!tick_sched_flag_test(ts, TS_FLAG_NOHZ)) return; __tick_nohz_full_update_tick(ts, ktime_get()); } /* * A pending softirq outside an IRQ (or softirq disabled section) context * should be waiting for ksoftirqd to handle it. Therefore we shouldn't * reach this code due to the need_resched() early check in can_stop_idle_tick(). 
* * However if we are between CPUHP_AP_SMPBOOT_THREADS and CPU_TEARDOWN_CPU on the * cpu_down() process, softirqs can still be raised while ksoftirqd is parked, * triggering the code below, since wakep_softirqd() is ignored. * */ static bool report_idle_softirq(void) { static int ratelimit; unsigned int pending = local_softirq_pending(); if (likely(!pending)) return false; /* Some softirqs claim to be safe against hotplug and ksoftirqd parking */ if (!cpu_active(smp_processor_id())) { pending &= ~SOFTIRQ_HOTPLUG_SAFE_MASK; if (!pending) return false; } if (ratelimit >= 10) return false; /* On RT, softirq handling may be waiting on some lock */ if (local_bh_blocked()) return false; pr_warn("NOHZ tick-stop error: local softirq work is pending, handler #%02x!!!\n", pending); ratelimit++; return true; } static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) { WARN_ON_ONCE(cpu_is_offline(cpu)); if (unlikely(!tick_sched_flag_test(ts, TS_FLAG_NOHZ))) return false; if (need_resched()) return false; if (unlikely(report_idle_softirq())) return false; if (tick_nohz_full_enabled()) { int tick_cpu = READ_ONCE(tick_do_timer_cpu); /* * Keep the tick alive to guarantee timekeeping progression * if there are full dynticks CPUs around */ if (tick_cpu == cpu) return false; /* Should not happen for nohz-full */ if (WARN_ON_ONCE(tick_cpu == TICK_DO_TIMER_NONE)) return false; } return true; } /** * tick_nohz_idle_stop_tick - stop the idle tick from the idle task * * When the next event is more than a tick into the future, stop the idle tick */ void tick_nohz_idle_stop_tick(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); int cpu = smp_processor_id(); ktime_t expires; /* * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the * tick timer expiration time is known already. */ if (ts->timer_expires_base) expires = ts->timer_expires; else if (can_stop_idle_tick(cpu, ts)) expires = tick_nohz_next_event(ts, cpu); else return; ts->idle_calls++; if (expires > 0LL) { int was_stopped = tick_sched_flag_test(ts, TS_FLAG_STOPPED); tick_nohz_stop_tick(ts, cpu); ts->idle_sleeps++; ts->idle_expires = expires; if (!was_stopped && tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { ts->idle_jiffies = ts->last_jiffies; nohz_balance_enter_idle(cpu); } } else { tick_nohz_retain_tick(ts); } } void tick_nohz_idle_retain_tick(void) { tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched)); } /** * tick_nohz_idle_enter - prepare for entering idle on the current CPU * * Called when we start the idle loop. */ void tick_nohz_idle_enter(void) { struct tick_sched *ts; lockdep_assert_irqs_enabled(); local_irq_disable(); ts = this_cpu_ptr(&tick_cpu_sched); WARN_ON_ONCE(ts->timer_expires_base); tick_sched_flag_set(ts, TS_FLAG_INIDLE); tick_nohz_start_idle(ts); local_irq_enable(); } /** * tick_nohz_irq_exit - Notify the tick about IRQ exit * * A timer may have been added/modified/deleted either by the current IRQ, * or by another place using this IRQ as a notification. This IRQ may have * also updated the RCU callback list. These events may require a * re-evaluation of the next tick. Depending on the context: * * 1) If the CPU is idle and no resched is pending, just proceed with idle * time accounting. The next tick will be re-evaluated on the next idle * loop iteration. * * 2) If the CPU is nohz_full: * * 2.1) If there is any tick dependency, restart the tick if stopped. * * 2.2) If there is no tick dependency, (re-)evaluate the next tick and * stop/update it accordingly. 
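 *
 * [Illustrative note, not part of tick-sched.c: a rough sketch of how the
 *  idle entry points above are driven by the generic idle loop (do_idle()
 *  in kernel/sched/idle.c); the call sites are simplified here.
 *
 *	tick_nohz_idle_enter();			// mark CPU idle, start idle accounting
 *	while (!need_resched()) {
 *		// cpuidle decides whether stopping the tick is worth it
 *		tick_nohz_idle_stop_tick();	// or tick_nohz_idle_retain_tick()
 *		...				// enter a C-state, wake up on an IRQ
 *	}
 *	tick_nohz_idle_exit();			// restart the tick, stop idle accounting
 *
 *  tick_nohz_irq_exit() below is invoked from the IRQ-exit path while the
 *  CPU is idle, which is how wakeup interrupts feed back into this loop.]
 *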
*/ void tick_nohz_irq_exit(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); if (tick_sched_flag_test(ts, TS_FLAG_INIDLE)) tick_nohz_start_idle(ts); else tick_nohz_full_update_tick(ts); } /** * tick_nohz_idle_got_tick - Check whether or not the tick handler has run * * Return: %true if the tick handler has run, otherwise %false */ bool tick_nohz_idle_got_tick(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); if (ts->got_idle_tick) { ts->got_idle_tick = 0; return true; } return false; } /** * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer * or the tick, whichever expires first. Note that, if the tick has been * stopped, it returns the next hrtimer. * * Called from power state control code with interrupts disabled * * Return: the next expiration time */ ktime_t tick_nohz_get_next_hrtimer(void) { return __this_cpu_read(tick_cpu_device.evtdev)->next_event; } /** * tick_nohz_get_sleep_leng